Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-11 19:29:02 +08:00)

commit b528b747c1
    merge
@@ -52,64 +52,64 @@ else()
endif()

if(CMAKE_COMPILER_IS_GNUCXX)
  if(CMAKE_SYSTEM_NAME MATCHES Linux)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wnon-virtual-dtor -Wno-long-long -ansi -Wundef -Wcast-align -Wchar-subscripts -Wall -W -Wpointer-arith -Wwrite-strings -Wformat-security -fexceptions -fno-check-new -fno-common -fstrict-aliasing")
    set(CMAKE_CXX_FLAGS_DEBUG "-g3")
    set(CMAKE_CXX_FLAGS_RELEASE "-g0 -O2")

    check_cxx_compiler_flag("-Wno-variadic-macros" COMPILER_SUPPORT_WNOVARIADICMACRO)
    if(COMPILER_SUPPORT_WNOVARIADICMACRO)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-variadic-macros")
    endif()

    check_cxx_compiler_flag("-Wextra" COMPILER_SUPPORT_WEXTRA)
    if(COMPILER_SUPPORT_WEXTRA)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wextra")
    endif()

    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pedantic")

    option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
    if(EIGEN_TEST_SSE2)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse2")
      message("Enabling SSE2 in tests/examples")
    endif()

    option(EIGEN_TEST_SSE3 "Enable/Disable SSE3 in tests/examples" OFF)
    if(EIGEN_TEST_SSE3)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse3")
      message("Enabling SSE3 in tests/examples")
    endif()

    option(EIGEN_TEST_SSSE3 "Enable/Disable SSSE3 in tests/examples" OFF)
    if(EIGEN_TEST_SSSE3)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mssse3")
      message("Enabling SSSE3 in tests/examples")
    endif()

    option(EIGEN_TEST_SSE4_1 "Enable/Disable SSE4.1 in tests/examples" OFF)
    if(EIGEN_TEST_SSE4_1)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.1")
      message("Enabling SSE4.1 in tests/examples")
    endif()

    option(EIGEN_TEST_SSE4_2 "Enable/Disable SSE4.2 in tests/examples" OFF)
    if(EIGEN_TEST_SSE4_2)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -msse4.2")
      message("Enabling SSE4.2 in tests/examples")
    endif()

    option(EIGEN_TEST_ALTIVEC "Enable/Disable altivec in tests/examples" OFF)
    if(EIGEN_TEST_ALTIVEC)
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -maltivec -mabi=altivec")
      message("Enabling AltiVec in tests/examples")
    endif()

  endif(CMAKE_SYSTEM_NAME MATCHES Linux)
endif(CMAKE_COMPILER_IS_GNUCXX)

if(MSVC)
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc")
+  # C4127 - conditional expression is constant
+  # C4505 - unreferenced local function has been removed
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /EHsc /wd4127 /wd4505")
  string(REGEX REPLACE "/W[0-9]" "/W4" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
  option(EIGEN_TEST_SSE2 "Enable/Disable SSE2 in tests/examples" OFF)
  if(EIGEN_TEST_SSE2)
    if(NOT CMAKE_CL_64)

@@ -128,9 +128,6 @@ endif(EIGEN_TEST_NO_EXPLICIT_VECTORIZATION)

option(EIGEN_TEST_C++0x "Enables all C++0x features." OFF)

option(EIGEN_TEST_MAX_WARNING_LEVEL "Sets the warning level to /Wall while building the unit tests." OFF)
mark_as_advanced(EIGEN_TEST_MAX_WARNING_LEVEL)

include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})

set(INCLUDE_INSTALL_DIR
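Note: the SIMD options above only adjust the flags used when building Eigen's tests and examples. To confirm at compile time which instruction set Eigen's vectorization layer actually picked up, user code can inspect the EIGEN_VECTORIZE* macros; a minimal sketch (the EIGEN_VECTORIZE_SSE2 / EIGEN_VECTORIZE_ALTIVEC macro names are an assumption, they are not part of this diff):

    #include <cstdio>
    #include <Eigen/Core>

    int main()
    {
      // Report which vectorized code path Eigen was compiled with.
    #if defined(EIGEN_VECTORIZE_SSE2)
      std::printf("Eigen vectorization: SSE2\n");
    #elif defined(EIGEN_VECTORIZE_ALTIVEC)
      std::printf("Eigen vectorization: AltiVec\n");
    #else
      std::printf("Eigen vectorization: disabled (scalar paths)\n");
    #endif
      return 0;
    }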
@@ -164,7 +164,7 @@ struct Dense {};
#include "src/Core/Functors.h"
#include "src/Core/DenseBase.h"
#include "src/Core/MatrixBase.h"
-#include "src/Core/AnyMatrixBase.h"
+#include "src/Core/EigenBase.h"
#include "src/Core/Coeffs.h"

#ifndef EIGEN_PARSED_BY_DOXYGEN // work around Doxygen bug triggered by Assign.h r814874

@@ -41,7 +41,7 @@ class Array
    EIGEN_DENSE_PUBLIC_INTERFACE(Array)

    enum { Options = _Options };
-    typedef typename Base::PlainMatrixType PlainMatrixType;
+    typedef typename Base::PlainObject PlainObject;

  protected:
    using Base::m_storage;

@@ -61,7 +61,7 @@ class Array
      * the usage of 'using'. This should be done only for operator=.
      */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array& operator=(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE Array& operator=(const EigenBase<OtherDerived> &other)
    {
      return Base::operator=(other);
    }

@@ -196,9 +196,9 @@ class Array
      other.evalTo(*this);
    }

-    /** \sa MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&) */
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Array(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE Array(const EigenBase<OtherDerived> &other)
      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
    {
      Base::_check_template_params();
@@ -97,7 +97,7 @@ template<typename Derived> class ArrayBase
    /** \internal the plain matrix type corresponding to this expression. Note that is not necessarily
      * exactly the return type of eval(): in the case of plain matrices, the return type of eval() is a const
      * reference to a matrix, not a matrix! It is however guaranteed that the return type of eval() is either
-      * PlainMatrixType or const PlainMatrixType&.
+      * PlainObject or const PlainObject&.
      */
    typedef Array<typename ei_traits<Derived>::Scalar,
                  ei_traits<Derived>::RowsAtCompileTime,

@@ -105,7 +105,7 @@ template<typename Derived> class ArrayBase
                  AutoAlign | (ei_traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
                  ei_traits<Derived>::MaxRowsAtCompileTime,
                  ei_traits<Derived>::MaxColsAtCompileTime
-                  > PlainMatrixType;
+                  > PlainObject;

    /** \internal Represents a matrix with all coefficients equal to one another*/

@@ -462,7 +462,7 @@ template<typename ExpressionType, int Direction> class VectorwiseOp

    const Homogeneous<ExpressionType,Direction> homogeneous() const;

-    typedef typename ExpressionType::PlainMatrixType CrossReturnType;
+    typedef typename ExpressionType::PlainObject CrossReturnType;
    template<typename OtherDerived>
    const CrossReturnType cross(const MatrixBase<OtherDerived>& other) const;

@@ -319,10 +319,10 @@ bool LDLT<MatrixType>::solveInPlace(MatrixBase<Derived> &bAndX) const
  * \returns the Cholesky decomposition with full pivoting without square root of \c *this
  */
template<typename Derived>
-inline const LDLT<typename MatrixBase<Derived>::PlainMatrixType>
+inline const LDLT<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::ldlt() const
{
-  return derived();
+  return LDLT<PlainObject>(derived());
}

#endif // EIGEN_LDLT_H

@@ -299,20 +299,20 @@ bool LLT<MatrixType,_UpLo>::solveInPlace(MatrixBase<Derived> &bAndX) const
  * \returns the LLT decomposition of \c *this
  */
template<typename Derived>
-inline const LLT<typename MatrixBase<Derived>::PlainMatrixType>
+inline const LLT<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::llt() const
{
-  return LLT<PlainMatrixType>(derived());
+  return LLT<PlainObject>(derived());
}

/** \cholesky_module
  * \returns the LLT decomposition of \c *this
  */
template<typename MatrixType, unsigned int UpLo>
-inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainMatrixType, UpLo>
+inline const LLT<typename SelfAdjointView<MatrixType, UpLo>::PlainObject, UpLo>
SelfAdjointView<MatrixType, UpLo>::llt() const
{
-  return LLT<PlainMatrixType,UpLo>(m_matrix);
+  return LLT<PlainObject,UpLo>(m_matrix);
}

#endif // EIGEN_LLT_H
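The two Cholesky hunks above change only the template argument of the returned decomposition (PlainMatrixType becomes PlainObject) and make ldlt() construct the LDLT object explicitly; the calling convention is untouched. For orientation, a minimal usage sketch built from the entry points named in these hunks (solveInPlace is the signature shown in the hunk headers; treat the workflow itself as an assumption for this revision, not something the diff adds):

    #include <Eigen/Core>
    #include <Eigen/Cholesky>

    // Solve A*x = b for symmetric positive definite A; b is overwritten with x.
    void solve_spd_in_place(const Eigen::MatrixXd& A, Eigen::VectorXd& b)
    {
      A.ldlt().solveInPlace(b);   // MatrixBase::ldlt() now returns LDLT<PlainObject>
    }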
@@ -57,7 +57,7 @@ struct ei_traits<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
};

template<typename _Scalar, int Rows, int Cols, int Supers, int Subs, int Options>
-class BandMatrix : public AnyMatrixBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
+class BandMatrix : public EigenBase<BandMatrix<_Scalar,Rows,Cols,Supers,Subs,Options> >
{
  public:

@@ -40,7 +40,7 @@ template<typename Derived> class DenseBase
  : public ei_special_scalar_op_base<Derived,typename ei_traits<Derived>::Scalar,
                                     typename NumTraits<typename ei_traits<Derived>::Scalar>::Real>
#else
-  : public AnyMatrixBase<Derived>
+  : public EigenBase<Derived>
#endif // not EIGEN_PARSED_BY_DOXYGEN
{
  public:

@@ -53,8 +53,8 @@ template<typename Derived> class DenseBase
    typedef typename ei_traits<Derived>::Scalar Scalar;
    typedef typename ei_packet_traits<Scalar>::type PacketScalar;

-    using AnyMatrixBase<Derived>::derived;
-    using AnyMatrixBase<Derived>::const_cast_derived;
+    using EigenBase<Derived>::derived;
+    using EigenBase<Derived>::const_cast_derived;
#endif // not EIGEN_PARSED_BY_DOXYGEN

    enum {

@@ -124,6 +124,8 @@ template<typename Derived> class DenseBase
          * constructed from this one. See the \ref flags "list of flags".
          */

+      IsRowMajor = int(Flags) & RowMajorBit, /**< True if this expression is row major. */
+
      CoeffReadCost = ei_traits<Derived>::CoeffReadCost,
        /**< This is a rough measure of how expensive it is to read one coefficient from
          * this expression.

@@ -214,13 +216,13 @@ template<typename Derived> class DenseBase
    Derived& operator=(const DenseBase& other);

    template<typename OtherDerived>
-    Derived& operator=(const AnyMatrixBase<OtherDerived> &other);
+    Derived& operator=(const EigenBase<OtherDerived> &other);

    template<typename OtherDerived>
-    Derived& operator+=(const AnyMatrixBase<OtherDerived> &other);
+    Derived& operator+=(const EigenBase<OtherDerived> &other);

    template<typename OtherDerived>
-    Derived& operator-=(const AnyMatrixBase<OtherDerived> &other);
+    Derived& operator-=(const EigenBase<OtherDerived> &other);

    template<typename OtherDerived>
    Derived& operator=(const ReturnByValue<OtherDerived>& func);

@@ -44,7 +44,7 @@ class DenseStorageBase : public _Base<Derived>
  public:
    enum { Options = _Options };
    typedef _Base<Derived> Base;
-    typedef typename Base::PlainMatrixType PlainMatrixType;
+    typedef typename Base::PlainObject PlainObject;
    typedef typename Base::Scalar Scalar;
    typedef typename Base::PacketScalar PacketScalar;
    using Base::RowsAtCompileTime;

@@ -355,19 +355,19 @@ class DenseStorageBase : public _Base<Derived>
      // EIGEN_INITIALIZE_BY_ZERO_IF_THAT_OPTION_IS_ENABLED
    }

-    /** \copydoc MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&)
+    /** \copydoc MatrixBase::operator=(const EigenBase<OtherDerived>&)
      */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Derived& operator=(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE Derived& operator=(const EigenBase<OtherDerived> &other)
    {
      resize(other.derived().rows(), other.derived().cols());
      Base::operator=(other.derived());
      return this->derived();
    }

-    /** \sa MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&) */
+    /** \sa MatrixBase::operator=(const EigenBase<OtherDerived>&) */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE DenseStorageBase(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE DenseStorageBase(const EigenBase<OtherDerived> &other)
      : m_storage(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
    {
      _check_template_params();
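The operator= taking an EigenBase expression is what lets non-dense objects (diagonal matrices, permutations, and other EigenBase-derived types) be copied into a plain dense object: the destination is first resized to the source's dimensions, then the copy is delegated to other.derived(). A small usage sketch of that path (the asDiagonal() route is an assumption about how such an assignment typically reaches this operator, not something stated in the hunk):

    #include <Eigen/Core>

    Eigen::MatrixXd densify(const Eigen::Vector3d& d)
    {
      Eigen::MatrixXd m;
      m = d.asDiagonal();   // EigenBase assignment: resize(3,3), then the diagonal object writes itself into m
      return m;
    }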
@@ -544,7 +544,7 @@ struct ei_conservative_resize_like_impl
  {
    if (_this.rows() == rows && _this.cols() == cols) return;
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
-    typename Derived::PlainMatrixType tmp(rows,cols);
+    typename Derived::PlainObject tmp(rows,cols);
    const int common_rows = std::min(rows, _this.rows());
    const int common_cols = std::min(cols, _this.cols());
    tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);

@@ -563,7 +563,7 @@ struct ei_conservative_resize_like_impl
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(Derived)
    EIGEN_STATIC_ASSERT_DYNAMIC_SIZE(OtherDerived)

-    typename Derived::PlainMatrixType tmp(other);
+    typename Derived::PlainObject tmp(other);
    const int common_rows = std::min(tmp.rows(), _this.rows());
    const int common_cols = std::min(tmp.cols(), _this.cols());
    tmp.block(0,0,common_rows,common_cols) = _this.block(0,0,common_rows,common_cols);

@@ -577,7 +577,7 @@ struct ei_conservative_resize_like_impl<Derived,OtherDerived,true>
  static void run(DenseBase<Derived>& _this, int size)
  {
    if (_this.size() == size) return;
-    typename Derived::PlainMatrixType tmp(size);
+    typename Derived::PlainObject tmp(size);
    const int common_size = std::min<int>(_this.size(),size);
    tmp.segment(0,common_size) = _this.segment(0,common_size);
    _this.derived().swap(tmp);

@@ -588,7 +588,7 @@ struct ei_conservative_resize_like_impl<Derived,OtherDerived,true>
    if (_this.rows() == other.rows() && _this.cols() == other.cols()) return;

    // segment(...) will check whether Derived/OtherDerived are vectors!
-    typename Derived::PlainMatrixType tmp(other);
+    typename Derived::PlainObject tmp(other);
    const int common_size = std::min<int>(_this.size(),tmp.size());
    tmp.segment(0,common_size) = _this.segment(0,common_size);
    _this.derived().swap(tmp);

@@ -28,7 +28,7 @@

#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename Derived>
-class DiagonalBase : public AnyMatrixBase<Derived>
+class DiagonalBase : public EigenBase<Derived>
{
  public:
    typedef typename ei_traits<Derived>::DiagonalVectorType DiagonalVectorType;

@@ -299,7 +299,7 @@ inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real MatrixBase<
  * \sa norm(), normalize()
  */
template<typename Derived>
-inline const typename MatrixBase<Derived>::PlainMatrixType
+inline const typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::normalized() const
{
  typedef typename ei_nested<Derived>::type Nested;

@@ -23,21 +23,21 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

-#ifndef EIGEN_ANYMATRIXBASE_H
-#define EIGEN_ANYMATRIXBASE_H
+#ifndef EIGEN_EIGENBASE_H
+#define EIGEN_EIGENBASE_H


/** Common base class for all classes T such that MatrixBase has an operator=(T) and a constructor MatrixBase(T).
  *
-  * In other words, an AnyMatrixBase object is an object that can be copied into a MatrixBase.
+  * In other words, an EigenBase object is an object that can be copied into a MatrixBase.
  *
  * Besides MatrixBase-derived classes, this also includes special matrix classes such as diagonal matrices, etc.
  *
  * Notice that this class is trivial, it is only used to disambiguate overloaded functions.
  */
-template<typename Derived> struct AnyMatrixBase
+template<typename Derived> struct EigenBase
{
-  // typedef typename ei_plain_matrix_type<Derived>::type PlainMatrixType;
+  // typedef typename ei_plain_matrix_type<Derived>::type PlainObject;

  /** \returns a reference to the derived object */
  Derived& derived() { return *static_cast<Derived*>(this); }

@@ -45,7 +45,7 @@ template<typename Derived> struct AnyMatrixBase
  const Derived& derived() const { return *static_cast<const Derived*>(this); }

  inline Derived& const_cast_derived() const
-  { return *static_cast<Derived*>(const_cast<AnyMatrixBase*>(this)); }
+  { return *static_cast<Derived*>(const_cast<EigenBase*>(this)); }

  /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
  inline int rows() const { return derived().rows(); }

@@ -61,7 +61,7 @@ template<typename Derived> struct AnyMatrixBase
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
-    typename Dest::PlainMatrixType res(rows(),cols());
+    typename Dest::PlainObject res(rows(),cols());
    evalTo(res);
    dst += res;
  }

@@ -71,7 +71,7 @@ template<typename Derived> struct AnyMatrixBase
  {
    // This is the default implementation,
    // derived class can reimplement it in a more optimized way.
-    typename Dest::PlainMatrixType res(rows(),cols());
+    typename Dest::PlainObject res(rows(),cols());
    evalTo(res);
    dst -= res;
  }
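The struct being renamed here is a plain CRTP base: it only knows how to recover the derived object, and its default addTo()/subTo() fall back to evaluating the expression into a temporary PlainObject. A stripped-down sketch of the same idea outside Eigen (all names are illustrative, not Eigen's):

    #include <cstddef>

    template<typename Derived>
    struct BaseXpr                     // illustrative CRTP base in the spirit of EigenBase
    {
      Derived&       derived()       { return *static_cast<Derived*>(this); }
      const Derived& derived() const { return *static_cast<const Derived*>(this); }

      std::size_t rows() const { return derived().rows(); }
      std::size_t cols() const { return derived().cols(); }

      template<typename Dest>
      void addTo(Dest& dst) const      // default: evaluate into a temporary, then accumulate
      {
        typename Dest::PlainObject tmp(rows(), cols());
        derived().evalTo(tmp);
        dst += tmp;
      }
    };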
@@ -108,7 +108,7 @@ template<typename Derived> struct AnyMatrixBase
  */
template<typename Derived>
template<typename OtherDerived>
-Derived& DenseBase<Derived>::operator=(const AnyMatrixBase<OtherDerived> &other)
+Derived& DenseBase<Derived>::operator=(const EigenBase<OtherDerived> &other)
{
  other.derived().evalTo(derived());
  return derived();

@@ -116,7 +116,7 @@ Derived& DenseBase<Derived>::operator=(const AnyMatrixBase<OtherDerived> &other)

template<typename Derived>
template<typename OtherDerived>
-Derived& DenseBase<Derived>::operator+=(const AnyMatrixBase<OtherDerived> &other)
+Derived& DenseBase<Derived>::operator+=(const EigenBase<OtherDerived> &other)
{
  other.derived().addTo(derived());
  return derived();

@@ -124,7 +124,7 @@ Derived& DenseBase<Derived>::operator+=(const AnyMatrixBase<OtherDerived> &other

template<typename Derived>
template<typename OtherDerived>
-Derived& DenseBase<Derived>::operator-=(const AnyMatrixBase<OtherDerived> &other)
+Derived& DenseBase<Derived>::operator-=(const EigenBase<OtherDerived> &other)
{
  other.derived().subTo(derived());
  return derived();

@@ -137,7 +137,7 @@ Derived& DenseBase<Derived>::operator-=(const AnyMatrixBase<OtherDerived> &other
template<typename Derived>
template<typename OtherDerived>
inline Derived&
-MatrixBase<Derived>::operator*=(const AnyMatrixBase<OtherDerived> &other)
+MatrixBase<Derived>::operator*=(const EigenBase<OtherDerived> &other)
{
  other.derived().applyThisOnTheRight(derived());
  return derived();

@@ -146,7 +146,7 @@ MatrixBase<Derived>::operator*=(const AnyMatrixBase<OtherDerived> &other)
/** replaces \c *this by \c *this * \a other. It is equivalent to MatrixBase::operator*=() */
template<typename Derived>
template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheRight(const AnyMatrixBase<OtherDerived> &other)
+inline void MatrixBase<Derived>::applyOnTheRight(const EigenBase<OtherDerived> &other)
{
  other.derived().applyThisOnTheRight(derived());
}

@@ -154,9 +154,9 @@ inline void MatrixBase<Derived>::applyOnTheRight(const AnyMatrixBase<OtherDerive
/** replaces \c *this by \c *this * \a other. */
template<typename Derived>
template<typename OtherDerived>
-inline void MatrixBase<Derived>::applyOnTheLeft(const AnyMatrixBase<OtherDerived> &other)
+inline void MatrixBase<Derived>::applyOnTheLeft(const EigenBase<OtherDerived> &other)
{
  other.derived().applyThisOnTheLeft(derived());
}

-#endif // EIGEN_ANYMATRIXBASE_H
+#endif // EIGEN_EIGENBASE_H
@@ -109,7 +109,7 @@ template<typename ExpressionType, unsigned int Added, unsigned int Removed> clas
    const ExpressionType& _expression() const { return m_matrix; }

    template<typename OtherDerived>
-    typename ExpressionType::PlainMatrixType solveTriangular(const MatrixBase<OtherDerived>& other) const;
+    typename ExpressionType::PlainObject solveTriangular(const MatrixBase<OtherDerived>& other) const;

    template<typename OtherDerived>
    void solveTriangularInPlace(const MatrixBase<OtherDerived>& other) const;

@@ -139,7 +139,7 @@ class Matrix

    EIGEN_DENSE_PUBLIC_INTERFACE(Matrix)

-    typedef typename Base::PlainMatrixType PlainMatrixType;
+    typedef typename Base::PlainObject PlainObject;

    enum { NeedsToAlign = (!(Options&DontAlign))
                          && SizeAtCompileTime!=Dynamic && ((sizeof(Scalar)*SizeAtCompileTime)%16)==0 };

@@ -181,10 +181,10 @@ class Matrix

    /**
      * \brief Copies the generic expression \a other into *this.
-      * \copydetails DenseBase::operator=(const AnyMatrixBase<OtherDerived> &other)
+      * \copydetails DenseBase::operator=(const EigenBase<OtherDerived> &other)
      */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Matrix& operator=(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE Matrix& operator=(const EigenBase<OtherDerived> &other)
    {
      return Base::operator=(other);
    }

@@ -297,10 +297,10 @@ class Matrix
    }

    /** \brief Copy constructor for generic expressions.
-      * \sa MatrixBase::operator=(const AnyMatrixBase<OtherDerived>&)
+      * \sa MatrixBase::operator=(const EigenBase<OtherDerived>&)
      */
    template<typename OtherDerived>
-    EIGEN_STRONG_INLINE Matrix(const AnyMatrixBase<OtherDerived> &other)
+    EIGEN_STRONG_INLINE Matrix(const EigenBase<OtherDerived> &other)
      : Base(other.derived().rows() * other.derived().cols(), other.derived().rows(), other.derived().cols())
    {
      Base::_check_template_params();

@@ -121,7 +121,7 @@ template<typename Derived> class MatrixBase
      *
      * This is not necessarily exactly the return type of eval(). In the case of plain matrices,
      * the return type of eval() is a const reference to a matrix, not a matrix! It is however guaranteed
-      * that the return type of eval() is either PlainMatrixType or const PlainMatrixType&.
+      * that the return type of eval() is either PlainObject or const PlainObject&.
      */
    typedef Matrix<typename ei_traits<Derived>::Scalar,
                   ei_traits<Derived>::RowsAtCompileTime,

@@ -129,8 +129,7 @@ template<typename Derived> class MatrixBase
                   AutoAlign | (ei_traits<Derived>::Flags&RowMajorBit ? RowMajor : ColMajor),
                   ei_traits<Derived>::MaxRowsAtCompileTime,
                   ei_traits<Derived>::MaxColsAtCompileTime
-                  > PlainMatrixType;
-    // typedef typename ei_plain_matrix_type<Derived>::type PlainMatrixType;
+                  > PlainObject;

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** \internal Represents a matrix with all coefficients equal to one another*/

@@ -193,13 +192,13 @@ template<typename Derived> class MatrixBase
    lazyProduct(const MatrixBase<OtherDerived> &other) const;

    template<typename OtherDerived>
-    Derived& operator*=(const AnyMatrixBase<OtherDerived>& other);
+    Derived& operator*=(const EigenBase<OtherDerived>& other);

    template<typename OtherDerived>
-    void applyOnTheLeft(const AnyMatrixBase<OtherDerived>& other);
+    void applyOnTheLeft(const EigenBase<OtherDerived>& other);

    template<typename OtherDerived>
-    void applyOnTheRight(const AnyMatrixBase<OtherDerived>& other);
+    void applyOnTheRight(const EigenBase<OtherDerived>& other);

    template<typename DiagonalDerived>
    const DiagonalProduct<Derived, DiagonalDerived, OnTheRight>

@@ -212,7 +211,7 @@ template<typename Derived> class MatrixBase
    RealScalar stableNorm() const;
    RealScalar blueNorm() const;
    RealScalar hypotNorm() const;
-    const PlainMatrixType normalized() const;
+    const PlainObject normalized() const;
    void normalize();

    const AdjointReturnType adjoint() const;

@@ -301,9 +300,9 @@ template<typename Derived> class MatrixBase

    /////////// LU module ///////////

-    const FullPivLU<PlainMatrixType> fullPivLu() const;
-    const PartialPivLU<PlainMatrixType> partialPivLu() const;
-    const PartialPivLU<PlainMatrixType> lu() const;
+    const FullPivLU<PlainObject> fullPivLu() const;
+    const PartialPivLU<PlainObject> partialPivLu() const;
+    const PartialPivLU<PlainObject> lu() const;
    const ei_inverse_impl<Derived> inverse() const;
    template<typename ResultType>
    void computeInverseAndDetWithCheck(

@@ -322,29 +321,29 @@ template<typename Derived> class MatrixBase

    /////////// Cholesky module ///////////

-    const LLT<PlainMatrixType> llt() const;
-    const LDLT<PlainMatrixType> ldlt() const;
+    const LLT<PlainObject> llt() const;
+    const LDLT<PlainObject> ldlt() const;

    /////////// QR module ///////////

-    const HouseholderQR<PlainMatrixType> householderQr() const;
-    const ColPivHouseholderQR<PlainMatrixType> colPivHouseholderQr() const;
-    const FullPivHouseholderQR<PlainMatrixType> fullPivHouseholderQr() const;
+    const HouseholderQR<PlainObject> householderQr() const;
+    const ColPivHouseholderQR<PlainObject> colPivHouseholderQr() const;
+    const FullPivHouseholderQR<PlainObject> fullPivHouseholderQr() const;

    EigenvaluesReturnType eigenvalues() const;
    RealScalar operatorNorm() const;

    /////////// SVD module ///////////

-    SVD<PlainMatrixType> svd() const;
+    SVD<PlainObject> svd() const;

    /////////// Geometry module ///////////

    template<typename OtherDerived>
-    PlainMatrixType cross(const MatrixBase<OtherDerived>& other) const;
+    PlainObject cross(const MatrixBase<OtherDerived>& other) const;
    template<typename OtherDerived>
-    PlainMatrixType cross3(const MatrixBase<OtherDerived>& other) const;
-    PlainMatrixType unitOrthogonal(void) const;
+    PlainObject cross3(const MatrixBase<OtherDerived>& other) const;
+    PlainObject unitOrthogonal(void) const;
    Matrix<Scalar,3,1> eulerAngles(int a0, int a1, int a2) const;
    const ScalarMultipleReturnType operator*(const UniformScaling<Scalar>& s) const;
    enum {
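Every factory listed above now names PlainObject, i.e. the concrete Matrix<...> type this expression evaluates to, as the decomposition's matrix type; call sites do not change. A brief usage sketch using the members declared in this hunk (the MatrixXd instantiations are assumptions chosen only to make the sketch concrete):

    #include <Eigen/Dense>

    void factorize(const Eigen::MatrixXd& A)
    {
      Eigen::FullPivLU<Eigen::MatrixXd>     lu = A.fullPivLu();      // PlainObject is MatrixXd here
      Eigen::HouseholderQR<Eigen::MatrixXd> qr = A.householderQr();
      double n = A.normalized().norm();                              // normalized() also returns a PlainObject
      (void)lu; (void)qr; (void)n;
    }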
@@ -55,7 +55,7 @@ struct ei_traits<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime> >
{};

template<int SizeAtCompileTime, int MaxSizeAtCompileTime>
-class PermutationMatrix : public AnyMatrixBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime> >
+class PermutationMatrix : public EigenBase<PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime> >
{
  public:

@@ -144,7 +144,7 @@ class PermutationMatrix : public AnyMatrixBase<PermutationMatrix<SizeAtCompileTi

    /** \returns a Matrix object initialized from this permutation matrix. Notice that it
      * is inefficient to return this Matrix object by value. For efficiency, favor using
-      * the Matrix constructor taking AnyMatrixBase objects.
+      * the Matrix constructor taking EigenBase objects.
      */
    DenseMatrixType toDenseMatrix() const
    {

@@ -280,7 +280,7 @@ operator*(const PermutationMatrix<SizeAtCompileTime, MaxSizeAtCompileTime> &perm
template<typename PermutationType, typename MatrixType, int Side>
struct ei_traits<ei_permut_matrix_product_retval<PermutationType, MatrixType, Side> >
{
-  typedef typename MatrixType::PlainMatrixType ReturnMatrixType;
+  typedef typename MatrixType::PlainObject ReturnType;
};

template<typename PermutationType, typename MatrixType, int Side>

@@ -50,8 +50,8 @@ class GeneralProduct;
template<int Rows, int Cols, int Depth> struct ei_product_type_selector;

enum {
-  Large = Dynamic,
-  Small = Dynamic/2
+  Large = 2,
+  Small = 3
};

template<typename Lhs, typename Rhs> struct ei_product_type
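The Large/Small constants feed ei_product_type_selector, which buckets each operand dimension of a product into a compile-time size category so that an evaluation strategy can be chosen; the change replaces the Dynamic-derived sentinel values with small distinct enum tags. A schematic of the bucketing idea, simplified and not Eigen's actual selector:

    enum ProductSizeCategory { LargeCat = 2, SmallCat = 3 };   // values are tags, not sizes

    // Illustrative only: classify a compile-time dimension against a threshold.
    template<int Dim, int Threshold = 8>
    struct classify_dim
    {
      static const ProductSizeCategory value =
          (Dim == -1 /* Dynamic */ || Dim > Threshold) ? LargeCat : SmallCat;
    };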
@@ -88,7 +88,7 @@ class ProductBase : public MatrixBase<Derived>

  public:

-    typedef typename Base::PlainMatrixType PlainMatrixType;
+    typedef typename Base::PlainObject PlainObject;

    ProductBase(const Lhs& lhs, const Rhs& rhs)
      : m_lhs(lhs), m_rhs(rhs)

@@ -116,8 +116,8 @@ class ProductBase : public MatrixBase<Derived>
    const _LhsNested& lhs() const { return m_lhs; }
    const _RhsNested& rhs() const { return m_rhs; }

-    // Implicit convertion to the nested type (trigger the evaluation of the product)
-    operator const PlainMatrixType& () const
+    // Implicit conversion to the nested type (trigger the evaluation of the product)
+    operator const PlainObject& () const
    {
      m_result.resize(m_lhs.rows(), m_rhs.cols());
      this->evalTo(m_result);

@@ -139,7 +139,7 @@ class ProductBase : public MatrixBase<Derived>
    const LhsNested m_lhs;
    const RhsNested m_rhs;

-    mutable PlainMatrixType m_result;
+    mutable PlainObject m_result;

  private:

@@ -152,10 +152,10 @@ class ProductBase : public MatrixBase<Derived>

// here we need to overload the nested rule for products
// such that the nested type is a const reference to a plain matrix
-template<typename Lhs, typename Rhs, int Mode, int N, typename PlainMatrixType>
-struct ei_nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainMatrixType>
+template<typename Lhs, typename Rhs, int Mode, int N, typename PlainObject>
+struct ei_nested<GeneralProduct<Lhs,Rhs,Mode>, N, PlainObject>
{
-  typedef PlainMatrixType const& type;
+  typedef PlainObject const& type;
};

template<typename NestedProduct>

@@ -31,7 +31,7 @@
  */
template<typename Derived>
struct ei_traits<ReturnByValue<Derived> >
-  : public ei_traits<typename ei_traits<Derived>::ReturnMatrixType>
+  : public ei_traits<typename ei_traits<Derived>::ReturnType>
{
  enum {
    // FIXME had to remove the DirectAccessBit for usage like

@@ -42,7 +42,7 @@ struct ei_traits<ReturnByValue<Derived> >
    // The fact that I had to do that shows that when doing xpr.block() with a non-direct-access xpr,
    // even if xpr has the EvalBeforeNestingBit, the block() doesn't use direct access on the evaluated
    // xpr.
-    Flags = (ei_traits<typename ei_traits<Derived>::ReturnMatrixType>::Flags
+    Flags = (ei_traits<typename ei_traits<Derived>::ReturnType>::Flags
          | EvalBeforeNestingBit) & ~DirectAccessBit
  };
};

@@ -51,18 +51,18 @@ struct ei_traits<ReturnByValue<Derived> >
  * So the only way that nesting it in an expression can work, is by evaluating it into a plain matrix.
  * So ei_nested always gives the plain return matrix type.
  */
-template<typename Derived,int n,typename PlainMatrixType>
-struct ei_nested<ReturnByValue<Derived>, n, PlainMatrixType>
+template<typename Derived,int n,typename PlainObject>
+struct ei_nested<ReturnByValue<Derived>, n, PlainObject>
{
-  typedef typename ei_traits<Derived>::ReturnMatrixType type;
+  typedef typename ei_traits<Derived>::ReturnType type;
};

template<typename Derived> class ReturnByValue
-  : public ei_traits<Derived>::ReturnMatrixType::template MakeBase<ReturnByValue<Derived> >::Type
+  : public ei_traits<Derived>::ReturnType::template MakeBase<ReturnByValue<Derived> >::Type
{
  public:
-    typedef typename ei_traits<Derived>::ReturnMatrixType ReturnMatrixType;
-    typedef typename ReturnMatrixType::template MakeBase<ReturnByValue<Derived> >::Type Base;
+    typedef typename ei_traits<Derived>::ReturnType ReturnType;
+    typedef typename ReturnType::template MakeBase<ReturnByValue<Derived> >::Type Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(ReturnByValue)

    template<typename Dest>
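ReturnByValue is the wrapper for results that can only be produced by evaluating into a plain object, which is why its ei_nested specialization above always yields ReturnType rather than a reference. The contract a producer has to satisfy is small: expose rows()/cols() and an evalTo(dst). A condensed sketch of such a producer (the shape is illustrative; the real scaffolding goes through ei_traits and MakeBase as shown above):

    // Hypothetical ReturnByValue-style producer: materialized only when assigned somewhere.
    template<typename MatrixType>
    struct transposed_copy
    {
      const MatrixType& src;
      explicit transposed_copy(const MatrixType& m) : src(m) {}

      int rows() const { return src.cols(); }
      int cols() const { return src.rows(); }

      template<typename Dest>
      void evalTo(Dest& dst) const
      {
        dst.resize(rows(), cols());
        for (int i = 0; i < src.rows(); ++i)
          for (int j = 0; j < src.cols(); ++j)
            dst(j, i) = src(i, j);
      }
    };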
@@ -68,7 +68,7 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView
    enum {
      Mode = ei_traits<SelfAdjointView>::Mode
    };
-    typedef typename MatrixType::PlainMatrixType PlainMatrixType;
+    typedef typename MatrixType::PlainObject PlainObject;

    inline SelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
    { ei_assert(ei_are_flags_consistent<Mode>::ret); }

@@ -146,8 +146,8 @@ template<typename MatrixType, unsigned int UpLo> class SelfAdjointView

    /////////// Cholesky module ///////////

-    const LLT<PlainMatrixType, UpLo> llt() const;
-    const LDLT<PlainMatrixType> ldlt() const;
+    const LLT<PlainObject, UpLo> llt() const;
+    const LDLT<PlainObject> ldlt() const;

  protected:
    const typename MatrixType::Nested m_matrix;

@@ -124,8 +124,8 @@ template<typename Derived>
inline Derived& DenseBase<Derived>::operator*=(const Scalar& other)
{
  SelfCwiseBinaryOp<ei_scalar_product_op<Scalar>, Derived> tmp(derived());
-  typedef typename Derived::PlainMatrixType PlainMatrixType;
-  tmp = PlainMatrixType::Constant(rows(),cols(),other);
+  typedef typename Derived::PlainObject PlainObject;
+  tmp = PlainObject::Constant(rows(),cols(),other);
  return derived();
}

@@ -133,8 +133,8 @@ template<typename Derived>
inline Derived& DenseBase<Derived>::operator/=(const Scalar& other)
{
  SelfCwiseBinaryOp<typename ei_meta_if<NumTraits<Scalar>::HasFloatingPoint,ei_scalar_product_op<Scalar>,ei_scalar_quotient_op<Scalar> >::ret, Derived> tmp(derived());
-  typedef typename Derived::PlainMatrixType PlainMatrixType;
-  tmp = PlainMatrixType::Constant(rows(),cols(), NumTraits<Scalar>::HasFloatingPoint ? Scalar(1)/other : other);
+  typedef typename Derived::PlainObject PlainObject;
+  tmp = PlainObject::Constant(rows(),cols(), NumTraits<Scalar>::HasFloatingPoint ? Scalar(1)/other : other);
  return derived();
}

@@ -32,7 +32,7 @@
  *
  * \brief Base class for triangular part in a matrix
  */
-template<typename Derived> class TriangularBase : public AnyMatrixBase<Derived>
+template<typename Derived> class TriangularBase : public EigenBase<Derived>
{
  public:

@@ -148,7 +148,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularView
    typedef TriangularBase<TriangularView> Base;
    typedef typename ei_traits<TriangularView>::Scalar Scalar;
    typedef _MatrixType MatrixType;
-    typedef typename MatrixType::PlainMatrixType DenseMatrixType;
+    typedef typename MatrixType::PlainObject DenseMatrixType;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename ei_cleantype<MatrixTypeNested>::type _MatrixTypeNested;

@@ -109,7 +109,7 @@ class CoeffBasedProduct

    typedef MatrixBase<CoeffBasedProduct> Base;
    EIGEN_DENSE_PUBLIC_INTERFACE(CoeffBasedProduct)
-    typedef typename Base::PlainMatrixType PlainMatrixType;
+    typedef typename Base::PlainObject PlainObject;

  private:

@@ -181,8 +181,8 @@ class CoeffBasedProduct
      return res;
    }

-    // Implicit convertion to the nested type (trigger the evaluation of the product)
-    operator const PlainMatrixType& () const
+    // Implicit conversion to the nested type (trigger the evaluation of the product)
+    operator const PlainObject& () const
    {
      m_result.lazyAssign(*this);
      return m_result;

@@ -205,15 +205,15 @@ class CoeffBasedProduct
    const LhsNested m_lhs;
    const RhsNested m_rhs;

-    mutable PlainMatrixType m_result;
+    mutable PlainObject m_result;
};

// here we need to overload the nested rule for products
// such that the nested type is a const reference to a plain matrix
-template<typename Lhs, typename Rhs, int N, typename PlainMatrixType>
-struct ei_nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssigningBit>, N, PlainMatrixType>
+template<typename Lhs, typename Rhs, int N, typename PlainObject>
+struct ei_nested<CoeffBasedProduct<Lhs,Rhs,EvalBeforeNestingBit|EvalBeforeAssigningBit>, N, PlainObject>
{
-  typedef PlainMatrixType const& type;
+  typedef PlainObject const& type;
};

/***************************************************************************

@@ -27,6 +27,12 @@

#ifndef EIGEN_EXTERN_INSTANTIATIONS

+#ifdef EIGEN_HAS_FUSE_CJMADD
+#define CJMADD(A,B,C,T)  C = cj.pmadd(A,B,C);
+#else
+#define CJMADD(A,B,C,T)  T = A; T = cj.pmul(T,B); C = ei_padd(C,T);
+#endif
+
// optimized GEneral packed Block * packed Panel product kernel
template<typename Scalar, int mr, int nr, typename Conj>
struct ei_gebp_kernel
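CJMADD abstracts one conjugate-aware multiply-accumulate so the kernel body below can be written once: with EIGEN_HAS_FUSE_CJMADD it collapses to a single cj.pmadd(), otherwise it is split into a pmul followed by a padd through the scratch register T, which can schedule better on targets without a fused path. A scalar analogue of the two expansions (illustration only, no conjugation and no packets):

    // Scalar analogue of the two CJMADD expansions.
    inline void madd_fused(float a, float b, float& c)           { c = a * b + c; }
    inline void madd_split(float a, float b, float& c, float& t) { t = a; t = t * b; c = c + t; }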
@ -74,65 +80,111 @@ struct ei_gebp_kernel
|
||||
const Scalar* blB = &blockB[j2*strideB*PacketSize+offsetB*nr];
|
||||
for(int k=0; k<peeled_kc; k+=4)
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0, A1;
|
||||
if(nr==2)
|
||||
{
|
||||
PacketType B0, T0, A0, A1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
C4 = cj.pmadd(A1, B0, C4);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 4 : 2)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
C5 = cj.pmadd(A1, B1, C5);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 5 : 3)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C6 = cj.pmadd(A1, B2, C6);
|
||||
if(nr==4) B2 = ei_pload(&blB[6*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
if(nr==4) C7 = cj.pmadd(A1, B3, C7);
|
||||
A1 = ei_pload(&blA[3*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[7*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
C4 = cj.pmadd(A1, B0, C4);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 8 : 4)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
C5 = cj.pmadd(A1, B1, C5);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 9 : 5)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C6 = cj.pmadd(A1, B2, C6);
|
||||
if(nr==4) B2 = ei_pload(&blB[10*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[4*PacketSize]);
|
||||
if(nr==4) C7 = cj.pmadd(A1, B3, C7);
|
||||
A1 = ei_pload(&blA[5*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[11*PacketSize]);
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T0);
|
||||
B0 = ei_pload(&blB[1*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
CJMADD(A1,B0,C5,T0);
|
||||
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
C4 = cj.pmadd(A1, B0, C4);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 12 : 6)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
C5 = cj.pmadd(A1, B1, C5);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 13 : 7)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C6 = cj.pmadd(A1, B2, C6);
|
||||
if(nr==4) B2 = ei_pload(&blB[14*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[6*PacketSize]);
|
||||
if(nr==4) C7 = cj.pmadd(A1, B3, C7);
|
||||
A1 = ei_pload(&blA[7*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[15*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
C4 = cj.pmadd(A1, B0, C4);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
C5 = cj.pmadd(A1, B1, C5);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C6 = cj.pmadd(A1, B2, C6);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
if(nr==4) C7 = cj.pmadd(A1, B3, C7);
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
A1 = ei_pload(&blA[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[2*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T0);
|
||||
B0 = ei_pload(&blB[3*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
CJMADD(A1,B0,C5,T0);
|
||||
|
||||
A0 = ei_pload(&blA[4*PacketSize]);
|
||||
A1 = ei_pload(&blA[5*PacketSize]);
|
||||
B0 = ei_pload(&blB[4*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T0);
|
||||
B0 = ei_pload(&blB[5*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
CJMADD(A1,B0,C5,T0);
|
||||
|
||||
A0 = ei_pload(&blA[6*PacketSize]);
|
||||
A1 = ei_pload(&blA[7*PacketSize]);
|
||||
B0 = ei_pload(&blB[6*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T0);
|
||||
B0 = ei_pload(&blB[7*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
CJMADD(A1,B0,C5,T0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
PacketType B0, B1, B2, B3, A0, A1;
|
||||
PacketType T0, T1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
CJMADD(A1,B0,C4,T1);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 4 : 2)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T0);
|
||||
CJMADD(A1,B1,C5,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 5 : 3)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A1,B2,C6,T1); }
|
||||
if(nr==4) B2 = ei_pload(&blB[6*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T0); }
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
if(nr==4) { CJMADD(A1,B3,C7,T1); }
|
||||
A1 = ei_pload(&blA[3*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[7*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T1);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 8 : 4)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T0);
|
||||
CJMADD(A1,B1,C5,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 9 : 5)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A1,B2,C6,T1); }
|
||||
if(nr==4) B2 = ei_pload(&blB[10*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T0); }
|
||||
A0 = ei_pload(&blA[4*PacketSize]);
|
||||
if(nr==4) { CJMADD(A1,B3,C7,T1); }
|
||||
A1 = ei_pload(&blA[5*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[11*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T1);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 12 : 6)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T0);
|
||||
CJMADD(A1,B1,C5,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 13 : 7)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A1,B2,C6,T1); }
|
||||
if(nr==4) B2 = ei_pload(&blB[14*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T0); }
|
||||
A0 = ei_pload(&blA[6*PacketSize]);
|
||||
if(nr==4) { CJMADD(A1,B3,C7,T1); }
|
||||
A1 = ei_pload(&blA[7*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[15*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T1);
|
||||
CJMADD(A0,B1,C1,T0);
|
||||
CJMADD(A1,B1,C5,T1);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A1,B2,C6,T1); }
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T0); }
|
||||
if(nr==4) { CJMADD(A1,B3,C7,T1); }
|
||||
}
|
||||
|
||||
blB += 4*nr*PacketSize;
|
||||
blA += 4*mr;
|
||||
@ -140,22 +192,40 @@ struct ei_gebp_kernel
|
||||
// process remaining peeled loop
|
||||
for(int k=peeled_kc; k<depth; k++)
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0, A1;
|
||||
if(nr==2)
|
||||
{
|
||||
PacketType B0, T0, A0, A1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
C4 = cj.pmadd(A1, B0, C4);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
C5 = cj.pmadd(A1, B1, C5);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C6 = cj.pmadd(A1, B2, C6);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
if(nr==4) C7 = cj.pmadd(A1, B3, C7);
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A1,B0,C4,T0);
|
||||
B0 = ei_pload(&blB[1*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
CJMADD(A1,B0,C5,T0);
|
||||
}
|
||||
else
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0, A1, T0, T1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
A1 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
CJMADD(A1,B0,C4,T1);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 4 : 2)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T0);
|
||||
CJMADD(A1,B1,C5,T1);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A1,B2,C6,T1); }
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T0); }
|
||||
if(nr==4) { CJMADD(A1,B3,C7,T1); }
|
||||
}
|
||||
|
||||
blB += nr*PacketSize;
|
||||
blA += mr;
|
||||
@ -189,45 +259,79 @@ struct ei_gebp_kernel
|
||||
const Scalar* blB = &blockB[j2*strideB*PacketSize+offsetB*nr];
|
||||
for(int k=0; k<peeled_kc; k+=4)
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0;
|
||||
if(nr==2)
|
||||
{
|
||||
PacketType B0, T0, A0;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 4 : 2)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 5 : 3)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) B2 = ei_pload(&blB[6*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[1*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[7*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 8 : 4)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 9 : 5)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) B2 = ei_pload(&blB[10*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[11*PacketSize]);
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[1*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 12 : 6)*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 13 : 7)*PacketSize]);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) B2 = ei_pload(&blB[14*PacketSize]);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[3*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[15*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[1*PacketSize]);
|
||||
B0 = ei_pload(&blB[2*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[3*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
B0 = ei_pload(&blB[4*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[5*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
|
||||
A0 = ei_pload(&blA[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[6*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[7*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
}
|
||||
else
|
||||
{
|
||||
|
||||
PacketType B0, B1, B2, B3, A0;
|
||||
PacketType T0, T1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 4 : 2)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 5 : 3)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) B2 = ei_pload(&blB[6*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
A0 = ei_pload(&blA[1*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[7*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 8 : 4)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 9 : 5)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) B2 = ei_pload(&blB[10*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
A0 = ei_pload(&blA[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[11*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[(nr==4 ? 12 : 6)*PacketSize]);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
B1 = ei_pload(&blB[(nr==4 ? 13 : 7)*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) B2 = ei_pload(&blB[14*PacketSize]);
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
A0 = ei_pload(&blA[3*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[15*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
}
|
||||
|
||||
blB += 4*nr*PacketSize;
|
||||
blA += 4*PacketSize;
|
||||
@ -235,17 +339,32 @@ struct ei_gebp_kernel
|
||||
// process remaining peeled loop
|
||||
for(int k=peeled_kc; k<depth; k++)
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0;
|
||||
if(nr==2)
|
||||
{
|
||||
PacketType B0, T0, A0;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = ei_pload(&blB[1*PacketSize]);
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
}
|
||||
else
|
||||
{
|
||||
PacketType B0, B1, B2, B3, A0;
|
||||
PacketType T0, T1;
|
||||
|
||||
A0 = ei_pload(&blA[0*PacketSize]);
|
||||
B0 = ei_pload(&blB[0*PacketSize]);
|
||||
B1 = ei_pload(&blB[1*PacketSize]);
|
||||
if(nr==4) B2 = ei_pload(&blB[2*PacketSize]);
|
||||
if(nr==4) B3 = ei_pload(&blB[3*PacketSize]);
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
}
|
||||
|
||||
blB += nr*PacketSize;
|
||||
blA += PacketSize;
|
||||
@ -268,17 +387,32 @@ struct ei_gebp_kernel
|
||||
const Scalar* blB = &blockB[j2*strideB*PacketSize+offsetB*nr];
|
||||
for(int k=0; k<depth; k++)
|
||||
{
|
||||
Scalar B0, B1, B2, B3, A0;
|
||||
if(nr==2)
|
||||
{
|
||||
Scalar B0, T0, A0;
|
||||
|
||||
A0 = blA[k];
|
||||
B0 = blB[0*PacketSize];
|
||||
B1 = blB[1*PacketSize];
|
||||
C0 = cj.pmadd(A0, B0, C0);
|
||||
if(nr==4) B2 = blB[2*PacketSize];
|
||||
if(nr==4) B3 = blB[3*PacketSize];
|
||||
C1 = cj.pmadd(A0, B1, C1);
|
||||
if(nr==4) C2 = cj.pmadd(A0, B2, C2);
|
||||
if(nr==4) C3 = cj.pmadd(A0, B3, C3);
|
||||
A0 = blA[0*PacketSize];
|
||||
B0 = blB[0*PacketSize];
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
B0 = blB[1*PacketSize];
|
||||
CJMADD(A0,B0,C1,T0);
|
||||
}
|
||||
else
|
||||
{
|
||||
Scalar B0, B1, B2, B3, A0;
|
||||
Scalar T0, T1;
|
||||
|
||||
A0 = blA[k];
|
||||
B0 = blB[0*PacketSize];
|
||||
B1 = blB[1*PacketSize];
|
||||
if(nr==4) B2 = blB[2*PacketSize];
|
||||
if(nr==4) B3 = blB[3*PacketSize];
|
||||
|
||||
CJMADD(A0,B0,C0,T0);
|
||||
CJMADD(A0,B1,C1,T1);
|
||||
if(nr==4) { CJMADD(A0,B2,C2,T0); }
|
||||
if(nr==4) { CJMADD(A0,B3,C3,T1); }
|
||||
}
|
||||
|
||||
blB += nr*PacketSize;
|
||||
}
|
||||
@@ -310,13 +444,13 @@ struct ei_gebp_kernel
          const Scalar* blB = &blockB[j2*strideB*PacketSize+offsetB];
          for(int k=0; k<depth; k++)
          {
-            PacketType B0, A0, A1;
+            PacketType B0, A0, A1, T0, T1;

            A0 = ei_pload(&blA[0*PacketSize]);
            A1 = ei_pload(&blA[1*PacketSize]);
            B0 = ei_pload(&blB[0*PacketSize]);
-            C0 = cj.pmadd(A0, B0, C0);
-            C4 = cj.pmadd(A1, B0, C4);
+            CJMADD(A0,B0,C0,T0);
+            CJMADD(A1,B0,C4,T1);

            blB += PacketSize;
            blA += mr;

@@ -334,7 +468,7 @@ struct ei_gebp_kernel
#endif

          PacketType C0 = ei_ploadu(&res[(j2+0)*resStride + i]);

          const Scalar* blB = &blockB[j2*strideB*PacketSize+offsetB];
          for(int k=0; k<depth; k++)
          {

@@ -363,6 +497,8 @@ struct ei_gebp_kernel
  }
};

+#undef CJMADD
+
// pack a block of the lhs
// The travesal is as follow (mr==4):
//  0  4  8 12 ...

@@ -474,7 +610,7 @@ struct ei_gemm_pack_rhs<Scalar, nr, ColMajor, PanelMode>
      // skip what we have after
      if(PanelMode) count += PacketSize * nr * (stride-offset-depth);
    }

    // copy the remaining columns one at a time (nr==1)
    for(int j2=packet_cols; j2<cols; ++j2)
    {
@@ -166,7 +166,7 @@ template<typename XprType> struct ei_blas_traits
  };
  typedef typename ei_meta_if<int(ActualAccess)==HasDirectAccess,
                              ExtractType,
-                              typename _ExtractType::PlainMatrixType
+                              typename _ExtractType::PlainObject
                              >::ret DirectLinearAccessType;
  static inline ExtractType extract(const XprType& x) { return x; }
  static inline Scalar extractScalarFactor(const XprType&) { return Scalar(1); }

@@ -227,7 +227,7 @@ struct ei_blas_traits<Transpose<NestedXpr> >
  typedef Transpose<typename Base::_ExtractType> _ExtractType;
  typedef typename ei_meta_if<int(Base::ActualAccess)==HasDirectAccess,
                              ExtractType,
-                              typename ExtractType::PlainMatrixType
+                              typename ExtractType::PlainObject
                              >::ret DirectLinearAccessType;
  enum {
    IsTransposed = Base::IsTransposed ? 0 : 1

@@ -28,7 +28,7 @@
template<typename T> struct ei_traits;
template<typename T> struct NumTraits;

-template<typename Derived> struct AnyMatrixBase;
+template<typename Derived> struct EigenBase;

template<typename _Scalar, int _Rows, int _Cols,
         int _Options = EIGEN_DEFAULT_MATRIX_STORAGE_ORDER_OPTION | AutoAlign,

@@ -147,7 +147,7 @@ template<typename T, typename StorageType = typename ei_traits<T>::StorageType>
template<typename T> struct ei_eval<T,Dense>
{
  typedef typename ei_plain_matrix_type<T>::type type;
-  // typedef typename T::PlainMatrixType type;
+  // typedef typename T::PlainObject type;
  // typedef T::Matrix<typename ei_traits<T>::Scalar,
  //                   ei_traits<T>::RowsAtCompileTime,
  //                   ei_traits<T>::ColsAtCompileTime,

@@ -201,6 +201,18 @@ template<typename T> struct ei_plain_matrix_type_row_major
// we should be able to get rid of this one too
template<typename T> struct ei_must_nest_by_value { enum { ret = false }; };

+template<class T>
+struct ei_is_reference
+{
+  enum { ret = false };
+};
+
+template<class T>
+struct ei_is_reference<T&>
+{
+  enum { ret = true };
+};
+
/**
 * The reference selector for template expressions. The idea is that we don't
 * need to use references for expressions since they are light weight proxy
@ -234,7 +246,7 @@ struct ei_ref_selector
|
||||
* const Matrix3d&, because the internal logic of ei_nested determined that since a was already a matrix, there was no point
|
||||
* in copying it into another matrix.
|
||||
*/
|
||||
template<typename T, int n=1, typename PlainMatrixType = typename ei_eval<T>::type> struct ei_nested
|
||||
template<typename T, int n=1, typename PlainObject = typename ei_eval<T>::type> struct ei_nested
|
||||
{
|
||||
enum {
|
||||
CostEval = (n+1) * int(NumTraits<typename ei_traits<T>::Scalar>::ReadCost),
|
||||
@ -244,7 +256,7 @@ template<typename T, int n=1, typename PlainMatrixType = typename ei_eval<T>::ty
|
||||
typedef typename ei_meta_if<
|
||||
( int(ei_traits<T>::Flags) & EvalBeforeNestingBit ) ||
|
||||
( int(CostEval) <= int(CostNoEval) ),
|
||||
PlainMatrixType,
|
||||
PlainObject,
|
||||
typename ei_ref_selector<T>::type
|
||||
>::ret type;
|
||||
};
|
||||
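The ei_nested machinery above trades nesting the expression by reference against evaluating it into its PlainObject, based on a compile-time cost comparison. A stripped-down sketch of that selection, under simplified cost formulas (nested_choice, ReadCost and EvalCost here are illustrative stand-ins, not Eigen's exact definitions):

#include <type_traits>

// Toy cost model: reading one coefficient of the expression costs ReadCost,
// evaluating the whole expression once into plain storage costs EvalCost.
template<typename Expr, int n, typename PlainObject>
struct nested_choice
{
  static const int CostEval   = Expr::EvalCost + n;      // evaluate once, then n cheap reads
  static const int CostNoEval = n * Expr::ReadCost;      // re-read the raw expression n times
  typedef typename std::conditional<(CostEval <= CostNoEval),
                                    PlainObject,         // materialize a temporary
                                    const Expr&          // nest by reference
                                   >::type type;
};

struct CheapExpr  { static const int ReadCost = 1;  static const int EvalCost = 1;  };
struct CostlyExpr { static const int ReadCost = 50; static const int EvalCost = 50; };

static_assert(std::is_same<nested_choice<CheapExpr, 3, int>::type, const CheapExpr&>::value,
              "cheap expressions are nested by reference");
static_assert(std::is_same<nested_choice<CostlyExpr, 3, int>::type, int>::value,
              "costly expressions are evaluated into a plain temporary");

int main() { return 0; }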
@@ -258,7 +270,7 @@ template<unsigned int Flags> struct ei_are_flags_consistent
* overloads for complex types */
template<typename Derived,typename Scalar,typename OtherScalar,
bool EnableIt = !ei_is_same_type<Scalar,OtherScalar>::ret >
struct ei_special_scalar_op_base : public AnyMatrixBase<Derived>
struct ei_special_scalar_op_base : public EigenBase<Derived>
{
// dummy operator* so that the
// "using ei_special_scalar_op_base::operator*" compiles
@@ -266,7 +278,7 @@ struct ei_special_scalar_op_base : public AnyMatrixBase<Derived>
};

template<typename Derived,typename Scalar,typename OtherScalar>
struct ei_special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public AnyMatrixBase<Derived>
struct ei_special_scalar_op_base<Derived,Scalar,OtherScalar,true> : public EigenBase<Derived>
{
const CwiseUnaryOp<ei_scalar_multiple2_op<Scalar,OtherScalar>, Derived>
operator*(const OtherScalar& scalar) const

@@ -37,7 +37,7 @@ const unsigned int UnitLowerTriangular = UnitLower;

template<typename ExpressionType, unsigned int Added, unsigned int Removed>
template<typename OtherDerived>
typename ExpressionType::PlainMatrixType
typename ExpressionType::PlainObject
Flagged<ExpressionType,Added,Removed>::solveTriangular(const MatrixBase<OtherDerived>& other) const
{
return m_matrix.template triangularView<Added>.solve(other.derived());

@@ -276,7 +276,7 @@ inline Matrix<typename NumTraits<typename ei_traits<Derived>::Scalar>::Real, ei_
MatrixBase<Derived>::eigenvalues() const
{
ei_assert(Flags&SelfAdjoint);
return SelfAdjointEigenSolver<typename Derived::PlainMatrixType>(eval(),false).eigenvalues();
return SelfAdjointEigenSolver<typename Derived::PlainObject>(eval(),false).eigenvalues();
}

template<typename Derived, bool IsSelfAdjoint>
@@ -296,7 +296,7 @@ template<typename Derived> struct ei_operatorNorm_selector<Derived, false>
static inline typename NumTraits<typename ei_traits<Derived>::Scalar>::Real
operatorNorm(const MatrixBase<Derived>& m)
{
typename Derived::PlainMatrixType m_eval(m);
typename Derived::PlainObject m_eval(m);
// FIXME if it is really guaranteed that the eigenvalues are already sorted,
// then we don't need to compute a maxCoeff() here, comparing the 1st and last ones is enough.
return ei_sqrt(

@@ -213,9 +213,9 @@ struct ei_traits<ei_homogeneous_left_product_impl<Homogeneous<MatrixType,Vertica
typedef Matrix<typename ei_traits<MatrixType>::Scalar,
Lhs::RowsAtCompileTime,
MatrixType::ColsAtCompileTime,
MatrixType::PlainMatrixType::Options,
MatrixType::PlainObject::Options,
Lhs::MaxRowsAtCompileTime,
MatrixType::MaxColsAtCompileTime> ReturnMatrixType;
MatrixType::MaxColsAtCompileTime> ReturnType;
};

template<typename MatrixType,typename Lhs>
@@ -251,9 +251,9 @@ struct ei_traits<ei_homogeneous_right_product_impl<Homogeneous<MatrixType,Horizo
typedef Matrix<typename ei_traits<MatrixType>::Scalar,
MatrixType::RowsAtCompileTime,
Rhs::ColsAtCompileTime,
MatrixType::PlainMatrixType::Options,
MatrixType::PlainObject::Options,
MatrixType::MaxRowsAtCompileTime,
Rhs::MaxColsAtCompileTime> ReturnMatrixType;
Rhs::MaxColsAtCompileTime> ReturnType;
};

template<typename MatrixType,typename Rhs>

@@ -35,7 +35,7 @@
*/
template<typename Derived>
template<typename OtherDerived>
inline typename MatrixBase<Derived>::PlainMatrixType
inline typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::cross(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,3)
@@ -79,7 +79,7 @@ struct ei_cross3_impl {
*/
template<typename Derived>
template<typename OtherDerived>
inline typename MatrixBase<Derived>::PlainMatrixType
inline typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::cross3(const MatrixBase<OtherDerived>& other) const
{
EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Derived,4)
@@ -210,7 +210,7 @@ struct ei_unitOrthogonal_selector<Derived,2>
* \sa cross()
*/
template<typename Derived>
typename MatrixBase<Derived>::PlainMatrixType
typename MatrixBase<Derived>::PlainObject
MatrixBase<Derived>::unitOrthogonal() const
{
EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)

@@ -211,6 +211,7 @@ public:
template<typename _Scalar>
struct ei_traits<Quaternion<_Scalar> >
{
typedef Quaternion<_Scalar> PlainObject;
typedef _Scalar Scalar;
typedef Matrix<_Scalar,4,1> Coefficients;
enum{

@@ -74,12 +74,12 @@ class RotationBase
*/
template<typename OtherDerived>
EIGEN_STRONG_INLINE typename ei_rotation_base_generic_product_selector<Derived,OtherDerived,OtherDerived::IsVectorAtCompileTime>::ReturnType
operator*(const AnyMatrixBase<OtherDerived>& e) const
operator*(const EigenBase<OtherDerived>& e) const
{ return ei_rotation_base_generic_product_selector<Derived,OtherDerived>::run(derived(), e.derived()); }

/** \returns the concatenation of a linear transformation \a l with the rotation \a r */
template<typename OtherDerived> friend
inline RotationMatrixType operator*(const AnyMatrixBase<OtherDerived>& l, const Derived& r)
inline RotationMatrixType operator*(const EigenBase<OtherDerived>& l, const Derived& r)
{ return l.derived() * r.toRotationMatrix(); }

/** \returns the concatenation of the rotation \c *this with a transformation \a t */

@@ -226,14 +226,14 @@ public:

/** Constructs and initializes a transformation from a Dim^2 or a (Dim+1)^2 matrix. */
template<typename OtherDerived>
inline explicit Transform(const AnyMatrixBase<OtherDerived>& other)
inline explicit Transform(const EigenBase<OtherDerived>& other)
{
ei_transform_construct_from_matrix<OtherDerived,Mode,Dim,HDim>::run(this, other.derived());
}

/** Set \c *this from a Dim^2 or (Dim+1)^2 matrix. */
template<typename OtherDerived>
inline Transform& operator=(const AnyMatrixBase<OtherDerived>& other)
inline Transform& operator=(const EigenBase<OtherDerived>& other)
{
ei_transform_construct_from_matrix<OtherDerived,Mode,Dim,HDim>::run(this, other.derived());
return *this;
@@ -310,7 +310,7 @@ public:
// note: this function is defined here because some compilers cannot find the respective declaration
template<typename OtherDerived>
inline const typename ei_transform_right_product_impl<OtherDerived,Mode,_Dim,_Dim+1>::ResultType
operator * (const AnyMatrixBase<OtherDerived> &other) const
operator * (const EigenBase<OtherDerived> &other) const
{ return ei_transform_right_product_impl<OtherDerived,Mode,Dim,HDim>::run(*this,other.derived()); }

/** \returns the product expression of a transformation matrix \a a times a transform \a b
@@ -322,11 +322,11 @@ public:
*/
template<typename OtherDerived> friend
inline const typename ei_transform_left_product_impl<OtherDerived,Mode,_Dim,_Dim+1>::ResultType
operator * (const AnyMatrixBase<OtherDerived> &a, const Transform &b)
operator * (const EigenBase<OtherDerived> &a, const Transform &b)
{ return ei_transform_left_product_impl<OtherDerived,Mode,Dim,HDim>::run(a.derived(),b); }

template<typename OtherDerived>
inline Transform& operator*=(const AnyMatrixBase<OtherDerived>& other) { return *this = *this * other; }
inline Transform& operator*=(const EigenBase<OtherDerived>& other) { return *this = *this * other; }
/** Concatenates two transformations */
inline const Transform operator * (const Transform& other) const
@@ -1021,7 +1021,7 @@ struct ei_transform_construct_from_matrix<Other, AffineCompact,Dim,HDim, HDim,HD
};

/*********************************************************
*** Specializations of operator* with a AnyMatrixBase ***
*** Specializations of operator* with a EigenBase ***
*********************************************************/

// ei_general_product_return_type is a generalization of ProductReturnType, for all types (including e.g. DiagonalBase...),
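Every operator* above now takes an EigenBase (previously AnyMatrixBase), the lightweight CRTP base whose only job is to expose the concrete derived type. A self-contained sketch of that pattern, with invented names (MyEigenBase, MyMatrix, MyRotation) standing in for the Eigen types:

#include <iostream>

// Minimal CRTP base in the spirit of EigenBase: it only knows how to hand back
// the concrete derived object.
template<typename Derived>
struct MyEigenBase
{
  const Derived& derived() const { return static_cast<const Derived&>(*this); }
};

struct MyMatrix : MyEigenBase<MyMatrix>
{
  double value;
  explicit MyMatrix(double v) : value(v) {}
};

struct MyRotation
{
  double scale;
  // Accept anything deriving from MyEigenBase and work on the concrete type.
  template<typename OtherDerived>
  double operator*(const MyEigenBase<OtherDerived>& e) const
  { return scale * e.derived().value; }
};

int main()
{
  MyMatrix m(3.0);
  MyRotation r;
  r.scale = 2.0;
  std::cout << (r * m) << "\n";   // prints 6
  return 0;
}

Because the argument is an EigenBase<OtherDerived>, a single overload accepts any dense expression while still dispatching on its concrete type through derived().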
@@ -93,7 +93,7 @@ public:

/** Concatenates a translation and a linear transformation */
template<typename OtherDerived>
inline AffineTransformType operator* (const AnyMatrixBase<OtherDerived>& linear) const;
inline AffineTransformType operator* (const EigenBase<OtherDerived>& linear) const;

/** Concatenates a translation and a rotation */
template<typename Derived>
@@ -103,7 +103,7 @@ public:
/** \returns the concatenation of a linear transformation \a l with the translation \a t */
// it's a nightmare to define a templated friend function outside its declaration
template<typename OtherDerived> friend
inline AffineTransformType operator*(const AnyMatrixBase<OtherDerived>& linear, const Translation& t)
inline AffineTransformType operator*(const EigenBase<OtherDerived>& linear, const Translation& t)
{
AffineTransformType res;
res.matrix().setZero();
@@ -182,7 +182,7 @@ Translation<Scalar,Dim>::operator* (const UniformScaling<Scalar>& other) const
template<typename Scalar, int Dim>
template<typename OtherDerived>
inline typename Translation<Scalar,Dim>::AffineTransformType
Translation<Scalar,Dim>::operator* (const AnyMatrixBase<OtherDerived>& linear) const
Translation<Scalar,Dim>::operator* (const EigenBase<OtherDerived>& linear) const
{
AffineTransformType res;
res.matrix().setZero();

@@ -99,7 +99,7 @@ void MatrixBase<Derived>::applyHouseholderOnTheLeft(
const Scalar& tau,
Scalar* workspace)
{
Map<Matrix<Scalar, 1, Base::ColsAtCompileTime, PlainMatrixType::Options, 1, Base::MaxColsAtCompileTime> > tmp(workspace,cols());
Map<Matrix<Scalar, 1, Base::ColsAtCompileTime, PlainObject::Options, 1, Base::MaxColsAtCompileTime> > tmp(workspace,cols());
Block<Derived, EssentialPart::SizeAtCompileTime, Derived::ColsAtCompileTime> bottom(derived(), 1, 0, rows()-1, cols());
tmp.noalias() = essential.adjoint() * bottom;
tmp += this->row(0);
@@ -114,7 +114,7 @@ void MatrixBase<Derived>::applyHouseholderOnTheRight(
const Scalar& tau,
Scalar* workspace)
{
Map<Matrix<Scalar, Base::RowsAtCompileTime, 1, PlainMatrixType::Options, Base::MaxRowsAtCompileTime, 1> > tmp(workspace,rows());
Map<Matrix<Scalar, Base::RowsAtCompileTime, 1, PlainObject::Options, Base::MaxRowsAtCompileTime, 1> > tmp(workspace,rows());
Block<Derived, Derived::RowsAtCompileTime, EssentialPart::SizeAtCompileTime> right(derived(), 0, 1, rows(), cols()-1);
tmp.noalias() = right * essential.conjugate();
tmp += this->col(0);

@@ -97,7 +97,7 @@ template<typename OtherScalarType, typename MatrixType> struct ei_matrix_type_ti
};

template<typename VectorsType, typename CoeffsType, int Side> class HouseholderSequence
: public AnyMatrixBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
: public EigenBase<HouseholderSequence<VectorsType,CoeffsType,Side> >
{
enum {
RowsAtCompileTime = ei_traits<HouseholderSequence>::RowsAtCompileTime,

@@ -251,6 +251,7 @@ template<typename _MatrixType> class FullPivLU
{
m_usePrescribedThreshold = true;
m_prescribedThreshold = threshold;
return *this;
}

/** Allows to come back to the default behavior, letting Eigen use its default formula for
@@ -630,7 +631,7 @@ struct ei_solve_retval<FullPivLU<_MatrixType>, Rhs>
return;
}

typename Rhs::PlainMatrixType c(rhs().rows(), rhs().cols());
typename Rhs::PlainObject c(rhs().rows(), rhs().cols());

// Step 1
c = dec().permutationP() * rhs();
@@ -670,10 +671,10 @@ struct ei_solve_retval<FullPivLU<_MatrixType>, Rhs>
* \sa class FullPivLU
*/
template<typename Derived>
inline const FullPivLU<typename MatrixBase<Derived>::PlainMatrixType>
inline const FullPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivLu() const
{
return FullPivLU<PlainMatrixType>(eval());
return FullPivLU<PlainObject>(eval());
}

#endif // EIGEN_LU_H
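A recurring change in this patch is the rename of the PlainMatrixType typedef to PlainObject: the concrete, plain type an expression evaluates to, which the decompositions above use as their template argument. A small usage sketch against a present-day Eigen installation (the helper name evaluated_copy is an assumption, not part of this patch):

#include <Eigen/Dense>

// Evaluate any dense expression into its concrete plain type before reusing it,
// which is exactly what helpers such as FullPivLU<PlainObject>(eval()) rely on.
template<typename Derived>
typename Eigen::MatrixBase<Derived>::PlainObject
evaluated_copy(const Eigen::MatrixBase<Derived>& expr)
{
  return expr.eval();   // eval() yields the PlainObject of the expression
}

int main()
{
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd b = evaluated_copy(a * a.transpose());  // product expression -> plain matrix
  return b.size() == 9 ? 0 : 1;
}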
@@ -238,7 +238,7 @@ struct ei_compute_inverse_and_det_with_check<MatrixType, ResultType, 4>
template<typename MatrixType>
struct ei_traits<ei_inverse_impl<MatrixType> >
{
typedef typename MatrixType::PlainMatrixType ReturnMatrixType;
typedef typename MatrixType::PlainObject ReturnType;
};

template<typename MatrixType>
@@ -327,7 +327,7 @@ inline void MatrixBase<Derived>::computeInverseAndDetWithCheck(
typedef typename ei_meta_if<
RowsAtCompileTime == 2,
typename ei_cleantype<typename ei_nested<Derived, 2>::type>::type,
PlainMatrixType
PlainObject
>::ret MatrixType;
ei_compute_inverse_and_det_with_check<MatrixType, ResultType>::run
(derived(), absDeterminantThreshold, inverse, determinant, invertible);

@@ -442,10 +442,10 @@ struct ei_solve_retval<PartialPivLU<_MatrixType>, Rhs>
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainMatrixType>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::partialPivLu() const
{
return PartialPivLU<PlainMatrixType>(eval());
return PartialPivLU<PlainObject>(eval());
}

/** \lu_module
@@ -457,10 +457,10 @@ MatrixBase<Derived>::partialPivLu() const
* \sa class PartialPivLU
*/
template<typename Derived>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainMatrixType>
inline const PartialPivLU<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::lu() const
{
return PartialPivLU<PlainMatrixType>(eval());
return PartialPivLU<PlainObject>(eval());
}

#endif // EIGEN_PARTIALLU_H

@@ -441,7 +441,7 @@ struct ei_solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
return;
}

typename Rhs::PlainMatrixType c(rhs());
typename Rhs::PlainObject c(rhs());

// Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
c.applyOnTheLeft(householderSequence(
@@ -458,7 +458,7 @@ struct ei_solve_retval<ColPivHouseholderQR<_MatrixType>, Rhs>
.solveInPlace(c.corner(TopLeft, nonzero_pivots, c.cols()));


typename Rhs::PlainMatrixType d(c);
typename Rhs::PlainObject d(c);
d.corner(TopLeft, nonzero_pivots, c.cols())
= dec().matrixQR()
.corner(TopLeft, nonzero_pivots, nonzero_pivots)
@@ -486,10 +486,10 @@ typename ColPivHouseholderQR<MatrixType>::HouseholderSequenceType ColPivHousehol
* \sa class ColPivHouseholderQR
*/
template<typename Derived>
const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainMatrixType>
const ColPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::colPivHouseholderQr() const
{
return ColPivHouseholderQR<PlainMatrixType>(eval());
return ColPivHouseholderQR<PlainObject>(eval());
}



@@ -352,7 +352,7 @@ struct ei_solve_retval<FullPivHouseholderQR<_MatrixType>, Rhs>
return;
}

typename Rhs::PlainMatrixType c(rhs());
typename Rhs::PlainObject c(rhs());

Matrix<Scalar,1,Rhs::ColsAtCompileTime> temp(rhs().cols());
for (int k = 0; k < dec().rank(); ++k)
@@ -413,10 +413,10 @@ typename FullPivHouseholderQR<MatrixType>::MatrixQType FullPivHouseholderQR<Matr
* \sa class FullPivHouseholderQR
*/
template<typename Derived>
const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainMatrixType>
const FullPivHouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::fullPivHouseholderQr() const
{
return FullPivHouseholderQR<PlainMatrixType>(eval());
return FullPivHouseholderQR<PlainObject>(eval());
}

#endif // EIGEN_FULLPIVOTINGHOUSEHOLDERQR_H

@@ -221,7 +221,7 @@ struct ei_solve_retval<HouseholderQR<_MatrixType>, Rhs>
const int rank = std::min(rows, cols);
ei_assert(rhs().rows() == rows);

typename Rhs::PlainMatrixType c(rhs());
typename Rhs::PlainObject c(rhs());

// Note that the matrix Q = H_0^* H_1^*... so its inverse is Q^* = (H_0 H_1 ...)^T
c.applyOnTheLeft(householderSequence(
@@ -246,10 +246,10 @@ struct ei_solve_retval<HouseholderQR<_MatrixType>, Rhs>
* \sa class HouseholderQR
*/
template<typename Derived>
const HouseholderQR<typename MatrixBase<Derived>::PlainMatrixType>
const HouseholderQR<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::householderQr() const
{
return HouseholderQR<PlainMatrixType>(eval());
return HouseholderQR<PlainObject>(eval());
}



@@ -555,10 +555,10 @@ void SVD<MatrixType>::computeScalingRotation(ScalingType *scaling, RotationType
* \returns the SVD decomposition of \c *this
*/
template<typename Derived>
inline SVD<typename MatrixBase<Derived>::PlainMatrixType>
inline SVD<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::svd() const
{
return SVD<PlainMatrixType>(derived());
return SVD<PlainObject>(derived());
}

#endif // EIGEN_SVD_H

@@ -141,10 +141,10 @@ UpperBidiagonalization<_MatrixType>& UpperBidiagonalization<_MatrixType>::comput
* \sa class Bidiagonalization
*/
template<typename Derived>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainMatrixType>
const UpperBidiagonalization<typename MatrixBase<Derived>::PlainObject>
MatrixBase<Derived>::bidiagonalization() const
{
return UpperBidiagonalization<PlainMatrixType>(eval());
return UpperBidiagonalization<PlainObject>(eval());
}
#endif


@@ -36,7 +36,7 @@
*
*
*/
template<typename Derived> class SparseMatrixBase : public AnyMatrixBase<Derived>
template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
{
public:

@@ -109,7 +109,7 @@ template<typename Derived> class SparseMatrixBase : public AnyMatrixBase<Derived
Transpose<Derived>
>::ret AdjointReturnType;

typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainMatrixType;
typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;

#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
#include "../plugins/CommonCwiseUnaryOps.h"
@@ -396,7 +396,7 @@ template<typename Derived> class SparseMatrixBase : public AnyMatrixBase<Derived
template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
RealScalar squaredNorm() const;
RealScalar norm() const;
// const PlainMatrixType normalized() const;
// const PlainObject normalized() const;
// void normalize();

Transpose<Derived> transpose() { return derived(); }

@@ -93,8 +93,8 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
template<typename DerivedU>
SparseSelfAdjointView& rankUpdate(const MatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));

// const SparseLLT<PlainMatrixType, UpLo> llt() const;
// const SparseLDLT<PlainMatrixType, UpLo> ldlt() const;
// const SparseLLT<PlainObject, UpLo> llt() const;
// const SparseLDLT<PlainObject, UpLo> ldlt() const;

protected:


@@ -40,7 +40,7 @@ struct ei_traits<ei_image_retval_base<DecompositionType> >
MatrixType::Options,
MatrixType::MaxRowsAtCompileTime, // the image matrix will consist of columns from the original matrix,
MatrixType::MaxColsAtCompileTime // so it has the same number of rows and at most as many columns.
> ReturnMatrixType;
> ReturnType;
};

template<typename _DecompositionType> struct ei_image_retval_base

@@ -42,7 +42,7 @@ struct ei_traits<ei_kernel_retval_base<DecompositionType> >
MatrixType::MaxColsAtCompileTime, // see explanation for 2nd template parameter
MatrixType::MaxColsAtCompileTime // the kernel is a subspace of the domain space,
// whose dimension is the number of columns of the original matrix
> ReturnMatrixType;
> ReturnType;
};

template<typename _DecompositionType> struct ei_kernel_retval_base

@@ -35,9 +35,9 @@ struct ei_traits<ei_solve_retval_base<DecompositionType, Rhs> >
typedef Matrix<typename Rhs::Scalar,
MatrixType::ColsAtCompileTime,
Rhs::ColsAtCompileTime,
Rhs::PlainMatrixType::Options,
Rhs::PlainObject::Options,
MatrixType::MaxColsAtCompileTime,
Rhs::MaxColsAtCompileTime> ReturnMatrixType;
Rhs::MaxColsAtCompileTime> ReturnType;
};

template<typename _DecompositionType, typename Rhs> struct ei_solve_retval_base
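The ei_image_retval, ei_kernel_retval and ei_solve_retval traits above now expose their result type as ReturnType. From the user side these return-by-value helpers surface as kernel(), image() and solve() on the decompositions; a hedged example against a present-day Eigen install:

#include <iostream>
#include <Eigen/Dense>

int main()
{
  // A rank-1 matrix: its kernel is 2-dimensional, its image 1-dimensional.
  Eigen::Matrix3d A;
  A << 1, 2, 3,
       2, 4, 6,
       3, 6, 9;
  Eigen::FullPivLU<Eigen::Matrix3d> lu(A);
  // kernel() and image() are return-by-value expressions whose traits carry
  // the ReturnType typedef touched above.
  std::cout << "rank = " << lu.rank() << "\n";                    // 1
  std::cout << "kernel columns = " << lu.kernel().cols() << "\n"; // 2
  std::cout << "image columns  = " << lu.image(A).cols() << "\n"; // 1
  return 0;
}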
@@ -23,24 +23,31 @@
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_BENCH_TIMER_H
#define EIGEN_BENCH_TIMER_H
#ifndef EIGEN_BENCH_TIMERR_H
#define EIGEN_BENCH_TIMERR_H

#if defined(_WIN32) || defined(__CYGWIN__)
#define NOMINMAX
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#endif

#include <cmath>
#include <cstdlib>
#include <numeric>

namespace Eigen
{

enum {
CPU_TIMER = 0,
REAL_TIMER = 1
};

/** Elapsed time timer keeping the best try.
*
* On POSIX platforms we use clock_gettime with CLOCK_PROCESS_CPUTIME_ID.
@@ -52,37 +59,58 @@ class BenchTimer
{
public:

BenchTimer()
{
BenchTimer()
{
#if defined(_WIN32) || defined(__CYGWIN__)
LARGE_INTEGER freq;
QueryPerformanceFrequency(&freq);
m_frequency = (double)freq.QuadPart;
#endif
reset();
reset();
}

~BenchTimer() {}

inline void reset(void) {m_best = 1e6;}
inline void start(void) {m_start = getTime();}
inline void stop(void)
inline void reset()
{
m_best = std::min(m_best, getTime() - m_start);
m_bests.fill(1e9);
m_totals.setZero();
}
inline void start()
{
m_starts[CPU_TIMER] = getCpuTime();
m_starts[REAL_TIMER] = getRealTime();
}
inline void stop()
{
m_times[CPU_TIMER] = getCpuTime() - m_starts[CPU_TIMER];
m_times[REAL_TIMER] = getRealTime() - m_starts[REAL_TIMER];
m_bests = m_bests.cwiseMin(m_times);
m_totals += m_times;
}

/** Return the best elapsed time in seconds.
/** Return the elapsed time in seconds between the last start/stop pair
*/
inline double value(void)
inline double value(int TIMER = CPU_TIMER)
{
return m_best;
return m_times[TIMER];
}

#if defined(_WIN32) || defined(__CYGWIN__)
inline double getTime(void)
#else
static inline double getTime(void)
#endif
/** Return the best elapsed time in seconds
*/
inline double best(int TIMER = CPU_TIMER)
{
return m_bests[TIMER];
}

/** Return the total elapsed time in seconds.
*/
inline double total(int TIMER = CPU_TIMER)
{
return m_totals[TIMER];
}

inline double getCpuTime()
{
#ifdef WIN32
LARGE_INTEGER query_ticks;
@@ -95,14 +123,42 @@ public:
#endif
}

inline double getRealTime()
{
#ifdef WIN32
SYSTEMTIME st;
GetSystemTime(&st);
return (double)st.wSecond + 1.e-3 * (double)st.wMilliseconds;
#else
struct timeval tv;
struct timezone tz;
gettimeofday(&tv, &tz);
return (double)tv.tv_sec + 1.e-6 * (double)tv.tv_usec;
#endif
}

protected:
#if defined(_WIN32) || defined(__CYGWIN__)
double m_frequency;
#endif
double m_best, m_start;
Vector2d m_starts;
Vector2d m_times;
Vector2d m_bests;
Vector2d m_totals;

};

#define BENCH(TIMER,TRIES,REP,CODE) { \
TIMER.reset(); \
for(int uglyvarname1=0; uglyvarname1<TRIES; ++uglyvarname1){ \
TIMER.start(); \
for(int uglyvarname2=0; uglyvarname2<REP; ++uglyvarname2){ \
CODE; \
} \
TIMER.stop(); \
} \
}

}

#endif // EIGEN_BENCH_TIMER_H
#endif // EIGEN_BENCH_TIMERR_H
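With the reworked BenchTimer above, a single timer now tracks both a CPU clock and a real-time clock and keeps the best and total times across tries. A usage sketch of the BENCH macro defined above (the include path is an assumption about the source tree layout):

#include <iostream>
#include <Eigen/Dense>
#include "bench/BenchTimer.h"   // assumed location within the Eigen source tree

int main()
{
  Eigen::BenchTimer t;
  Eigen::MatrixXd a = Eigen::MatrixXd::Random(200, 200), b = a, c(200, 200);

  // 5 tries of 20 repetitions each; the macro records the best and total
  // times for both the CPU and the real-time clocks.
  BENCH(t, 5, 20, c.noalias() = a * b);

  std::cout << "best CPU time:  " << t.best(Eigen::CPU_TIMER)  << " s\n";
  std::cout << "best real time: " << t.best(Eigen::REAL_TIMER) << " s\n";
  return 0;
}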
@@ -2,10 +2,11 @@

# gcc : CXX="g++ -finline-limit=10000 -ftemplate-depth-2000 --param max-inline-recursive-depth=2000"
# icc : CXX="icpc -fast -no-inline-max-size -fno-exceptions"
CXX=${CXX-g++ -finline-limit=10000 -ftemplate-depth-2000 --param max-inline-recursive-depth=2000} # default value

for ((i=1; i<16; ++i)); do
echo "Matrix size: $i x $i :"
$CXX -O3 -I.. -DNDEBUG benchmark.cpp -DMATSIZE=$i -DEIGEN_UNROLLING_LIMIT=1024 -DEIGEN_UNROLLING_LIMIT=400 -o benchmark && time ./benchmark >/dev/null
$CXX -O3 -I.. -DNDEBUG benchmark.cpp -DMATSIZE=$i -DEIGEN_UNROLLING_LIMIT=400 -o benchmark && time ./benchmark >/dev/null
$CXX -O3 -I.. -DNDEBUG -finline-limit=10000 benchmark.cpp -DMATSIZE=$i -DEIGEN_DONT_USE_UNROLLED_LOOPS=1 -o benchmark && time ./benchmark >/dev/null
echo " "
done

@@ -1,4 +1,5 @@
#!/bin/bash
CXX=${CXX-g++} # default value unless caller has defined CXX
echo "Fixed size 3x3, column-major, -DNDEBUG"
$CXX -O3 -I .. -DNDEBUG benchmark.cpp -o benchmark && time ./benchmark >/dev/null
echo "Fixed size 3x3, column-major, with asserts"

@@ -232,14 +232,6 @@ if(CMAKE_COMPILER_IS_GNUCXX)
if(EIGEN_TEST_C++0x)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=gnu++0x")
endif(EIGEN_TEST_C++0x)
if(EIGEN_TEST_MAX_WARNING_LEVEL)
CHECK_CXX_COMPILER_FLAG("-Wno-variadic-macros" FLAG_VARIADIC)
if(FLAG_VARIADIC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wconversion -Wno-variadic-macros")
else(FLAG_VARIADIC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wconversion")
endif(FLAG_VARIADIC)
endif(EIGEN_TEST_MAX_WARNING_LEVEL)
if(CMAKE_SYSTEM_NAME MATCHES Linux)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COVERAGE_FLAGS} -g2")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} ${COVERAGE_FLAGS} -O2 -g2")
@@ -248,9 +240,4 @@ if(CMAKE_COMPILER_IS_GNUCXX)
endif(CMAKE_SYSTEM_NAME MATCHES Linux)
elseif(MSVC)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /D_CRT_SECURE_NO_WARNINGS /D_SCL_SECURE_NO_WARNINGS")
if(EIGEN_TEST_MAX_WARNING_LEVEL)
# C4127 - conditional expression is constant
# C4505 - unreferenced local function has been removed
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4 /wd4127 /wd4505")
endif(EIGEN_TEST_MAX_WARNING_LEVEL)
endif(CMAKE_COMPILER_IS_GNUCXX)

@@ -62,7 +62,7 @@ class BandMatrix : public MultiplierBase<BandMatrix<_Scalar,Supers,Subs,Options>
MaxColsAtCompileTime = ei_traits<BandMatrix>::MaxColsAtCompileTime
};
typedef typename ei_traits<BandMatrix>::Scalar Scalar;
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> PlainMatrixType;
typedef Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> PlainObject;

protected:
enum {
@@ -125,9 +125,9 @@ class BandMatrix : public MultiplierBase<BandMatrix<_Scalar,Supers,Subs,Options>
// inline VectorBlock<DataType,Size> subDiagonal()
// { return VectorBlock<DataType,Size>(m_data,0,m_size.value()); }

PlainMatrixType toDense() const
PlainObject toDense() const
{
PlainMatrixType res(rows(),cols());
PlainObject res(rows(),cols());
res.setZero();
res.diagonal() = diagonal();
for (int i=1; i<=supers();++i)

@@ -51,8 +51,8 @@ template<typename MatrixType> void lu_non_invertible()
cols2 = cols = MatrixType::ColsAtCompileTime;
}

typedef typename ei_kernel_retval_base<FullPivLU<MatrixType> >::ReturnMatrixType KernelMatrixType;
typedef typename ei_image_retval_base<FullPivLU<MatrixType> >::ReturnMatrixType ImageMatrixType;
typedef typename ei_kernel_retval_base<FullPivLU<MatrixType> >::ReturnType KernelMatrixType;
typedef typename ei_image_retval_base<FullPivLU<MatrixType> >::ReturnType ImageMatrixType;
typedef Matrix<typename MatrixType::Scalar, Dynamic, Dynamic> DynamicMatrixType;
typedef Matrix<typename MatrixType::Scalar, MatrixType::ColsAtCompileTime, MatrixType::ColsAtCompileTime>
CMatrixType;

@@ -563,6 +563,8 @@ template<typename DerType> struct NumTraits<AutoDiffScalar<DerType> >
AddCost = 1,
MulCost = 1
};
inline static Real epsilon() { return std::numeric_limits<Real>::epsilon(); }
inline static Real dummy_precision() { return NumTraits<Real>::dummy_precision(); }
};

}

@@ -313,7 +313,7 @@ template<typename Derived> struct MatrixExponentialReturnValue
inline void evalTo(ResultType& result) const
{
const typename ei_eval<Derived>::type srcEvaluated = m_src.eval();
MatrixExponential<typename Derived::PlainMatrixType> me(srcEvaluated);
MatrixExponential<typename Derived::PlainObject> me(srcEvaluated);
me.compute(result);
}

@@ -327,7 +327,7 @@ template<typename Derived> struct MatrixExponentialReturnValue
template<typename Derived>
struct ei_traits<MatrixExponentialReturnValue<Derived> >
{
typedef typename Derived::PlainMatrixType ReturnMatrixType;
typedef typename Derived::PlainObject ReturnType;
};

/** \ingroup MatrixFunctions_Module

@@ -178,9 +178,9 @@ class MatrixFunction<MatrixType, 1>
*
* This is morally a \c static \c const \c Scalar, but only
* integers can be static constant class members in C++. The
* separation constant is set to 0.01, a value taken from the
* separation constant is set to 0.1, a value taken from the
* paper by Davies and Higham. */
static const RealScalar separation() { return static_cast<RealScalar>(0.01); }
static const RealScalar separation() { return static_cast<RealScalar>(0.1); }
};

/** \brief Constructor.
@@ -492,14 +492,12 @@ typename MatrixFunction<MatrixType,1>::DynMatrixType MatrixFunction<MatrixType,1
template<typename Derived> class MatrixFunctionReturnValue
: public ReturnByValue<MatrixFunctionReturnValue<Derived> >
{
private:
public:

typedef typename ei_traits<Derived>::Scalar Scalar;
typedef typename ei_stem_function<Scalar>::type StemFunction;

public:

/** \brief Constructor.
/** \brief Constructor.
*
* \param[in] A %Matrix (expression) forming the argument of the
* matrix function.
@@ -516,7 +514,7 @@ template<typename Derived> class MatrixFunctionReturnValue
inline void evalTo(ResultType& result) const
{
const typename ei_eval<Derived>::type Aevaluated = m_A.eval();
MatrixFunction<typename Derived::PlainMatrixType> mf(Aevaluated, m_f);
MatrixFunction<typename Derived::PlainObject> mf(Aevaluated, m_f);
mf.compute(result);
}

@@ -531,7 +529,7 @@ template<typename Derived> class MatrixFunctionReturnValue
template<typename Derived>
struct ei_traits<MatrixFunctionReturnValue<Derived> >
{
typedef typename Derived::PlainMatrixType ReturnMatrixType;
typedef typename Derived::PlainObject ReturnType;
};



@@ -57,7 +57,7 @@ class HybridNonLinearSolver
{
public:
HybridNonLinearSolver(FunctorType &_functor)
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; }
: functor(_functor) { nfev=njev=iter = 0; fnorm= 0.; useExternalScaling=false;}

struct Parameters {
Parameters()
@@ -84,36 +84,18 @@ public:
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);

HybridNonLinearSolverSpace::Status solveInit(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solveOneStep(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solve(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solveInit(FVectorType &x);
HybridNonLinearSolverSpace::Status solveOneStep(FVectorType &x);
HybridNonLinearSolverSpace::Status solve(FVectorType &x);

HybridNonLinearSolverSpace::Status hybrd1(
FVectorType &x,
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);

HybridNonLinearSolverSpace::Status solveNumericalDiffInit(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solveNumericalDiff(
FVectorType &x,
const int mode=1
);
HybridNonLinearSolverSpace::Status solveNumericalDiffInit(FVectorType &x);
HybridNonLinearSolverSpace::Status solveNumericalDiffOneStep(FVectorType &x);
HybridNonLinearSolverSpace::Status solveNumericalDiff(FVectorType &x);

void resetParameters(void) { parameters = Parameters(); }
Parameters parameters;
@@ -124,6 +106,7 @@ public:
int njev;
int iter;
Scalar fnorm;
bool useExternalScaling;
private:
FunctorType &functor;
int n;
@@ -160,18 +143,13 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrj1(
parameters.maxfev = 100*(n+1);
parameters.xtol = tol;
diag.setConstant(n, 1.);
return solve(
x,
2
);
useExternalScaling = true;
return solve(x);
}

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solveInit(FVectorType &x)
{
n = x.size();

@@ -179,9 +157,9 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
fvec.resize(n);
qtf.resize(n);
fjac.resize(n, n);
if (mode != 2)
if (!useExternalScaling)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");

/* Function Body */
nfev = 0;
@@ -190,7 +168,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(
/* check the input parameters for errors. */
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
if (useExternalScaling)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -214,10 +192,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveInit(

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(FVectorType &x)
{
int j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
@@ -231,10 +206,10 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

wa2 = fjac.colwise().blueNorm();

/* on the first iteration and if mode is 1, scale according */
/* on the first iteration and if external scaling is not used, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
if (!useExternalScaling)
for (j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.) ? 1. : wa2[j];

@@ -260,7 +235,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(
qtf = fjac.transpose() * fvec;

/* rescale if necessary. */
if (mode != 2)
if (!useExternalScaling)
diag = diag.cwiseMax(wa2);

while (true) {
@@ -372,14 +347,11 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveOneStep(

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solve(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solve(FVectorType &x)
{
HybridNonLinearSolverSpace::Status status = solveInit(x, mode);
HybridNonLinearSolverSpace::Status status = solveInit(x);
while (status==HybridNonLinearSolverSpace::Running)
status = solveOneStep(x, mode);
status = solveOneStep(x);
return status;
}

@@ -403,18 +375,13 @@ HybridNonLinearSolver<FunctorType,Scalar>::hybrd1(
parameters.xtol = tol;

diag.setConstant(n, 1.);
return solveNumericalDiff(
x,
2
);
useExternalScaling = true;
return solveNumericalDiff(x);
}

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(FVectorType &x)
{
n = x.size();

@@ -425,10 +392,9 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
qtf.resize(n);
fjac.resize(n, n);
fvec.resize(n);
if (mode != 2)
if (!useExternalScaling)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");

assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");

/* Function Body */
nfev = 0;
@@ -437,7 +403,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(
/* check the input parameters for errors. */
if (n <= 0 || parameters.xtol < 0. || parameters.maxfev <= 0 || parameters.nb_of_subdiagonals< 0 || parameters.nb_of_superdiagonals< 0 || parameters.factor <= 0. )
return HybridNonLinearSolverSpace::ImproperInputParameters;
if (mode == 2)
if (useExternalScaling)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
return HybridNonLinearSolverSpace::ImproperInputParameters;
@@ -461,10 +427,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffInit(

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(FVectorType &x)
{
int j;
std::vector<PlanarRotation<Scalar> > v_givens(n), w_givens(n);
@@ -480,10 +443,10 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

wa2 = fjac.colwise().blueNorm();

/* on the first iteration and if mode is 1, scale according */
/* on the first iteration and if external scaling is not used, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
if (!useExternalScaling)
for (j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.) ? 1. : wa2[j];

@@ -509,7 +472,7 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(
qtf = fjac.transpose() * fvec;

/* rescale if necessary. */
if (mode != 2)
if (!useExternalScaling)
diag = diag.cwiseMax(wa2);

while (true) {
@@ -621,14 +584,11 @@ HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiffOneStep(

template<typename FunctorType, typename Scalar>
HybridNonLinearSolverSpace::Status
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(
FVectorType &x,
const int mode
)
HybridNonLinearSolver<FunctorType,Scalar>::solveNumericalDiff(FVectorType &x)
{
HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x, mode);
HybridNonLinearSolverSpace::Status status = solveNumericalDiffInit(x);
while (status==HybridNonLinearSolverSpace::Running)
status = solveNumericalDiffOneStep(x, mode);
status = solveNumericalDiffOneStep(x);
return status;
}

@@ -61,7 +61,7 @@ class LevenbergMarquardt
{
public:
LevenbergMarquardt(FunctorType &_functor)
: functor(_functor) { nfev = njev = iter = 0; fnorm=gnorm = 0.; }
: functor(_functor) { nfev = njev = iter = 0; fnorm = gnorm = 0.; useExternalScaling=false; }

struct Parameters {
Parameters()
@@ -87,18 +87,9 @@ public:
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);

LevenbergMarquardtSpace::Status minimize(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimizeInit(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimizeOneStep(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimize(FVectorType &x);
LevenbergMarquardtSpace::Status minimizeInit(FVectorType &x);
LevenbergMarquardtSpace::Status minimizeOneStep(FVectorType &x);

static LevenbergMarquardtSpace::Status lmdif1(
FunctorType &functor,
@@ -112,18 +103,9 @@ public:
const Scalar tol = ei_sqrt(NumTraits<Scalar>::epsilon())
);

LevenbergMarquardtSpace::Status minimizeOptimumStorage(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode=1
);
LevenbergMarquardtSpace::Status minimizeOptimumStorage(FVectorType &x);
LevenbergMarquardtSpace::Status minimizeOptimumStorageInit(FVectorType &x);
LevenbergMarquardtSpace::Status minimizeOptimumStorageOneStep(FVectorType &x);

void resetParameters(void) { parameters = Parameters(); }

@@ -135,6 +117,7 @@ public:
int njev;
int iter;
Scalar fnorm, gnorm;
bool useExternalScaling;

Scalar lm_param(void) { return par; }
private:
@@ -175,24 +158,18 @@ LevenbergMarquardt<FunctorType,Scalar>::lmder1(

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimize(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimize(FVectorType &x)
{
LevenbergMarquardtSpace::Status status = minimizeInit(x, mode);
LevenbergMarquardtSpace::Status status = minimizeInit(x);
do {
status = minimizeOneStep(x, mode);
status = minimizeOneStep(x);
} while (status==LevenbergMarquardtSpace::Running);
return status;
}

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(FVectorType &x)
{
n = x.size();
m = functor.values();
@@ -201,9 +178,9 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
wa4.resize(m);
fvec.resize(m);
fjac.resize(m, n);
if (mode != 2)
if (!useExternalScaling)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);

/* Function Body */
@@ -214,7 +191,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;

if (mode == 2)
if (useExternalScaling)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -235,10 +212,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeInit(

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(FVectorType &x)
{
int j;

@@ -257,10 +231,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
fjac = qrfac.matrixQR();
permutation = qrfac.colsPermutation();

/* on the first iteration and if mode is 1, scale according */
/* on the first iteration and if external scaling is not used, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
if (!useExternalScaling)
for (j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.)? 1. : wa2[j];

@@ -290,7 +264,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOneStep(
return LevenbergMarquardtSpace::CosinusTooSmall;

/* rescale if necessary. */
if (mode != 2)
if (!useExternalScaling)
diag = diag.cwiseMax(wa2);

do {
@@ -406,10 +380,7 @@ LevenbergMarquardt<FunctorType,Scalar>::lmstr1(

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(FVectorType &x)
{
n = x.size();
m = functor.values();
@@ -423,9 +394,9 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
// The purpose is to only use an n x n matrix, instead of m x n here, so
// that we can handle cases where m>>n :
fjac.resize(n, n);
if (mode != 2)
if (!useExternalScaling)
diag.resize(n);
assert( (mode!=2 || diag.size()==n) || "When using mode==2, the caller must provide a valid 'diag'");
assert( (!useExternalScaling || diag.size()==n) || "When useExternalScaling is set, the caller must provide a valid 'diag'");
qtf.resize(n);

/* Function Body */
@@ -436,7 +407,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(
if (n <= 0 || m < n || parameters.ftol < 0. || parameters.xtol < 0. || parameters.gtol < 0. || parameters.maxfev <= 0 || parameters.factor <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;

if (mode == 2)
if (useExternalScaling)
for (int j = 0; j < n; ++j)
if (diag[j] <= 0.)
return LevenbergMarquardtSpace::ImproperInputParameters;
@@ -458,10 +429,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageInit(

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(FVectorType &x)
{
int i, j;
bool sing;
@@ -514,10 +482,10 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
}
}

/* on the first iteration and if mode is 1, scale according */
/* on the first iteration and if external scaling is not used, scale according */
/* to the norms of the columns of the initial jacobian. */
if (iter == 1) {
if (mode != 2)
if (!useExternalScaling)
for (j = 0; j < n; ++j)
diag[j] = (wa2[j]==0.)? 1. : wa2[j];

@@ -541,7 +509,7 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(
return LevenbergMarquardtSpace::CosinusTooSmall;

/* rescale if necessary. */
if (mode != 2)
if (!useExternalScaling)
diag = diag.cwiseMax(wa2);

do {
@@ -635,14 +603,11 @@ LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorageOneStep(

template<typename FunctorType, typename Scalar>
LevenbergMarquardtSpace::Status
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(
FVectorType &x,
const int mode
)
LevenbergMarquardt<FunctorType,Scalar>::minimizeOptimumStorage(FVectorType &x)
{
LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x, mode);
LevenbergMarquardtSpace::Status status = minimizeOptimumStorageInit(x);
do {
status = minimizeOptimumStorageOneStep(x, mode);
status = minimizeOptimumStorageOneStep(x);
} while (status==LevenbergMarquardtSpace::Running);
return status;
}

@@ -36,7 +36,7 @@
* \param Derived
*
*/
template<typename Derived> class SkylineMatrixBase : public AnyMatrixBase<Derived> {
template<typename Derived> class SkylineMatrixBase : public EigenBase<Derived> {
public:

typedef typename ei_traits<Derived>::Scalar Scalar;

@@ -317,7 +317,8 @@ void testHybrj()
hybrj_functor functor;
HybridNonLinearSolver<hybrj_functor> solver(functor);
solver.diag.setConstant(n, 1.);
info = solver.solve(x, 2);
solver.useExternalScaling = true;
info = solver.solve(x);

// check return value
VERIFY( 1 == info);
@@ -401,7 +402,8 @@ void testHybrd()
solver.parameters.nb_of_subdiagonals = 1;
solver.parameters.nb_of_superdiagonals = 1;
solver.diag.setConstant(n, 1.);
info = solver.solveNumericalDiff(x, 2);
solver.useExternalScaling = true;
info = solver.solveNumericalDiff(x);

// check return value
VERIFY( 1 == info);

@@ -57,7 +57,7 @@ void test2dRotation(double tol)
angle = static_cast<T>(pow(10, i / 5. - 2));
B << cos(angle), sin(angle), -sin(angle), cos(angle);

ei_matrix_function(angle*A, expfn, &C);
C = ei_matrix_function(angle*A, expfn);
std::cout << "test2dRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));

@@ -82,7 +82,7 @@ void test2dHyperbolicRotation(double tol)
A << 0, angle*imagUnit, -angle*imagUnit, 0;
B << ch, sh*imagUnit, -sh*imagUnit, ch;

ei_matrix_function(A, expfn, &C);
C = ei_matrix_function(A, expfn);
std::cout << "test2dHyperbolicRotation: i = " << i << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));

@@ -106,7 +106,7 @@ void testPascal(double tol)
for (int j=0; j<=i; j++)
B(i,j) = static_cast<T>(binom(i,j));

ei_matrix_function(A, expfn, &C);
C = ei_matrix_function(A, expfn);
std::cout << "testPascal: size = " << size << " error funm = " << relerr(C, B);
VERIFY(C.isApprox(B, static_cast<T>(tol)));

@@ -132,10 +132,9 @@ void randomTest(const MatrixType& m, double tol)
for(int i = 0; i < g_repeat; i++) {
m1 = MatrixType::Random(rows, cols);

ei_matrix_function(m1, expfn, &m2);
ei_matrix_function(-m1, expfn, &m3);
m2 = ei_matrix_function(m1, expfn) * ei_matrix_function(-m1, expfn);
std::cout << "randomTest: error funm = " << relerr(identity, m2 * m3);
VERIFY(identity.isApprox(m2 * m3, static_cast<RealScalar>(tol)));
VERIFY(identity.isApprox(m2, static_cast<RealScalar>(tol)));

m2 = ei_matrix_exponential(m1) * ei_matrix_exponential(-m1);
std::cout << " error expm = " << relerr(identity, m2) << "\n";
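The tests above now exercise ei_matrix_function through its return value instead of an output argument, checking identities such as sinh(A) = (exp(A) - exp(-A))/2. The same identity can be checked with the present-day MatrixFunctions module API, which is what this sketch assumes (member calls A.exp() and A.sinh() rather than the ei_* free functions touched in this patch):

#include <iostream>
#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>   // assumed install path of the module

int main()
{
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  Eigen::Matrix3d expA  = A.exp();       // matrix exponential, returned by value
  Eigen::Matrix3d expmA = (-A).exp();
  Eigen::Matrix3d lhs = A.sinh();        // matrix hyperbolic sine
  Eigen::Matrix3d rhs = 0.5 * (expA - expmA);
  std::cout << "max |sinh(A) - (e^A - e^-A)/2| = "
            << (lhs - rhs).cwiseAbs().maxCoeff() << "\n";
  return 0;
}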
@@ -109,11 +109,10 @@ template<typename MatrixType>
void testHyperbolicFunctions(const MatrixType& A)
{
for (int i = 0; i < g_repeat; i++) {
MatrixType sinhA = ei_matrix_sinh(A);
MatrixType coshA = ei_matrix_cosh(A);
MatrixType expA = ei_matrix_exponential(A);
VERIFY_IS_APPROX(sinhA, (expA - expA.inverse())/2);
VERIFY_IS_APPROX(coshA, (expA + expA.inverse())/2);
MatrixType expmA = ei_matrix_exponential(-A);
VERIFY_IS_APPROX(ei_matrix_sinh(A), (expA - expmA) / 2);
VERIFY_IS_APPROX(ei_matrix_cosh(A), (expA + expmA) / 2);
}
}

@@ -134,14 +133,15 @@ void testGonioFunctions(const MatrixType& A)
ComplexMatrix Ac = A.template cast<ComplexScalar>();

ComplexMatrix exp_iA = ei_matrix_exponential(imagUnit * Ac);
ComplexMatrix exp_miA = ei_matrix_exponential(-imagUnit * Ac);

MatrixType sinA = ei_matrix_sin(A);
ComplexMatrix sinAc = sinA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(sinAc, (exp_iA - exp_iA.inverse()) / (two*imagUnit));
VERIFY_IS_APPROX(sinAc, (exp_iA - exp_miA) / (two*imagUnit));

MatrixType cosA = ei_matrix_cos(A);
ComplexMatrix cosAc = cosA.template cast<ComplexScalar>();
VERIFY_IS_APPROX(cosAc, (exp_iA + exp_iA.inverse()) / 2);
VERIFY_IS_APPROX(cosAc, (exp_iA + exp_miA) / 2);
}
}