Mirror of https://gitlab.com/libeigen/eigen.git, synced 2025-08-12 11:49:02 +08:00

Commit 4304c73542: Pulled latest updates from the Eigen main trunk.
@@ -449,12 +449,12 @@ set ( EIGEN_INCLUDE_DIR ${INCLUDE_INSTALL_DIR} )
set ( EIGEN_INCLUDE_DIRS ${EIGEN_INCLUDE_DIR} )
set ( EIGEN_ROOT_DIR ${CMAKE_INSTALL_PREFIX} )

configure_file ( ${CMAKE_SOURCE_DIR}/cmake/Eigen3Config.cmake.in
configure_file ( ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Eigen3Config.cmake.in
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
@ONLY ESCAPE_QUOTES
)

install ( FILES ${CMAKE_SOURCE_DIR}/cmake/UseEigen3.cmake
install ( FILES ${CMAKE_CURRENT_SOURCE_DIR}/cmake/UseEigen3.cmake
${CMAKE_CURRENT_BINARY_DIR}/Eigen3Config.cmake
DESTINATION ${EIGEN_CONFIG_CMAKE_PATH}
)

@@ -84,7 +84,7 @@ struct traits<Block<XprType, BlockRows, BlockCols, InnerPanel> > : traits<XprTyp
&& (InnerStrideAtCompileTime == 1)
? PacketAccessBit : 0,
MaskAlignedBit = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic) && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % EIGEN_ALIGN_BYTES) == 0)) ? AlignedBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1) ? LinearAccessBit : 0,
FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (traits<XprType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
FlagsLvalueBit = is_lvalue<XprType>::value ? LvalueBit : 0,
FlagsRowMajorBit = IsRowMajor ? RowMajorBit : 0,
Flags0 = traits<XprType>::Flags & ( (HereditaryBits & ~RowMajorBit) |

@@ -250,6 +250,8 @@ template<typename Derived> class MapBase<Derived, WriteAccessors>
using Base::Base::operator=;
};

#undef EIGEN_STATIC_ASSERT_INDEX_BASED_ACCESS

} // end namespace Eigen

#endif // EIGEN_MAPBASE_H

@@ -1,4 +1,5 @@
ADD_SUBDIRECTORY(SSE)
ADD_SUBDIRECTORY(AltiVec)
ADD_SUBDIRECTORY(NEON)
ADD_SUBDIRECTORY(AVX)
ADD_SUBDIRECTORY(Default)

@@ -10,12 +10,6 @@
#ifndef EIGEN_GENERAL_BLOCK_PANEL_H
#define EIGEN_GENERAL_BLOCK_PANEL_H

#ifdef USE_IACA
#include "iacaMarks.h"
#else
#define IACA_START
#define IACA_END
#endif

namespace Eigen {

@@ -805,7 +799,6 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,mr,nr,ConjugateLhs,ConjugateRhs>

blB += pk*4*RhsProgress;
blA += pk*3*Traits::LhsProgress;
IACA_END
}
// process remaining peeled loop
for(Index k=peeled_kc; k<depth; k++)

@@ -263,7 +263,7 @@ EIGEN_DONT_INLINE void product_triangular_matrix_matrix<Scalar,Index,Mode,false,
Index mc = (std::min)(rows,blocking.mc()); // cache block size along the M direction

std::size_t sizeA = kc*mc;
std::size_t sizeB = kc*cols;
std::size_t sizeB = kc*cols+EIGEN_ALIGN_BYTES/sizeof(Scalar);

ei_declare_aligned_stack_constructed_variable(Scalar, blockA, sizeA, blocking.blockA());
ei_declare_aligned_stack_constructed_variable(Scalar, blockB, sizeB, blocking.blockB());

@@ -348,13 +348,13 @@ namespace Eigen {
#elif defined(__clang__) // workaround clang bug (see http://forum.kde.org/viewtopic.php?f=74&t=102653)
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) { Base::operator=(other); return *this; } \
template <typename OtherDerived> \
EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const DenseBase<OtherDerived>& other) { Base::operator=(other.derived()); return *this; }
#else
#define EIGEN_INHERIT_ASSIGNMENT_EQUAL_OPERATOR(Derived) \
using Base::operator =; \
EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Derived& operator=(const Derived& other) \
{ \
Base::operator=(other); \
return *this; \

@@ -767,9 +767,9 @@ namespace internal {

#ifdef EIGEN_CPUID

inline bool cpuid_is_vendor(int abcd[4], const char* vendor)
inline bool cpuid_is_vendor(int abcd[4], const int vendor[3])
{
return abcd[1]==(reinterpret_cast<const int*>(vendor))[0] && abcd[3]==(reinterpret_cast<const int*>(vendor))[1] && abcd[2]==(reinterpret_cast<const int*>(vendor))[2];
return abcd[1]==vendor[0] && abcd[3]==vendor[1] && abcd[2]==vendor[2];
}

inline void queryCacheSizes_intel_direct(int& l1, int& l2, int& l3)

@@ -911,13 +911,16 @@ inline void queryCacheSizes(int& l1, int& l2, int& l3)
{
#ifdef EIGEN_CPUID
int abcd[4];
const int GenuineIntel[] = {0x756e6547, 0x49656e69, 0x6c65746e};
const int AuthenticAMD[] = {0x68747541, 0x69746e65, 0x444d4163};
const int AMDisbetter_[] = {0x69444d41, 0x74656273, 0x21726574}; // "AMDisbetter!"

// identify the CPU vendor
EIGEN_CPUID(abcd,0x0,0);
int max_std_funcs = abcd[1];
if(cpuid_is_vendor(abcd,"GenuineIntel"))
if(cpuid_is_vendor(abcd,GenuineIntel))
queryCacheSizes_intel(l1,l2,l3,max_std_funcs);
else if(cpuid_is_vendor(abcd,"AuthenticAMD") || cpuid_is_vendor(abcd,"AMDisbetter!"))
else if(cpuid_is_vendor(abcd,AuthenticAMD) || cpuid_is_vendor(abcd,AMDisbetter_))
queryCacheSizes_amd(l1,l2,l3);
else
// by default let's use Intel's API
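Note on the hunk above (a standalone sketch, not part of the commit): the new integer arrays are simply the 12-character CPUID vendor strings packed into the EBX, EDX and ECX registers, four little-endian bytes per register. A minimal C++ check, assuming a little-endian machine:

// Standalone sketch, not part of the commit: shows how "GenuineIntel"
// maps onto the three integers used in the diff above.
#include <cstdio>
#include <cstring>

int main()
{
  const char vendor[12] = {'G','e','n','u','i','n','e','I','n','t','e','l'};
  int packed[3];
  std::memcpy(packed, vendor, sizeof packed);
  // On a little-endian CPU this prints 0x756e6547 0x49656e69 0x6c65746e,
  // matching the GenuineIntel[] initializer in the diff.
  std::printf("%#x %#x %#x\n", packed[0], packed[1], packed[2]);
  return 0;
}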
@@ -587,7 +587,7 @@ inline Derived& QuaternionBase<Derived>::setFromTwoVectors(const MatrixBase<Deri
// which yields a singular value problem
if (c < Scalar(-1)+NumTraits<Scalar>::dummy_precision())
{
c = max<Scalar>(c,-1);
c = (max)(c,Scalar(-1));
Matrix<Scalar,2,3> m; m << v0.transpose(), v1.transpose();
JacobiSVD<Matrix<Scalar,2,3> > svd(m, ComputeFullV);
Vector3 axis = svd.matrixV().col(2);

@@ -194,9 +194,9 @@ public:
/** type of the matrix used to represent the linear part of the transformation */
typedef Matrix<Scalar,Dim,Dim,Options> LinearMatrixType;
/** type of read/write reference to the linear part of the transformation */
typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact)> LinearPart;
typedef Block<MatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> LinearPart;
/** type of read reference to the linear part of the transformation */
typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact)> ConstLinearPart;
typedef const Block<ConstMatrixType,Dim,Dim,int(Mode)==(AffineCompact) && (Options&RowMajor)==0> ConstLinearPart;
/** type of read/write reference to the affine part of the transformation */
typedef typename internal::conditional<int(Mode)==int(AffineCompact),
MatrixType&,

@@ -113,7 +113,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
const Index n = src.cols(); // number of measurements

// required for demeaning ...
const RealScalar one_over_n = 1 / static_cast<RealScalar>(n);
const RealScalar one_over_n = RealScalar(1) / static_cast<RealScalar>(n);

// computation of mean
const VectorType src_mean = src.rowwise().sum() * one_over_n;

@@ -136,16 +136,16 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo

// Eq. (39)
VectorType S = VectorType::Ones(m);
if (sigma.determinant()<0) S(m-1) = -1;
if (sigma.determinant()<Scalar(0)) S(m-1) = Scalar(-1);

// Eq. (40) and (43)
const VectorType& d = svd.singularValues();
Index rank = 0; for (Index i=0; i<m; ++i) if (!internal::isMuchSmallerThan(d.coeff(i),d.coeff(0))) ++rank;
if (rank == m-1) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > 0 ) {
if ( svd.matrixU().determinant() * svd.matrixV().determinant() > Scalar(0) ) {
Rt.block(0,0,m,m).noalias() = svd.matrixU()*svd.matrixV().transpose();
} else {
const Scalar s = S(m-1); S(m-1) = -1;
const Scalar s = S(m-1); S(m-1) = Scalar(-1);
Rt.block(0,0,m,m).noalias() = svd.matrixU() * S.asDiagonal() * svd.matrixV().transpose();
S(m-1) = s;
}

@@ -156,7 +156,7 @@ umeyama(const MatrixBase<Derived>& src, const MatrixBase<OtherDerived>& dst, boo
if (with_scaling)
{
// Eq. (42)
const Scalar c = 1/src_var * svd.singularValues().dot(S);
const Scalar c = Scalar(1)/src_var * svd.singularValues().dot(S);

// Eq. (41)
Rt.col(m).head(m) = dst_mean;

@@ -20,10 +20,11 @@ namespace Eigen {
*
* \param MatrixType the type of the matrix of which we are computing the LU decomposition
*
* This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A
* is decomposed as A = PLUQ where L is unit-lower-triangular, U is upper-triangular, and P and Q
* are permutation matrices. This is a rank-revealing LU decomposition. The eigenvalues (diagonal
* coefficients) of U are sorted in such a way that any zeros are at the end.
* This class represents a LU decomposition of any matrix, with complete pivoting: the matrix A is
* decomposed as \f$ A = P^{-1} L U Q^{-1} \f$ where L is unit-lower-triangular, U is
* upper-triangular, and P and Q are permutation matrices. This is a rank-revealing LU
* decomposition. The eigenvalues (diagonal coefficients) of U are sorted in such a way that any
* zeros are at the end.
*
* This decomposition provides the generic approach to solving systems of linear equations, computing
* the rank, invertibility, inverse, kernel, and determinant.

@@ -511,8 +512,8 @@ typename internal::traits<MatrixType>::Scalar FullPivLU<MatrixType>::determinant
}

/** \returns the matrix represented by the decomposition,
* i.e., it returns the product: P^{-1} L U Q^{-1}.
* This function is provided for debug purpose. */
* i.e., it returns the product: \f$ P^{-1} L U Q^{-1} \f$.
* This function is provided for debug purposes. */
template<typename MatrixType>
MatrixType FullPivLU<MatrixType>::reconstructedMatrix() const
{
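For readers of the updated FullPivLU documentation above, a short usage sketch (not part of the commit) that verifies the factorization A = P^{-1} L U Q^{-1} through the debug helper reconstructedMatrix():

// Sketch, not part of the commit: check that the full-pivoting LU
// factorization reproduces the input matrix up to rounding error.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5, 5);
  Eigen::FullPivLU<Eigen::MatrixXd> lu(A);
  // reconstructedMatrix() returns P^{-1} L U Q^{-1}, as documented above.
  std::cout << (lu.reconstructedMatrix() - A).norm() << "\n"; // ~1e-15
  return 0;
}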
@@ -1139,7 +1139,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
m_data.value(p) = m_data.value(p-1);
--p;
}
eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

m_innerNonZeros[outer]++;

@@ -11,7 +11,7 @@
#ifndef EIGEN_STDDEQUE_H
#define EIGEN_STDDEQUE_H

#include "Eigen/src/StlSupport/details.h"
#include "details.h"

// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)

@@ -10,7 +10,7 @@
#ifndef EIGEN_STDLIST_H
#define EIGEN_STDLIST_H

#include "Eigen/src/StlSupport/details.h"
#include "details.h"

// Define the explicit instantiation (e.g. necessary for the Intel compiler)
#if defined(__INTEL_COMPILER) || defined(__GNUC__)

@@ -11,7 +11,7 @@
#ifndef EIGEN_STDVECTOR_H
#define EIGEN_STDVECTOR_H

#include "Eigen/src/StlSupport/details.h"
#include "details.h"

/**
* This section contains a convenience MACRO which allows an easy specialization of

README.md (new file, 3 lines added)
@@ -0,0 +1,3 @@
**Eigen is a C++ template library for linear algebra: matrices, vectors, numerical solvers, and related algorithms.**

For more information go to http://eigen.tuxfamily.org/.

bench/dense_solvers.cpp (new file, 76 lines added)
@@ -0,0 +1,76 @@
#include <iostream>
#include "BenchTimer.h"
#include <Eigen/Dense>
#include <map>
#include <string>
using namespace Eigen;

std::map<std::string,Array<float,1,4> > results;

template<typename Scalar,int Size>
void bench(int id, int size = Size)
{
typedef Matrix<Scalar,Size,Size> Mat;
Mat A(size,size);
A.setRandom();
A = A*A.adjoint();
BenchTimer t_llt, t_ldlt, t_lu, t_fplu, t_qr, t_cpqr, t_fpqr, t_jsvd;

int tries = 3;
int rep = 1000/size;
if(rep==0) rep = 1;
rep = rep*rep;

LLT<Mat> llt(A);
LDLT<Mat> ldlt(A);
PartialPivLU<Mat> lu(A);
FullPivLU<Mat> fplu(A);
HouseholderQR<Mat> qr(A);
ColPivHouseholderQR<Mat> cpqr(A);
FullPivHouseholderQR<Mat> fpqr(A);
JacobiSVD<Mat> jsvd(A.rows(),A.cols());

BENCH(t_llt, tries, rep, llt.compute(A));
BENCH(t_ldlt, tries, rep, ldlt.compute(A));
BENCH(t_lu, tries, rep, lu.compute(A));
BENCH(t_fplu, tries, rep, fplu.compute(A));
BENCH(t_qr, tries, rep, qr.compute(A));
BENCH(t_cpqr, tries, rep, cpqr.compute(A));
BENCH(t_fpqr, tries, rep, fpqr.compute(A));
if(size<500) // JacobiSVD is really too slow for too large matrices
BENCH(t_jsvd, tries, rep, jsvd.compute(A,ComputeFullU|ComputeFullV));

results["LLT"][id] = t_llt.best();
results["LDLT"][id] = t_ldlt.best();
results["PartialPivLU"][id] = t_lu.best();
results["FullPivLU"][id] = t_fplu.best();
results["HouseholderQR"][id] = t_qr.best();
results["ColPivHouseholderQR"][id] = t_cpqr.best();
results["FullPivHouseholderQR"][id] = t_fpqr.best();
results["JacobiSVD"][id] = size<500 ? t_jsvd.best() : 0;
}

int main()
{
const int small = 8;
const int medium = 100;
const int large = 1000;
const int xl = 4000;

bench<float,small>(0);
bench<float,Dynamic>(1,medium);
bench<float,Dynamic>(2,large);
bench<float,Dynamic>(3,xl);

IOFormat fmt(3, 0, " \t", "\n", "", "");

std::cout << "solver/size " << small << "\t" << medium << "\t" << large << "\t" << xl << "\n";
std::cout << "LLT (ms) " << (results["LLT"]/1000.).format(fmt) << "\n";
std::cout << "LDLT (%) " << (results["LDLT"]/results["LLT"]).format(fmt) << "\n";
std::cout << "PartialPivLU (%) " << (results["PartialPivLU"]/results["LLT"]).format(fmt) << "\n";
std::cout << "FullPivLU (%) " << (results["FullPivLU"]/results["LLT"]).format(fmt) << "\n";
std::cout << "HouseholderQR (%) " << (results["HouseholderQR"]/results["LLT"]).format(fmt) << "\n";
std::cout << "ColPivHouseholderQR (%) " << (results["ColPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n";
std::cout << "FullPivHouseholderQR (%) " << (results["FullPivHouseholderQR"]/results["LLT"]).format(fmt) << "\n";
std::cout << "JacobiSVD (%) " << (results["JacobiSVD"]/results["LLT"]).format(fmt) << "\n";
}

@@ -1,9 +1,6 @@

This directory contains a BLAS library built on top of Eigen.

This is currently a work in progress which is far to be ready for use,
but feel free to contribute to it if you wish.

This module is not built by default. In order to compile it, you need to
type 'make blas' from within your build dir.

@@ -49,7 +49,7 @@ else()
set(EIGEN_MAKECOMMAND_PLACEHOLDER "${EIGEN_BUILD_COMMAND}")
endif()

configure_file(${CMAKE_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl)
configure_file(${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl ${CMAKE_BINARY_DIR}/DartConfiguration.tcl)

# restore default CMAKE_MAKE_PROGRAM
set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE})

@@ -58,7 +58,7 @@ set(CMAKE_MAKE_PROGRAM ${CMAKE_MAKE_PROGRAM_SAVE})
unset(CMAKE_MAKE_PROGRAM_SAVE)
unset(EIGEN_MAKECOMMAND_PLACEHOLDER)

configure_file(${CMAKE_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake)
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake.in ${CMAKE_BINARY_DIR}/CTestCustom.cmake)

# some documentation of this function would be nice
ei_init_testing()

@@ -49,7 +49,7 @@ class EigenMatrixPrinter:
regex = re.compile('\<.*\>')
m = regex.findall(tag)[0][1:-1]
template_params = m.split(',')
template_params = map(lambda x:x.replace(" ", ""), template_params)
template_params = [x.replace(" ", "") for x in template_params]

if template_params[1] == '-0x00000000000000001' or template_params[1] == '-0x000000001' or template_params[1] == '-1':
self.rows = val['m_storage']['m_rows']

@@ -88,8 +88,11 @@ class EigenMatrixPrinter:

def __iter__ (self):
return self

def next(self):
return self.__next__() # Python 2.x compatibility

def __next__(self):

row = self.currentRow
col = self.currentCol

@@ -151,8 +154,11 @@ class EigenQuaternionPrinter:

def __iter__ (self):
return self

def next(self):
return self.__next__() # Python 2.x compatibility

def __next__(self):
element = self.currentElement

if self.currentElement >= 4: #there are 4 elements in a quanternion

@@ -41,8 +41,8 @@ MatrixXd::Ones(rows,cols) // ones(rows,cols)
C.setOnes(rows,cols) // C = ones(rows,cols)
MatrixXd::Random(rows,cols) // rand(rows,cols)*2-1 // MatrixXd::Random returns uniform random numbers in (-1, 1).
C.setRandom(rows,cols) // C = rand(rows,cols)*2-1
VectorXd::LinSpace(size,low,high) // linspace(low,high,size)'
v.setLinSpace(size,low,high) // v = linspace(low,high,size)'
VectorXd::LinSpaced(size,low,high) // linspace(low,high,size)'
v.setLinSpaced(size,low,high) // v = linspace(low,high,size)'

// Matrix slicing and blocks. All expressions listed here are read/write.
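Quick check of the corrected cheat-sheet entries above (a short sketch, not part of the commit): LinSpaced and setLinSpaced are the actual Eigen spellings of Matlab's linspace:

// Sketch, not part of the commit: the LinSpaced API fixed in the hunk above.
#include <Eigen/Dense>
#include <iostream>

int main()
{
  Eigen::VectorXd v = Eigen::VectorXd::LinSpaced(5, 0.0, 1.0); // 0 0.25 0.5 0.75 1
  v.setLinSpaced(3, -1.0, 1.0);                                // -1 0 1
  std::cout << v.transpose() << "\n";
  return 0;
}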
@@ -168,6 +168,8 @@ x.cross(y) // cross(x, y) Requires #include <Eigen/Geometry>
A.cast<double>(); // double(A)
A.cast<float>(); // single(A)
A.cast<int>(); // int32(A)
A.real(); // real(A)
A.imag(); // imag(A)
// if the original type equals destination type, no work is done

// Note that for most operations Eigen requires all operands to have the same type:

@@ -5,6 +5,11 @@ add_dependencies(buildtests eigen2_buildtests)

add_definitions("-DEIGEN2_SUPPORT_STAGE10_FULL_EIGEN2_API")

# Disable unused warnings for this module
# As EIGEN2 support is deprecated, it is not really worth fixing them
ei_add_cxx_compiler_flag("-Wno-unused-local-typedefs")
ei_add_cxx_compiler_flag("-Wno-unused-but-set-variable")

ei_add_test(eigen2_meta)
ei_add_test(eigen2_sizeof)
ei_add_test(eigen2_dynalloc)

@@ -114,10 +114,9 @@ void test_eigensolver_generic()
CALL_SUBTEST_2(
{
MatrixXd A(1,1);
A(0,0) = std::sqrt(-1.);
A(0,0) = std::sqrt(-1.); // is Not-a-Number
Eigen::EigenSolver<MatrixXd> solver(A);
MatrixXd V(1, 1);
V(0,0) = solver.eigenvectors()(0,0).real();
VERIFY_IS_EQUAL(solver.info(), NumericalIssue);
}
);

@@ -186,7 +186,7 @@ template<typename Scalar> void packetmath()
{
for (int i=0; i<PacketSize*2; ++i)
ref[i] = data1[i/PacketSize];
Packet A0, A1, A2, A3;
Packet A0, A1;
internal::pbroadcast2<Packet>(data1, A0, A1);
internal::pstore(data2+0*PacketSize, A0);
internal::pstore(data2+1*PacketSize, A1);

@@ -10,7 +10,7 @@
#ifndef EIGEN_CXX11_TENSORSYMMETRY_MODULE
#define EIGEN_CXX11_TENSORSYMMETRY_MODULE

#include <Eigen/CXX11/Tensor>
#include <unsupported/Eigen/CXX11/Tensor>

#include <Eigen/src/Core/util/DisableStupidWarnings.h>

@@ -42,14 +42,14 @@ struct numeric_list<T, n, nn...> { constexpr static std::size_t count = sizeof..
* typename gen_numeric_list_repeated<int, 0, 5>::type numeric_list<int, 0,0,0,0,0>
*/

template<typename T, std::size_t n, T... ii> struct gen_numeric_list : gen_numeric_list<T, n-1, n-1, ii...> {};
template<typename T, T... ii> struct gen_numeric_list<T, 0, ii...> { typedef numeric_list<T, ii...> type; };
template<typename T, std::size_t n, T start = 0, T... ii> struct gen_numeric_list : gen_numeric_list<T, n-1, start, start + n-1, ii...> {};
template<typename T, T start, T... ii> struct gen_numeric_list<T, 0, start, ii...> { typedef numeric_list<T, ii...> type; };

template<typename T, std::size_t n, T... ii> struct gen_numeric_list_reversed : gen_numeric_list_reversed<T, n-1, ii..., n-1> {};
template<typename T, T... ii> struct gen_numeric_list_reversed<T, 0, ii...> { typedef numeric_list<T, ii...> type; };
template<typename T, std::size_t n, T start = 0, T... ii> struct gen_numeric_list_reversed : gen_numeric_list_reversed<T, n-1, start, ii..., start + n-1> {};
template<typename T, T start, T... ii> struct gen_numeric_list_reversed<T, 0, start, ii...> { typedef numeric_list<T, ii...> type; };

template<typename T, std::size_t n, T a, T b, T... ii> struct gen_numeric_list_swapped_pair : gen_numeric_list_swapped_pair<T, n-1, a, b, (n-1) == a ? b : ((n-1) == b ? a : (n-1)), ii...> {};
template<typename T, T a, T b, T... ii> struct gen_numeric_list_swapped_pair<T, 0, a, b, ii...> { typedef numeric_list<T, ii...> type; };
template<typename T, std::size_t n, T a, T b, T start = 0, T... ii> struct gen_numeric_list_swapped_pair : gen_numeric_list_swapped_pair<T, n-1, a, b, start, (start + n-1) == a ? b : ((start + n-1) == b ? a : (start + n-1)), ii...> {};
template<typename T, T a, T b, T start, T... ii> struct gen_numeric_list_swapped_pair<T, 0, a, b, start, ii...> { typedef numeric_list<T, ii...> type; };

template<typename T, std::size_t n, T V, T... nn> struct gen_numeric_list_repeated : gen_numeric_list_repeated<T, n-1, V, V, nn...> {};
template<typename T, T V, T... nn> struct gen_numeric_list_repeated<T, 0, V, nn...> { typedef numeric_list<T, nn...> type; };

@@ -48,13 +48,15 @@ namespace internal {
* - libstdc++ from version 4.7 onwards has it nevertheless,
* so use that
* - libstdc++ older versions: use _M_instance directly
* - libc++ all versions so far: use __elems_ directly
* - libc++ from version 3.4 onwards has it IF compiled with
* -std=c++1y
* - libc++ older versions or -std=c++11: use __elems_ directly
* - all other libs: use std::get to be portable, but
* this may not be constexpr
*/
#if defined(__GLIBCXX__) && __GLIBCXX__ < 20120322
#define STD_GET_ARR_HACK a._M_instance[I]
#elif defined(_LIBCPP_VERSION)
#elif defined(_LIBCPP_VERSION) && (!defined(_LIBCPP_STD_VER) || _LIBCPP_STD_VER <= 11)
#define STD_GET_ARR_HACK a.__elems_[I]
#else
#define STD_GET_ARR_HACK std::template get<I, T, N>(a)

@@ -58,13 +58,6 @@ namespace Eigen {
* \ref TopicStorageOrders
*/

namespace internal {

/* Forward-declaration required for the symmetry support. */
template<typename Tensor_, typename Symmetry_, int Flags = 0> class tensor_symmetry_value_setter;

} // end namespace internal

template<typename Scalar_, std::size_t NumIndices_, int Options_>
class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
{
@@ -274,20 +267,6 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_> >
#endif
}

#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
template<typename Symmetry_, typename... IndexTypes>
internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, Index firstIndex, IndexTypes... otherIndices)
{
return symCoeff(symmetry, array<Index, NumIndices>{{firstIndex, otherIndices...}});
}

template<typename Symmetry_, typename... IndexTypes>
internal::tensor_symmetry_value_setter<Self, Symmetry_> symCoeff(const Symmetry_& symmetry, array<Index, NumIndices> const& indices)
{
return internal::tensor_symmetry_value_setter<Self, Symmetry_>(*this, symmetry, indices);
}
#endif

protected:
bool checkIndexRange(const array<Index, NumIndices>& indices) const
{

@@ -15,7 +15,7 @@ namespace Eigen {
class DynamicSGroup
{
public:
inline explicit DynamicSGroup(std::size_t numIndices) : m_numIndices(numIndices), m_elements(), m_generators(), m_globalFlags(0) { m_elements.push_back(ge(Generator(0, 0, 0))); }
inline explicit DynamicSGroup() : m_numIndices(1), m_elements(), m_generators(), m_globalFlags(0) { m_elements.push_back(ge(Generator(0, 0, 0))); }
inline DynamicSGroup(const DynamicSGroup& o) : m_numIndices(o.m_numIndices), m_elements(o.m_elements), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { }
inline DynamicSGroup(DynamicSGroup&& o) : m_numIndices(o.m_numIndices), m_elements(), m_generators(o.m_generators), m_globalFlags(o.m_globalFlags) { std::swap(m_elements, o.m_elements); }
inline DynamicSGroup& operator=(const DynamicSGroup& o) { m_numIndices = o.m_numIndices; m_elements = o.m_elements; m_generators = o.m_generators; m_globalFlags = o.m_globalFlags; return *this; }

@@ -33,7 +33,7 @@ class DynamicSGroup
template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
inline RV apply(const std::array<Index, N>& idx, RV initial, Args&&... args) const
{
eigen_assert(N == m_numIndices);
eigen_assert(N >= m_numIndices && "Can only apply symmetry group to objects that have at least the required amount of indices.");
for (std::size_t i = 0; i < size(); i++)
initial = Op::run(h_permute(i, idx, typename internal::gen_numeric_list<int, N>::type()), m_elements[i].flags, initial, std::forward<Args>(args)...);
return initial;

@@ -42,7 +42,7 @@ class DynamicSGroup
template<typename Op, typename RV, typename Index, typename... Args>
inline RV apply(const std::vector<Index>& idx, RV initial, Args&&... args) const
{
eigen_assert(idx.size() == m_numIndices);
eigen_assert(idx.size() >= m_numIndices && "Can only apply symmetry group to objects that have at least the required amount of indices.");
for (std::size_t i = 0; i < size(); i++)
initial = Op::run(h_permute(i, idx), m_elements[i].flags, initial, std::forward<Args>(args)...);
return initial;

@@ -50,6 +50,19 @@ class DynamicSGroup

inline int globalFlags() const { return m_globalFlags; }
inline std::size_t size() const { return m_elements.size(); }

template<typename Tensor_, typename... IndexTypes>
inline internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup> operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... otherIndices) const
{
static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
return operator()(tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices>{{firstIndex, otherIndices...}});
}

template<typename Tensor_>
inline internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup> operator()(Tensor_& tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices> const& indices) const
{
return internal::tensor_symmetry_value_setter<Tensor_, DynamicSGroup>(tensor, *this, indices);
}
private:
struct GroupElement {
std::vector<int> representation;

@@ -77,7 +90,7 @@ class DynamicSGroup
template<typename Index, std::size_t N, int... n>
inline std::array<Index, N> h_permute(std::size_t which, const std::array<Index, N>& idx, internal::numeric_list<int, n...>) const
{
return std::array<Index, N>{{ idx[m_elements[which].representation[n]]... }};
return std::array<Index, N>{{ idx[n >= m_numIndices ? n : m_elements[which].representation[n]]... }};
}

template<typename Index>

@@ -87,6 +100,8 @@ class DynamicSGroup
result.reserve(idx.size());
for (auto k : m_elements[which].representation)
result.push_back(idx[k]);
for (std::size_t i = m_numIndices; i < idx.size(); i++)
result.push_back(idx[i]);
return result;
}

@@ -135,18 +150,18 @@ class DynamicSGroup
};

// dynamic symmetry group that auto-adds the template parameters in the constructor
template<std::size_t NumIndices, typename... Gen>
template<typename... Gen>
class DynamicSGroupFromTemplateArgs : public DynamicSGroup
{
public:
inline DynamicSGroupFromTemplateArgs() : DynamicSGroup(NumIndices)
inline DynamicSGroupFromTemplateArgs() : DynamicSGroup()
{
add_all(internal::type_list<Gen...>());
}
inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs const& other) : DynamicSGroup(other) { }
inline DynamicSGroupFromTemplateArgs(DynamicSGroupFromTemplateArgs&& other) : DynamicSGroup(other) { }
inline DynamicSGroupFromTemplateArgs<NumIndices, Gen...>& operator=(const DynamicSGroupFromTemplateArgs<NumIndices, Gen...>& o) { DynamicSGroup::operator=(o); return *this; }
inline DynamicSGroupFromTemplateArgs<NumIndices, Gen...>& operator=(DynamicSGroupFromTemplateArgs<NumIndices, Gen...>&& o) { DynamicSGroup::operator=(o); return *this; }
inline DynamicSGroupFromTemplateArgs<Gen...>& operator=(const DynamicSGroupFromTemplateArgs<Gen...>& o) { DynamicSGroup::operator=(o); return *this; }
inline DynamicSGroupFromTemplateArgs<Gen...>& operator=(DynamicSGroupFromTemplateArgs<Gen...>&& o) { DynamicSGroup::operator=(o); return *this; }

private:
template<typename Gen1, typename... GenNext>
@@ -168,18 +183,32 @@ inline DynamicSGroup::GroupElement DynamicSGroup::mul(GroupElement g1, GroupElem

GroupElement result;
result.representation.reserve(m_numIndices);
for (std::size_t i = 0; i < m_numIndices; i++)
result.representation.push_back(g2.representation[g1.representation[i]]);
for (std::size_t i = 0; i < m_numIndices; i++) {
int v = g2.representation[g1.representation[i]];
eigen_assert(v >= 0);
result.representation.push_back(v);
}
result.flags = g1.flags ^ g2.flags;
return result;
}

inline void DynamicSGroup::add(int one, int two, int flags)
{
eigen_assert(one >= 0 && (std::size_t)one < m_numIndices);
eigen_assert(two >= 0 && (std::size_t)two < m_numIndices);
eigen_assert(one >= 0);
eigen_assert(two >= 0);
eigen_assert(one != two);
Generator g{one, two ,flags};

if ((std::size_t)one >= m_numIndices || (std::size_t)two >= m_numIndices) {
std::size_t newNumIndices = (one > two) ? one : two + 1;
for (auto& gelem : m_elements) {
gelem.representation.reserve(newNumIndices);
for (std::size_t i = m_numIndices; i < newNumIndices; i++)
gelem.representation.push_back(i);
}
m_numIndices = newNumIndices;
}

Generator g{one, two, flags};
GroupElement e = ge(g);

/* special case for first generator */

@@ -114,20 +114,24 @@ struct tensor_static_symgroup_equality
template<std::size_t NumIndices, typename... Gen>
struct tensor_static_symgroup
{
typedef StaticSGroup<NumIndices, Gen...> type;
typedef StaticSGroup<Gen...> type;
constexpr static std::size_t size = type::static_size;
};

template<typename Index, std::size_t N, int... ii>
constexpr static inline std::array<Index, N> tensor_static_symgroup_index_permute(std::array<Index, N> idx, internal::numeric_list<int, ii...>)
template<typename Index, std::size_t N, int... ii, int... jj>
constexpr static inline std::array<Index, N> tensor_static_symgroup_index_permute(std::array<Index, N> idx, internal::numeric_list<int, ii...>, internal::numeric_list<int, jj...>)
{
return {{ idx[ii]... }};
return {{ idx[ii]..., idx[jj]... }};
}

template<typename Index, int... ii>
static inline std::vector<Index> tensor_static_symgroup_index_permute(std::vector<Index> idx, internal::numeric_list<int, ii...>)
{
return {{ idx[ii]... }};
std::vector<Index> result{{ idx[ii]... }};
std::size_t target_size = idx.size();
for (std::size_t i = result.size(); i < target_size; i++)
result.push_back(idx[i]);
return result;
}

template<typename T> struct tensor_static_symgroup_do_apply;

@@ -135,32 +139,35 @@ template<typename T> struct tensor_static_symgroup_do_apply;
template<typename first, typename... next>
struct tensor_static_symgroup_do_apply<internal::type_list<first, next...>>
{
template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
static inline RV run(const std::array<Index, N>& idx, RV initial, Args&&... args)
template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
static inline RV run(const std::array<Index, NumIndices>& idx, RV initial, Args&&... args)
{
initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices()), first::flags, initial, std::forward<Args>(args)...);
return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op>(idx, initial, args...);
static_assert(NumIndices >= SGNumIndices, "Can only apply symmetry group to objects that have at least the required amount of indices.");
typedef typename internal::gen_numeric_list<int, NumIndices - SGNumIndices, SGNumIndices>::type remaining_indices;
initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices(), remaining_indices()), first::flags, initial, std::forward<Args>(args)...);
return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
}

template<typename Op, typename RV, typename Index, typename... Args>
template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
static inline RV run(const std::vector<Index>& idx, RV initial, Args&&... args)
{
eigen_assert(idx.size() >= SGNumIndices && "Can only apply symmetry group to objects that have at least the required amount of indices.");
initial = Op::run(tensor_static_symgroup_index_permute(idx, typename first::indices()), first::flags, initial, std::forward<Args>(args)...);
return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op>(idx, initial, args...);
return tensor_static_symgroup_do_apply<internal::type_list<next...>>::template run<Op, RV, SGNumIndices>(idx, initial, args...);
}
};

template<EIGEN_TPL_PP_SPEC_HACK_DEF(typename, empty)>
struct tensor_static_symgroup_do_apply<internal::type_list<EIGEN_TPL_PP_SPEC_HACK_USE(empty)>>
{
template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
static inline RV run(const std::array<Index, N>&, RV initial, Args&&...)
template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, std::size_t NumIndices, typename... Args>
static inline RV run(const std::array<Index, NumIndices>&, RV initial, Args&&...)
{
// do nothing
return initial;
}

template<typename Op, typename RV, typename Index, typename... Args>
template<typename Op, typename RV, std::size_t SGNumIndices, typename Index, typename... Args>
static inline RV run(const std::vector<Index>&, RV initial, Args&&...)
{
// do nothing
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
template<std::size_t NumIndices, typename... Gen>
|
||||
template<typename... Gen>
|
||||
class StaticSGroup
|
||||
{
|
||||
constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
|
||||
typedef internal::group_theory::enumerate_group_elements<
|
||||
internal::tensor_static_symgroup_multiply,
|
||||
internal::tensor_static_symgroup_equality,
|
||||
@ -182,20 +190,20 @@ class StaticSGroup
|
||||
typedef typename group_elements::type ge;
|
||||
public:
|
||||
constexpr inline StaticSGroup() {}
|
||||
constexpr inline StaticSGroup(const StaticSGroup<NumIndices, Gen...>&) {}
|
||||
constexpr inline StaticSGroup(StaticSGroup<NumIndices, Gen...>&&) {}
|
||||
constexpr inline StaticSGroup(const StaticSGroup<Gen...>&) {}
|
||||
constexpr inline StaticSGroup(StaticSGroup<Gen...>&&) {}
|
||||
|
||||
template<typename Op, typename RV, typename Index, typename... Args>
|
||||
static inline RV apply(const std::array<Index, NumIndices>& idx, RV initial, Args&&... args)
|
||||
template<typename Op, typename RV, typename Index, std::size_t N, typename... Args>
|
||||
static inline RV apply(const std::array<Index, N>& idx, RV initial, Args&&... args)
|
||||
{
|
||||
return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV>(idx, initial, args...);
|
||||
return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV, NumIndices>(idx, initial, args...);
|
||||
}
|
||||
|
||||
template<typename Op, typename RV, typename Index, typename... Args>
|
||||
static inline RV apply(const std::vector<Index>& idx, RV initial, Args&&... args)
|
||||
{
|
||||
eigen_assert(idx.size() == NumIndices);
|
||||
return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV>(idx, initial, args...);
|
||||
return internal::tensor_static_symgroup_do_apply<ge>::template run<Op, RV, NumIndices>(idx, initial, args...);
|
||||
}
|
||||
|
||||
constexpr static std::size_t static_size = ge::count;
|
||||
@ -204,6 +212,19 @@ class StaticSGroup
|
||||
return ge::count;
|
||||
}
|
||||
constexpr static inline int globalFlags() { return group_elements::global_flags; }
|
||||
|
||||
template<typename Tensor_, typename... IndexTypes>
|
||||
inline internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>> operator()(Tensor_& tensor, typename Tensor_::Index firstIndex, IndexTypes... otherIndices) const
|
||||
{
|
||||
static_assert(sizeof...(otherIndices) + 1 == Tensor_::NumIndices, "Number of indices used to access a tensor coefficient must be equal to the rank of the tensor.");
|
||||
return operator()(tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices>{{firstIndex, otherIndices...}});
|
||||
}
|
||||
|
||||
template<typename Tensor_>
|
||||
inline internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>> operator()(Tensor_& tensor, std::array<typename Tensor_::Index, Tensor_::NumIndices> const& indices) const
|
||||
{
|
||||
return internal::tensor_symmetry_value_setter<Tensor_, StaticSGroup<Gen...>>(tensor, *this, indices);
|
||||
}
|
||||
};
|
||||
|
||||
} // end namespace Eigen
|
||||
|
@ -30,6 +30,7 @@ template<std::size_t NumIndices, typename... Sym> struct tenso
|
||||
template<bool instantiate, std::size_t NumIndices, typename... Sym> struct tensor_static_symgroup_if;
|
||||
template<typename Tensor_> struct tensor_symmetry_calculate_flags;
|
||||
template<typename Tensor_> struct tensor_symmetry_assign_value;
|
||||
template<typename... Sym> struct tensor_symmetry_num_indices;
|
||||
|
||||
} // end namespace internal
|
||||
|
||||
@ -94,7 +95,7 @@ class DynamicSGroup;
|
||||
* This class is a child class of DynamicSGroup. It uses the template arguments
|
||||
* specified to initialize itself.
|
||||
*/
|
||||
template<std::size_t NumIndices, typename... Gen>
|
||||
template<typename... Gen>
|
||||
class DynamicSGroupFromTemplateArgs;
|
||||
|
||||
/** \class StaticSGroup
|
||||
@ -116,7 +117,7 @@ class DynamicSGroupFromTemplateArgs;
|
||||
* group becomes too large. (In that case, unrolling may not even be
|
||||
* beneficial.)
|
||||
*/
|
||||
template<std::size_t NumIndices, typename... Gen>
|
||||
template<typename... Gen>
|
||||
class StaticSGroup;
|
||||
|
||||
/** \class SGroup
|
||||
@ -131,24 +132,50 @@ class StaticSGroup;
|
||||
* \sa StaticSGroup
|
||||
* \sa DynamicSGroup
|
||||
*/
|
||||
template<std::size_t NumIndices, typename... Gen>
|
||||
class SGroup : public internal::tensor_symmetry_pre_analysis<NumIndices, Gen...>::root_type
|
||||
template<typename... Gen>
|
||||
class SGroup : public internal::tensor_symmetry_pre_analysis<internal::tensor_symmetry_num_indices<Gen...>::value, Gen...>::root_type
|
||||
{
|
||||
public:
|
||||
constexpr static std::size_t NumIndices = internal::tensor_symmetry_num_indices<Gen...>::value;
|
||||
typedef typename internal::tensor_symmetry_pre_analysis<NumIndices, Gen...>::root_type Base;
|
||||
|
||||
// make standard constructors + assignment operators public
|
||||
inline SGroup() : Base() { }
|
||||
inline SGroup(const SGroup<NumIndices, Gen...>& other) : Base(other) { }
|
||||
inline SGroup(SGroup<NumIndices, Gen...>&& other) : Base(other) { }
|
||||
inline SGroup<NumIndices, Gen...>& operator=(const SGroup<NumIndices, Gen...>& other) { Base::operator=(other); return *this; }
|
||||
inline SGroup<NumIndices, Gen...>& operator=(SGroup<NumIndices, Gen...>&& other) { Base::operator=(other); return *this; }
|
||||
inline SGroup(const SGroup<Gen...>& other) : Base(other) { }
|
||||
inline SGroup(SGroup<Gen...>&& other) : Base(other) { }
|
||||
inline SGroup<Gen...>& operator=(const SGroup<Gen...>& other) { Base::operator=(other); return *this; }
|
||||
inline SGroup<Gen...>& operator=(SGroup<Gen...>&& other) { Base::operator=(other); return *this; }
|
||||
|
||||
// all else is defined in the base class
|
||||
};
|
||||
|
||||
namespace internal {
|
||||
|
||||
template<typename... Sym> struct tensor_symmetry_num_indices
|
||||
{
|
||||
constexpr static std::size_t value = 1;
|
||||
};
|
||||
|
||||
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...>
|
||||
{
|
||||
private:
|
||||
constexpr static std::size_t One = static_cast<std::size_t>(One_);
|
||||
constexpr static std::size_t Two = static_cast<std::size_t>(Two_);
|
||||
constexpr static std::size_t Three = tensor_symmetry_num_indices<Sym...>::value;
|
||||
|
||||
// don't use std::max, since it's not constexpr until C++14...
|
||||
constexpr static std::size_t maxOneTwoPlusOne = ((One > Two) ? One : Two) + 1;
|
||||
public:
|
||||
constexpr static std::size_t value = (maxOneTwoPlusOne > Three) ? maxOneTwoPlusOne : Three;
|
||||
};
|
||||
|
||||
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiSymmetry<One_, Two_>, Sym...>
|
||||
: public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
|
||||
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<Hermiticity<One_, Two_>, Sym...>
|
||||
: public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
|
||||
template<int One_, int Two_, typename... Sym> struct tensor_symmetry_num_indices<AntiHermiticity<One_, Two_>, Sym...>
|
||||
: public tensor_symmetry_num_indices<Symmetry<One_, Two_>, Sym...> {};
|
||||
|
||||
/** \internal
|
||||
*
|
||||
* \class tensor_symmetry_pre_analysis
|
||||
@ -199,7 +226,7 @@ namespace internal {
|
||||
template<std::size_t NumIndices>
|
||||
struct tensor_symmetry_pre_analysis<NumIndices>
|
||||
{
|
||||
typedef StaticSGroup<NumIndices> root_type;
|
||||
typedef StaticSGroup<> root_type;
|
||||
};
|
||||
|
||||
template<std::size_t NumIndices, typename Gen_, typename... Gens_>
|
||||
@ -212,7 +239,7 @@ struct tensor_symmetry_pre_analysis<NumIndices, Gen_, Gens_...>
|
||||
|
||||
typedef typename conditional<
|
||||
possible_size == 0 || possible_size >= max_static_elements,
|
||||
DynamicSGroupFromTemplateArgs<NumIndices, Gen_, Gens_...>,
|
||||
DynamicSGroupFromTemplateArgs<Gen_, Gens_...>,
|
||||
typename helper::type
|
||||
>::type root_type;
|
||||
};
|
||||
@ -266,7 +293,7 @@ struct tensor_symmetry_calculate_flags
|
||||
}
|
||||
};
|
||||
|
||||
template<typename Tensor_, typename Symmetry_, int Flags>
|
||||
template<typename Tensor_, typename Symmetry_, int Flags = 0>
|
||||
class tensor_symmetry_value_setter
|
||||
{
|
||||
public:
|
||||
|
@@ -41,7 +41,7 @@ class PolynomialSolverBase
protected:
template< typename OtherPolynomial >
inline void setPolynomial( const OtherPolynomial& poly ){
m_roots.resize(poly.size()); }
m_roots.resize(poly.size()-1); }

public:
template< typename OtherPolynomial >

@@ -316,7 +316,7 @@ class PolynomialSolverBase
* - real roots with greatest, smallest absolute real value.
* - greatest, smallest real roots.
*
* WARNING: this polynomial solver is experimental, part of the unsuported Eigen modules.
* WARNING: this polynomial solver is experimental, part of the unsupported Eigen modules.
*
*
* Currently a QR algorithm is used to compute the eigenvalues of the companion matrix of

@@ -345,10 +345,19 @@ class PolynomialSolver : public PolynomialSolverBase<_Scalar,_Deg>
void compute( const OtherPolynomial& poly )
{
eigen_assert( Scalar(0) != poly[poly.size()-1] );
internal::companion<Scalar,_Deg> companion( poly );
companion.balance();
m_eigenSolver.compute( companion.denseMatrix() );
m_roots = m_eigenSolver.eigenvalues();
eigen_assert( poly.size() > 1 );
if(poly.size() > 2 )
{
internal::companion<Scalar,_Deg> companion( poly );
companion.balance();
m_eigenSolver.compute( companion.denseMatrix() );
m_roots = m_eigenSolver.eigenvalues();
}
else if(poly.size () == 2)
{
m_roots.resize(1);
m_roots[0] = -poly[0]/poly[1];
}
}

public:

@@ -376,10 +385,18 @@ class PolynomialSolver<_Scalar,1> : public PolynomialSolverBase<_Scalar,1>
template< typename OtherPolynomial >
void compute( const OtherPolynomial& poly )
{
eigen_assert( Scalar(0) != poly[poly.size()-1] );
m_roots[0] = -poly[0]/poly[poly.size()-1];
eigen_assert( poly.size() == 2 );
eigen_assert( Scalar(0) != poly[1] );
m_roots[0] = -poly[0]/poly[1];
}

public:
template< typename OtherPolynomial >
inline PolynomialSolver( const OtherPolynomial& poly ){
compute( poly ); }

inline PolynomialSolver(){}

protected:
using PS_Base::m_roots;
};
@@ -104,6 +104,7 @@ if(EIGEN_TEST_CXX11)
ei_add_test(cxx11_tensor_assign "-std=c++0x")
ei_add_test(cxx11_tensor_comparisons "-std=c++0x")
ei_add_test(cxx11_tensor_contraction "-std=c++0x")
ei_add_test(cxx11_tensor_convolution "-std=c++0x")
ei_add_test(cxx11_tensor_expr "-std=c++0x")
ei_add_test(cxx11_tensor_map "-std=c++0x")
ei_add_test(cxx11_tensor_device "-std=c++0x")

@@ -91,18 +91,36 @@ static void test_gen_numeric_list()
VERIFY((is_same<typename gen_numeric_list<int, 5>::type, numeric_list<int, 0, 1, 2, 3, 4>>::value));
VERIFY((is_same<typename gen_numeric_list<int, 10>::type, numeric_list<int, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9>>::value));

VERIFY((is_same<typename gen_numeric_list<int, 0, 42>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list<int, 1, 42>::type, numeric_list<int, 42>>::value));
VERIFY((is_same<typename gen_numeric_list<int, 2, 42>::type, numeric_list<int, 42, 43>>::value));
VERIFY((is_same<typename gen_numeric_list<int, 5, 42>::type, numeric_list<int, 42, 43, 44, 45, 46>>::value));
VERIFY((is_same<typename gen_numeric_list<int, 10, 42>::type, numeric_list<int, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51>>::value));

VERIFY((is_same<typename gen_numeric_list_reversed<int, 0>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 1>::type, numeric_list<int, 0>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 2>::type, numeric_list<int, 1, 0>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 5>::type, numeric_list<int, 4, 3, 2, 1, 0>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 10>::type, numeric_list<int, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0>>::value));

VERIFY((is_same<typename gen_numeric_list_reversed<int, 0, 42>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 1, 42>::type, numeric_list<int, 42>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 2, 42>::type, numeric_list<int, 43, 42>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 5, 42>::type, numeric_list<int, 46, 45, 44, 43, 42>>::value));
VERIFY((is_same<typename gen_numeric_list_reversed<int, 10, 42>::type, numeric_list<int, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42>>::value));

VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 0, 2, 3>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 1, 2, 3>::type, numeric_list<int, 0>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 2, 2, 3>::type, numeric_list<int, 0, 1>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 5, 2, 3>::type, numeric_list<int, 0, 1, 3, 2, 4>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 10, 2, 3>::type, numeric_list<int, 0, 1, 3, 2, 4, 5, 6, 7, 8, 9>>::value));

VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 0, 44, 45, 42>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 1, 44, 45, 42>::type, numeric_list<int, 42>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 2, 44, 45, 42>::type, numeric_list<int, 42, 43>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 5, 44, 45, 42>::type, numeric_list<int, 42, 43, 45, 44, 46>>::value));
VERIFY((is_same<typename gen_numeric_list_swapped_pair<int, 10, 44, 45, 42>::type, numeric_list<int, 42, 43, 45, 44, 46, 47, 48, 49, 50, 51>>::value));

VERIFY((is_same<typename gen_numeric_list_repeated<int, 0, 0>::type, numeric_list<int>>::value));
VERIFY((is_same<typename gen_numeric_list_repeated<int, 1, 0>::type, numeric_list<int, 0>>::value));
VERIFY((is_same<typename gen_numeric_list_repeated<int, 2, 0>::type, numeric_list<int, 0, 0>>::value));

@@ -32,8 +32,8 @@ using Eigen::GlobalImagFlag;

// helper function to determine if the compiler intantiated a static
// or dynamic symmetry group
template<std::size_t NumIndices, typename... Sym>
bool isDynGroup(StaticSGroup<NumIndices, Sym...> const& dummy)
template<typename... Sym>
bool isDynGroup(StaticSGroup<Sym...> const& dummy)
{
(void)dummy;
return false;

@@ -86,7 +86,7 @@ static void test_symgroups_static()
std::array<int, 7> identity{{0,1,2,3,4,5,6}};

// Simple static symmetry group
StaticSGroup<7,
StaticSGroup<
AntiSymmetry<0,1>,
Hermiticity<0,2>
> group;

@@ -113,7 +113,7 @@ static void test_symgroups_dynamic()
identity.push_back(i);

// Simple dynamic symmetry group
DynamicSGroup group(7);
DynamicSGroup group;
group.add(0,1,NegationFlag);
group.add(0,2,ConjugationFlag);

@@ -143,7 +143,7 @@ static void test_symgroups_selection()
{
// Do the same test as in test_symgroups_static but
// require selection via SGroup
SGroup<7,
SGroup<
AntiSymmetry<0,1>,
Hermiticity<0,2>
> group;

@@ -168,7 +168,7 @@ static void test_symgroups_selection()
// simple factorizing group: 5 generators, 2^5 = 32 elements
// selection should make this dynamic, although static group
// can still be reasonably generated
SGroup<10,
SGroup<
Symmetry<0,1>,
Symmetry<2,3>,
Symmetry<4,5>,

@@ -196,7 +196,7 @@ static void test_symgroups_selection()
// no verify that we could also generate a static group
// with these generators
found.clear();
StaticSGroup<10,
StaticSGroup<
Symmetry<0,1>,
Symmetry<2,3>,
Symmetry<4,5>,

@@ -211,7 +211,7 @@ static void test_symgroups_selection()

{
// try to create a HUGE group
SGroup<7,
SGroup<
Symmetry<0,1>,
Symmetry<1,2>,
Symmetry<2,3>,
@@ -657,11 +657,11 @@ static void test_symgroups_selection()

static void test_tensor_epsilon()
{
SGroup<3, AntiSymmetry<0,1>, AntiSymmetry<1,2>> sym;
SGroup<AntiSymmetry<0,1>, AntiSymmetry<1,2>> sym;
Tensor<int, 3> epsilon(3,3,3);

epsilon.setZero();
epsilon.symCoeff(sym, 0, 1, 2) = 1;
sym(epsilon, 0, 1, 2) = 1;

for (int i = 0; i < 3; i++) {
for (int j = 0; j < 3; j++) {

@@ -674,7 +674,7 @@ static void test_tensor_epsilon()

static void test_tensor_sym()
{
SGroup<4, Symmetry<0,1>, Symmetry<2,3>> sym;
SGroup<Symmetry<0,1>, Symmetry<2,3>> sym;
Tensor<int, 4> t(10,10,10,10);

t.setZero();

@@ -683,7 +683,7 @@ static void test_tensor_sym()
for (int k = l; k < 10; k++) {
for (int j = 0; j < 10; j++) {
for (int i = j; i < 10; i++) {
t.symCoeff(sym, i, j, k, l) = (i + j) * (k + l);
sym(t, i, j, k, l) = (i + j) * (k + l);
}
}
}

@@ -703,7 +703,7 @@ static void test_tensor_sym()

static void test_tensor_asym()
{
SGroup<4, AntiSymmetry<0,1>, AntiSymmetry<2,3>> sym;
SGroup<AntiSymmetry<0,1>, AntiSymmetry<2,3>> sym;
Tensor<int, 4> t(10,10,10,10);

t.setZero();

@@ -712,7 +712,7 @@ static void test_tensor_asym()
for (int k = l + 1; k < 10; k++) {
for (int j = 0; j < 10; j++) {
for (int i = j + 1; i < 10; i++) {
t.symCoeff(sym, i, j, k, l) = ((i * j) + (k * l));
sym(t, i, j, k, l) = ((i * j) + (k * l));
}
}
}

@@ -740,7 +740,7 @@ static void test_tensor_asym()

static void test_tensor_dynsym()
{
DynamicSGroup sym(4);
DynamicSGroup sym;
sym.addSymmetry(0,1);
sym.addSymmetry(2,3);
Tensor<int, 4> t(10,10,10,10);

@@ -751,7 +751,7 @@ static void test_tensor_dynsym()
for (int k = l; k < 10; k++) {
for (int j = 0; j < 10; j++) {
for (int i = j; i < 10; i++) {
t.symCoeff(sym, i, j, k, l) = (i + j) * (k + l);
sym(t, i, j, k, l) = (i + j) * (k + l);
}
}
}

@@ -770,7 +770,7 @@ static void test_tensor_dynsym()

static void test_tensor_randacc()
{
SGroup<4, Symmetry<0,1>, Symmetry<2,3>> sym;
SGroup<Symmetry<0,1>, Symmetry<2,3>> sym;
Tensor<int, 4> t(10,10,10,10);

t.setZero();

@@ -787,7 +787,7 @@ static void test_tensor_randacc()
std::swap(i, j);
if (k < l)
std::swap(k, l);
t.symCoeff(sym, i, j, k, l) = (i + j) * (k + l);
sym(t, i, j, k, l) = (i + j) * (k + l);
}

for (int l = 0; l < 10; l++) {
||||
const Index deg = pols.size()-1;
|
||||
|
||||
// Test template constructor from coefficient vector
|
||||
SOLVER solve_constr (pols);
|
||||
|
||||
psolve.compute( pols );
|
||||
const RootsType& roots( psolve.roots() );
|
||||
EvalRootsType evr( deg );
|
||||
@ -210,5 +213,6 @@ void test_polynomialsolver()
|
||||
CALL_SUBTEST_10((polynomialsolver<double,Dynamic>(
|
||||
internal::random<int>(9,13)
|
||||
)) );
|
||||
CALL_SUBTEST_11((polynomialsolver<float,Dynamic>(1)) );
|
||||
}
|
||||
}
|
||||
|