Merged eigen/eigen into default

Rasmus Larsen 2016-05-18 15:21:50 -07:00
commit b1e080c752
19 changed files with 176 additions and 44 deletions


@@ -217,7 +217,7 @@ template<typename OtherDerived>
EIGEN_STRONG_INLINE Derived &
ArrayBase<Derived>::operator/=(const ArrayBase<OtherDerived>& other)
{
-call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar>());
+call_assignment(derived(), other.derived(), internal::div_assign_op<Scalar,typename OtherDerived::Scalar>());
return derived();
}
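To illustrate what the extra template argument buys (my own sketch, not part of the patch), coefficient-wise compound division between arrays with different but compatible scalar types now routes through div_assign_op<DstScalar,SrcScalar>, mirroring the mixingtypes test further down:

#include <Eigen/Core>
#include <complex>

int main()
{
  // A complex-valued array divided coefficient-wise by a real-valued one;
  // the assignment dispatches to div_assign_op<std::complex<double>, double>.
  Eigen::ArrayXcd a = Eigen::ArrayXcd::Constant(4, std::complex<double>(4.0, 2.0));
  Eigen::ArrayXd  b = Eigen::ArrayXd::Constant(4, 2.0);
  a /= b;
  return a(0) == std::complex<double>(2.0, 1.0) ? 0 : 1;
}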


@@ -104,23 +104,23 @@ template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<m
* \brief Template functor for scalar/packet assignment with dividing
*
*/
-template<typename Scalar> struct div_assign_op {
+template<typename DstScalar, typename SrcScalar=DstScalar> struct div_assign_op {
EIGEN_EMPTY_STRUCT_CTOR(div_assign_op)
-EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(Scalar& a, const Scalar& b) const { a /= b; }
+EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void assignCoeff(DstScalar& a, const SrcScalar& b) const { a /= b; }
template<int Alignment, typename Packet>
-EIGEN_STRONG_INLINE void assignPacket(Scalar* a, const Packet& b) const
-{ internal::pstoret<Scalar,Packet,Alignment>(a,internal::pdiv(internal::ploadt<Packet,Alignment>(a),b)); }
+EIGEN_STRONG_INLINE void assignPacket(DstScalar* a, const Packet& b) const
+{ internal::pstoret<DstScalar,Packet,Alignment>(a,internal::pdiv(internal::ploadt<Packet,Alignment>(a),b)); }
};
-template<typename Scalar>
-struct functor_traits<div_assign_op<Scalar> > {
+template<typename DstScalar, typename SrcScalar>
+struct functor_traits<div_assign_op<DstScalar,SrcScalar> > {
enum {
-Cost = NumTraits<Scalar>::ReadCost + NumTraits<Scalar>::MulCost,
-PacketAccess = packet_traits<Scalar>::HasDiv
+Cost = NumTraits<DstScalar>::ReadCost + NumTraits<DstScalar>::MulCost,
+PacketAccess = is_same<DstScalar,SrcScalar>::value && packet_traits<DstScalar>::HasDiv
};
};
template<typename DstScalar,typename SrcScalar> struct functor_is_product_like<div_assign_op<DstScalar,SrcScalar> > { enum { ret = 1 }; };
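A hedged reading of the new PacketAccess guard (my illustration; the MixedDiv and PlainDiv names below are mine): mixed-scalar division compiles but stays on the scalar path, because pdiv operates on a single packet type, while for identical scalars vectorization is still governed by packet_traits<Scalar>::HasDiv.

#include <Eigen/Core>
#include <complex>

// Mixed destination/source scalars: PacketAccess is forced to false.
typedef Eigen::internal::div_assign_op<std::complex<double>, double> MixedDiv;
// Identical scalars: PacketAccess simply follows packet_traits<double>::HasDiv.
typedef Eigen::internal::div_assign_op<double, double> PlainDiv;

static_assert(!Eigen::internal::functor_traits<MixedDiv>::PacketAccess,
              "mixed-scalar division is never packetized");

int main()
{
  // Whether PlainDiv is packetized depends on the SIMD target's HasDiv flag.
  return 0;
}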
/** \internal
* \brief Template functor for scalar/packet assignment with swapping


@@ -199,7 +199,7 @@ const unsigned int HereditaryBits = RowMajorBit
/** \ingroup enums
* Enum containing possible values for the \c Mode or \c UpLo parameter of
* MatrixBase::selfadjointView() and MatrixBase::triangularView(), and selfadjoint solvers. */
-enum TriangularOptions {
+enum UpLoType {
/** View matrix as a lower triangular matrix. */
Lower=0x1,
/** View matrix as an upper triangular matrix. */
@@ -224,7 +224,7 @@ enum TriangularOptions {
/** \ingroup enums
* Enum for indicating whether a buffer is aligned or not. */
-enum AlignmentOptions {
+enum AlignmentType {
Unaligned=0, /**< Data pointer has no specific alignment. */
Aligned8=8, /**< Data pointer is aligned on a 8 bytes boundary. */
Aligned16=16, /**< Data pointer is aligned on a 16 bytes boundary. */
@@ -273,7 +273,7 @@ enum DirectionType {
/** \internal \ingroup enums
* Enum to specify how to traverse the entries of a matrix. */
-enum TraversalOptions {
+enum TraversalType {
/** \internal Default traversal, no vectorization, no index-based access */
DefaultTraversal,
/** \internal No vectorization, use index-based access to have only one for loop instead of 2 nested loops */
@@ -295,7 +295,7 @@ enum TraversalOptions {
/** \internal \ingroup enums
* Enum to specify whether to unroll loops when traversing over the entries of a matrix. */
-enum UnrollingOptions {
+enum UnrollingType {
/** \internal Do not unroll loops. */
NoUnrolling,
/** \internal Unroll only the inner loop, but not the outer loop. */
@@ -307,7 +307,7 @@ enum UnrollingOptions {
/** \internal \ingroup enums
* Enum to specify whether to use the default (built-in) implementation or the specialization. */
-enum SpecializedOptions {
+enum SpecializedType {
Specialized,
BuiltIn
};
@@ -328,7 +328,7 @@ enum StorageOptions {
/** \ingroup enums
* Enum for specifying whether to apply or solve on the left or right. */
-enum SolverOptions {
+enum SideType {
/** Apply transformation on the left. */
OnTheLeft = 1,
/** Apply transformation on the right. */
@@ -353,7 +353,7 @@ enum Default_t { Default };
/** \internal \ingroup enums
* Used in AmbiVector. */
-enum AmbiVectorOptions {
+enum AmbiVectorMode {
IsDense = 0,
IsSparse
};
@@ -480,7 +480,7 @@ namespace Architecture
/** \internal \ingroup enums
* Enum used as template parameter in Product and product evaluators. */
-enum ProductType
+enum ProductImplType
{ DefaultProduct=0, LazyProduct, AliasFreeProduct, CoeffBasedProductMode, LazyCoeffBasedProductMode, OuterProduct, InnerProduct, GemvProduct, GemmProduct };
/** \internal \ingroup enums

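For orientation (my sketch, not part of the patch; the helper name keepTriangle is mine): only the enum type names in Constants.h change, while the enumerator values themselves (Lower, Upper, OnTheLeft, Unaligned, ...) keep their names, so only code that spells out an enum type, for example as a parameter type, needs the new spelling.

#include <Eigen/Core>

// Was: Eigen::TriangularOptions uplo. The enumerators are untouched.
Eigen::MatrixXd keepTriangle(const Eigen::MatrixXd& A, Eigen::UpLoType uplo)
{
  if (uplo == Eigen::Lower)
    return A.triangularView<Eigen::Lower>();
  return A.triangularView<Eigen::Upper>();
}

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd L = keepTriangle(A, Eigen::Lower);
  return L.rows() == 3 ? 0 : 1;
}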

@@ -414,7 +414,7 @@ SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
if(n==1)
{
-m_eivalues.coeffRef(0,0) = numext::real(matrix(0,0));
+m_eivalues.coeffRef(0,0) = numext::real(matrix.coeff(0,0));
if(computeEigenvectors)
m_eivec.setOnes(n,n);
m_info = Success;


@@ -143,7 +143,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
// If the result is tall and thin (in the extreme case a column vector)
// then it is faster to sort the coefficients inplace instead of transposing twice.
// FIXME, the following heuristic is probably not very good.
-if(lhs.rows()>=rhs.cols())
+if(lhs.rows()>rhs.cols())
{
ColMajorMatrix resCol(lhs.rows(),rhs.cols());
// perform sorted insertion


@@ -131,14 +131,14 @@ struct Assignment<DstXprType, Product<Lhs,Rhs,AliasFreeProduct>, internal::sub_a
};
template<typename Lhs, typename Rhs, int Options>
-struct evaluator<SparseView<Product<Lhs, Rhs, Options> > >
+struct unary_evaluator<SparseView<Product<Lhs, Rhs, Options> >, IteratorBased>
: public evaluator<typename Product<Lhs, Rhs, DefaultProduct>::PlainObject>
{
typedef SparseView<Product<Lhs, Rhs, Options> > XprType;
typedef typename XprType::PlainObject PlainObject;
typedef evaluator<PlainObject> Base;
-explicit evaluator(const XprType& xpr)
+explicit unary_evaluator(const XprType& xpr)
: m_result(xpr.rows(), xpr.cols())
{
using std::abs;
@@ -147,13 +147,13 @@ struct evaluator<SparseView<Product<Lhs, Rhs, Options> > >
typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
LhsNested lhsNested(xpr.nestedExpression().lhs());
RhsNested rhsNested(xpr.nestedExpression().rhs());
internal::sparse_sparse_product_with_pruning_selector<typename remove_all<LhsNested>::type,
typename remove_all<RhsNested>::type, PlainObject>::run(lhsNested,rhsNested,m_result,
abs(xpr.reference())*xpr.epsilon());
}
-protected:
+protected:
PlainObject m_result;
};
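For context, a rough usage sketch (mine, not from the patch): the evaluator above is what handles a sparse product wrapped in .pruned(), which yields a SparseView expression; the numbers below mirror the new pruning check added to sparse_product.cpp further down.

#include <Eigen/Sparse>

int main()
{
  Eigen::MatrixXd Ad(2, 2);
  Ad << -1, 1,
         1, 1;
  Eigen::SparseMatrix<double> As = Ad.sparseView();

  // A*A^T == [[2,0],[0,2]]: the plain sparse product keeps the explicit zeros,
  // while the pruned product drops them as the result is assembled.
  Eigen::SparseMatrix<double> C1 = As * As.transpose();
  Eigen::SparseMatrix<double> C2 = (As * As.transpose()).pruned(1e-6);
  return (C1.nonZeros() == 4 && C2.nonZeros() == 2) ? 0 : 1;
}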


@@ -259,6 +259,8 @@ ei_add_test(dense_storage)
ei_add_test(ctorleak)
ei_add_test(mpl2only)
+add_executable(bug1213 bug1213.cpp bug1213_main.cpp)
check_cxx_compiler_flag("-ffast-math" COMPILER_SUPPORT_FASTMATH)
if(COMPILER_SUPPORT_FASTMATH)
set(EIGEN_FASTMATH_FLAGS "-ffast-math")

test/bug1213.cpp (new file, 13 lines)

@@ -0,0 +1,13 @@
// This anonymous enum is essential to trigger the linking issue
enum {
Foo
};
#include "bug1213.h"
bool bug1213_1(const Eigen::Vector3f& x)
{
return bug1213_2(x);
}

test/bug1213.h (new file, 8 lines)

@@ -0,0 +1,8 @@
#include <Eigen/Core>
template<typename T, int dim>
bool bug1213_2(const Eigen::Matrix<T,dim,1>& x);
bool bug1213_1(const Eigen::Vector3f& x);

test/bug1213_main.cpp (new file, 18 lines)

@@ -0,0 +1,18 @@
// This is a regression unit test regarding a weird linking issue with gcc.
#include "bug1213.h"
int main()
{
return 0;
}
template<typename T, int dim>
bool bug1213_2(const Eigen::Matrix<T,dim,1>& )
{
return true;
}
template bool bug1213_2<float,3>(const Eigen::Vector3f&);


@@ -184,6 +184,18 @@ template<int SizeAtCompileType> void mixingtypes(int size = SizeAtCompileType)
Mat_cd((scd * mcd * md.template cast<CD>().eval()).template triangularView<Upper>()));
VERIFY_IS_APPROX(Mat_cd(rcd.template triangularView<Upper>() = scd * md * mcd),
Mat_cd((scd * md.template cast<CD>().eval() * mcd).template triangularView<Upper>()));
+VERIFY_IS_APPROX( md.array() * mcd.array(), md.template cast<CD>().eval().array() * mcd.array() );
+VERIFY_IS_APPROX( mcd.array() * md.array(), mcd.array() * md.template cast<CD>().eval().array() );
+// VERIFY_IS_APPROX( md.array() / mcd.array(), md.template cast<CD>().eval().array() / mcd.array() );
+VERIFY_IS_APPROX( mcd.array() / md.array(), mcd.array() / md.template cast<CD>().eval().array() );
+rcd = mcd;
+VERIFY_IS_APPROX( rcd.array() *= md.array(), mcd.array() * md.template cast<CD>().eval().array() );
+rcd = mcd;
+VERIFY_IS_APPROX( rcd.array() /= md.array(), mcd.array() / md.template cast<CD>().eval().array() );
}
void test_mixingtypes()


@@ -372,6 +372,12 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
SparseMatrixType m2(rows, rows);
initSparse<Scalar>(density, refMat2, m2);
VERIFY_IS_APPROX(m2.eval(), refMat2.sparseView().eval());
+// sparse view on expressions:
+VERIFY_IS_APPROX((s1*m2).eval(), (s1*refMat2).sparseView().eval());
+VERIFY_IS_APPROX((m2+m2).eval(), (refMat2+refMat2).sparseView().eval());
+VERIFY_IS_APPROX((m2*m2).eval(), (refMat2.lazyProduct(refMat2)).sparseView().eval());
+VERIFY_IS_APPROX((m2*m2).eval(), (refMat2*refMat2).sparseView().eval());
}
// test diagonal


@@ -7,8 +7,26 @@
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+static long int nb_temporaries;
+inline void on_temporary_creation() {
+// here's a great place to set a breakpoint when debugging failures in this test!
+nb_temporaries++;
+}
+#define EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN { on_temporary_creation(); }
#include "sparse.h"
+#define VERIFY_EVALUATION_COUNT(XPR,N) {\
+nb_temporaries = 0; \
+CALL_SUBTEST( XPR ); \
+if(nb_temporaries!=N) std::cerr << "nb_temporaries == " << nb_temporaries << "\n"; \
+VERIFY( (#XPR) && nb_temporaries==N ); \
+}
template<typename SparseMatrixType> void sparse_product()
{
typedef typename SparseMatrixType::StorageIndex StorageIndex;
@@ -76,6 +94,24 @@ template<typename SparseMatrixType> void sparse_product()
VERIFY_IS_APPROX(m4=(m2t.transpose()*m3t.transpose()).pruned(0), refMat4=refMat2t.transpose()*refMat3t.transpose());
VERIFY_IS_APPROX(m4=(m2*m3t.transpose()).pruned(0), refMat4=refMat2*refMat3t.transpose());
+// make sure the right product implementation is called:
+if((!SparseMatrixType::IsRowMajor) && m2.rows()<=m3.cols())
+{
+VERIFY_EVALUATION_COUNT(m4 = m2*m3, 3); // 1 temp for the result + 2 for transposing and get a sorted result.
+VERIFY_EVALUATION_COUNT(m4 = (m2*m3).pruned(0), 1);
+VERIFY_EVALUATION_COUNT(m4 = (m2*m3).eval().pruned(0), 4);
+}
+// and that pruning is effective:
+{
+DenseMatrix Ad(2,2);
+Ad << -1, 1, 1, 1;
+SparseMatrixType As(Ad.sparseView()), B(2,2);
+VERIFY_IS_EQUAL( (As*As.transpose()).eval().nonZeros(), 4);
+VERIFY_IS_EQUAL( (Ad*Ad.transpose()).eval().sparseView().eval().nonZeros(), 2);
+VERIFY_IS_EQUAL( (As*As.transpose()).pruned(1e-6).eval().nonZeros(), 2);
+}
// dense ?= sparse * sparse
VERIFY_IS_APPROX(dm4 =m2*m3, refMat4 =refMat2*refMat3);
VERIFY_IS_APPROX(dm4+=m2*m3, refMat4+=refMat2*refMat3);


@@ -87,7 +87,7 @@ struct functor_traits<scalar_sigmoid_op<T> > {
// Standard reduction functors
template <typename T> struct SumReducer
{
-static const bool PacketAccess = true;
+static const bool PacketAccess = packet_traits<T>::HasAdd;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
@@ -121,7 +121,7 @@ template <typename T> struct SumReducer
template <typename T> struct MeanReducer
{
-static const bool PacketAccess = !NumTraits<T>::IsInteger;
+static const bool PacketAccess = packet_traits<T>::HasAdd && !NumTraits<T>::IsInteger;
static const bool IsStateful = true;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
@@ -164,7 +164,7 @@ template <typename T> struct MeanReducer
template <typename T> struct MaxReducer
{
-static const bool PacketAccess = true;
+static const bool PacketAccess = packet_traits<T>::HasMax;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
@@ -197,7 +197,7 @@ template <typename T> struct MaxReducer
template <typename T> struct MinReducer
{
-static const bool PacketAccess = true;
+static const bool PacketAccess = packet_traits<T>::HasMin;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {
@@ -231,7 +231,7 @@ template <typename T> struct MinReducer
template <typename T> struct ProdReducer
{
-static const bool PacketAccess = true;
+static const bool PacketAccess = packet_traits<T>::HasMul;
static const bool IsStateful = false;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void reduce(const T t, T* accum) const {

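My reading of these guards, as a hedged sketch (not from the patch): a reducer now only advertises a packet path when the scalar type actually provides the matching packet primitive, so a type such as std::complex<float>, which has packet addition but no packet min/max, stays on the scalar path for the reducers that would need those primitives.

#include <unsupported/Eigen/CXX11/Tensor>
#include <complex>

int main()
{
  // SumReducer<std::complex<float>>: PacketAccess now follows
  // packet_traits<std::complex<float>>::HasAdd instead of being hard-coded true.
  Eigen::Tensor<std::complex<float>, 1> t(8);
  t.setConstant(std::complex<float>(1.0f, 0.0f));
  Eigen::Tensor<std::complex<float>, 0> s = t.sum();
  return *s.data() == std::complex<float>(8.0f, 0.0f) ? 0 : 1;
}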

@@ -101,7 +101,7 @@ class AutoDiffScalar
template<typename OtherDerType>
AutoDiffScalar(const AutoDiffScalar<OtherDerType>& other
#ifndef EIGEN_PARSED_BY_DOXYGEN
-, typename internal::enable_if<internal::is_same<Scalar,typename OtherDerType::Scalar>::value,void*>::type = 0
+, typename internal::enable_if<internal::is_same<Scalar, typename internal::traits<typename internal::remove_all<OtherDerType>::type>::Scalar>::value,void*>::type = 0
#endif
)
: m_value(other.value()), m_derivatives(other.derivatives())
@@ -548,13 +548,25 @@ inline const AutoDiffScalar<DerType>& real(const AutoDiffScalar<DerType>& x) {
template<typename DerType>
inline typename DerType::Scalar imag(const AutoDiffScalar<DerType>&) { return 0.; }
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (min)(const AutoDiffScalar<DerType>& x, const T& y) { return (x <= y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (min)(const AutoDiffScalar<DerType>& x, const T& y) {
+typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+return (x <= y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (max)(const AutoDiffScalar<DerType>& x, const T& y) { return (x >= y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (max)(const AutoDiffScalar<DerType>& x, const T& y) {
+typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+return (x >= y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (min)(const T& x, const AutoDiffScalar<DerType>& y) { return (x < y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (min)(const T& x, const AutoDiffScalar<DerType>& y) {
+typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+return (x < y ? ADS(x) : ADS(y));
+}
template<typename DerType, typename T>
-inline AutoDiffScalar<DerType> (max)(const T& x, const AutoDiffScalar<DerType>& y) { return (x > y ? x : y); }
+inline AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> (max)(const T& x, const AutoDiffScalar<DerType>& y) {
+typedef AutoDiffScalar<typename Eigen::internal::remove_all<DerType>::type::PlainObject> ADS;
+return (x > y ? ADS(x) : ADS(y));
+}
EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(abs,
using std::abs;
@@ -590,7 +602,7 @@ EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log,
template<typename DerType>
inline const Eigen::AutoDiffScalar<Eigen::CwiseUnaryOp<Eigen::internal::scalar_multiple_op<typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar>, const typename internal::remove_all<DerType>::type> >
-pow(const Eigen::AutoDiffScalar<DerType>& x, typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar y)
+pow(const Eigen::AutoDiffScalar<DerType>& x, const typename internal::traits<typename internal::remove_all<DerType>::type>::Scalar &y)
{
using namespace Eigen;
typedef typename internal::remove_all<DerType>::type DerTypeCleaned;


@@ -404,11 +404,10 @@ struct matrix_function_compute<MatrixType, 0>
typedef internal::traits<MatrixType> Traits;
typedef typename Traits::Scalar Scalar;
static const int Rows = Traits::RowsAtCompileTime, Cols = Traits::ColsAtCompileTime;
-static const int Options = MatrixType::Options;
static const int MaxRows = Traits::MaxRowsAtCompileTime, MaxCols = Traits::MaxColsAtCompileTime;
typedef std::complex<Scalar> ComplexScalar;
-typedef Matrix<ComplexScalar, Rows, Cols, Options, MaxRows, MaxCols> ComplexMatrix;
+typedef Matrix<ComplexScalar, Rows, Cols, 0, MaxRows, MaxCols> ComplexMatrix;
ComplexMatrix CA = A.template cast<ComplexScalar>();
ComplexMatrix Cresult;
@@ -509,9 +508,8 @@ template<typename Derived> class MatrixFunctionReturnValue
typedef internal::traits<NestedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
-static const int Options = NestedEvalTypeClean::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixFunctionAtomic<DynMatrixType> AtomicType;
AtomicType atomic(m_f);


@@ -334,9 +334,8 @@ public:
typedef internal::traits<DerivedEvalTypeClean> Traits;
static const int RowsAtCompileTime = Traits::RowsAtCompileTime;
static const int ColsAtCompileTime = Traits::ColsAtCompileTime;
-static const int Options = DerivedEvalTypeClean::Options;
typedef std::complex<typename NumTraits<Scalar>::Real> ComplexScalar;
-typedef Matrix<ComplexScalar, Dynamic, Dynamic, Options, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
+typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0, RowsAtCompileTime, ColsAtCompileTime> DynMatrixType;
typedef internal::MatrixLogarithmAtomic<DynMatrixType> AtomicType;
AtomicType atomic;


@@ -383,7 +383,7 @@ class MatrixPower : internal::noncopyable
private:
typedef std::complex<RealScalar> ComplexScalar;
-typedef Matrix<ComplexScalar, Dynamic, Dynamic, MatrixType::Options,
+typedef Matrix<ComplexScalar, Dynamic, Dynamic, 0,
MatrixType::RowsAtCompileTime, MatrixType::ColsAtCompileTime> ComplexMatrix;
/** \brief Reference to the base of matrix power. */

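All three headers stop propagating MatrixType::Options into their internal complex work matrices and hardcode column-major storage instead, presumably so that the caller's storage order no longer leaks into the internal machinery; a hedged sketch of the kind of call this touches (my example, not from the patch):

#include <unsupported/Eigen/MatrixFunctions>

int main()
{
  // A row-major input: the MatrixFunction/MatrixLogarithm/MatrixPower internals
  // now build their complex temporaries as ordinary column-major matrices.
  Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> A(3, 3);
  A.setRandom();
  Eigen::MatrixXd C = A.cos();   // routed through MatrixFunctionReturnValue
  return C.rows() == 3 ? 0 : 1;
}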

@@ -207,7 +207,32 @@ void test_autodiff_hessian()
VERIFY_IS_APPROX(y.derivatives()(1).derivatives(), -std::sin(s1*s3+s2*s4)*Vector2d(s3*s4,s4*s4));
}
+double bug_1222() {
+typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+const double _cv1_3 = 1.0;
+const AD chi_3 = 1.0;
+// this line did not work, because operator+ returns ADS<DerType&>, which then cannot be converted to ADS<DerType>
+const AD denom = chi_3 + _cv1_3;
+return denom.value();
+}
+double bug_1223() {
+using std::min;
+typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
+const double _cv1_3 = 1.0;
+const AD chi_3 = 1.0;
+const AD denom = 1.0;
+// failed because implementation of min attempts to construct ADS<DerType&> via constructor AutoDiffScalar(const Real& value)
+// without initializing m_derivatives (which is a reference in this case)
+#define EIGEN_TEST_SPACE
+const AD t = min EIGEN_TEST_SPACE (denom / chi_3, 1.0);
+const AD t2 = min EIGEN_TEST_SPACE (denom / (chi_3 * _cv1_3), 1.0);
+return t.value() + t2.value();
+}
void test_autodiff()
{
@@ -217,5 +242,8 @@ void test_autodiff()
CALL_SUBTEST_3( test_autodiff_jacobian<1>() );
CALL_SUBTEST_4( test_autodiff_hessian<1>() );
}
+bug_1222();
+bug_1223();
}