Remove Evaluators in 3.2 branch.

Gael Guennebaud 2013-07-17 10:00:36 +02:00
parent 2c288b3949
commit 2872d964f4
7 changed files with 0 additions and 2921 deletions

Eigen/Core

@@ -350,13 +350,6 @@ using std::ptrdiff_t;
#include "src/Core/ArrayBase.h"
#include "src/Core/ArrayWrapper.h"
#ifdef EIGEN_ENABLE_EVALUATORS
#include "src/Core/Product.h"
#include "src/Core/CoreEvaluators.h"
#include "src/Core/AssignEvaluator.h"
#include "src/Core/ProductEvaluators.h"
#endif
#ifdef EIGEN_USE_BLAS
#include "src/Core/products/GeneralMatrixMatrix_MKL.h"
#include "src/Core/products/GeneralMatrixVector_MKL.h"

Eigen/src/Core/AssignEvaluator.h

@@ -1,754 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ASSIGN_EVALUATOR_H
#define EIGEN_ASSIGN_EVALUATOR_H
namespace Eigen {
// This implementation is based on Assign.h
namespace internal {
/***************************************************************************
* Part 1 : the logic deciding a strategy for traversal and unrolling *
***************************************************************************/
// copy_using_evaluator_traits is based on assign_traits
template <typename Derived, typename OtherDerived>
struct copy_using_evaluator_traits
{
public:
enum {
DstIsAligned = Derived::Flags & AlignedBit,
DstHasDirectAccess = Derived::Flags & DirectAccessBit,
SrcIsAligned = OtherDerived::Flags & AlignedBit,
JointAlignment = bool(DstIsAligned) && bool(SrcIsAligned) ? Aligned : Unaligned,
SrcEvalBeforeAssign = (evaluator_traits<OtherDerived>::HasEvalTo == 1)
};
private:
enum {
InnerSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::SizeAtCompileTime)
: int(Derived::Flags)&RowMajorBit ? int(Derived::ColsAtCompileTime)
: int(Derived::RowsAtCompileTime),
InnerMaxSize = int(Derived::IsVectorAtCompileTime) ? int(Derived::MaxSizeAtCompileTime)
: int(Derived::Flags)&RowMajorBit ? int(Derived::MaxColsAtCompileTime)
: int(Derived::MaxRowsAtCompileTime),
MaxSizeAtCompileTime = Derived::SizeAtCompileTime,
PacketSize = packet_traits<typename Derived::Scalar>::size
};
enum {
StorageOrdersAgree = (int(Derived::IsRowMajor) == int(OtherDerived::IsRowMajor)),
MightVectorize = StorageOrdersAgree
&& (int(Derived::Flags) & int(OtherDerived::Flags) & ActualPacketAccessBit),
MayInnerVectorize = MightVectorize && int(InnerSize)!=Dynamic && int(InnerSize)%int(PacketSize)==0
&& int(DstIsAligned) && int(SrcIsAligned),
MayLinearize = StorageOrdersAgree && (int(Derived::Flags) & int(OtherDerived::Flags) & LinearAccessBit),
MayLinearVectorize = MightVectorize && MayLinearize && DstHasDirectAccess
&& (DstIsAligned || MaxSizeAtCompileTime == Dynamic),
/* If the destination isn't aligned, we have to do runtime checks and we don't unroll,
so it's only good for large enough sizes. */
MaySliceVectorize = MightVectorize && DstHasDirectAccess
&& (int(InnerMaxSize)==Dynamic || int(InnerMaxSize)>=3*PacketSize)
/* slice vectorization can be slow, so we only want it if the slices are big, which is
indicated by InnerMaxSize rather than InnerSize, think of the case of a dynamic block
in a fixed-size matrix */
};
public:
enum {
Traversal = int(SrcEvalBeforeAssign) ? int(AllAtOnceTraversal)
: int(MayInnerVectorize) ? int(InnerVectorizedTraversal)
: int(MayLinearVectorize) ? int(LinearVectorizedTraversal)
: int(MaySliceVectorize) ? int(SliceVectorizedTraversal)
: int(MayLinearize) ? int(LinearTraversal)
: int(DefaultTraversal),
Vectorized = int(Traversal) == InnerVectorizedTraversal
|| int(Traversal) == LinearVectorizedTraversal
|| int(Traversal) == SliceVectorizedTraversal
};
private:
enum {
UnrollingLimit = EIGEN_UNROLLING_LIMIT * (Vectorized ? int(PacketSize) : 1),
MayUnrollCompletely = int(Derived::SizeAtCompileTime) != Dynamic
&& int(OtherDerived::CoeffReadCost) != Dynamic
&& int(Derived::SizeAtCompileTime) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit),
MayUnrollInner = int(InnerSize) != Dynamic
&& int(OtherDerived::CoeffReadCost) != Dynamic
&& int(InnerSize) * int(OtherDerived::CoeffReadCost) <= int(UnrollingLimit)
};
public:
enum {
Unrolling = (int(Traversal) == int(InnerVectorizedTraversal) || int(Traversal) == int(DefaultTraversal))
? (
int(MayUnrollCompletely) ? int(CompleteUnrolling)
: int(MayUnrollInner) ? int(InnerUnrolling)
: int(NoUnrolling)
)
: int(Traversal) == int(LinearVectorizedTraversal)
? ( bool(MayUnrollCompletely) && bool(DstIsAligned) ? int(CompleteUnrolling)
: int(NoUnrolling) )
: int(Traversal) == int(LinearTraversal)
? ( bool(MayUnrollCompletely) ? int(CompleteUnrolling)
: int(NoUnrolling) )
: int(NoUnrolling)
};
#ifdef EIGEN_DEBUG_ASSIGN
static void debug()
{
EIGEN_DEBUG_VAR(DstIsAligned)
EIGEN_DEBUG_VAR(SrcIsAligned)
EIGEN_DEBUG_VAR(JointAlignment)
EIGEN_DEBUG_VAR(InnerSize)
EIGEN_DEBUG_VAR(InnerMaxSize)
EIGEN_DEBUG_VAR(PacketSize)
EIGEN_DEBUG_VAR(StorageOrdersAgree)
EIGEN_DEBUG_VAR(MightVectorize)
EIGEN_DEBUG_VAR(MayLinearize)
EIGEN_DEBUG_VAR(MayInnerVectorize)
EIGEN_DEBUG_VAR(MayLinearVectorize)
EIGEN_DEBUG_VAR(MaySliceVectorize)
EIGEN_DEBUG_VAR(Traversal)
EIGEN_DEBUG_VAR(UnrollingLimit)
EIGEN_DEBUG_VAR(MayUnrollCompletely)
EIGEN_DEBUG_VAR(MayUnrollInner)
EIGEN_DEBUG_VAR(Unrolling)
}
#endif
};
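All of these choices are compile-time enum constants, so the usual way to see what the logic above decided for a given pair of expressions was the debug() hook, compiled in under EIGEN_DEBUG_ASSIGN. A minimal sketch against the pre-removal tree (the Matrix4f pair is an arbitrary example, not from this commit):

// Both macros must precede the Eigen include.
#define EIGEN_ENABLE_EVALUATORS
#define EIGEN_DEBUG_ASSIGN
#include <Eigen/Core>

int main()
{
  // Prints DstIsAligned, Traversal, Unrolling, ... for a Matrix4f <- Matrix4f copy.
  Eigen::internal::copy_using_evaluator_traits<Eigen::Matrix4f, Eigen::Matrix4f>::debug();
}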
/***************************************************************************
* Part 2 : meta-unrollers
***************************************************************************/
/************************
*** Default traversal ***
************************/
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling
{
typedef typename DstEvaluatorType::XprType DstXprType;
enum {
outer = Index / DstXprType::InnerSizeAtCompileTime,
inner = Index % DstXprType::InnerSizeAtCompileTime
};
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
SrcEvaluatorType &srcEvaluator)
{
dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
copy_using_evaluator_DefaultTraversal_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
::run(dstEvaluator, srcEvaluator);
}
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
struct copy_using_evaluator_DefaultTraversal_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, SrcEvaluatorType&) { }
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
SrcEvaluatorType &srcEvaluator,
int outer)
{
dstEvaluator.copyCoeffByOuterInner(outer, Index, srcEvaluator);
copy_using_evaluator_DefaultTraversal_InnerUnrolling
<DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
::run(dstEvaluator, srcEvaluator, outer);
}
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
struct copy_using_evaluator_DefaultTraversal_InnerUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, SrcEvaluatorType&, int) { }
};
/***********************
*** Linear traversal ***
***********************/
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
struct copy_using_evaluator_LinearTraversal_CompleteUnrolling
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
SrcEvaluatorType &srcEvaluator)
{
dstEvaluator.copyCoeff(Index, srcEvaluator);
copy_using_evaluator_LinearTraversal_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, Index+1, Stop>
::run(dstEvaluator, srcEvaluator);
}
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
struct copy_using_evaluator_LinearTraversal_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, SrcEvaluatorType&) { }
};
/**************************
*** Inner vectorization ***
**************************/
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
struct copy_using_evaluator_innervec_CompleteUnrolling
{
typedef typename DstEvaluatorType::XprType DstXprType;
typedef typename SrcEvaluatorType::XprType SrcXprType;
enum {
outer = Index / DstXprType::InnerSizeAtCompileTime,
inner = Index % DstXprType::InnerSizeAtCompileTime,
JointAlignment = copy_using_evaluator_traits<DstXprType,SrcXprType>::JointAlignment
};
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
SrcEvaluatorType &srcEvaluator)
{
dstEvaluator.template copyPacketByOuterInner<Aligned, JointAlignment>(outer, inner, srcEvaluator);
enum { NextIndex = Index + packet_traits<typename DstXprType::Scalar>::size };
copy_using_evaluator_innervec_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, NextIndex, Stop>
::run(dstEvaluator, srcEvaluator);
}
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
struct copy_using_evaluator_innervec_CompleteUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, SrcEvaluatorType&) { }
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Index, int Stop>
struct copy_using_evaluator_innervec_InnerUnrolling
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
SrcEvaluatorType &srcEvaluator,
int outer)
{
dstEvaluator.template copyPacketByOuterInner<Aligned, Aligned>(outer, Index, srcEvaluator);
typedef typename DstEvaluatorType::XprType DstXprType;
enum { NextIndex = Index + packet_traits<typename DstXprType::Scalar>::size };
copy_using_evaluator_innervec_InnerUnrolling
<DstEvaluatorType, SrcEvaluatorType, NextIndex, Stop>
::run(dstEvaluator, srcEvaluator, outer);
}
};
template<typename DstEvaluatorType, typename SrcEvaluatorType, int Stop>
struct copy_using_evaluator_innervec_InnerUnrolling<DstEvaluatorType, SrcEvaluatorType, Stop, Stop>
{
static EIGEN_STRONG_INLINE void run(DstEvaluatorType&, SrcEvaluatorType&, int) { }
};
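All six unrollers above share the same compile-time recursion idiom: an Index parameter advances by one step (or by the packet size) until it reaches Stop, and a full specialization on <Stop, Stop> terminates the recursion with an empty run(). A self-contained illustration of the idiom, with hypothetical names and no Eigen dependency:

#include <cstdio>

// Generic case: do one step, then recurse at compile time.
template<int Index, int Stop>
struct unroller
{
  static void run()
  {
    std::printf("step %d\n", Index); // stands in for copyCoeffByOuterInner(...)
    unroller<Index + 1, Stop>::run();
  }
};

// Terminating specialization: Index has caught up with Stop.
template<int Stop>
struct unroller<Stop, Stop>
{
  static void run() {}
};

int main()
{
  unroller<0, 4>::run(); // the compiler expands this into four consecutive steps
}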
/***************************************************************************
* Part 3 : implementation of all cases
***************************************************************************/
// copy_using_evaluator_impl is based on assign_impl
template<typename DstXprType, typename SrcXprType,
int Traversal = copy_using_evaluator_traits<DstXprType, SrcXprType>::Traversal,
int Unrolling = copy_using_evaluator_traits<DstXprType, SrcXprType>::Unrolling>
struct copy_using_evaluator_impl;
/************************
*** Default traversal ***
************************/
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, NoUnrolling>
{
static void run(DstXprType& dst, const SrcXprType& src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
typedef typename DstXprType::Index Index;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
for(Index outer = 0; outer < dst.outerSize(); ++outer) {
for(Index inner = 0; inner < dst.innerSize(); ++inner) {
dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
}
}
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, CompleteUnrolling>
{
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
copy_using_evaluator_DefaultTraversal_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
::run(dstEvaluator, srcEvaluator);
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, DefaultTraversal, InnerUnrolling>
{
typedef typename DstXprType::Index Index;
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
const Index outerSize = dst.outerSize();
for(Index outer = 0; outer < outerSize; ++outer)
copy_using_evaluator_DefaultTraversal_InnerUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::InnerSizeAtCompileTime>
::run(dstEvaluator, srcEvaluator, outer);
}
};
/***************************
*** Linear vectorization ***
***************************/
template <bool IsAligned = false>
struct unaligned_copy_using_evaluator_impl
{
// if IsAligned = true, then do nothing
template <typename SrcEvaluatorType, typename DstEvaluatorType>
static EIGEN_STRONG_INLINE void run(const SrcEvaluatorType&, DstEvaluatorType&,
typename SrcEvaluatorType::Index, typename SrcEvaluatorType::Index) {}
};
template <>
struct unaligned_copy_using_evaluator_impl<false>
{
// MSVC must not inline this function. If it does, it fails to optimize the
// packet access path.
#ifdef _MSC_VER
template <typename DstEvaluatorType, typename SrcEvaluatorType>
static EIGEN_DONT_INLINE void run(DstEvaluatorType &dstEvaluator,
const SrcEvaluatorType &srcEvaluator,
typename DstEvaluatorType::Index start,
typename DstEvaluatorType::Index end)
#else
template <typename DstEvaluatorType, typename SrcEvaluatorType>
static EIGEN_STRONG_INLINE void run(DstEvaluatorType &dstEvaluator,
const SrcEvaluatorType &srcEvaluator,
typename DstEvaluatorType::Index start,
typename DstEvaluatorType::Index end)
#endif
{
for (typename DstEvaluatorType::Index index = start; index < end; ++index)
dstEvaluator.copyCoeff(index, srcEvaluator);
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearVectorizedTraversal, NoUnrolling>
{
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
typedef typename DstXprType::Index Index;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
const Index size = dst.size();
typedef packet_traits<typename DstXprType::Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
dstIsAligned = int(copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned),
dstAlignment = PacketTraits::AlignedOnScalar ? Aligned : dstIsAligned,
srcAlignment = copy_using_evaluator_traits<DstXprType,SrcXprType>::JointAlignment
};
const Index alignedStart = dstIsAligned ? 0 : internal::first_aligned(&dstEvaluator.coeffRef(0), size);
const Index alignedEnd = alignedStart + ((size-alignedStart)/packetSize)*packetSize;
unaligned_copy_using_evaluator_impl<dstIsAligned!=0>::run(dstEvaluator, srcEvaluator, 0, alignedStart);
for(Index index = alignedStart; index < alignedEnd; index += packetSize)
{
dstEvaluator.template copyPacket<dstAlignment, srcAlignment>(index, srcEvaluator);
}
unaligned_copy_using_evaluator_impl<>::run(dstEvaluator, srcEvaluator, alignedEnd, size);
}
};
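Behind the evaluator plumbing, this specialization is the classic three-phase linear copy: a scalar head up to the first packet-aligned destination coefficient, an aligned packet body, and a scalar tail. A minimal sketch of the same control flow in plain C++ (hypothetical function; a hard-coded PacketSize stands in for packet_traits and plain loops stand in for copyCoeff/copyPacket):

#include <algorithm>
#include <cstdint>

void linear_vectorized_copy(float* dst, const float* src, int size)
{
  const int PacketSize = 4; // e.g. one SSE register of floats
  // First index at which dst is packet-aligned (mimics internal::first_aligned).
  const std::uintptr_t elem = reinterpret_cast<std::uintptr_t>(dst) / sizeof(float);
  const int misalign = static_cast<int>((PacketSize - elem % PacketSize) % PacketSize);
  const int alignedStart = std::min(size, misalign);
  const int alignedEnd = alignedStart + (size - alignedStart) / PacketSize * PacketSize;

  for (int i = 0; i < alignedStart; ++i)                      // scalar head
    dst[i] = src[i];
  for (int i = alignedStart; i < alignedEnd; i += PacketSize) // aligned packet body
    for (int k = 0; k < PacketSize; ++k)
      dst[i + k] = src[i + k];                                // stands in for copyPacket(i)
  for (int i = alignedEnd; i < size; ++i)                     // scalar tail
    dst[i] = src[i];
}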
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearVectorizedTraversal, CompleteUnrolling>
{
typedef typename DstXprType::Index Index;
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
enum { size = DstXprType::SizeAtCompileTime,
packetSize = packet_traits<typename DstXprType::Scalar>::size,
alignedSize = (size/packetSize)*packetSize };
copy_using_evaluator_innervec_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, alignedSize>
::run(dstEvaluator, srcEvaluator);
copy_using_evaluator_DefaultTraversal_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, alignedSize, size>
::run(dstEvaluator, srcEvaluator);
}
};
/**************************
*** Inner vectorization ***
**************************/
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, NoUnrolling>
{
inline static void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
typedef typename DstXprType::Index Index;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index packetSize = packet_traits<typename DstXprType::Scalar>::size;
for(Index outer = 0; outer < outerSize; ++outer)
for(Index inner = 0; inner < innerSize; inner+=packetSize) {
dstEvaluator.template copyPacketByOuterInner<Aligned, Aligned>(outer, inner, srcEvaluator);
}
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, CompleteUnrolling>
{
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
copy_using_evaluator_innervec_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
::run(dstEvaluator, srcEvaluator);
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, InnerVectorizedTraversal, InnerUnrolling>
{
typedef typename DstXprType::Index Index;
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
const Index outerSize = dst.outerSize();
for(Index outer = 0; outer < outerSize; ++outer)
copy_using_evaluator_innervec_InnerUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::InnerSizeAtCompileTime>
::run(dstEvaluator, srcEvaluator, outer);
}
};
/***********************
*** Linear traversal ***
***********************/
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearTraversal, NoUnrolling>
{
inline static void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
typedef typename DstXprType::Index Index;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
const Index size = dst.size();
for(Index i = 0; i < size; ++i)
dstEvaluator.copyCoeff(i, srcEvaluator);
}
};
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, LinearTraversal, CompleteUnrolling>
{
static EIGEN_STRONG_INLINE void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
copy_using_evaluator_LinearTraversal_CompleteUnrolling
<DstEvaluatorType, SrcEvaluatorType, 0, DstXprType::SizeAtCompileTime>
::run(dstEvaluator, srcEvaluator);
}
};
/**************************
*** Slice vectorization ***
***************************/
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, SliceVectorizedTraversal, NoUnrolling>
{
inline static void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
typedef typename DstXprType::Index Index;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
typedef packet_traits<typename DstXprType::Scalar> PacketTraits;
enum {
packetSize = PacketTraits::size,
alignable = PacketTraits::AlignedOnScalar,
dstAlignment = alignable ? Aligned : int(copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned)
};
const Index packetAlignedMask = packetSize - 1;
const Index innerSize = dst.innerSize();
const Index outerSize = dst.outerSize();
const Index alignedStep = alignable ? (packetSize - dst.outerStride() % packetSize) & packetAlignedMask : 0;
Index alignedStart = ((!alignable) || copy_using_evaluator_traits<DstXprType,SrcXprType>::DstIsAligned) ? 0
: internal::first_aligned(&dstEvaluator.coeffRef(0,0), innerSize);
for(Index outer = 0; outer < outerSize; ++outer)
{
const Index alignedEnd = alignedStart + ((innerSize-alignedStart) & ~packetAlignedMask);
// do the non-vectorizable part of the assignment
for(Index inner = 0; inner<alignedStart ; ++inner) {
dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
}
// do the vectorizable part of the assignment
for(Index inner = alignedStart; inner<alignedEnd; inner+=packetSize) {
dstEvaluator.template copyPacketByOuterInner<dstAlignment, Unaligned>(outer, inner, srcEvaluator);
}
// do the non-vectorizable part of the assignment
for(Index inner = alignedEnd; inner<innerSize ; ++inner) {
dstEvaluator.copyCoeffByOuterInner(outer, inner, srcEvaluator);
}
alignedStart = std::min<Index>((alignedStart+alignedStep)%packetSize, innerSize);
}
}
};
/****************************
*** All-at-once traversal ***
****************************/
template<typename DstXprType, typename SrcXprType>
struct copy_using_evaluator_impl<DstXprType, SrcXprType, AllAtOnceTraversal, NoUnrolling>
{
inline static void run(DstXprType &dst, const SrcXprType &src)
{
typedef typename evaluator<DstXprType>::type DstEvaluatorType;
typedef typename evaluator<SrcXprType>::type SrcEvaluatorType;
DstEvaluatorType dstEvaluator(dst);
SrcEvaluatorType srcEvaluator(src);
// Evaluate rhs in temporary to prevent aliasing problems in a = a * a;
// TODO: Do not pass the xpr object to evalTo()
srcEvaluator.evalTo(dstEvaluator, dst);
}
};
/***************************************************************************
* Part 4 : Entry points
***************************************************************************/
// Based on DenseBase::lazyAssign()
template<typename DstXprType, template <typename> class StorageBase, typename SrcXprType>
EIGEN_STRONG_INLINE
const DstXprType& copy_using_evaluator(const NoAlias<DstXprType, StorageBase>& dst,
const EigenBase<SrcXprType>& src)
{
return noalias_copy_using_evaluator(dst.expression(), src.derived());
}
template<typename XprType, int AssumeAliasing = evaluator_traits<XprType>::AssumeAliasing>
struct AddEvalIfAssumingAliasing;
template<typename XprType>
struct AddEvalIfAssumingAliasing<XprType, 0>
{
static const XprType& run(const XprType& xpr)
{
return xpr;
}
};
template<typename XprType>
struct AddEvalIfAssumingAliasing<XprType, 1>
{
static const EvalToTemp<XprType> run(const XprType& xpr)
{
return EvalToTemp<XprType>(xpr);
}
};
template<typename DstXprType, typename SrcXprType>
EIGEN_STRONG_INLINE
const DstXprType& copy_using_evaluator(const EigenBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
{
return noalias_copy_using_evaluator(dst.const_cast_derived(),
AddEvalIfAssumingAliasing<SrcXprType>::run(src.derived()));
}
template<typename DstXprType, typename SrcXprType>
EIGEN_STRONG_INLINE
const DstXprType& noalias_copy_using_evaluator(const PlainObjectBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
{
#ifdef EIGEN_DEBUG_ASSIGN
internal::copy_using_evaluator_traits<DstXprType, SrcXprType>::debug();
#endif
#ifdef EIGEN_NO_AUTOMATIC_RESIZING
eigen_assert((dst.size()==0 || (DstXprType::IsVectorAtCompileTime ? (dst.size() == src.size())
: (dst.rows() == src.rows() && dst.cols() == src.cols())))
&& "Size mismatch. Automatic resizing is disabled because EIGEN_NO_AUTOMATIC_RESIZING is defined");
#else
dst.const_cast_derived().resizeLike(src.derived());
#endif
return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived());
}
template<typename DstXprType, typename SrcXprType>
EIGEN_STRONG_INLINE
const DstXprType& noalias_copy_using_evaluator(const EigenBase<DstXprType>& dst, const EigenBase<SrcXprType>& src)
{
return copy_using_evaluator_without_resizing(dst.const_cast_derived(), src.derived());
}
template<typename DstXprType, typename SrcXprType>
const DstXprType& copy_using_evaluator_without_resizing(const DstXprType& dst, const SrcXprType& src)
{
#ifdef EIGEN_DEBUG_ASSIGN
internal::copy_using_evaluator_traits<DstXprType, SrcXprType>::debug();
#endif
eigen_assert(dst.rows() == src.rows() && dst.cols() == src.cols());
copy_using_evaluator_impl<DstXprType, SrcXprType>::run(const_cast<DstXprType&>(dst), src);
return dst;
}
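These entry points are exercised directly by the removed test file (the last diff in this commit). A sketch of the call shape against the pre-removal tree (the surrounding main() is scaffolding, not from the commit):

#define EIGEN_ENABLE_EVALUATORS
#include <Eigen/Core>

int main()
{
  Eigen::MatrixXf a(6, 6), b(6, 6), d(6, 6);
  a.setRandom(); b.setRandom();
  // Resizing entry point: wraps src in EvalToTemp when aliasing must be assumed.
  Eigen::internal::copy_using_evaluator(d, a + b);
  // NoAlias overload, used by the test for products: skips the EvalToTemp wrapping.
  Eigen::internal::copy_using_evaluator(d.noalias(), Eigen::prod(a, b));
}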
// Based on DenseBase::swap()
// TODO: Check whether we need to do something special for swapping two
// Arrays or Matrices.
template<typename DstXprType, typename SrcXprType>
void swap_using_evaluator(const DstXprType& dst, const SrcXprType& src)
{
copy_using_evaluator(SwapWrapper<DstXprType>(const_cast<DstXprType&>(dst)), src);
}
// Based on MatrixBase::operator+= (in CwiseBinaryOp.h)
template<typename DstXprType, typename SrcXprType>
void add_assign_using_evaluator(const MatrixBase<DstXprType>& dst, const MatrixBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
// Based on ArrayBase::operator+=
template<typename DstXprType, typename SrcXprType>
void add_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_sum_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
// TODO: Add add_assign_using_evaluator for EigenBase ?
template<typename DstXprType, typename SrcXprType>
void subtract_assign_using_evaluator(const MatrixBase<DstXprType>& dst, const MatrixBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
template<typename DstXprType, typename SrcXprType>
void subtract_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_difference_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
template<typename DstXprType, typename SrcXprType>
void multiply_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_product_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
template<typename DstXprType, typename SrcXprType>
void divide_assign_using_evaluator(const ArrayBase<DstXprType>& dst, const ArrayBase<SrcXprType>& src)
{
typedef typename DstXprType::Scalar Scalar;
SelfCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, DstXprType, SrcXprType> tmp(dst.const_cast_derived());
copy_using_evaluator(tmp, src.derived());
}
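The test file below drives these helpers near its end; a sketch of their intended equivalences, again against the pre-removal tree:

#define EIGEN_ENABLE_EVALUATORS
#include <Eigen/Core>

int main()
{
  Eigen::Matrix4d m = Eigen::Matrix4d::Identity();
  const Eigen::Matrix4d rhs = Eigen::Matrix4d::Random();
  Eigen::internal::add_assign_using_evaluator(m, rhs);      // same effect as m += rhs
  Eigen::internal::subtract_assign_using_evaluator(m, rhs); // same effect as m -= rhs
  // multiply_/divide_assign_using_evaluator are the ArrayBase counterparts of *= and /=.
}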
} // namespace internal
} // end namespace Eigen
#endif // EIGEN_ASSIGN_EVALUATOR_H

Eigen/src/Core/CoreEvaluators.h

File diff suppressed because it is too large.

Eigen/src/Core/Product.h

@@ -1,107 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PRODUCT_H
#define EIGEN_PRODUCT_H
namespace Eigen {
template<typename Lhs, typename Rhs> class Product;
template<typename Lhs, typename Rhs, typename StorageKind> class ProductImpl;
/** \class Product
* \ingroup Core_Module
*
* \brief Expression of the product of two arbitrary matrices or vectors
*
* \param Lhs the type of the left-hand side expression
* \param Rhs the type of the right-hand side expression
*
* This class represents an expression of the product of two arbitrary matrices.
*
*/
// Use ProductReturnType to get correct traits, in particular vectorization flags
namespace internal {
template<typename Lhs, typename Rhs>
struct traits<Product<Lhs, Rhs> >
: traits<typename ProductReturnType<Lhs, Rhs>::Type>
{
// We want A+B*C to be nested as Sum<Matrix, Product> and not Sum<Matrix, Matrix>,
// i.e., the product must not be evaluated into a temporary before being nested.
// TODO: This flag should eventually go in a separate evaluator traits class
enum {
Flags = traits<typename ProductReturnType<Lhs, Rhs>::Type>::Flags & ~EvalBeforeNestingBit
};
};
} // end namespace internal
template<typename Lhs, typename Rhs>
class Product : public ProductImpl<Lhs,Rhs,typename internal::promote_storage_type<typename internal::traits<Lhs>::StorageKind,
typename internal::traits<Rhs>::StorageKind>::ret>
{
public:
typedef typename ProductImpl<
Lhs, Rhs,
typename internal::promote_storage_type<typename Lhs::StorageKind,
typename Rhs::StorageKind>::ret>::Base Base;
EIGEN_GENERIC_PUBLIC_INTERFACE(Product)
typedef typename Lhs::Nested LhsNested;
typedef typename Rhs::Nested RhsNested;
typedef typename internal::remove_all<LhsNested>::type LhsNestedCleaned;
typedef typename internal::remove_all<RhsNested>::type RhsNestedCleaned;
Product(const Lhs& lhs, const Rhs& rhs) : m_lhs(lhs), m_rhs(rhs)
{
eigen_assert(lhs.cols() == rhs.rows()
&& "invalid matrix product"
&& "if you wanted a coeff-wise or a dot product use the respective explicit functions");
}
inline Index rows() const { return m_lhs.rows(); }
inline Index cols() const { return m_rhs.cols(); }
const LhsNestedCleaned& lhs() const { return m_lhs; }
const RhsNestedCleaned& rhs() const { return m_rhs; }
protected:
LhsNested m_lhs;
RhsNested m_rhs;
};
template<typename Lhs, typename Rhs>
class ProductImpl<Lhs,Rhs,Dense> : public internal::dense_xpr_base<Product<Lhs,Rhs> >::type
{
typedef Product<Lhs, Rhs> Derived;
public:
typedef typename internal::dense_xpr_base<Product<Lhs, Rhs> >::type Base;
EIGEN_DENSE_PUBLIC_INTERFACE(Derived)
};
/***************************************************************************
* Implementation of matrix base methods
***************************************************************************/
/** \internal used to test the evaluator only
*/
template<typename Lhs,typename Rhs>
const Product<Lhs,Rhs>
prod(const Lhs& lhs, const Rhs& rhs)
{
return Product<Lhs,Rhs>(lhs,rhs);
}
} // end namespace Eigen
#endif // EIGEN_PRODUCT_H
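As the \internal comment says, prod() existed only so the tests could name a Product expression without going through operator* (which in the 3.2 branch still returns a GeneralProduct). A sketch, assuming the pre-removal tree:

#define EIGEN_ENABLE_EVALUATORS
#include <Eigen/Core>
#include <cassert>

int main()
{
  Eigen::MatrixXf a(4, 4), b(4, 4);
  a.setRandom(); b.setRandom();
  // An unevaluated product expression; nothing is computed yet.
  Eigen::Product<Eigen::MatrixXf, Eigen::MatrixXf> expr = Eigen::prod(a, b);
  assert(expr.rows() == 4 && expr.cols() == 4); // sizes come from the operands
  // Evaluation is dispatched by ProductEvaluators.h (next file), e.g. via
  // internal::copy_using_evaluator(d, Eigen::prod(a, b)).
}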

Eigen/src/Core/ProductEvaluators.h

@@ -1,411 +0,0 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2006-2008 Benoit Jacob <jacob.benoit.1@gmail.com>
// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
// Copyright (C) 2011 Jitse Niesen <jitse@maths.leeds.ac.uk>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_PRODUCTEVALUATORS_H
#define EIGEN_PRODUCTEVALUATORS_H
namespace Eigen {
namespace internal {
// We can evaluate the product either all at once, like GeneralProduct and its evalTo() function, or
// traverse the matrix coefficient by coefficient, like CoeffBasedProduct. Use the existing logic
// in ProductReturnType to decide.
template<typename XprType, typename ProductType>
struct product_evaluator_dispatcher;
template<typename Lhs, typename Rhs>
struct evaluator_impl<Product<Lhs, Rhs> >
: product_evaluator_dispatcher<Product<Lhs, Rhs>, typename ProductReturnType<Lhs, Rhs>::Type>
{
typedef Product<Lhs, Rhs> XprType;
typedef product_evaluator_dispatcher<XprType, typename ProductReturnType<Lhs, Rhs>::Type> Base;
evaluator_impl(const XprType& xpr) : Base(xpr)
{ }
};
template<typename XprType, typename ProductType>
struct product_evaluator_traits_dispatcher;
template<typename Lhs, typename Rhs>
struct evaluator_traits<Product<Lhs, Rhs> >
: product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, typename ProductReturnType<Lhs, Rhs>::Type>
{
static const int AssumeAliasing = 1;
};
// Case 1: Evaluate all at once
//
// We can view the GeneralProduct class as a part of the product evaluator.
// Four sub-cases: InnerProduct, OuterProduct, GemmProduct and GemvProduct.
// InnerProduct is special because GeneralProduct does not have an evalTo() method in this case.
template<typename Lhs, typename Rhs>
struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, InnerProduct> >
{
static const int HasEvalTo = 0;
};
template<typename Lhs, typename Rhs>
struct product_evaluator_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, InnerProduct> >
: public evaluator<typename Product<Lhs, Rhs>::PlainObject>::type
{
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type evaluator_base;
// TODO: Computation is too early (?)
product_evaluator_dispatcher(const XprType& xpr) : evaluator_base(m_result)
{
m_result.coeffRef(0,0) = (xpr.lhs().transpose().cwiseProduct(xpr.rhs())).sum();
}
protected:
PlainObject m_result;
};
// For the other three subcases, simply call the evalTo() method of GeneralProduct
// TODO: GeneralProduct should take evaluators, not expression objects.
template<typename Lhs, typename Rhs, int ProductType>
struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, ProductType> >
{
static const int HasEvalTo = 1;
};
template<typename Lhs, typename Rhs, int ProductType>
struct product_evaluator_dispatcher<Product<Lhs, Rhs>, GeneralProduct<Lhs, Rhs, ProductType> >
{
typedef Product<Lhs, Rhs> XprType;
typedef typename XprType::PlainObject PlainObject;
typedef typename evaluator<PlainObject>::type evaluator_base;
product_evaluator_dispatcher(const XprType& xpr) : m_xpr(xpr)
{ }
template<typename DstEvaluatorType, typename DstXprType>
void evalTo(DstEvaluatorType /* not used */, DstXprType& dst)
{
dst.resize(m_xpr.rows(), m_xpr.cols());
GeneralProduct<Lhs, Rhs, ProductType>(m_xpr.lhs(), m_xpr.rhs()).evalTo(dst);
}
protected:
const XprType& m_xpr;
};
// Case 2: Evaluate coeff by coeff
//
// This is mostly taken from CoeffBasedProduct.h
// The main difference is that we add an extra argument to the etor_product_*_impl::run() function
for the inner dimension of the product, because evaluator objects do not know their size.
template<int Traversal, int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl;
template<int StorageOrder, int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl;
template<typename Lhs, typename Rhs, typename LhsNested, typename RhsNested, int Flags>
struct product_evaluator_traits_dispatcher<Product<Lhs, Rhs>, CoeffBasedProduct<LhsNested, RhsNested, Flags> >
{
static const int HasEvalTo = 0;
};
template<typename Lhs, typename Rhs, typename LhsNested, typename RhsNested, int Flags>
struct product_evaluator_dispatcher<Product<Lhs, Rhs>, CoeffBasedProduct<LhsNested, RhsNested, Flags> >
: evaluator_impl_base<Product<Lhs, Rhs> >
{
typedef Product<Lhs, Rhs> XprType;
typedef CoeffBasedProduct<LhsNested, RhsNested, Flags> CoeffBasedProductType;
product_evaluator_dispatcher(const XprType& xpr)
: m_lhsImpl(xpr.lhs()),
m_rhsImpl(xpr.rhs()),
m_innerDim(xpr.lhs().cols())
{ }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketScalar PacketScalar;
typedef typename XprType::PacketReturnType PacketReturnType;
// Everything below here is taken from CoeffBasedProduct.h
enum {
RowsAtCompileTime = traits<CoeffBasedProductType>::RowsAtCompileTime,
PacketSize = packet_traits<Scalar>::size,
InnerSize = traits<CoeffBasedProductType>::InnerSize,
CoeffReadCost = traits<CoeffBasedProductType>::CoeffReadCost,
Unroll = CoeffReadCost != Dynamic && CoeffReadCost <= EIGEN_UNROLLING_LIMIT,
CanVectorizeInner = traits<CoeffBasedProductType>::CanVectorizeInner
};
typedef typename evaluator<Lhs>::type LhsEtorType;
typedef typename evaluator<Rhs>::type RhsEtorType;
typedef etor_product_coeff_impl<CanVectorizeInner ? InnerVectorizedTraversal : DefaultTraversal,
Unroll ? InnerSize-1 : Dynamic,
LhsEtorType, RhsEtorType, Scalar> CoeffImpl;
const CoeffReturnType coeff(Index row, Index col) const
{
Scalar res;
CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
return res;
}
/* Allow index-based non-packet access. It is impossible though to allow index-based packet access,
* which is why we don't set the LinearAccessBit.
*/
const CoeffReturnType coeff(Index index) const
{
Scalar res;
const Index row = RowsAtCompileTime == 1 ? 0 : index;
const Index col = RowsAtCompileTime == 1 ? index : 0;
CoeffImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
return res;
}
template<int LoadMode>
const PacketReturnType packet(Index row, Index col) const
{
PacketScalar res;
typedef etor_product_packet_impl<Flags&RowMajorBit ? RowMajor : ColMajor,
Unroll ? InnerSize-1 : Dynamic,
LhsEtorType, RhsEtorType, PacketScalar, LoadMode> PacketImpl;
PacketImpl::run(row, col, m_lhsImpl, m_rhsImpl, m_innerDim, res);
return res;
}
protected:
typename evaluator<Lhs>::type m_lhsImpl;
typename evaluator<Rhs>::type m_rhsImpl;
// TODO: Get rid of m_innerDim if known at compile time
Index m_innerDim;
};
/***************************************************************************
* Normal product .coeff() implementation (with meta-unrolling)
***************************************************************************/
/**************************************
*** Scalar path - no vectorization ***
**************************************/
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl<DefaultTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res)
{
etor_product_coeff_impl<DefaultTraversal, UnrollingIndex-1, Lhs, Rhs, RetScalar>::run(row, col, lhs, rhs, innerDim, res);
res += lhs.coeff(row, UnrollingIndex) * rhs.coeff(UnrollingIndex, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl<DefaultTraversal, 0, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, RetScalar &res)
{
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl<DefaultTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar& res)
{
eigen_assert(innerDim>0 && "you are using a non initialized matrix");
res = lhs.coeff(row, 0) * rhs.coeff(0, col);
for(Index i = 1; i < innerDim; ++i)
res += lhs.coeff(row, i) * rhs.coeff(i, col);
}
};
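For reference, the Dynamic fallback above computes exactly the textbook product coefficient, with the k = 0 term peeled off so that res never needs zero-initialization:

res = \sum_{k=0}^{\mathrm{innerDim}-1} \mathrm{lhs}(row, k)\,\mathrm{rhs}(k, col)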
/*******************************************
*** Scalar path with inner vectorization ***
*******************************************/
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet>
struct etor_product_coeff_vectorized_unroller
{
typedef typename Lhs::Index Index;
enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::PacketScalar &pres)
{
etor_product_coeff_vectorized_unroller<UnrollingIndex-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, innerDim, pres);
pres = padd(pres, pmul( lhs.template packet<Aligned>(row, UnrollingIndex) , rhs.template packet<Aligned>(UnrollingIndex, col) ));
}
};
template<typename Lhs, typename Rhs, typename Packet>
struct etor_product_coeff_vectorized_unroller<0, Lhs, Rhs, Packet>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::PacketScalar &pres)
{
pres = pmul(lhs.template packet<Aligned>(row, 0) , rhs.template packet<Aligned>(0, col));
}
};
template<int UnrollingIndex, typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl<InnerVectorizedTraversal, UnrollingIndex, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::PacketScalar Packet;
typedef typename Lhs::Index Index;
enum { PacketSize = packet_traits<typename Lhs::Scalar>::size };
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, RetScalar &res)
{
Packet pres;
etor_product_coeff_vectorized_unroller<UnrollingIndex+1-PacketSize, Lhs, Rhs, Packet>::run(row, col, lhs, rhs, innerDim, pres);
etor_product_coeff_impl<DefaultTraversal,UnrollingIndex,Lhs,Rhs,RetScalar>::run(row, col, lhs, rhs, innerDim, res);
res = predux(pres);
}
};
template<typename Lhs, typename Rhs, int LhsRows = Lhs::RowsAtCompileTime, int RhsCols = Rhs::ColsAtCompileTime>
struct etor_product_coeff_vectorized_dyn_selector
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
{
res = lhs.row(row).transpose().cwiseProduct(rhs.col(col)).sum();
}
};
// NOTE the 3 following specializations are because taking .col(0) on a vector is a bit slower
// NOTE maybe they are now useless since we have a specialization for Block<Matrix>
template<typename Lhs, typename Rhs, int RhsCols>
struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,RhsCols>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
{
res = lhs.transpose().cwiseProduct(rhs.col(col)).sum();
}
};
template<typename Lhs, typename Rhs, int LhsRows>
struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,LhsRows,1>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
{
res = lhs.row(row).transpose().cwiseProduct(rhs).sum();
}
};
template<typename Lhs, typename Rhs>
struct etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs,1,1>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index /*row*/, Index /*col*/, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, typename Lhs::Scalar &res)
{
res = lhs.transpose().cwiseProduct(rhs).sum();
}
};
template<typename Lhs, typename Rhs, typename RetScalar>
struct etor_product_coeff_impl<InnerVectorizedTraversal, Dynamic, Lhs, Rhs, RetScalar>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, typename Lhs::Scalar &res)
{
etor_product_coeff_vectorized_dyn_selector<Lhs,Rhs>::run(row, col, lhs, rhs, innerDim, res);
}
};
/*******************
*** Packet path ***
*******************/
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<RowMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
res = pmadd(pset1<Packet>(lhs.coeff(row, UnrollingIndex)), rhs.template packet<LoadMode>(UnrollingIndex, col), res);
}
};
template<int UnrollingIndex, typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, UnrollingIndex, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet &res)
{
etor_product_packet_impl<ColMajor, UnrollingIndex-1, Lhs, Rhs, Packet, LoadMode>::run(row, col, lhs, rhs, innerDim, res);
res = pmadd(lhs.template packet<LoadMode>(row, UnrollingIndex), pset1<Packet>(rhs.coeff(UnrollingIndex, col)), res);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, 0, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index /*innerDim*/, Packet &res)
{
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<RowMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
eigen_assert(innerDim>0 && "you are using a non initialized matrix");
res = pmul(pset1<Packet>(lhs.coeff(row, 0)),rhs.template packet<LoadMode>(0, col));
for(Index i = 1; i < innerDim; ++i)
res = pmadd(pset1<Packet>(lhs.coeff(row, i)), rhs.template packet<LoadMode>(i, col), res);
}
};
template<typename Lhs, typename Rhs, typename Packet, int LoadMode>
struct etor_product_packet_impl<ColMajor, Dynamic, Lhs, Rhs, Packet, LoadMode>
{
typedef typename Lhs::Index Index;
static EIGEN_STRONG_INLINE void run(Index row, Index col, const Lhs& lhs, const Rhs& rhs, Index innerDim, Packet& res)
{
eigen_assert(innerDim>0 && "you are using a non initialized matrix");
res = pmul(lhs.template packet<LoadMode>(row, 0), pset1<Packet>(rhs.coeff(0, col)));
for(Index i = 1; i < innerDim; ++i)
res = pmadd(lhs.template packet<LoadMode>(row, i), pset1<Packet>(rhs.coeff(i, col)), res);
}
};
} // end namespace internal
} // end namespace Eigen
#endif // EIGEN_PRODUCTEVALUATORS_H

test/CMakeLists.txt

@@ -218,7 +218,6 @@ ei_add_test(nullary)
ei_add_test(nesting_ops "${CMAKE_CXX_FLAGS_DEBUG}")
ei_add_test(zerosized)
ei_add_test(dontalign)
ei_add_test(evaluators)
ei_add_test(sizeoverflow)
ei_add_test(prec_inverse_4x4)
ei_add_test(vectorwiseop)

test/evaluators.cpp

@@ -1,330 +0,0 @@
#define EIGEN_ENABLE_EVALUATORS
#include "main.h"
using internal::copy_using_evaluator;
using namespace std;
#define VERIFY_IS_APPROX_EVALUATOR(DEST,EXPR) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (EXPR).eval());
#define VERIFY_IS_APPROX_EVALUATOR2(DEST,EXPR,REF) VERIFY_IS_APPROX(copy_using_evaluator(DEST,(EXPR)), (REF).eval());
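Both macros route each check through the evaluator entry points and compare against the ordinary expression-template path. For example, VERIFY_IS_APPROX_EVALUATOR(w, v.transpose()) expands mechanically to

VERIFY_IS_APPROX(copy_using_evaluator(w, (v.transpose())), (v.transpose()).eval());

so the evaluator-based copy on the left is checked against a plain eval() of the same expression.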
void test_evaluators()
{
// Testing Matrix evaluator and Transpose
Vector2d v = Vector2d::Random();
const Vector2d v_const(v);
Vector2d v2;
RowVector2d w;
VERIFY_IS_APPROX_EVALUATOR(v2, v);
VERIFY_IS_APPROX_EVALUATOR(v2, v_const);
// Testing Transpose
VERIFY_IS_APPROX_EVALUATOR(w, v.transpose()); // Transpose as rvalue
VERIFY_IS_APPROX_EVALUATOR(w, v_const.transpose());
copy_using_evaluator(w.transpose(), v); // Transpose as lvalue
VERIFY_IS_APPROX(w,v.transpose().eval());
copy_using_evaluator(w.transpose(), v_const);
VERIFY_IS_APPROX(w,v_const.transpose().eval());
// Testing Array evaluator
{
ArrayXXf a(2,3);
ArrayXXf b(3,2);
a << 1,2,3, 4,5,6;
const ArrayXXf a_const(a);
VERIFY_IS_APPROX_EVALUATOR(b, a.transpose());
VERIFY_IS_APPROX_EVALUATOR(b, a_const.transpose());
// Testing CwiseNullaryOp evaluator
copy_using_evaluator(w, RowVector2d::Random());
VERIFY((w.array() >= -1).all() && (w.array() <= 1).all()); // not easy to test ...
VERIFY_IS_APPROX_EVALUATOR(w, RowVector2d::Zero());
VERIFY_IS_APPROX_EVALUATOR(w, RowVector2d::Constant(3));
// mix CwiseNullaryOp and transpose
VERIFY_IS_APPROX_EVALUATOR(w, Vector2d::Zero().transpose());
}
{
// test product expressions
int s = internal::random<int>(1,100);
MatrixXf a(s,s), b(s,s), c(s,s), d(s,s);
a.setRandom();
b.setRandom();
c.setRandom();
d.setRandom();
VERIFY_IS_APPROX_EVALUATOR(d, (a + b));
VERIFY_IS_APPROX_EVALUATOR(d, (a + b).transpose());
VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b), a*b);
VERIFY_IS_APPROX_EVALUATOR2(d.noalias(), prod(a,b), a*b);
VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b) + c, a*b + c);
VERIFY_IS_APPROX_EVALUATOR2(d, s * prod(a,b), s * a*b);
VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b).transpose(), (a*b).transpose());
VERIFY_IS_APPROX_EVALUATOR2(d, prod(a,b) + prod(b,c), a*b + b*c);
// check that prod works even with aliasing present
c = a*a;
copy_using_evaluator(a, prod(a,a));
VERIFY_IS_APPROX(a,c);
}
{
// test product with all possible sizes
int s = internal::random<int>(1,100);
Matrix<float, 1, 1> m11, res11; m11.setRandom(1,1);
Matrix<float, 1, 4> m14, res14; m14.setRandom(1,4);
Matrix<float, 1,Dynamic> m1X, res1X; m1X.setRandom(1,s);
Matrix<float, 4, 1> m41, res41; m41.setRandom(4,1);
Matrix<float, 4, 4> m44, res44; m44.setRandom(4,4);
Matrix<float, 4,Dynamic> m4X, res4X; m4X.setRandom(4,s);
Matrix<float,Dynamic, 1> mX1, resX1; mX1.setRandom(s,1);
Matrix<float,Dynamic, 4> mX4, resX4; mX4.setRandom(s,4);
Matrix<float,Dynamic,Dynamic> mXX, resXX; mXX.setRandom(s,s);
VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m11,m11), m11*m11);
VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m14,m41), m14*m41);
VERIFY_IS_APPROX_EVALUATOR2(res11, prod(m1X,mX1), m1X*mX1);
VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m11,m14), m11*m14);
VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m14,m44), m14*m44);
VERIFY_IS_APPROX_EVALUATOR2(res14, prod(m1X,mX4), m1X*mX4);
VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m11,m1X), m11*m1X);
VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m14,m4X), m14*m4X);
VERIFY_IS_APPROX_EVALUATOR2(res1X, prod(m1X,mXX), m1X*mXX);
VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m41,m11), m41*m11);
VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m44,m41), m44*m41);
VERIFY_IS_APPROX_EVALUATOR2(res41, prod(m4X,mX1), m4X*mX1);
VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m41,m14), m41*m14);
VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m44,m44), m44*m44);
VERIFY_IS_APPROX_EVALUATOR2(res44, prod(m4X,mX4), m4X*mX4);
VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m41,m1X), m41*m1X);
VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m44,m4X), m44*m4X);
VERIFY_IS_APPROX_EVALUATOR2(res4X, prod(m4X,mXX), m4X*mXX);
VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mX1,m11), mX1*m11);
VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mX4,m41), mX4*m41);
VERIFY_IS_APPROX_EVALUATOR2(resX1, prod(mXX,mX1), mXX*mX1);
VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mX1,m14), mX1*m14);
VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mX4,m44), mX4*m44);
VERIFY_IS_APPROX_EVALUATOR2(resX4, prod(mXX,mX4), mXX*mX4);
VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mX1,m1X), mX1*m1X);
VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mX4,m4X), mX4*m4X);
VERIFY_IS_APPROX_EVALUATOR2(resXX, prod(mXX,mXX), mXX*mXX);
}
{
ArrayXXf a(2,3);
ArrayXXf b(3,2);
a << 1,2,3, 4,5,6;
const ArrayXXf a_const(a);
// this does not work because Random is eval-before-nested:
// copy_using_evaluator(w, Vector2d::Random().transpose());
// test CwiseUnaryOp
VERIFY_IS_APPROX_EVALUATOR(v2, 3 * v);
VERIFY_IS_APPROX_EVALUATOR(w, (3 * v).transpose());
VERIFY_IS_APPROX_EVALUATOR(b, (a + 3).transpose());
VERIFY_IS_APPROX_EVALUATOR(b, (2 * a_const + 3).transpose());
// test CwiseBinaryOp
VERIFY_IS_APPROX_EVALUATOR(v2, v + Vector2d::Ones());
VERIFY_IS_APPROX_EVALUATOR(w, (v + Vector2d::Ones()).transpose().cwiseProduct(RowVector2d::Constant(3)));
// dynamic matrices and arrays
MatrixXd mat1(6,6), mat2(6,6);
VERIFY_IS_APPROX_EVALUATOR(mat1, MatrixXd::Identity(6,6));
VERIFY_IS_APPROX_EVALUATOR(mat2, mat1);
copy_using_evaluator(mat2.transpose(), mat1);
VERIFY_IS_APPROX(mat2.transpose(), mat1);
ArrayXXd arr1(6,6), arr2(6,6);
VERIFY_IS_APPROX_EVALUATOR(arr1, ArrayXXd::Constant(6,6, 3.0));
VERIFY_IS_APPROX_EVALUATOR(arr2, arr1);
// test automatic resizing
mat2.resize(3,3);
VERIFY_IS_APPROX_EVALUATOR(mat2, mat1);
arr2.resize(9,9);
VERIFY_IS_APPROX_EVALUATOR(arr2, arr1);
// test direct traversal
Matrix3f m3;
Array33f a3;
VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Identity()); // matrix, nullary
// TODO: find a way to test direct traversal with array
VERIFY_IS_APPROX_EVALUATOR(m3.transpose(), Matrix3f::Identity().transpose()); // transpose
VERIFY_IS_APPROX_EVALUATOR(m3, 2 * Matrix3f::Identity()); // unary
VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Identity() + Matrix3f::Zero()); // binary
VERIFY_IS_APPROX_EVALUATOR(m3.block(0,0,2,2), Matrix3f::Identity().block(1,1,2,2)); // block
// test linear traversal
VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Zero()); // matrix, nullary
VERIFY_IS_APPROX_EVALUATOR(a3, Array33f::Zero()); // array
VERIFY_IS_APPROX_EVALUATOR(m3.transpose(), Matrix3f::Zero().transpose()); // transpose
VERIFY_IS_APPROX_EVALUATOR(m3, 2 * Matrix3f::Zero()); // unary
VERIFY_IS_APPROX_EVALUATOR(m3, Matrix3f::Zero() + m3); // binary
// test inner vectorization
Matrix4f m4, m4src = Matrix4f::Random();
Array44f a4, a4src = Matrix4f::Random();
VERIFY_IS_APPROX_EVALUATOR(m4, m4src); // matrix
VERIFY_IS_APPROX_EVALUATOR(a4, a4src); // array
VERIFY_IS_APPROX_EVALUATOR(m4.transpose(), m4src.transpose()); // transpose
// TODO: find out why Matrix4f::Zero() does not allow inner vectorization
VERIFY_IS_APPROX_EVALUATOR(m4, 2 * m4src); // unary
VERIFY_IS_APPROX_EVALUATOR(m4, m4src + m4src); // binary
// test linear vectorization
MatrixXf mX(6,6), mXsrc = MatrixXf::Random(6,6);
ArrayXXf aX(6,6), aXsrc = ArrayXXf::Random(6,6);
VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc); // matrix
VERIFY_IS_APPROX_EVALUATOR(aX, aXsrc); // array
VERIFY_IS_APPROX_EVALUATOR(mX.transpose(), mXsrc.transpose()); // transpose
VERIFY_IS_APPROX_EVALUATOR(mX, MatrixXf::Zero(6,6)); // nullary
VERIFY_IS_APPROX_EVALUATOR(mX, 2 * mXsrc); // unary
VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc + mXsrc); // binary
// test blocks and slice vectorization
VERIFY_IS_APPROX_EVALUATOR(m4, (mXsrc.block<4,4>(1,0)));
VERIFY_IS_APPROX_EVALUATOR(aX, ArrayXXf::Constant(10, 10, 3.0).block(2, 3, 6, 6));
Matrix4f m4ref = m4;
copy_using_evaluator(m4.block(1, 1, 2, 3), m3.bottomRows(2));
m4ref.block(1, 1, 2, 3) = m3.bottomRows(2);
VERIFY_IS_APPROX(m4, m4ref);
mX.setIdentity(20,20);
MatrixXf mXref = MatrixXf::Identity(20,20);
mXsrc = MatrixXf::Random(9,12);
copy_using_evaluator(mX.block(4, 4, 9, 12), mXsrc);
mXref.block(4, 4, 9, 12) = mXsrc;
VERIFY_IS_APPROX(mX, mXref);
// test Map
const float raw[3] = {1,2,3};
float buffer[3] = {0,0,0};
Vector3f v3;
Array3f a3f;
VERIFY_IS_APPROX_EVALUATOR(v3, Map<const Vector3f>(raw));
VERIFY_IS_APPROX_EVALUATOR(a3f, Map<const Array3f>(raw));
Vector3f::Map(buffer) = 2*v3;
VERIFY(buffer[0] == 2);
VERIFY(buffer[1] == 4);
VERIFY(buffer[2] == 6);
// test CwiseUnaryView
mat1.setRandom();
mat2.setIdentity();
MatrixXcd matXcd(6,6), matXcd_ref(6,6);
copy_using_evaluator(matXcd.real(), mat1);
copy_using_evaluator(matXcd.imag(), mat2);
matXcd_ref.real() = mat1;
matXcd_ref.imag() = mat2;
VERIFY_IS_APPROX(matXcd, matXcd_ref);
// test Select
VERIFY_IS_APPROX_EVALUATOR(aX, (aXsrc > 0).select(aXsrc, -aXsrc));
// test Replicate
mXsrc = MatrixXf::Random(6, 6);
VectorXf vX = VectorXf::Random(6);
mX.resize(6, 6);
VERIFY_IS_APPROX_EVALUATOR(mX, mXsrc.colwise() + vX);
matXcd.resize(12, 12);
VERIFY_IS_APPROX_EVALUATOR(matXcd, matXcd_ref.replicate(2,2));
VERIFY_IS_APPROX_EVALUATOR(matXcd, (matXcd_ref.replicate<2,2>()));
// test partial reductions
VectorXd vec1(6);
VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.rowwise().sum());
VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.colwise().sum().transpose());
// test MatrixWrapper and ArrayWrapper
mat1.setRandom(6,6);
arr1.setRandom(6,6);
VERIFY_IS_APPROX_EVALUATOR(mat2, arr1.matrix());
VERIFY_IS_APPROX_EVALUATOR(arr2, mat1.array());
VERIFY_IS_APPROX_EVALUATOR(mat2, (arr1 + 2).matrix());
VERIFY_IS_APPROX_EVALUATOR(arr2, mat1.array() + 2);
mat2.array() = arr1 * arr1;
VERIFY_IS_APPROX(mat2, (arr1 * arr1).matrix());
arr2.matrix() = MatrixXd::Identity(6,6);
VERIFY_IS_APPROX(arr2, MatrixXd::Identity(6,6).array());
// test Reverse
VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.reverse());
VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.colwise().reverse());
VERIFY_IS_APPROX_EVALUATOR(arr2, arr1.rowwise().reverse());
arr2.reverse() = arr1;
VERIFY_IS_APPROX(arr2, arr1.reverse());
mat2.array() = mat1.array().reverse();
VERIFY_IS_APPROX(mat2.array(), mat1.array().reverse());
// test Diagonal
VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal());
vec1.resize(5);
VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal(1));
VERIFY_IS_APPROX_EVALUATOR(vec1, mat1.diagonal<-1>());
vec1.setRandom();
mat2 = mat1;
copy_using_evaluator(mat1.diagonal(1), vec1);
mat2.diagonal(1) = vec1;
VERIFY_IS_APPROX(mat1, mat2);
copy_using_evaluator(mat1.diagonal<-1>(), mat1.diagonal(1));
mat2.diagonal<-1>() = mat2.diagonal(1);
VERIFY_IS_APPROX(mat1, mat2);
}
{
// test swapping
MatrixXd mat1, mat2, mat1ref, mat2ref;
mat1ref = mat1 = MatrixXd::Random(6, 6);
mat2ref = mat2 = 2 * mat1 + MatrixXd::Identity(6, 6);
swap_using_evaluator(mat1, mat2);
mat1ref.swap(mat2ref);
VERIFY_IS_APPROX(mat1, mat1ref);
VERIFY_IS_APPROX(mat2, mat2ref);
swap_using_evaluator(mat1.block(0, 0, 3, 3), mat2.block(3, 3, 3, 3));
mat1ref.block(0, 0, 3, 3).swap(mat2ref.block(3, 3, 3, 3));
VERIFY_IS_APPROX(mat1, mat1ref);
VERIFY_IS_APPROX(mat2, mat2ref);
swap_using_evaluator(mat1.row(2), mat2.col(3).transpose());
mat1ref.row(2).swap(mat2ref.col(3).transpose());
VERIFY_IS_APPROX(mat1, mat1ref);
VERIFY_IS_APPROX(mat2, mat2ref);
}
{
// test compound assignment
const Matrix4d mat_const = Matrix4d::Random();
Matrix4d mat, mat_ref;
mat = mat_ref = Matrix4d::Identity();
add_assign_using_evaluator(mat, mat_const);
mat_ref += mat_const;
VERIFY_IS_APPROX(mat, mat_ref);
subtract_assign_using_evaluator(mat.row(1), 2*mat.row(2));
mat_ref.row(1) -= 2*mat_ref.row(2);
VERIFY_IS_APPROX(mat, mat_ref);
const ArrayXXf arr_const = ArrayXXf::Random(5,3);
ArrayXXf arr, arr_ref;
arr = arr_ref = ArrayXXf::Constant(5, 3, 0.5);
multiply_assign_using_evaluator(arr, arr_const);
arr_ref *= arr_const;
VERIFY_IS_APPROX(arr, arr_ref);
divide_assign_using_evaluator(arr.row(1), arr.row(2) + 1);
arr_ref.row(1) /= (arr_ref.row(2) + 1);
VERIFY_IS_APPROX(arr, arr_ref);
}
}