Rip out make_coherent, add CoherentPadOp.
commit 23f6c26857 (parent edaf9e16bc)
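For orientation (not part of the diff): the change targets binary operations on AutoDiffScalar values whose derivative vectors have different sizes. Previously internal::make_coherent() resized the smaller derivative vector in place; now the smaller operand is zero-padded lazily through CoherentPadOp inside the returned expression. A minimal usage sketch, assuming the unsupported AutoDiff module header and a hypothetical example.cpp:

#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>
#include <iostream>

int main() {
  using AD = Eigen::AutoDiffScalar<Eigen::VectorXd>;
  AD x(1.0, Eigen::VectorXd::Unit(3, 0));  // active variable, 3 derivative entries
  AD y(2.0);                               // constant: empty derivative vector
  AD z = x + y;                            // derivative sizes reconciled internally
  std::cout << z.value() << "\n" << z.derivatives().transpose() << "\n";
  return 0;
}

Either way the printed derivatives are 1 0 0; the difference is purely in how the mismatched sizes are reconciled.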
@@ -33,6 +33,7 @@ namespace Eigen {
#include "../../Eigen/src/Core/util/DisableStupidWarnings.h"

// IWYU pragma: begin_exports
#include "src/AutoDiff/CoherentPadOp.h"
#include "src/AutoDiff/AutoDiffScalar.h"
// #include "src/AutoDiff/AutoDiffVector.h"
#include "src/AutoDiff/AutoDiffJacobian.h"
@@ -17,21 +17,52 @@ namespace Eigen {

namespace internal {

template <typename A, typename B>
struct make_coherent_impl {
  static void run(A&, B&) {}
};

// resize a to match b if a.size()==0, and conversely.
template <typename A, typename B>
void make_coherent(const A& a, const B& b) {
  make_coherent_impl<A, B>::run(a.const_cast_derived(), b.const_cast_derived());
}

template <typename DerivativeType, bool Enable>
struct auto_diff_special_op;

}  // end namespace internal
template <typename DerivativeType, typename OtherDerivativeType, typename EnableIf = void>
struct maybe_coherent_pad_helper {
  static constexpr int SizeAtCompileTime =
      DerivativeType::SizeAtCompileTime == Dynamic || OtherDerivativeType::SizeAtCompileTime == Dynamic
          ? Dynamic
      : int(DerivativeType::SizeAtCompileTime) > int(OtherDerivativeType::SizeAtCompileTime)
          ? DerivativeType::SizeAtCompileTime
          : OtherDerivativeType::SizeAtCompileTime;
  using type = CoherentPadOp<DerivativeType, SizeAtCompileTime>;
  static type pad(const DerivativeType& x, const OtherDerivativeType& y) {
    // CoherentPadOp uses variable_if_dynamic<SizeAtCompileTime>. In this case, `SizeAtCompileTime` might
    // be Dynamic, so we need to take the runtime maximum of x, y.
    return CoherentPadOp<DerivativeType, SizeAtCompileTime>(x, numext::maxi(x.size(), y.size()));
  }
};

// Both are fixed-size and equal, don't need to pad.
// Both are fixed-size and this is larger than other, don't need to pad.
template <typename DerivativeType, typename OtherDerivativeType>
struct maybe_coherent_pad_helper<
    DerivativeType, OtherDerivativeType,
    std::enable_if_t<DerivativeType::SizeAtCompileTime >= OtherDerivativeType::SizeAtCompileTime &&
                     DerivativeType::SizeAtCompileTime != Dynamic &&
                     OtherDerivativeType::SizeAtCompileTime != Dynamic>> {
  using type = const DerivativeType&;
  static const DerivativeType& pad(const DerivativeType& x, const OtherDerivativeType& /*y*/) { return x; }
};

template <typename DerivativeType, typename OtherDerivativeType>
typename maybe_coherent_pad_helper<DerivativeType, OtherDerivativeType>::type MaybeCoherentPad(
    const DerivativeType& x, const OtherDerivativeType& y) {
  return maybe_coherent_pad_helper<DerivativeType, OtherDerivativeType>::pad(x, y);
}

template <typename Op, typename LhsDerivativeType, typename RhsDerivativeType>
auto MakeCoherentCwiseBinaryOp(const LhsDerivativeType& x, const RhsDerivativeType& y, Op op = Op()) {
  const auto& lhs = MaybeCoherentPad(x, y);
  const auto& rhs = MaybeCoherentPad(y, x);
  return CwiseBinaryOp<Op, remove_all_t<decltype(lhs)>, remove_all_t<decltype(rhs)>>(lhs, rhs, op);
}

}  // namespace internal

template <typename DerivativeType>
class AutoDiffScalar;
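Aside (not Eigen code): the compile-time size rule used by maybe_coherent_pad_helper above is simply "Dynamic wins if either side is Dynamic, otherwise the larger fixed size". A standalone sketch, with kDynamic standing in for Eigen::Dynamic:

#include <algorithm>

constexpr int kDynamic = -1;  // stands in for Eigen::Dynamic

constexpr int padded_size(int a, int b) {
  return (a == kDynamic || b == kDynamic) ? kDynamic : std::max(a, b);
}

static_assert(padded_size(3, 5) == 5, "larger fixed size wins");
static_assert(padded_size(4, kDynamic) == kDynamic, "Dynamic propagates");
static_assert(padded_size(2, 2) == 2, "equal fixed sizes need no padding");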
@@ -214,13 +245,10 @@ class AutoDiffScalar
  }

  template <typename OtherDerType>
  inline AutoDiffScalar<
      CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const DerType, const internal::remove_all_t<OtherDerType>>>
  operator+(const AutoDiffScalar<OtherDerType>& other) const {
    internal::make_coherent(m_derivatives, other.derivatives());
    return AutoDiffScalar<
        CwiseBinaryOp<internal::scalar_sum_op<Scalar>, const DerType, const internal::remove_all_t<OtherDerType>>>(
        m_value + other.value(), m_derivatives + other.derivatives());
  inline auto operator+(const AutoDiffScalar<OtherDerType>& other) const {
    return MakeAutoDiffScalar(
        m_value + other.value(),
        internal::MakeCoherentCwiseBinaryOp<internal::scalar_sum_op<Scalar>>(m_derivatives, other.derivatives()));
  }

  template <typename OtherDerType>
@@ -245,13 +273,10 @@ class AutoDiffScalar
  }

  template <typename OtherDerType>
  inline AutoDiffScalar<
      CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType, const internal::remove_all_t<OtherDerType>>>
  operator-(const AutoDiffScalar<OtherDerType>& other) const {
    internal::make_coherent(m_derivatives, other.derivatives());
    return AutoDiffScalar<CwiseBinaryOp<internal::scalar_difference_op<Scalar>, const DerType,
                                        const internal::remove_all_t<OtherDerType>>>(
        m_value - other.value(), m_derivatives - other.derivatives());
  inline auto operator-(const AutoDiffScalar<OtherDerType>& other) const {
    return MakeAutoDiffScalar(m_value - other.value(),
                              internal::MakeCoherentCwiseBinaryOp<internal::scalar_difference_op<Scalar>>(
                                  m_derivatives, other.derivatives()));
  }

  template <typename OtherDerType>
@@ -264,13 +289,11 @@ class AutoDiffScalar
    return AutoDiffScalar<CwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const DerType>>(-m_value, -m_derivatives);
  }

  inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType, Scalar, product)> operator*(
      const Scalar& other) const {
  inline auto operator*(const Scalar& other) const {
    return MakeAutoDiffScalar(m_value * other, m_derivatives * other);
  }

  friend inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType, Scalar, product)> operator*(
      const Scalar& other, const AutoDiffScalar& a) {
  friend inline auto operator*(const Scalar& other, const AutoDiffScalar& a) {
    return MakeAutoDiffScalar(a.value() * other, a.derivatives() * other);
  }

@@ -290,13 +313,11 @@ class AutoDiffScalar
  //   a.derivatives() * other);
  // }

  inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType, Scalar, product)> operator/(
      const Scalar& other) const {
  inline auto operator/(const Scalar& other) const {
    return MakeAutoDiffScalar(m_value / other, (m_derivatives * (Scalar(1) / other)));
  }

  friend inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType, Scalar, product)> operator/(
      const Scalar& other, const AutoDiffScalar& a) {
  friend inline auto operator/(const Scalar& other, const AutoDiffScalar& a) {
    return MakeAutoDiffScalar(other / a.value(), a.derivatives() * (Scalar(-other) / (a.value() * a.value())));
  }

@@ -317,26 +338,18 @@ class AutoDiffScalar
  // }

  template <typename OtherDerType>
  inline AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
      CwiseBinaryOp<internal::scalar_difference_op<Scalar> EIGEN_COMMA const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
          DerType, Scalar, product) EIGEN_COMMA const
          EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(internal::remove_all_t<OtherDerType>, Scalar, product)>,
      Scalar, product)>
  operator/(const AutoDiffScalar<OtherDerType>& other) const {
    internal::make_coherent(m_derivatives, other.derivatives());
  inline auto operator/(const AutoDiffScalar<OtherDerType>& other) const {
    return MakeAutoDiffScalar(m_value / other.value(),
                              ((m_derivatives * other.value()) - (other.derivatives() * m_value)) *
                              internal::MakeCoherentCwiseBinaryOp<internal::scalar_difference_op<Scalar>>(
                                  m_derivatives * other.value(), (other.derivatives() * m_value)) *
                                  (Scalar(1) / (other.value() * other.value())));
  }

  template <typename OtherDerType>
  inline AutoDiffScalar<CwiseBinaryOp<
      internal::scalar_sum_op<Scalar>, const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(DerType, Scalar, product),
      const EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(internal::remove_all_t<OtherDerType>, Scalar, product)>>
  operator*(const AutoDiffScalar<OtherDerType>& other) const {
    internal::make_coherent(m_derivatives, other.derivatives());
  inline auto operator*(const AutoDiffScalar<OtherDerType>& other) const {
    return MakeAutoDiffScalar(m_value * other.value(),
                              (m_derivatives * other.value()) + (other.derivatives() * m_value));
                              internal::MakeCoherentCwiseBinaryOp<internal::scalar_sum_op<Scalar>>(
                                  m_derivatives * other.value(), other.derivatives() * m_value));
  }

  inline AutoDiffScalar& operator*=(const Scalar& other) {
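Aside (not part of the diff): a quick numerical check of the quotient rule the rewritten operator/ implements, d(x/y)/dx = 1/y and d(x/y)/dy = -x/y^2, assuming the unsupported AutoDiff module header:

#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>
#include <cassert>
#include <cmath>

int main() {
  using AD = Eigen::AutoDiffScalar<Eigen::Vector2d>;
  AD x(3.0, Eigen::Vector2d::Unit(0));  // d/dx component
  AD y(2.0, Eigen::Vector2d::Unit(1));  // d/dy component
  AD q = x / y;                         // expect dq/dx = 1/y, dq/dy = -x/y^2
  assert(std::abs(q.derivatives()(0) - 0.5) < 1e-12);
  assert(std::abs(q.derivatives()(1) + 0.75) < 1e-12);
  return 0;
}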
@@ -430,64 +443,6 @@ struct auto_diff_special_op<DerivativeType, false> {
  void operator+() const;
};

template <typename BinOp, typename A, typename B, typename RefType>
void make_coherent_expression(CwiseBinaryOp<BinOp, A, B> xpr, const RefType& ref) {
  make_coherent(xpr.const_cast_derived().lhs(), ref);
  make_coherent(xpr.const_cast_derived().rhs(), ref);
}

template <typename UnaryOp, typename A, typename RefType>
void make_coherent_expression(const CwiseUnaryOp<UnaryOp, A>& xpr, const RefType& ref) {
  make_coherent(xpr.nestedExpression().const_cast_derived(), ref);
}

// needed for compilation only
template <typename UnaryOp, typename A, typename RefType>
void make_coherent_expression(const CwiseNullaryOp<UnaryOp, A>&, const RefType&) {}

template <typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols, typename B>
struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>, B> {
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> A;
  static void run(A& a, B& b) {
    if ((A_Rows == Dynamic || A_Cols == Dynamic) && (a.size() == 0)) {
      a.resize(b.size());
      a.setZero();
    } else if (B::SizeAtCompileTime == Dynamic && a.size() != 0 && b.size() == 0) {
      make_coherent_expression(b, a);
    }
  }
};

template <typename A, typename B_Scalar, int B_Rows, int B_Cols, int B_Options, int B_MaxRows, int B_MaxCols>
struct make_coherent_impl<A, Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols>> {
  typedef Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> B;
  static void run(A& a, B& b) {
    if ((B_Rows == Dynamic || B_Cols == Dynamic) && (b.size() == 0)) {
      b.resize(a.size());
      b.setZero();
    } else if (A::SizeAtCompileTime == Dynamic && b.size() != 0 && a.size() == 0) {
      make_coherent_expression(a, b);
    }
  }
};

template <typename A_Scalar, int A_Rows, int A_Cols, int A_Options, int A_MaxRows, int A_MaxCols, typename B_Scalar,
          int B_Rows, int B_Cols, int B_Options, int B_MaxRows, int B_MaxCols>
struct make_coherent_impl<Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols>,
                          Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols>> {
  typedef Matrix<A_Scalar, A_Rows, A_Cols, A_Options, A_MaxRows, A_MaxCols> A;
  typedef Matrix<B_Scalar, B_Rows, B_Cols, B_Options, B_MaxRows, B_MaxCols> B;
  static void run(A& a, B& b) {
    if ((A_Rows == Dynamic || A_Cols == Dynamic) && (a.size() == 0)) {
      a.resize(b.size());
      a.setZero();
    } else if ((B_Rows == Dynamic || B_Cols == Dynamic) && (b.size() == 0)) {
      b.resize(a.size());
      b.setZero();
    }
  }
};

}  // end namespace internal

template <typename DerType, typename BinOp>
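Aside (not Eigen code): a rough standalone sketch of what the removed make_coherent machinery did for the common case, namely resize an empty derivative vector in place to the other operand's size and zero-fill it so both sides of a binary op matched:

#include <vector>

// If exactly one of the two derivative vectors is empty, give it the other's
// size and zero-fill it; otherwise leave both alone.
void make_coherent_sketch(std::vector<double>& a, std::vector<double>& b) {
  if (a.empty() && !b.empty()) {
    a.assign(b.size(), 0.0);
  } else if (b.empty() && !a.empty()) {
    b.assign(a.size(), 0.0);
  }
}

The CoherentPadOp approach avoids this mutation of the operands entirely; padding happens inside the returned expression.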
@@ -518,10 +473,7 @@ struct ScalarBinaryOpTraits<typename DerType::Scalar, AutoDiffScalar<DerType>, B

#define EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(FUNC, CODE)                                              \
  template <typename DerType>                                                                        \
  inline Eigen::AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(                               \
      Eigen::internal::remove_all_t<DerType>,                                                        \
      typename Eigen::internal::traits<Eigen::internal::remove_all_t<DerType>>::Scalar, product)>    \
  FUNC(const Eigen::AutoDiffScalar<DerType>& x) {                                                    \
  inline auto FUNC(const Eigen::AutoDiffScalar<DerType>& x) {                                        \
    using namespace Eigen;                                                                           \
    typedef typename Eigen::internal::traits<Eigen::internal::remove_all_t<DerType>>::Scalar Scalar; \
    EIGEN_UNUSED_VARIABLE(sizeof(Scalar));                                                           \
@@ -602,10 +554,8 @@ EIGEN_AUTODIFF_DECLARE_GLOBAL_UNARY(log, using std::log;
                                    x.derivatives() * (Scalar(1) / x.value()));)

template <typename DerType>
inline Eigen::AutoDiffScalar<EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(
    internal::remove_all_t<DerType>, typename internal::traits<internal::remove_all_t<DerType>>::Scalar, product)>
pow(const Eigen::AutoDiffScalar<DerType>& x,
    const typename internal::traits<internal::remove_all_t<DerType>>::Scalar& y) {
inline auto pow(const Eigen::AutoDiffScalar<DerType>& x,
                const typename internal::traits<internal::remove_all_t<DerType>>::Scalar& y) {
  using namespace Eigen;
  using std::pow;
  return Eigen::MakeAutoDiffScalar(pow(x.value(), y), x.derivatives() * (y * pow(x.value(), y - 1)));
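Aside (not part of the diff): the pow overload keeps the usual power rule d(x^y)/dx = y*x^(y-1); a quick standalone check, again assuming the unsupported AutoDiff module header:

#include <Eigen/Core>
#include <unsupported/Eigen/AutoDiff>
#include <cassert>
#include <cmath>

int main() {
  using AD = Eigen::AutoDiffScalar<Eigen::VectorXd>;
  AD x(2.0, Eigen::VectorXd::Unit(1, 0));  // single active variable
  AD p = pow(x, 3.0);                      // ADL picks up Eigen's overload for AutoDiffScalar
  assert(std::abs(p.value() - 8.0) < 1e-12);
  assert(std::abs(p.derivatives()(0) - 12.0) < 1e-12);  // 3 * 2^2
  return 0;
}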
unsupported/Eigen/src/AutoDiff/CoherentPadOp.h (new file, 152 lines)
@@ -0,0 +1,152 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2020 The Eigen Team.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_COHERENT_PAD_OP_H
#define EIGEN_COHERENT_PAD_OP_H

#include "./InternalHeaderCheck.h"

namespace Eigen {

namespace internal {

// Pads a vector with zeros to a given size.
template <typename XprType, int SizeAtCompileTime_>
struct CoherentPadOp;

template <typename XprType, int SizeAtCompileTime_>
struct traits<CoherentPadOp<XprType, SizeAtCompileTime_>> : public traits<XprType> {
  typedef typename internal::remove_all<XprType>::type PlainXprType;
  typedef typename internal::ref_selector<XprType>::type XprNested;
  typedef typename std::remove_reference_t<XprNested> XprNested_;
  enum : int {
    IsRowMajor = traits<PlainXprType>::Flags & RowMajorBit,
    SizeAtCompileTime = SizeAtCompileTime_,
    RowsAtCompileTime = IsRowMajor ? 1 : SizeAtCompileTime,
    ColsAtCompileTime = IsRowMajor ? SizeAtCompileTime : 1,
    MaxRowsAtCompileTime = RowsAtCompileTime,
    MaxColsAtCompileTime = ColsAtCompileTime,
    Flags = traits<XprType>::Flags & ~NestByRefBit,
  };
};

// Pads a vector with zeros to a given size.
template <typename XprType, int SizeAtCompileTime_>
struct CoherentPadOp : public dense_xpr_base<CoherentPadOp<XprType, SizeAtCompileTime_>>::type {
  typedef typename internal::generic_xpr_base<CoherentPadOp<XprType, SizeAtCompileTime_>>::type Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(CoherentPadOp)

  using XprNested = typename traits<CoherentPadOp>::XprNested;
  using XprNested_ = typename traits<CoherentPadOp>::XprNested_;
  using NestedExpression = XprNested_;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoherentPadOp() = delete;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoherentPadOp(const CoherentPadOp&) = default;
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoherentPadOp(CoherentPadOp&& other) = default;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoherentPadOp(const XprType& xpr, Index size) : xpr_(xpr), size_(size) {
    static_assert(XprNested_::IsVectorAtCompileTime && "input type must be a vector");
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const XprNested_& nestedExpression() const { return xpr_; }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index size() const { return size_.value(); }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rows() const {
    return traits<CoherentPadOp>::IsRowMajor ? Index(1) : size();
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index cols() const {
    return traits<CoherentPadOp>::IsRowMajor ? size() : Index(1);
  }

 private:
  XprNested xpr_;
  const internal::variable_if_dynamic<Index, SizeAtCompileTime> size_;
};

// Adapted from the Replicate evaluator.
template <typename ArgType, int SizeAtCompileTime>
struct unary_evaluator<CoherentPadOp<ArgType, SizeAtCompileTime>>
    : evaluator_base<CoherentPadOp<ArgType, SizeAtCompileTime>> {
  typedef CoherentPadOp<ArgType, SizeAtCompileTime> XprType;
  typedef typename internal::remove_all_t<typename XprType::CoeffReturnType> CoeffReturnType;
  typedef typename internal::nested_eval<ArgType, 1>::type ArgTypeNested;
  typedef internal::remove_all_t<ArgTypeNested> ArgTypeNestedCleaned;

  enum {
    CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
    LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
    Flags = evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits | LinearAccessMask | RowMajorBit),
    Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE explicit unary_evaluator(const XprType& pad)
      : m_arg(pad.nestedExpression()), m_argImpl(m_arg), m_size(pad.nestedExpression().size()) {}

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index row, Index col) const {
    EIGEN_IF_CONSTEXPR(XprType::IsRowMajor) {
      if (col < m_size.value()) {
        return m_argImpl.coeff(0, col);
      }
    }
    else {
      if (row < m_size.value()) {
        return m_argImpl.coeff(row, 0);
      }
    }
    return CoeffReturnType(0);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const {
    if (index < m_size.value()) {
      return m_argImpl.coeff(index);
    }
    return CoeffReturnType(0);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index row, Index col) const {
    // AutoDiff scalar's derivative must be a vector, which is enforced by static assert.
    // Defer to linear access for simplicity.
    EIGEN_IF_CONSTEXPR(XprType::IsRowMajor) { return packet<LoadMode, PacketType>(col); }
    return packet<LoadMode, PacketType>(row);
  }

  template <int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE PacketType packet(Index index) const {
    constexpr int kPacketSize = unpacket_traits<PacketType>::size;
    if (index + kPacketSize <= m_size.value()) {
      return m_argImpl.template packet<LoadMode, PacketType>(index);
    } else if (index < m_size.value()) {
      // Partial packet.
      EIGEN_ALIGN_MAX std::remove_const_t<CoeffReturnType> values[kPacketSize];
      const int partial = m_size.value() - index;
      for (int i = 0; i < partial && i < kPacketSize; ++i) {
        values[i] = m_argImpl.coeff(index + i);
      }
      for (int i = partial; i < kPacketSize; ++i) {
        values[i] = CoeffReturnType(0);
      }
      return pload<PacketType>(values);
    }
    return pset1<PacketType>(CoeffReturnType(0));
  }

 protected:
  const ArgTypeNested m_arg;
  evaluator<ArgTypeNestedCleaned> m_argImpl;
  const variable_if_dynamic<Index, ArgTypeNestedCleaned::SizeAtCompileTime> m_size;
};

}  // namespace internal

}  // namespace Eigen

#endif  // EIGEN_COHERENT_PAD_OP_H
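Aside (not Eigen code): a standalone sketch of the padding semantics the evaluator above implements for linear access: indices below the wrapped vector's size read through, anything beyond reads as zero up to the padded size.

#include <cassert>
#include <vector>

// Index below x.size(): return the stored coefficient; otherwise return zero.
double padded_coeff(const std::vector<double>& x, std::size_t padded_size, std::size_t index) {
  assert(index < padded_size);
  return index < x.size() ? x[index] : 0.0;
}

int main() {
  const std::vector<double> der{1.0, 2.0};  // wrapped derivative vector of size 2
  assert(padded_coeff(der, 4, 1) == 2.0);   // inside the original vector
  assert(padded_coeff(der, 4, 3) == 0.0);   // padded region reads as zero
  return 0;
}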
@@ -175,15 +175,11 @@ void forward_jacobian(const Func& f) {
  jref.setZero();
  yref.setZero();
  f(x, &yref, &jref);
  // std::cerr << y.transpose() << "\n\n";;
  // std::cerr << j << "\n\n";;

  j.setZero();
  y.setZero();
  AutoDiffJacobian<Func> autoj(f);
  autoj(x, &y, &j);
  // std::cerr << y.transpose() << "\n\n";;
  // std::cerr << j << "\n\n";;

  VERIFY_IS_APPROX(y, yref);
  VERIFY_IS_APPROX(j, jref);
@@ -277,8 +273,6 @@ double bug_1222() {
  return denom.value();
}

#ifdef EIGEN_TEST_PART_5

double bug_1223() {
  using std::min;
  typedef Eigen::AutoDiffScalar<Eigen::Vector3d> AD;
@@ -338,8 +332,6 @@ double bug_1281() {
  return (y1 + y2 + y3).value();
}

#endif

EIGEN_DECLARE_TEST(autodiff) {
  for (int i = 0; i < g_repeat; i++) {
    CALL_SUBTEST_1(test_autodiff_scalar<1>());