Improved support for RowMajor tensors

Misc fixes and API cleanups.
Benoit Steiner 2015-01-14 15:38:48 -08:00
parent 6559d09c60
commit f697df7237
17 changed files with 1405 additions and 356 deletions
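The central change is that tensor traits and evaluators now carry a Layout (ColMajor or RowMajor), so RowMajor tensors work through the whole expression machinery. A minimal sketch of the kind of usage this enables, assuming the unsupported Tensor module headers (illustrative only, not part of the commit):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  // A rank-3 tensor stored in row-major order.
  Eigen::Tensor<float, 3, Eigen::RowMajor> t(2, 3, 4);
  t.setRandom();

  // Coefficient-wise expressions are evaluated with row-major index math.
  Eigen::Tensor<float, 3, Eigen::RowMajor> u(2, 3, 4);
  u = t * 2.0f + t.constant(1.0f);

  std::cout << u(0, 1, 2) << std::endl;
  return 0;
}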

unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h

@@ -33,6 +33,8 @@ struct traits<TensorAssignOp<LhsXprType, RhsXprType> >
  typedef typename RhsXprType::Nested RhsNested;
  typedef typename remove_reference<LhsNested>::type _LhsNested;
  typedef typename remove_reference<RhsNested>::type _RhsNested;
+ static const std::size_t NumDimensions = internal::traits<LhsXprType>::NumDimensions;
+ static const int Layout = internal::traits<LhsXprType>::Layout;

  enum {
    Flags = 0,
@@ -94,12 +96,18 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
  enum {
    IsAligned = TensorEvaluator<LeftArgType, Device>::IsAligned & TensorEvaluator<RightArgType, Device>::IsAligned,
    PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
+   Layout = TensorEvaluator<LeftArgType, Device>::Layout,
  };

  EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) :
      m_leftImpl(op.lhsExpression(), device),
      m_rightImpl(op.rhsExpression(), device)
- { }
+ {
+   EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout == TensorEvaluator<RightArgType, Device>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
+   // The dimensions of the lhs and the rhs tensors should be equal to prevent
+   // overflows and ensure the result is fully initialized.
+   eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
+ }

  typedef typename XprType::Index Index;
  typedef typename XprType::Scalar Scalar;
@@ -114,7 +122,7 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) {
-   eigen_assert(internal::dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
+   eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
    m_leftImpl.evalSubExprsIfNeeded(NULL);
    // If the lhs provides raw access to its storage area (i.e. if m_leftImpl.data() returns a non
    // null value), attempt to evaluate the rhs expression in place. Returns true iff in place
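With the Layout recorded in TensorAssignOp's traits and checked in the evaluator, both sides of an assignment must agree on layout, and their dimensions are asserted to match. A hedged sketch of what that implies for user code (swap_layout() is the TensorBase helper added further down in this commit; shapes are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>

void assign_across_layouts() {
  Eigen::Tensor<float, 2, Eigen::RowMajor> row_major(3, 5);
  row_major.setRandom();

  // Assigning a RowMajor expression to a ColMajor tensor would now trip the
  // Layout static assertion in the assignment evaluator. swap_layout() flips
  // the layout and reverses the dimension order, so the shapes line up as (5, 3).
  Eigen::Tensor<float, 2, Eigen::ColMajor> col_major(5, 3);
  col_major = row_major.swap_layout();
}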

unsupported/Eigen/CXX11/src/Tensor/TensorBase.h

@@ -25,77 +25,118 @@ template<typename Derived>
class TensorBase<Derived, ReadOnlyAccessors>
{
  public:
-   typedef typename internal::traits<Derived>::Scalar Scalar;
-   typedef typename internal::traits<Derived>::Index Index;
-   typedef Scalar CoeffReturnType;
-   typedef typename internal::packet_traits<Scalar>::type PacketReturnType;
+   typedef internal::traits<Derived> DerivedTraits;
+   typedef typename DerivedTraits::Scalar Scalar;
+   typedef typename DerivedTraits::Index Index;
+   typedef typename internal::remove_const<Scalar>::type CoeffReturnType;
+   typedef typename internal::packet_traits<CoeffReturnType>::type PacketReturnType;
+   static const int NumDimensions = DerivedTraits::NumDimensions;

-   // Dimensions
-   EIGEN_DEVICE_FUNC
-   EIGEN_STRONG_INLINE Index dimension(std::size_t n) const { return derived().dimensions()[n]; }
-   EIGEN_DEVICE_FUNC
-   EIGEN_STRONG_INLINE Index size() const { return internal::array_prod(derived().dimensions()); }
+   // Generic nullary operation support.
+   template <typename CustomNullaryOp> EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<CustomNullaryOp, const Derived>
+   nullaryExpr(const CustomNullaryOp& func) const {
+     return TensorCwiseNullaryOp<CustomNullaryOp, const Derived>(derived(), func);
+   }

-   // Nullary operators
+   // Coefficient-wise nullary operators
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived>
    constant(const Scalar& value) const {
-     return TensorCwiseNullaryOp<internal::scalar_constant_op<Scalar>, const Derived>
-         (derived(), internal::scalar_constant_op<Scalar>(value));
+     return nullaryExpr(internal::scalar_constant_op<Scalar>(value));
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<internal::UniformRandomGenerator<Scalar>, const Derived>
    random() const {
-     return TensorCwiseNullaryOp<internal::UniformRandomGenerator<Scalar>, const Derived>(derived());
+     return nullaryExpr(internal::UniformRandomGenerator<Scalar>());
    }
    template <typename RandomGenerator> EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseNullaryOp<RandomGenerator, const Derived>
    random() const {
-     return TensorCwiseNullaryOp<RandomGenerator, const Derived>(derived());
+     return nullaryExpr(RandomGenerator());
    }

+   // Generic unary operation support.
+   template <typename CustomUnaryOp> EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<CustomUnaryOp, const Derived>
+   unaryExpr(const CustomUnaryOp& func) const {
+     return TensorCwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
+   }

    // Coefficient-wise unary operators
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_opposite_op<Scalar>, const Derived>
-   operator-() const { return derived(); }
+   operator-() const {
+     return unaryExpr(internal::scalar_opposite_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sqrt_op<Scalar>, const Derived>
-   sqrt() const { return derived(); }
+   sqrt() const {
+     return unaryExpr(internal::scalar_sqrt_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_square_op<Scalar>, const Derived>
-   square() const { return derived(); }
+   square() const {
+     return unaryExpr(internal::scalar_square_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_inverse_op<Scalar>, const Derived>
-   inverse() const { return derived(); }
+   inverse() const {
+     return unaryExpr(internal::scalar_inverse_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_exp_op<Scalar>, const Derived>
-   exp() const { return derived(); }
+   exp() const {
+     return unaryExpr(internal::scalar_exp_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_log_op<Scalar>, const Derived>
-   log() const { return derived(); }
+   log() const {
+     return unaryExpr(internal::scalar_log_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_abs_op<Scalar>, const Derived>
-   abs() const { return derived(); }
+   abs() const {
+     return unaryExpr(internal::scalar_abs_op<Scalar>());
+   }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
    pow(Scalar exponent) const {
-     return TensorCwiseUnaryOp<internal::scalar_pow_op<Scalar>, const Derived>
-         (derived(), internal::scalar_pow_op<Scalar>(exponent));
+     return unaryExpr(internal::scalar_pow_op<Scalar>(exponent));
+   }
+   EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_add_op<Scalar>, const Derived>
+   operator+ (Scalar rhs) const {
+     return unaryExpr(internal::scalar_add_op<Scalar>(rhs));
+   }
+   EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_sub_op<Scalar>, const Derived>
+   operator- (Scalar rhs) const {
+     EIGEN_STATIC_ASSERT((std::numeric_limits<Scalar>::is_signed || internal::is_same<Scalar, const std::complex<float> >::value), YOU_MADE_A_PROGRAMMING_MISTAKE);
+     return unaryExpr(internal::scalar_sub_op<Scalar>(rhs));
    }

    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
-   operator * (Scalar scale) const {
-     return TensorCwiseUnaryOp<internal::scalar_multiple_op<Scalar>, const Derived>
-         (derived(), internal::scalar_multiple_op<Scalar>(scale));
+   operator* (Scalar rhs) const {
+     return unaryExpr(internal::scalar_multiple_op<Scalar>(rhs));
+   }
+   EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_quotient1_op<Scalar>, const Derived>
+   operator/ (Scalar rhs) const {
+     // EIGEN_STATIC_ASSERT(!std::numeric_limits<Scalar>::is_integer, YOU_MADE_A_PROGRAMMING_MISTAKE);
+     return unaryExpr(internal::scalar_quotient1_op<Scalar>(rhs));
    }

    EIGEN_DEVICE_FUNC
@@ -110,86 +151,106 @@ class TensorBase<Derived, ReadOnlyAccessors>
      return cwiseMin(constant(threshold));
    }

-   template <typename CustomUnaryOp> EIGEN_DEVICE_FUNC
-   EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<CustomUnaryOp, const Derived>
-   unaryExpr(const CustomUnaryOp& func) const {
-     return TensorCwiseUnaryOp<CustomUnaryOp, const Derived>(derived(), func);
-   }
-
    template <typename NewType> EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const TensorCwiseUnaryOp<internal::scalar_cast_op<Scalar, NewType>, const Derived>
    cast() const {
-     return derived();
+     return unaryExpr(internal::scalar_cast_op<Scalar, NewType>());
    }

+   // Generic binary operation support.
+   template <typename CustomBinaryOp, typename OtherDerived> EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE const TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>
+   binaryExpr(const OtherDerived& other, const CustomBinaryOp& func) const {
+     return TensorCwiseBinaryOp<CustomBinaryOp, const Derived, const OtherDerived>(derived(), other, func);
+   }

    // Coefficient-wise binary operators.
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>
    operator+(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_sum_op<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const Derived, const OtherDerived>
    operator-(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_difference_op<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_product_op<Scalar>, const Derived, const OtherDerived>
    operator*(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_product_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_product_op<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>
    operator/(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_quotient_op<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>
    cwiseMax(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_max_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_max_op<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>
    cwiseMin(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<internal::scalar_min_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), internal::scalar_min_op<Scalar>());
    }

+   template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorCwiseBinaryOp<internal::scalar_boolean_and_op, const Derived, const OtherDerived>
+   operator&&(const OtherDerived& other) const {
+     return binaryExpr(other.derived(), internal::scalar_boolean_and_op());
+   }
+   template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorCwiseBinaryOp<internal::scalar_boolean_or_op, const Derived, const OtherDerived>
+   operator||(const OtherDerived& other) const {
+     return binaryExpr(other.derived(), internal::scalar_boolean_or_op());
+   }

    // Comparisons and tests.
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::less<Scalar>, const Derived, const OtherDerived>
    operator<(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::less<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::less<Scalar>());
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::less_equal<Scalar>, const Derived, const OtherDerived>
    operator<=(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::less_equal<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::less_equal<Scalar>());
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::greater<Scalar>, const Derived, const OtherDerived>
    operator>(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::greater<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::greater<Scalar>());
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::greater_equal<Scalar>, const Derived, const OtherDerived>
    operator>=(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::greater_equal<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::greater_equal<Scalar>());
    }

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>
    operator==(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::equal_to<Scalar>());
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorCwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>
    operator!=(const OtherDerived& other) const {
-     return TensorCwiseBinaryOp<std::not_equal_to<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return binaryExpr(other.derived(), std::not_equal_to<Scalar>());
    }

+   // Coefficient-wise ternary operators.
+   template<typename ThenDerived, typename ElseDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>
+   select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const {
+     return TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>(derived(), thenTensor.derived(), elseTensor.derived());
+   }

    // Contractions.
@@ -208,29 +269,72 @@ class TensorBase<Derived, ReadOnlyAccessors>
      return TensorConvolutionOp<const Dimensions, const Derived, const KernelDerived>(derived(), kernel.derived(), dims);
    }

-   // Coefficient-wise ternary operators.
-   template<typename ThenDerived, typename ElseDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-   const TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>
-   select(const ThenDerived& thenTensor, const ElseDerived& elseTensor) const {
-     return TensorSelectOp<const Derived, const ThenDerived, const ElseDerived>(derived(), thenTensor.derived(), elseTensor.derived());
-   }
-
    // Reductions.
    template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-   const TensorReductionOp<internal::SumReducer<Scalar>, const Dims, const Derived>
+   const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>
    sum(const Dims& dims) const {
-     return TensorReductionOp<internal::SumReducer<Scalar>, const Dims, const Derived>(derived(), dims, internal::SumReducer<Scalar>());
+     return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::SumReducer<CoeffReturnType>());
    }
+   const TensorReductionOp<internal::SumReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>
+   sum() const {
+     array<Index, NumDimensions> in_dims;
+     for (int i = 0; i < NumDimensions; ++i) in_dims[i] = i;
+     return TensorReductionOp<internal::SumReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::SumReducer<CoeffReturnType>());
+   }

    template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-   const TensorReductionOp<internal::MaxReducer<Scalar>, const Dims, const Derived>
+   const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>
+   mean(const Dims& dims) const {
+     return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MeanReducer<CoeffReturnType>());
+   }
+   const TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>
+   mean() const {
+     array<Index, NumDimensions> in_dims;
+     for (int i = 0; i < NumDimensions; ++i) in_dims[i] = i;
+     return TensorReductionOp<internal::MeanReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MeanReducer<CoeffReturnType>());
+   }
+   template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>
+   prod(const Dims& dims) const {
+     return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::ProdReducer<CoeffReturnType>());
+   }
+   const TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>
+   prod() const {
+     array<Index, NumDimensions> in_dims;
+     for (int i = 0; i < NumDimensions; ++i) in_dims[i] = i;
+     return TensorReductionOp<internal::ProdReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::ProdReducer<CoeffReturnType>());
+   }
+   template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>
    maximum(const Dims& dims) const {
-     return TensorReductionOp<internal::MaxReducer<Scalar>, const Dims, const Derived>(derived(), dims, internal::MaxReducer<Scalar>());
+     return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MaxReducer<CoeffReturnType>());
    }
+   const TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>
+   maximum() const {
+     array<Index, NumDimensions> in_dims;
+     for (int i = 0; i < NumDimensions; ++i) in_dims[i] = i;
+     return TensorReductionOp<internal::MaxReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MaxReducer<CoeffReturnType>());
+   }

    template <typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
-   const TensorReductionOp<internal::MinReducer<Scalar>, const Dims, const Derived>
+   const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>
    minimum(const Dims& dims) const {
-     return TensorReductionOp<internal::MinReducer<Scalar>, const Dims, const Derived>(derived(), dims, internal::MinReducer<Scalar>());
+     return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const Dims, const Derived>(derived(), dims, internal::MinReducer<CoeffReturnType>());
    }
+   const TensorReductionOp<internal::MinReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>
+   minimum() const {
+     array<Index, NumDimensions> in_dims;
+     for (int i = 0; i < NumDimensions; ++i) in_dims[i] = i;
+     return TensorReductionOp<internal::MinReducer<CoeffReturnType>, const array<Index, NumDimensions>, const Derived>(derived(), in_dims, internal::MinReducer<CoeffReturnType>());
+   }

    template <typename Reducer, typename Dims> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorReductionOp<Reducer, const Dims, const Derived>
    reduce(const Dims& dims, const Reducer& reducer) const {
@@ -258,17 +362,44 @@ class TensorBase<Derived, ReadOnlyAccessors>
    template <Index Rows, Index Cols> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorImagePatchOp<Rows, Cols, const Derived>
    extract_image_patches() const {
-     return TensorImagePatchOp<Rows, Cols, const Derived>(derived(), Rows, Cols, 1, 1);
+     return TensorImagePatchOp<Rows, Cols, const Derived>(derived(), Rows, Cols, 1, 1, PADDING_SAME);
+   }
+   template <Index Rows, Index Cols> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorImagePatchOp<Rows, Cols, const Derived>
+   extract_image_patches(const PaddingType padding_type) const {
+     return TensorImagePatchOp<Rows, Cols, const Derived>(derived(), Rows, Cols, 1, 1, padding_type);
+   }
+   template <Index Rows, Index Cols> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorImagePatchOp<Rows, Cols, const Derived>
+   extract_image_patches(const Index stride, const PaddingType padding_type) const {
+     return TensorImagePatchOp<Rows, Cols, const Derived>(derived(), Rows, Cols, stride, stride, padding_type);
    }

    EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
    extract_image_patches(const Index patch_rows, const Index patch_cols,
                          const Index row_stride = 1, const Index col_stride = 1) const {
-     return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride);
+     return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
+                                                                PADDING_SAME);
+   }
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorImagePatchOp<Dynamic, Dynamic, const Derived>
+   extract_image_patches(const Index patch_rows, const Index patch_cols,
+                         const Index row_stride, const Index col_stride,
+                         const PaddingType padding_type) const {
+     return TensorImagePatchOp<Dynamic, Dynamic, const Derived>(derived(), patch_rows, patch_cols, row_stride, col_stride,
+                                                                padding_type);
    }

    // Morphing operators.
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorLayoutSwapOp<const Derived>
+   swap_layout() const {
+     return TensorLayoutSwapOp<const Derived>(derived());
+   }
    template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorReshapingOp<const NewDimensions, const Derived>
    reshape(const NewDimensions& newDimensions) const {
@@ -279,10 +410,20 @@ class TensorBase<Derived, ReadOnlyAccessors>
    slice(const StartIndices& startIndices, const Sizes& sizes) const {
      return TensorSlicingOp<const StartIndices, const Sizes, const Derived>(derived(), startIndices, sizes);
    }
-   template <std::size_t DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   template <Index DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorChippingOp<DimId, const Derived>
    chip(const Index offset) const {
-     return TensorChippingOp<DimId, const Derived>(derived(), offset);
+     return TensorChippingOp<DimId, const Derived>(derived(), offset, DimId);
+   }
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorChippingOp<Dynamic, const Derived>
+   chip(const Index offset, const Index dim) const {
+     return TensorChippingOp<Dynamic, const Derived>(derived(), offset, dim);
+   }
+   template <typename ReverseDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   const TensorReverseOp<const ReverseDimensions, const Derived>
+   reverse(const ReverseDimensions& rev) const {
+     return TensorReverseOp<const ReverseDimensions, const Derived>(derived(), rev);
    }
    template <typename PaddingDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    const TensorPaddingOp<const PaddingDimensions, const Derived>
@@ -308,21 +449,24 @@ class TensorBase<Derived, ReadOnlyAccessors>
  protected:
    template <typename Scalar, std::size_t NumIndices, int Options> friend class Tensor;
+   template <typename Scalar, int Options> friend class TensorVarDim;
    template <typename OtherDerived, int AccessLevel> friend class TensorBase;
    EIGEN_DEVICE_FUNC
    EIGEN_STRONG_INLINE const Derived& derived() const { return *static_cast<const Derived*>(this); }
};

template<typename Derived>
class TensorBase<Derived, WriteAccessors> : public TensorBase<Derived, ReadOnlyAccessors> {
  public:
-   typedef typename internal::traits<Derived>::Scalar Scalar;
-   typedef typename internal::traits<Derived>::Index Index;
+   typedef internal::traits<Derived> DerivedTraits;
+   typedef typename DerivedTraits::Scalar Scalar;
+   typedef typename DerivedTraits::Index Index;
    typedef Scalar CoeffReturnType;
    typedef typename internal::packet_traits<Scalar>::type PacketReturnType;
+   static const int NumDimensions = DerivedTraits::NumDimensions;

    template <typename Scalar, std::size_t NumIndices, int Options> friend class Tensor;
+   template <typename Scalar, int Options> friend class TensorVarDim;
    template <typename OtherDerived, int AccessLevel> friend class TensorBase;

    EIGEN_DEVICE_FUNC
@@ -337,24 +481,43 @@ class TensorBase<Derived, WriteAccessors> : public TensorBase<Derived, ReadOnlyA
    EIGEN_STRONG_INLINE Derived& setRandom() {
      return derived() = this->random();
    }
+   template <typename RandomGenerator> EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE Derived& setRandom() {
+     return derived() = this->template random<RandomGenerator>();
+   }
+
+#ifdef EIGEN_HAS_VARIADIC_TEMPLATES
+   EIGEN_DEVICE_FUNC
+   EIGEN_STRONG_INLINE Derived& setValues(
+       const typename internal::Initializer<Derived, NumDimensions>::InitList& vals) {
+     TensorEvaluator<Derived, DefaultDevice> eval(derived(), DefaultDevice());
+     internal::initialize_tensor<Derived, NumDimensions>(eval, vals);
+     return derived();
+   }
+#endif  // EIGEN_HAS_VARIADIC_TEMPLATES

    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator+=(const OtherDerived& other) {
-     return derived() = TensorCwiseBinaryOp<internal::scalar_sum_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return derived() = derived() + other.derived();
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator-=(const OtherDerived& other) {
-     return derived() = TensorCwiseBinaryOp<internal::scalar_difference_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return derived() = derived() - other.derived();
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator*=(const OtherDerived& other) {
-     return derived() = TensorCwiseBinaryOp<internal::scalar_product_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return derived() = derived() * other.derived();
    }
    template<typename OtherDerived> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    Derived& operator/=(const OtherDerived& other) {
-     return derived() = TensorCwiseBinaryOp<internal::scalar_quotient_op<Scalar>, const Derived, const OtherDerived>(derived(), other.derived());
+     return derived() = derived() / other.derived();
    }

+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   TensorLayoutSwapOp<Derived>
+   swap_layout() const {
+     return TensorLayoutSwapOp<Derived>(derived());
+   }
    template <typename NewDimensions> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorReshapingOp<const NewDimensions, Derived>
    reshape(const NewDimensions& newDimensions) const {
@@ -365,16 +528,26 @@ class TensorBase<Derived, WriteAccessors> : public TensorBase<Derived, ReadOnlyA
    slice(const StartIndices& startIndices, const Sizes& sizes) const {
      return TensorSlicingOp<const StartIndices, const Sizes, Derived>(derived(), startIndices, sizes);
    }
-   template <std::size_t DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   template <DenseIndex DimId> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorChippingOp<DimId, Derived>
    chip(const Index offset) const {
-     return TensorChippingOp<DimId, Derived>(derived(), offset);
+     return TensorChippingOp<DimId, Derived>(derived(), offset, DimId);
+   }
+   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   TensorChippingOp<Dynamic, Derived>
+   chip(const Index offset, const Index dim) const {
+     return TensorChippingOp<Dynamic, Derived>(derived(), offset, dim);
    }
    template <typename Shuffle> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
    TensorShufflingOp<const Shuffle, Derived>
    shuffle(const Shuffle& shuffle) const {
      return TensorShufflingOp<const Shuffle, Derived>(derived(), shuffle);
    }
+   template <typename Strides> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
+   TensorStridingOp<const Strides, Derived>
+   stride(const Strides& strides) const {
+     return TensorStridingOp<const Strides, Derived>(derived(), strides);
+   }

    // Select the device on which to evaluate the expression.
    template <typename DeviceType>
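The TensorBase additions above (scalar operators, the mean reducer, the runtime-dimension chip, stride and swap_layout) are all expression builders. A short usage sketch under the same assumptions (sizes and values are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>

void tensor_base_additions() {
  Eigen::Tensor<float, 2> m(4, 6);
  m.setRandom();

  // Scalar operators now build coefficient-wise expressions: t + s, t - s, t * s, t / s.
  Eigen::Tensor<float, 2> shifted(4, 6);
  shifted = (m + 1.0f) / 2.0f;

  // mean() joins sum(), maximum() and minimum() as a reduction over chosen dimensions.
  Eigen::array<int, 1> along_rows;
  along_rows[0] = 0;
  Eigen::Tensor<float, 1> col_sums(6);
  col_sums = m.sum(along_rows);
  Eigen::Tensor<float, 1> col_means(6);
  col_means = m.mean(along_rows);

  // chip() now also accepts the dimension as a runtime argument (the Dynamic overload).
  Eigen::Tensor<float, 1> third_row(6);
  third_row = m.chip(2, 0);
}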

unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h

@@ -30,6 +30,8 @@ struct traits<TensorBroadcastingOp<Broadcast, XprType> > : public traits<XprType
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions;
+ static const int Layout = XprTraits::Layout;
};

template<typename Broadcast, typename XprType>
@@ -91,6 +93,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+   Layout = TensorEvaluator<ArgType, Device>::Layout,
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@@ -103,12 +106,21 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
      m_dimensions[i] = input_dims[i] * broadcast[i];
    }

+   if (Layout == ColMajor) {
      m_inputStrides[0] = 1;
      m_outputStrides[0] = 1;
      for (int i = 1; i < NumDims; ++i) {
        m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
        m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
      }
+   } else {
+     m_inputStrides[NumDims-1] = 1;
+     m_outputStrides[NumDims-1] = 1;
+     for (int i = NumDims-2; i >= 0; --i) {
+       m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
+       m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
+     }
+   }
  }

  typedef typename XprType::CoeffReturnType CoeffReturnType;
@@ -125,32 +137,90 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
    m_impl.cleanup();
  }

+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const
+ {
+   if (Layout == ColMajor) {
+     return coeffColMajor(index);
+   } else {
+     return coeffRowMajor(index);
+   }
+ }
+
  // TODO: attempt to speed this up. The integer divisions and modulo are slow
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffColMajor(Index index) const
  {
    Index inputIndex = 0;
    for (int i = NumDims - 1; i > 0; --i) {
      const Index idx = index / m_outputStrides[i];
+     if (internal::index_statically_eq<Broadcast>()(i, 1)) {
+       eigen_assert(idx < m_impl.dimensions()[i]);
+       inputIndex += idx * m_inputStrides[i];
+     } else {
        if (internal::index_statically_eq<InputDimensions>()(i, 1)) {
          eigen_assert(idx % m_impl.dimensions()[i] == 0);
        } else {
          inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
        }
+     }
      index -= idx * m_outputStrides[i];
    }
    if (internal::index_statically_eq<Broadcast>()(0, 1)) {
      eigen_assert(index < m_impl.dimensions()[0]);
      inputIndex += index;
+   } else {
+     if (internal::index_statically_eq<InputDimensions>()(0, 1)) {
+       eigen_assert(index % m_impl.dimensions()[0] == 0);
      } else {
        inputIndex += (index % m_impl.dimensions()[0]);
      }
+   }
    return m_impl.coeff(inputIndex);
  }

+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeffRowMajor(Index index) const
+ {
+   Index inputIndex = 0;
+   for (int i = 0; i < NumDims - 1; ++i) {
+     const Index idx = index / m_outputStrides[i];
+     if (internal::index_statically_eq<Broadcast>()(i, 1)) {
+       eigen_assert(idx < m_impl.dimensions()[i]);
+       inputIndex += idx * m_inputStrides[i];
+     } else {
+       if (internal::index_statically_eq<InputDimensions>()(i, 1)) {
+         eigen_assert(idx % m_impl.dimensions()[i] == 0);
+       } else {
+         inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+       }
+     }
+     index -= idx * m_outputStrides[i];
+   }
+   if (internal::index_statically_eq<Broadcast>()(NumDims-1, 1)) {
+     eigen_assert(index < m_impl.dimensions()[NumDims-1]);
+     inputIndex += index;
+   } else {
+     if (internal::index_statically_eq<InputDimensions>()(NumDims-1, 1)) {
+       eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
+     } else {
+       inputIndex += (index % m_impl.dimensions()[NumDims-1]);
+     }
+   }
+   return m_impl.coeff(inputIndex);
+ }
+
+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const
+ {
+   if (Layout == ColMajor) {
+     return packetColMajor<LoadMode>(index);
+   } else {
+     return packetRowMajor<LoadMode>(index);
+   }
+ }
+
  // Ignore the LoadMode and always use unaligned loads since we can't guarantee
  // the alignment at compile time.
  template<int LoadMode>
- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
  {
    const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
    EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
@@ -161,20 +231,30 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
    Index inputIndex = 0;
    for (int i = NumDims - 1; i > 0; --i) {
      const Index idx = index / m_outputStrides[i];
+     if (internal::index_statically_eq<Broadcast>()(i, 1)) {
+       eigen_assert(idx < m_impl.dimensions()[i]);
+       inputIndex += idx * m_inputStrides[i];
+     } else {
        if (internal::index_statically_eq<InputDimensions>()(i, 1)) {
          eigen_assert(idx % m_impl.dimensions()[i] == 0);
        } else {
          inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
        }
+     }
      index -= idx * m_outputStrides[i];
    }
    Index innermostLoc;
    if (internal::index_statically_eq<Broadcast>()(0, 1)) {
      eigen_assert(index < m_impl.dimensions()[0]);
      innermostLoc = index;
+   } else {
+     if (internal::index_statically_eq<InputDimensions>()(0, 1)) {
+       eigen_assert(index % m_impl.dimensions()[0] == 0);
+       innermostLoc = 0;
      } else {
        innermostLoc = index % m_impl.dimensions()[0];
      }
+   }
    inputIndex += innermostLoc;

    // Todo: this could be extended to the second dimension if we're not
@@ -185,13 +265,67 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
      EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
      values[0] = m_impl.coeff(inputIndex);
      for (int i = 1; i < packetSize; ++i) {
-       values[i] = coeff(originalIndex+i);
+       values[i] = coeffColMajor(originalIndex+i);
      }
      PacketReturnType rslt = internal::pload<PacketReturnType>(values);
      return rslt;
    }
  }

+ template<int LoadMode>
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
+ {
+   const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+   EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+   eigen_assert(index+packetSize-1 < dimensions().TotalSize());
+
+   const Index originalIndex = index;
+
+   Index inputIndex = 0;
+   for (int i = 0; i < NumDims - 1; ++i) {
+     const Index idx = index / m_outputStrides[i];
+     if (internal::index_statically_eq<Broadcast>()(i, 1)) {
+       eigen_assert(idx < m_impl.dimensions()[i]);
+       inputIndex += idx * m_inputStrides[i];
+     } else {
+       if (internal::index_statically_eq<InputDimensions>()(i, 1)) {
+         eigen_assert(idx % m_impl.dimensions()[i] == 0);
+       } else {
+         inputIndex += (idx % m_impl.dimensions()[i]) * m_inputStrides[i];
+       }
+     }
+     index -= idx * m_outputStrides[i];
+   }
+   Index innermostLoc;
+   if (internal::index_statically_eq<Broadcast>()(NumDims-1, 1)) {
+     eigen_assert(index < m_impl.dimensions()[NumDims-1]);
+     innermostLoc = index;
+   } else {
+     if (internal::index_statically_eq<InputDimensions>()(NumDims-1, 1)) {
+       eigen_assert(index % m_impl.dimensions()[NumDims-1] == 0);
+       innermostLoc = 0;
+     } else {
+       innermostLoc = index % m_impl.dimensions()[NumDims-1];
+     }
+   }
+   inputIndex += innermostLoc;
+
+   // Todo: this could be extended to the second dimension if we're not
+   // broadcasting alongside the first dimension, and so on.
+   if (innermostLoc + packetSize <= m_impl.dimensions()[NumDims-1]) {
+     return m_impl.template packet<Unaligned>(inputIndex);
+   } else {
+     EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+     values[0] = m_impl.coeff(inputIndex);
+     for (int i = 1; i < packetSize; ++i) {
+       values[i] = coeffRowMajor(originalIndex+i);
+     }
+     PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+     return rslt;
+   }
+ }
+
  Scalar* data() const { return NULL; }

  protected:
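The evaluator above now builds its input and output strides in whichever order the Layout dictates and dispatches coeff()/packet() to ColMajor or RowMajor variants. A hedged usage sketch (broadcast() is the existing TensorBase entry point; shapes are illustrative):

#include <unsupported/Eigen/CXX11/Tensor>

void broadcast_row_major() {
  // A single row of biases, stored row-major.
  Eigen::Tensor<float, 2, Eigen::RowMajor> bias(1, 4);
  bias.setRandom();

  // Replicate the row 3 times along dimension 0: the result is a 3x4 expression
  // evaluated with the RowMajor index math added in this commit.
  Eigen::array<int, 2> bcast;
  bcast[0] = 3;
  bcast[1] = 1;

  Eigen::Tensor<float, 2, Eigen::RowMajor> tiled(3, 4);
  tiled = bias.broadcast(bcast);
}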

unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h

@@ -21,34 +21,61 @@ namespace Eigen {
  */

namespace internal {
-template<std::size_t DimId, typename XprType>
+template<DenseIndex DimId, typename XprType>
struct traits<TensorChippingOp<DimId, XprType> > : public traits<XprType>
{
  typedef typename XprType::Scalar Scalar;
- typedef typename internal::packet_traits<Scalar>::type Packet;
- typedef typename traits<XprType>::StorageKind StorageKind;
- typedef typename traits<XprType>::Index Index;
+ typedef traits<XprType> XprTraits;
+ typedef typename packet_traits<Scalar>::type Packet;
+ typedef typename XprTraits::StorageKind StorageKind;
+ typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
+ static const int NumDimensions = XprTraits::NumDimensions - 1;
+ static const int Layout = XprTraits::Layout;
};

-template<std::size_t DimId, typename XprType>
+template<DenseIndex DimId, typename XprType>
struct eval<TensorChippingOp<DimId, XprType>, Eigen::Dense>
{
  typedef const TensorChippingOp<DimId, XprType>& type;
};

-template<std::size_t DimId, typename XprType>
+template<DenseIndex DimId, typename XprType>
struct nested<TensorChippingOp<DimId, XprType>, 1, typename eval<TensorChippingOp<DimId, XprType> >::type>
{
  typedef TensorChippingOp<DimId, XprType> type;
};

+template <DenseIndex DimId>
+struct DimensionId
+{
+  DimensionId(DenseIndex dim) {
+    eigen_assert(dim == DimId);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
+    return DimId;
+  }
+};
+template <>
+struct DimensionId<Dynamic>
+{
+  DimensionId(DenseIndex dim) : actual_dim(dim) {
+    eigen_assert(dim >= 0);
+  }
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE DenseIndex actualDim() const {
+    return actual_dim;
+  }
+ private:
+  const DenseIndex actual_dim;
+};
+
}  // end namespace internal

-template<std::size_t DimId, typename XprType>
+template<DenseIndex DimId, typename XprType>
class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
{
  public:
@@ -61,11 +88,14 @@ class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
  typedef typename Eigen::internal::traits<TensorChippingOp>::StorageKind StorageKind;
  typedef typename Eigen::internal::traits<TensorChippingOp>::Index Index;

- EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset)
-     : m_xpr(expr), m_offset(offset) {}
+ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorChippingOp(const XprType& expr, const Index offset, const Index dim)
+     : m_xpr(expr), m_offset(offset), m_dim(dim) {
+ }

  EIGEN_DEVICE_FUNC
  const Index offset() const { return m_offset; }
+ EIGEN_DEVICE_FUNC
+ const Index dim() const { return m_dim.actualDim(); }

  EIGEN_DEVICE_FUNC
  const typename internal::remove_all<typename XprType::Nested>::type&
@@ -77,18 +107,20 @@ class TensorChippingOp : public TensorBase<TensorChippingOp<DimId, XprType> >
  {
    typedef TensorAssignOp<TensorChippingOp, const OtherDerived> Assign;
    Assign assign(*this, other);
-   internal::TensorExecutor<const Assign, DefaultDevice, false>::run(assign, DefaultDevice());
+   static const bool Vectorize = TensorEvaluator<const Assign, DefaultDevice>::PacketAccess;
+   internal::TensorExecutor<const Assign, DefaultDevice, Vectorize>::run(assign, DefaultDevice());
    return *this;
  }

  protected:
    typename XprType::Nested m_xpr;
    const Index m_offset;
+   const internal::DimensionId<DimId> m_dim;
};

// Eval as rvalue
-template<std::size_t DimId, typename ArgType, typename Device>
+template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{
  typedef TensorChippingOp<DimId, ArgType> XprType;
@@ -96,25 +128,28 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
  static const int NumDims = NumInputDims-1;
  typedef typename XprType::Index Index;
  typedef DSizes<Index, NumDims> Dimensions;
+ typedef typename XprType::Scalar Scalar;

  enum {
    // Alignment can't be guaranteed at compile time since it depends on the
    // slice offsets.
    IsAligned = false,
-   PacketAccess = false,  // not yet implemented
+   PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
+   Layout = TensorEvaluator<ArgType, Device>::Layout,
+   CoordAccess = false,  // to be implemented
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
-     : m_impl(op.expression(), device), m_device(device)
+     : m_impl(op.expression(), device), m_dim(op.dim()), m_device(device)
  {
    // We could also support the case where NumInputDims==1 if needed.
    EIGEN_STATIC_ASSERT(NumInputDims >= 2, YOU_MADE_A_PROGRAMMING_MISTAKE);
-   EIGEN_STATIC_ASSERT(NumInputDims > DimId, YOU_MADE_A_PROGRAMMING_MISTAKE);
+   eigen_assert(NumInputDims > m_dim.actualDim());

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
    int j = 0;
    for (int i = 0; i < NumInputDims; ++i) {
-     if (i != DimId) {
+     if (i != m_dim.actualDim()) {
        m_dimensions[j] = input_dims[i];
        ++j;
      }
@@ -122,15 +157,21 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
    m_stride = 1;
    m_inputStride = 1;
-   for (int i = 0; i < DimId; ++i) {
+   if (Layout == ColMajor) {
+     for (int i = 0; i < m_dim.actualDim(); ++i) {
        m_stride *= input_dims[i];
        m_inputStride *= input_dims[i];
      }
-   m_inputStride *= input_dims[DimId];
+   } else {
+     for (int i = NumInputDims-1; i > m_dim.actualDim(); --i) {
+       m_stride *= input_dims[i];
+       m_inputStride *= input_dims[i];
+     }
+   }
+   m_inputStride *= input_dims[m_dim.actualDim()];
    m_inputOffset = m_stride * op.offset();
  }

- typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename XprType::PacketReturnType PacketReturnType;
@@ -150,16 +191,52 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
    return m_impl.coeff(srcCoeff(index));
  }

- /* to be done
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
- }*/
+   const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
+   EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
+   eigen_assert(index+packetSize-1 < dimensions().TotalSize());
+
+   if ((Layout == ColMajor && m_dim.actualDim() == 0) ||
+       (Layout == RowMajor && m_dim.actualDim() == NumInputDims-1)) {
+     // m_stride is equal to 1, so let's avoid the integer division.
+     eigen_assert(m_stride == 1);
+     Index inputIndex = index * m_inputStride + m_inputOffset;
+     EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+     for (int i = 0; i < packetSize; ++i) {
+       values[i] = m_impl.coeff(inputIndex);
+       inputIndex += m_inputStride;
+     }
+     PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+     return rslt;
+   } else if ((Layout == ColMajor && m_dim.actualDim() == NumInputDims - 1) ||
+              (Layout == RowMajor && m_dim.actualDim() == 0)) {
+     // m_stride is always greater than index, so let's avoid the integer division.
+     eigen_assert(m_stride > index);
+     return m_impl.template packet<LoadMode>(index + m_inputOffset);
+   } else {
+     const Index idx = index / m_stride;
+     const Index rem = index - idx * m_stride;
+     if (rem + packetSize <= m_stride) {
+       Index inputIndex = idx * m_inputStride + m_inputOffset + rem;
+       return m_impl.template packet<LoadMode>(inputIndex);
+     } else {
+       // Cross the stride boundary. Fallback to slow path.
+       EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
+       for (int i = 0; i < packetSize; ++i) {
+         values[i] = coeff(index);
+         ++index;
+       }
+       PacketReturnType rslt = internal::pload<PacketReturnType>(values);
+       return rslt;
+     }
+   }
+ }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const {
    Scalar* result = m_impl.data();
-   if (DimId == NumDims && result) {
+   if (m_dim.actualDim() == NumDims && result) {
      return result + m_inputOffset;
    } else {
      return NULL;
@ -170,11 +247,13 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
{ {
Index inputIndex; Index inputIndex;
if (DimId == 0) { if ((Layout == ColMajor && m_dim.actualDim() == 0) ||
(Layout == RowMajor && m_dim.actualDim() == NumInputDims-1)) {
// m_stride is equal to 1, so let's avoid the integer division. // m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(m_stride == 1); eigen_assert(m_stride == 1);
inputIndex = index * m_inputStride + m_inputOffset; inputIndex = index * m_inputStride + m_inputOffset;
} else if (DimId == NumInputDims-1) { } else if ((Layout == ColMajor && m_dim.actualDim() == NumInputDims-1) ||
(Layout == RowMajor && m_dim.actualDim() == 0)) {
// m_stride is always greater than index, so let's avoid the integer division. // m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(m_stride > index); eigen_assert(m_stride > index);
inputIndex = index + m_inputOffset; inputIndex = index + m_inputOffset;
@ -192,12 +271,13 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
Index m_inputOffset; Index m_inputOffset;
Index m_inputStride; Index m_inputStride;
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
const internal::DimensionId<DimId> m_dim;
const Device& m_device; const Device& m_device;
}; };
// Eval as lvalue // Eval as lvalue
template<std::size_t DimId, typename ArgType, typename Device> template<DenseIndex DimId, typename ArgType, typename Device>
struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device> struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
: public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device> : public TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
{ {
@ -207,17 +287,17 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
static const int NumDims = NumInputDims-1; static const int NumDims = NumInputDims-1;
typedef typename XprType::Index Index; typedef typename XprType::Index Index;
typedef DSizes<Index, NumDims> Dimensions; typedef DSizes<Index, NumDims> Dimensions;
typedef typename XprType::Scalar Scalar;
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = false, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device) : Base(op, device)
{ } { }
typedef typename XprType::Scalar Scalar;
typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename XprType::CoeffReturnType CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType; typedef typename XprType::PacketReturnType PacketReturnType;
@ -226,11 +306,45 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
return this->m_impl.coeffRef(this->srcCoeff(index)); return this->m_impl.coeffRef(this->srcCoeff(index));
} }
/* to be done
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x) void writePacket(Index index, const PacketReturnType& x)
{ {
} */ static const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
if ((this->Layout == ColMajor && this->m_dim.actualDim() == 0) ||
(this->Layout == RowMajor && this->m_dim.actualDim() == NumInputDims-1)) {
// m_stride is equal to 1, so let's avoid the integer division.
eigen_assert(this->m_stride == 1);
EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
Index inputIndex = index * this->m_inputStride + this->m_inputOffset;
for (int i = 0; i < packetSize; ++i) {
this->m_impl.coeffRef(inputIndex) = values[i];
inputIndex += this->m_inputStride;
}
} else if ((this->Layout == ColMajor && this->m_dim.actualDim() == NumInputDims-1) ||
(this->Layout == RowMajor && this->m_dim.actualDim() == 0)) {
// m_stride is always greater than index, so let's avoid the integer division.
eigen_assert(this->m_stride > index);
this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
} else {
const Index idx = index / this->m_stride;
const Index rem = index - idx * this->m_stride;
if (rem + packetSize <= this->m_stride) {
const Index inputIndex = idx * this->m_inputStride + this->m_inputOffset + rem;
this->m_impl.template writePacket<StoreMode>(inputIndex, x);
} else {
// Cross the stride boundary. Fall back to the slow path.
EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
internal::pstore<CoeffReturnType, PacketReturnType>(values, x);
for (int i = 0; i < packetSize; ++i) {
this->coeffRef(index) = values[i];
++index;
}
}
}
}
}; };
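
For context, a minimal usage sketch of the chipping operation whose evaluator is patched above. It is not part of the diff; it only assumes the chip() accessor exposed by the unsupported Tensor module, and the offsets and dimension indices are made up for illustration.

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 5);
  t.setRandom();

  // Chipping dimension 1 at offset 2 drops that dimension: the result is 2x5.
  Eigen::Tensor<float, 2> slice = t.chip(2, 1);
  std::cout << slice.dimension(0) << " x " << slice.dimension(1) << "\n";

  // A chip of a non-const tensor is writable; vectorized stores through it are
  // what the writePacket() path added above implements.
  Eigen::Tensor<float, 2> zeros(2, 5);
  zeros.setZero();
  t.chip(0, 1) = zeros;
  return 0;
}
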
@ -35,6 +35,8 @@ struct traits<TensorConcatenationOp<Axis, LhsXprType, RhsXprType> >
typedef typename RhsXprType::Nested RhsNested; typedef typename RhsXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested; typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested; typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<LhsXprType>::NumDimensions;
static const int Layout = traits<LhsXprType>::Layout;
enum { Flags = 0 }; enum { Flags = 0 };
}; };
@ -103,11 +105,13 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<LeftArgType, Device>::PacketAccess & TensorEvaluator<RightArgType, Device>::PacketAccess,
Layout = TensorEvaluator<LeftArgType, Device>::Layout,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis()) : m_leftImpl(op.lhsExpression(), device), m_rightImpl(op.rhsExpression(), device), m_axis(op.axis())
{ {
EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout == TensorEvaluator<RightArgType, Device>::Layout || NumDims == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(NumDims == RightNumDims, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(NumDims == RightNumDims, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(0 <= m_axis && m_axis < NumDims); eigen_assert(0 <= m_axis && m_axis < NumDims);
const Dimensions& lhs_dims = m_leftImpl.dimensions(); const Dimensions& lhs_dims = m_leftImpl.dimensions();
@ -127,14 +131,27 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
m_dimensions[i] = lhs_dims[i]; m_dimensions[i] = lhs_dims[i];
} }
if (Layout == ColMajor) {
m_leftStrides[0] = 1; m_leftStrides[0] = 1;
m_rightStrides[0] = 1; m_rightStrides[0] = 1;
m_outputStrides[0] = 1; m_outputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) { for (int i = 1; i < NumDims; ++i) {
m_leftStrides[i] = m_leftStrides[i-1] * lhs_dims[i-1]; m_leftStrides[i] = m_leftStrides[i-1] * lhs_dims[i-1];
m_rightStrides[i] = m_rightStrides[i-1] * rhs_dims[i-1]; m_rightStrides[i] = m_rightStrides[i-1] * rhs_dims[i-1];
m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
} }
} else {
m_leftStrides[NumDims - 1] = 1;
m_rightStrides[NumDims - 1] = 1;
m_outputStrides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_leftStrides[i] = m_leftStrides[i+1] * lhs_dims[i+1];
m_rightStrides[i] = m_rightStrides[i+1] * rhs_dims[i+1];
m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
}
}
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
@ -159,26 +176,50 @@ struct TensorEvaluator<const TensorConcatenationOp<Axis, LeftArgType, RightArgTy
{ {
// Collect dimension-wise indices (subs). // Collect dimension-wise indices (subs).
array<Index, NumDims> subs; array<Index, NumDims> subs;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
subs[i] = index / m_outputStrides[i]; subs[i] = index / m_outputStrides[i];
index -= subs[i] * m_outputStrides[i]; index -= subs[i] * m_outputStrides[i];
} }
subs[0] = index; subs[0] = index;
} else {
for (int i = 0; i < NumDims - 1; ++i) {
subs[i] = index / m_outputStrides[i];
index -= subs[i] * m_outputStrides[i];
}
subs[NumDims - 1] = index;
}
const Dimensions& left_dims = m_leftImpl.dimensions(); const Dimensions& left_dims = m_leftImpl.dimensions();
if (subs[m_axis] < left_dims[m_axis]) { if (subs[m_axis] < left_dims[m_axis]) {
Index left_index = subs[0]; Index left_index;
if (Layout == ColMajor) {
left_index = subs[0];
for (int i = 1; i < NumDims; ++i) { for (int i = 1; i < NumDims; ++i) {
left_index += (subs[i] % left_dims[i]) * m_leftStrides[i]; left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
} }
} else {
left_index = subs[NumDims - 1];
for (int i = NumDims - 2; i >= 0; --i) {
left_index += (subs[i] % left_dims[i]) * m_leftStrides[i];
}
}
return m_leftImpl.coeff(left_index); return m_leftImpl.coeff(left_index);
} else { } else {
subs[m_axis] -= left_dims[m_axis]; subs[m_axis] -= left_dims[m_axis];
const Dimensions& right_dims = m_rightImpl.dimensions(); const Dimensions& right_dims = m_rightImpl.dimensions();
Index right_index = subs[0]; Index right_index;
if (Layout == ColMajor) {
right_index = subs[0];
for (int i = 1; i < NumDims; ++i) { for (int i = 1; i < NumDims; ++i) {
right_index += (subs[i] % right_dims[i]) * m_rightStrides[i]; right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
} }
} else {
right_index = subs[NumDims - 1];
for (int i = NumDims - 2; i >= 0; --i) {
right_index += (subs[i] % right_dims[i]) * m_rightStrides[i];
}
}
return m_rightImpl.coeff(right_index); return m_rightImpl.coeff(right_index);
} }
} }
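
The constructor above now builds the left/right/output strides in layout-dependent order. Below is a small standalone sketch (plain C++, not Eigen code) of that stride computation; a tensor declared with Eigen::RowMajor corresponds to the second branch.

#include <cstddef>
#include <vector>

// Column-major strides grow from dimension 0 outward; row-major strides grow
// from the last dimension inward, mirroring the two branches above.
std::vector<std::ptrdiff_t> compute_strides(const std::vector<std::ptrdiff_t>& dims,
                                            bool col_major) {
  const int n = static_cast<int>(dims.size());
  std::vector<std::ptrdiff_t> strides(n, 1);
  if (col_major) {
    for (int i = 1; i < n; ++i) strides[i] = strides[i - 1] * dims[i - 1];
  } else {
    for (int i = n - 2; i >= 0; --i) strides[i] = strides[i + 1] * dims[i + 1];
  }
  return strides;
}

// For dims {2, 3, 4}: column-major gives {1, 2, 6}, row-major gives {12, 4, 1}.
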
@ -93,10 +93,10 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
typedef array<Index, RDims> right_dim_mapper_t; typedef array<Index, RDims> right_dim_mapper_t;
typedef array<Index, ContractDims> contract_t; typedef array<Index, ContractDims> contract_t;
typedef array<Index, max_n_1<LDims - ContractDims>::size> left_nocontract_t; typedef array<Index, internal::max_n_1<LDims - ContractDims>::size> left_nocontract_t;
typedef array<Index, max_n_1<RDims - ContractDims>::size> right_nocontract_t; typedef array<Index, internal::max_n_1<RDims - ContractDims>::size> right_nocontract_t;
static const int NumDims = max_n_1<LDims + RDims - 2 * ContractDims>::size; static const int NumDims = internal::max_n_1<LDims + RDims - 2 * ContractDims>::size;
typedef DSizes<Index, NumDims> Dimensions; typedef DSizes<Index, NumDims> Dimensions;
@ -144,9 +144,9 @@ template<typename Dimensions, typename InputXprType, typename KernelXprType>
struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> > struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
{ {
// Type promotion to handle the case where the types of the lhs and the rhs are different. // Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename internal::promote_storage_type<typename InputXprType::Scalar, typedef typename promote_storage_type<typename InputXprType::Scalar,
typename KernelXprType::Scalar>::ret Scalar; typename KernelXprType::Scalar>::ret Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef typename packet_traits<Scalar>::type Packet;
typedef typename promote_storage_type<typename traits<InputXprType>::StorageKind, typedef typename promote_storage_type<typename traits<InputXprType>::StorageKind,
typename traits<KernelXprType>::StorageKind>::ret StorageKind; typename traits<KernelXprType>::StorageKind>::ret StorageKind;
typedef typename promote_index_type<typename traits<InputXprType>::Index, typedef typename promote_index_type<typename traits<InputXprType>::Index,
@ -155,6 +155,8 @@ struct traits<TensorConvolutionOp<Dimensions, InputXprType, KernelXprType> >
typedef typename KernelXprType::Nested RhsNested; typedef typename KernelXprType::Nested RhsNested;
typedef typename remove_reference<LhsNested>::type _LhsNested; typedef typename remove_reference<LhsNested>::type _LhsNested;
typedef typename remove_reference<RhsNested>::type _RhsNested; typedef typename remove_reference<RhsNested>::type _RhsNested;
static const int NumDimensions = traits<InputXprType>::NumDimensions;
static const int Layout = traits<InputXprType>::Layout;
enum { enum {
Flags = 0, Flags = 0,
@ -227,11 +229,17 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
enum { enum {
IsAligned = TensorEvaluator<InputArgType, Device>::IsAligned & TensorEvaluator<KernelArgType, Device>::IsAligned, IsAligned = TensorEvaluator<InputArgType, Device>::IsAligned & TensorEvaluator<KernelArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<InputArgType, Device>::PacketAccess & TensorEvaluator<KernelArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<InputArgType, Device>::PacketAccess & TensorEvaluator<KernelArgType, Device>::PacketAccess,
Layout = TensorEvaluator<InputArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device) : m_inputImpl(op.inputExpression(), device), m_kernelImpl(op.kernelExpression(), device), m_kernelArg(op.kernelExpression()), m_kernel(NULL), m_local_kernel(false), m_device(device)
{ {
EIGEN_STATIC_ASSERT((TensorEvaluator<InputArgType, Device>::Layout == TensorEvaluator<KernelArgType, Device>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
// Only column major tensors are supported for now.
EIGEN_STATIC_ASSERT((Layout == ColMajor), YOU_MADE_A_PROGRAMMING_MISTAKE);
const typename TensorEvaluator<InputArgType, Device>::Dimensions& input_dims = m_inputImpl.dimensions(); const typename TensorEvaluator<InputArgType, Device>::Dimensions& input_dims = m_inputImpl.dimensions();
const typename TensorEvaluator<KernelArgType, Device>::Dimensions& kernel_dims = m_kernelImpl.dimensions(); const typename TensorEvaluator<KernelArgType, Device>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
@ -389,10 +397,6 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
} }
} }
// No copy, no assignment
TensorEvaluator(const TensorEvaluator&);
TensorEvaluator& operator = (const TensorEvaluator&);
array<Index, NumDims> m_inputStride; array<Index, NumDims> m_inputStride;
array<Index, NumDims> m_outputStride; array<Index, NumDims> m_outputStride;
@ -421,7 +425,7 @@ struct GetKernelSize {
} }
}; };
template <> template <>
struct GetKernelSize<Eigen::Dynamic> { struct GetKernelSize<Dynamic> {
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE int operator() (const int kernelSize) const {
return kernelSize; return kernelSize;
} }
@ -610,11 +614,17 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
enum { enum {
IsAligned = TensorEvaluator<InputArgType, GpuDevice>::IsAligned & TensorEvaluator<KernelArgType, GpuDevice>::IsAligned, IsAligned = TensorEvaluator<InputArgType, GpuDevice>::IsAligned & TensorEvaluator<KernelArgType, GpuDevice>::IsAligned,
PacketAccess = false, PacketAccess = false,
Layout = TensorEvaluator<InputArgType, GpuDevice>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device) EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const GpuDevice& device)
: m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device) : m_inputImpl(op.inputExpression(), device), m_kernelArg(op.kernelExpression()), m_kernelImpl(op.kernelExpression(), device), m_indices(op.indices()), m_buf(NULL), m_kernel(NULL), m_local_kernel(false), m_device(device)
{ {
EIGEN_STATIC_ASSERT((TensorEvaluator<InputArgType, GpuDevice>::Layout == TensorEvaluator<KernelArgType, GpuDevice>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
// Only column major tensors are supported for now.
EIGEN_STATIC_ASSERT((Layout == ColMajor), YOU_MADE_A_PROGRAMMING_MISTAKE);
const typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions& input_dims = m_inputImpl.dimensions(); const typename TensorEvaluator<InputArgType, GpuDevice>::Dimensions& input_dims = m_inputImpl.dimensions();
const typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions& kernel_dims = m_kernelImpl.dimensions(); const typename TensorEvaluator<KernelArgType, GpuDevice>::Dimensions& kernel_dims = m_kernelImpl.dimensions();
@ -740,19 +750,17 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
internal::IndexMapper<Index, InputDims, 1> indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); internal::IndexMapper<Index, InputDims, 1> indexMapper(m_inputImpl.dimensions(), kernel_dims, indices);
switch(kernel_size) { switch(kernel_size) {
case 4: { case 4: {
EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 4, data);
break; break;
} }
case 7: { case 7: {
EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, 7, data);
break; break;
} }
default: { default: {
EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Eigen::Dynamic> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel1D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, kernel_size, data);
} }
} }
cudaError_t error = cudaGetLastError();
assert(error == cudaSuccess);
break; break;
} }
@ -797,11 +805,11 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
case 4: { case 4: {
switch (kernel_size_y) { switch (kernel_size_y) {
case 7: { case 7: {
EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, 7>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, 7, data);
break; break;
} }
default: { default: {
EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Eigen::Dynamic> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 4, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 4, kernel_size_y, data);
break; break;
} }
} }
@ -810,23 +818,21 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
case 7: { case 7: {
switch (kernel_size_y) { switch (kernel_size_y) {
case 4: { case 4: {
EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, 4>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, 4, data);
break; break;
} }
default: { default: {
EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Eigen::Dynamic> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, 7, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, 7, kernel_size_y, data);
break; break;
} }
} }
break; break;
} }
default: { default: {
EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Eigen::Dynamic, Eigen::Dynamic> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel2D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims, Dynamic, Dynamic>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, kernel_size_x, kernel_size_y, data);
break; break;
} }
} }
cudaError_t error = cudaGetLastError();
assert(error == cudaSuccess);
break; break;
} }
@ -858,9 +864,7 @@ struct TensorEvaluator<const TensorConvolutionOp<Indices, InputArgType, KernelAr
const array<Index, 3> kernel_dims(m_kernelImpl.dimensions()[0], m_kernelImpl.dimensions()[1], m_kernelImpl.dimensions()[2]); const array<Index, 3> kernel_dims(m_kernelImpl.dimensions()[0], m_kernelImpl.dimensions()[1], m_kernelImpl.dimensions()[2]);
internal::IndexMapper<Index, InputDims, 3> indexMapper(m_inputImpl.dimensions(), kernel_dims, indices); internal::IndexMapper<Index, InputDims, 3> indexMapper(m_inputImpl.dimensions(), kernel_dims, indices);
EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims> <<<num_blocks, block_size, shared_mem, m_device.stream()>>>(m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data); LAUNCH_CUDA_KERNEL((EigenConvolutionKernel3D<TensorEvaluator<InputArgType, GpuDevice>, Index, InputDims>), num_blocks, block_size, shared_mem, m_device, m_inputImpl, indexMapper, m_kernel, numP, numX, maxX, numY, maxY, numZ, maxZ, kernel_size_x, kernel_size_y, kernel_size_z, data);
cudaError_t error = cudaGetLastError();
assert(error == cudaSuccess);
break; break;
} }
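
The raw <<<...>>> launches followed by inline cudaGetLastError checks are replaced above by the LAUNCH_CUDA_KERNEL macro, which is defined elsewhere in this change set. The sketch below is only an illustrative analogue (a hypothetical launch_checked helper, compiled as CUDA) of what such a wrapper centralizes; it is not Eigen's macro.

#include <cassert>
#include <cuda_runtime.h>

// Illustrative wrapper: launch a kernel and surface launch/configuration
// errors immediately, instead of repeating the cudaGetLastError boilerplate
// after every call site as the removed lines did.
template <typename Kernel, typename... Args>
void launch_checked(Kernel kernel, dim3 grid, dim3 block, size_t shared_mem,
                    cudaStream_t stream, Args... args) {
  kernel<<<grid, block, shared_mem, stream>>>(args...);
  cudaError_t err = cudaGetLastError();
  assert(err == cudaSuccess);
}
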
@ -25,11 +25,14 @@ struct traits<TensorEvalToOp<XprType> >
{ {
// Type promotion to handle the case where the types of the lhs and the rhs are different. // Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum { enum {
Flags = 0, Flags = 0,
@ -60,24 +63,24 @@ class TensorEvalToOp : public TensorBase<TensorEvalToOp<XprType> >
typedef typename Eigen::internal::traits<TensorEvalToOp>::Scalar Scalar; typedef typename Eigen::internal::traits<TensorEvalToOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorEvalToOp>::Packet Packet; typedef typename Eigen::internal::traits<TensorEvalToOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType; typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
typedef typename Eigen::internal::nested<TensorEvalToOp>::type Nested; typedef typename Eigen::internal::nested<TensorEvalToOp>::type Nested;
typedef typename Eigen::internal::traits<TensorEvalToOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorEvalToOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorEvalToOp>::Index Index; typedef typename Eigen::internal::traits<TensorEvalToOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(Scalar* buffer, const XprType& expr) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvalToOp(CoeffReturnType* buffer, const XprType& expr)
: m_xpr(expr), m_buffer(buffer) {} : m_xpr(expr), m_buffer(buffer) {}
EIGEN_DEVICE_FUNC EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type& const typename internal::remove_all<typename XprType::Nested>::type&
expression() const { return m_xpr; } expression() const { return m_xpr; }
EIGEN_DEVICE_FUNC Scalar* buffer() const { return m_buffer; } EIGEN_DEVICE_FUNC CoeffReturnType* buffer() const { return m_buffer; }
protected: protected:
typename XprType::Nested m_xpr; typename XprType::Nested m_xpr;
Scalar* m_buffer; CoeffReturnType* m_buffer;
}; };
@ -93,6 +96,8 @@ struct TensorEvaluator<const TensorEvalToOp<ArgType>, Device>
enum { enum {
IsAligned = true, IsAligned = true,
PacketAccess = true, PacketAccess = true,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -103,12 +108,12 @@ struct TensorEvaluator<const TensorEvalToOp<ArgType>, Device>
} }
typedef typename XprType::Index Index; typedef typename XprType::Index Index;
typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType; typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
m_impl.evalSubExprsIfNeeded(NULL); m_impl.evalSubExprsIfNeeded(NULL);
return true; return true;
} }
@ -117,7 +122,7 @@ struct TensorEvaluator<const TensorEvalToOp<ArgType>, Device>
m_buffer[i] = m_impl.coeff(i); m_buffer[i] = m_impl.coeff(i);
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void evalPacket(Index i) {
internal::pstoret<Scalar, Packet, Aligned>(m_buffer + i, m_impl.template packet<TensorEvaluator<ArgType, Device>::IsAligned ? Aligned : Unaligned>(i)); internal::pstoret<CoeffReturnType, PacketReturnType, Aligned>(m_buffer + i, m_impl.template packet<TensorEvaluator<ArgType, Device>::IsAligned ? Aligned : Unaligned>(i));
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
@ -135,12 +140,12 @@ struct TensorEvaluator<const TensorEvalToOp<ArgType>, Device>
return internal::ploadt<Packet, LoadMode>(m_buffer + index); return internal::ploadt<Packet, LoadMode>(m_buffer + index);
} }
Scalar* data() const { return NULL; } CoeffReturnType* data() const { return NULL; }
private: private:
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
const Device& m_device; const Device& m_device;
Scalar* m_buffer; CoeffReturnType* m_buffer;
}; };
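
TensorEvalToOp evaluates an expression directly into a buffer owned by the caller, which is why its CoeffReturnType is stripped of const above. A minimal standalone sketch of that pattern (plain C++, illustrative names only):

#include <cstddef>
#include <functional>
#include <vector>

// Evaluate an index-to-value expression straight into caller-provided storage,
// the way evalScalar()/evalPacket() above fill m_buffer.
void eval_to(float* buffer, std::size_t n,
             const std::function<float(std::size_t)>& expr) {
  for (std::size_t i = 0; i < n; ++i) buffer[i] = expr(i);
}

// Usage: the destination is allocated by the caller, not by the evaluator.
// std::vector<float> out(100);
// eval_to(out.data(), out.size(), [](std::size_t i) { return 2.0f * i; });
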
@ -25,11 +25,14 @@ struct traits<TensorForcedEvalOp<XprType> >
{ {
// Type promotion to handle the case where the types of the lhs and the rhs are different. // Type promotion to handle the case where the types of the lhs and the rhs are different.
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename traits<XprType>::StorageKind StorageKind;
typedef typename traits<XprType>::Index Index; typedef typename traits<XprType>::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
enum { enum {
Flags = 0, Flags = 0,
@ -59,8 +62,8 @@ class TensorForcedEvalOp : public TensorBase<TensorForcedEvalOp<XprType> >
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Packet Packet; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType; typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested; typedef typename Eigen::internal::nested<TensorForcedEvalOp>::type Nested;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index; typedef typename Eigen::internal::traits<TensorForcedEvalOp>::Index Index;
@ -88,6 +91,7 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
enum { enum {
IsAligned = true, IsAligned = true,
PacketAccess = (internal::packet_traits<Scalar>::size > 1), PacketAccess = (internal::packet_traits<Scalar>::size > 1),
Layout = TensorEvaluator<ArgType, Device>::Layout,
}; };
EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
@ -100,10 +104,16 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); } EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar*) { EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType*) {
m_impl.evalSubExprsIfNeeded(NULL); m_impl.evalSubExprsIfNeeded(NULL);
m_buffer = (Scalar*)m_device.allocate(m_impl.dimensions().TotalSize() * sizeof(Scalar)); const Index numValues = m_impl.dimensions().TotalSize();
m_buffer = (CoeffReturnType*)m_device.allocate(numValues * sizeof(CoeffReturnType));
// Should initialize the memory in case we're dealing with non-POD types.
if (!internal::is_arithmetic<CoeffReturnType>::value) {
for (Index i = 0; i < numValues; ++i) {
new(m_buffer+i) CoeffReturnType();
}
}
typedef TensorEvalToOp<const ArgType> EvalTo; typedef TensorEvalToOp<const ArgType> EvalTo;
EvalTo evalToTmp(m_buffer, m_op); EvalTo evalToTmp(m_buffer, m_op);
internal::TensorExecutor<const EvalTo, Device, TensorEvaluator<ArgType, Device>::PacketAccess>::run(evalToTmp, m_device); internal::TensorExecutor<const EvalTo, Device, TensorEvaluator<ArgType, Device>::PacketAccess>::run(evalToTmp, m_device);
@ -132,7 +142,7 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
const ArgType m_op; const ArgType m_op;
const Device& m_device; const Device& m_device;
Scalar* m_buffer; CoeffReturnType* m_buffer;
}; };
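
The forced evaluator above now value-constructs its scratch buffer when the coefficient type is not arithmetic. A standalone sketch of that allocate-then-placement-new pattern (the allocator and names here are illustrative, not the Device API):

#include <cstddef>
#include <cstdlib>
#include <new>
#include <type_traits>

// Allocate raw storage for n elements and, for non-trivial types, construct
// each element in place so that later reads and assignments are well defined.
template <typename T>
T* allocate_and_init(std::size_t n) {
  T* buf = static_cast<T*>(std::malloc(n * sizeof(T)));
  if (!std::is_arithmetic<T>::value) {
    for (std::size_t i = 0; i < n; ++i) new (buf + i) T();
  }
  return buf;
}

// A float buffer skips the loop entirely; something like std::string would be
// constructed element by element (and would need matching destruction before
// the storage is freed, as cleanup() must do).
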
@ -29,9 +29,11 @@ template<typename Dimensions, typename InputXprType, typename KernelXprType> cla
template<typename PatchDim, typename XprType> class TensorPatchOp; template<typename PatchDim, typename XprType> class TensorPatchOp;
template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp; template<DenseIndex Rows, DenseIndex Cols, typename XprType> class TensorImagePatchOp;
template<typename Broadcast, typename XprType> class TensorBroadcastingOp; template<typename Broadcast, typename XprType> class TensorBroadcastingOp;
template<std::size_t DimId, typename XprType> class TensorChippingOp; template<DenseIndex DimId, typename XprType> class TensorChippingOp;
template<typename NewDimensions, typename XprType> class TensorReshapingOp; template<typename NewDimensions, typename XprType> class TensorReshapingOp;
template<typename XprType> class TensorLayoutSwapOp;
template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp; template<typename StartIndices, typename Sizes, typename XprType> class TensorSlicingOp;
template<typename ReverseDimensions, typename XprType> class TensorReverseOp;
template<typename PaddingDimensions, typename XprType> class TensorPaddingOp; template<typename PaddingDimensions, typename XprType> class TensorPaddingOp;
template<typename Shuffle, typename XprType> class TensorShufflingOp; template<typename Shuffle, typename XprType> class TensorShufflingOp;
template<typename Strides, typename XprType> class TensorStridingOp; template<typename Strides, typename XprType> class TensorStridingOp;
@ -37,6 +37,8 @@ struct traits<TensorImagePatchOp<Rows, Cols, XprType> > : public traits<XprType>
typedef typename XprTraits::Index Index; typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
}; };
template<DenseIndex Rows, DenseIndex Cols, typename XprType> template<DenseIndex Rows, DenseIndex Cols, typename XprType>
@ -53,8 +55,6 @@ struct nested<TensorImagePatchOp<Rows, Cols, XprType>, 1, typename eval<TensorIm
} // end namespace internal } // end namespace internal
template<DenseIndex Rows, DenseIndex Cols, typename XprType> template<DenseIndex Rows, DenseIndex Cols, typename XprType>
class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors> class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprType>, ReadOnlyAccessors>
{ {
@ -69,9 +69,11 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index; typedef typename Eigen::internal::traits<TensorImagePatchOp>::Index Index;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols, EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorImagePatchOp(const XprType& expr, DenseIndex patch_rows, DenseIndex patch_cols,
DenseIndex row_strides, DenseIndex col_strides) DenseIndex row_strides, DenseIndex col_strides,
PaddingType padding_type)
: m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols), : m_xpr(expr), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
m_row_strides(row_strides), m_col_strides(col_strides){} m_row_strides(row_strides), m_col_strides(col_strides),
m_padding_type(padding_type) {}
EIGEN_DEVICE_FUNC EIGEN_DEVICE_FUNC
DenseIndex patch_rows() const { return m_patch_rows; } DenseIndex patch_rows() const { return m_patch_rows; }
@ -81,6 +83,8 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
DenseIndex row_strides() const { return m_row_strides; } DenseIndex row_strides() const { return m_row_strides; }
EIGEN_DEVICE_FUNC EIGEN_DEVICE_FUNC
DenseIndex col_strides() const { return m_col_strides; } DenseIndex col_strides() const { return m_col_strides; }
EIGEN_DEVICE_FUNC
PaddingType padding_type() const { return m_padding_type; }
EIGEN_DEVICE_FUNC EIGEN_DEVICE_FUNC
const typename internal::remove_all<typename XprType::Nested>::type& const typename internal::remove_all<typename XprType::Nested>::type&
@ -92,6 +96,7 @@ class TensorImagePatchOp : public TensorBase<TensorImagePatchOp<Rows, Cols, XprT
const DenseIndex m_patch_cols; const DenseIndex m_patch_cols;
const DenseIndex m_row_strides; const DenseIndex m_row_strides;
const DenseIndex m_col_strides; const DenseIndex m_col_strides;
const PaddingType m_padding_type;
}; };
@ -108,41 +113,79 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = NumDims == 5,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device) : m_impl(op.expression(), device)
{ {
// Only column major tensors are supported for now.
EIGEN_STATIC_ASSERT((Layout == ColMajor), YOU_MADE_A_PROGRAMMING_MISTAKE);
EIGEN_STATIC_ASSERT(NumDims >= 4, YOU_MADE_A_PROGRAMMING_MISTAKE); EIGEN_STATIC_ASSERT(NumDims >= 4, YOU_MADE_A_PROGRAMMING_MISTAKE);
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
// Caches a few variables.
m_inputRows = input_dims[1];
m_inputCols = input_dims[2];
m_row_strides = op.row_strides();
m_col_strides = op.col_strides();
// We only support the same stride for both dimensions and square patches.
eigen_assert(m_row_strides == m_col_strides);
switch (op.padding_type()) {
case PADDING_VALID:
m_outputRows = ceil((m_inputRows - op.patch_rows() + 1.f) / static_cast<float>(m_row_strides));
m_outputCols = ceil((m_inputCols - op.patch_cols() + 1.f) / static_cast<float>(m_col_strides));
// Calculate the padding
m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + op.patch_rows() - m_inputRows) / 2;
m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + op.patch_cols() - m_inputCols) / 2;
break;
case PADDING_SAME:
m_outputRows = ceil(m_inputRows / static_cast<float>(m_row_strides));
m_outputCols = ceil(m_inputCols / static_cast<float>(m_col_strides));
// Calculate the padding
m_rowPaddingTop = ((m_outputRows - 1) * m_row_strides + op.patch_rows() - m_inputRows) / 2;
m_colPaddingLeft = ((m_outputCols - 1) * m_col_strides + op.patch_cols() - m_inputCols) / 2;
break;
default:
eigen_assert(false && "unexpected padding");
}
// Dimensions for result of extraction.
// 0: depth
// 1: patch_rows
// 2: patch_cols
// 3: number of patches
// 4 and beyond: anything else (such as batch).
m_dimensions[0] = input_dims[0]; m_dimensions[0] = input_dims[0];
m_dimensions[1] = op.patch_rows(); m_dimensions[1] = op.patch_rows();
m_dimensions[2] = op.patch_cols(); m_dimensions[2] = op.patch_cols();
m_dimensions[3] = ceilf(static_cast<float>(input_dims[1]) / op.row_strides()) * m_dimensions[3] = m_outputRows * m_outputCols;
ceilf(static_cast<float>(input_dims[2]) / op.col_strides());
for (int i = 4; i < NumDims; ++i) { for (int i = 4; i < NumDims; ++i) {
m_dimensions[i] = input_dims[i-1]; m_dimensions[i] = input_dims[i-1];
} }
// Strides for moving the patch in various dimensions.
m_colStride = m_dimensions[1]; m_colStride = m_dimensions[1];
m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0]; m_patchStride = m_colStride * m_dimensions[2] * m_dimensions[0];
m_otherStride = m_patchStride * m_dimensions[3]; m_otherStride = m_patchStride * m_dimensions[3];
m_inputRows = input_dims[1]; // Strides for navigating through the input tensor.
m_inputCols = input_dims[2]; m_rowInputStride = input_dims[0];
m_colInputStride = input_dims[0] * input_dims[1];
m_rowInputStride = input_dims[0] * op.row_strides();
m_colInputStride = input_dims[0] * input_dims[1] * op.col_strides();
m_patchInputStride = input_dims[0] * input_dims[1] * input_dims[2]; m_patchInputStride = input_dims[0] * input_dims[1] * input_dims[2];
m_rowPaddingTop = op.patch_rows() / 2; // Fast representations of different variables.
m_colPaddingLeft = op.patch_cols() / 2;
m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride); m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);
m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride); m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride); m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
m_fastInputRows = internal::TensorIntDivisor<Index>(m_inputRows); // Number of patches in the width dimension.
m_fastOutputRows = internal::TensorIntDivisor<Index>(m_outputRows);
m_fastDimZero = internal::TensorIntDivisor<Index>(m_dimensions[0]); m_fastDimZero = internal::TensorIntDivisor<Index>(m_dimensions[0]);
} }
@ -162,26 +205,29 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{ {
// Find the location of the first element of the patch. // Patch index corresponding to the passed in index.
const Index patchIndex = index / m_fastPatchStride; const Index patchIndex = index / m_fastPatchStride;
// Find the offset of the element wrt the location of the first element. // Find the offset of the element wrt the location of the first element.
const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastDimZero; const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastDimZero;
// Other ways to index this element.
const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride; const Index otherIndex = (NumDims == 4) ? 0 : index / m_fastOtherStride;
const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride; const Index patch2DIndex = (NumDims == 4) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;
const Index colIndex = patch2DIndex / m_fastInputRows; const Index colIndex = patch2DIndex / m_fastOutputRows;
const Index colOffset = patchOffset / m_fastColStride; const Index colOffset = patchOffset / m_fastColStride;
const Index inputCol = colIndex + colOffset - m_colPaddingLeft; // Calculate col index in the input original tensor.
const Index inputCol = colIndex * m_col_strides + colOffset - m_colPaddingLeft;
if (inputCol < 0 || inputCol >= m_inputCols) { if (inputCol < 0 || inputCol >= m_inputCols) {
return Scalar(0); return Scalar(0);
} }
const Index rowIndex = patch2DIndex - colIndex * m_inputRows; // m_rowStride is always 1 const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
const Index rowOffset = patchOffset - colOffset * m_colStride; const Index rowOffset = patchOffset - colOffset * m_colStride;
const Index inputRow = rowIndex + rowOffset - m_rowPaddingTop; // Calculate row index in the original input tensor.
const Index inputRow = rowIndex * m_row_strides + rowOffset - m_rowPaddingTop;
if (inputRow < 0 || inputRow >= m_inputRows) { if (inputRow < 0 || inputRow >= m_inputRows) {
return Scalar(0); return Scalar(0);
} }
@ -214,20 +260,24 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride; const Index patch2DIndex = (NumDims == 4) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride); eigen_assert(patch2DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);
const Index colIndex = patch2DIndex / m_fastInputRows; const Index colIndex = patch2DIndex / m_fastOutputRows;
const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride}; const Index colOffsets[2] = {patchOffsets[0] / m_fastColStride, patchOffsets[1] / m_fastColStride};
const Index inputCols[2] = {colIndex + colOffsets[0] - m_colPaddingLeft, colIndex + colOffsets[1] - m_colPaddingLeft}; // Calculate col indices in the original input tensor.
const Index inputCols[2] = {colIndex * m_col_strides + colOffsets[0] -
m_colPaddingLeft, colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) { if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
// all zeros // all zeros
return internal::pset1<PacketReturnType>(Scalar(0)); return internal::pset1<PacketReturnType>(Scalar(0));
} }
if (inputCols[0] == inputCols[1]) { if (inputCols[0] == inputCols[1]) {
const Index rowIndex = patch2DIndex - colIndex * m_inputRows; const Index rowIndex = patch2DIndex - colIndex * m_outputRows;
const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride}; const Index rowOffsets[2] = {patchOffsets[0] - colOffsets[0]*m_colStride, patchOffsets[1] - colOffsets[1]*m_colStride};
eigen_assert(rowOffsets[0] <= rowOffsets[1]); eigen_assert(rowOffsets[0] <= rowOffsets[1]);
const Index inputRows[2] = {rowIndex + rowOffsets[0] - m_rowPaddingTop, rowIndex + rowOffsets[1] - m_rowPaddingTop}; // Calculate row indices in the original input tensor.
const Index inputRows[2] = {rowIndex * m_row_strides + rowOffsets[0] -
m_rowPaddingTop, rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};
if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) { if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
// all zeros // all zeros
@ -247,6 +297,43 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
Scalar* data() const { return NULL; } Scalar* data() const { return NULL; }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
Index rowPaddingTop() const { return m_rowPaddingTop; }
Index colPaddingLeft() const { return m_colPaddingLeft; }
Index outputRows() const { return m_outputRows; }
Index outputCols() const { return m_outputCols; }
Index userRowStride() const { return m_row_strides; }
Index userColStride() const { return m_col_strides; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords) const
{
// Location of the first element of the patch.
// 0: d, 1: patch_rows, 2: patch_cols, 3: number of patches, 4: number of batches
const Index patchIndex = coords[3];
array<Index, NumDims-1> inputCoords;
inputCoords[0] = coords[0]; // depth
inputCoords[1] = patchIndex / m_inputCols + coords[1] - m_rowPaddingTop;
inputCoords[2] = patchIndex - patchIndex / m_inputCols * m_inputCols + coords[2] - m_colPaddingLeft;
inputCoords[3] = coords[4]; // batch
// If the computed coordinates are outside the original image perimeter, return 0.
if (inputCoords[1] < 0 || inputCoords[1] >= m_inputRows ||
inputCoords[2] < 0 || inputCoords[2] >= m_inputCols) {
return Scalar(0);
}
if (TensorEvaluator<ArgType, Device>::CoordAccess) {
return m_impl.coeff(inputCoords);
} else {
Index inputIndex =
inputCoords[3] * m_patchInputStride +
inputCoords[2] * m_colInputStride +
inputCoords[1] * m_rowInputStride +
inputCoords[0];
return m_impl.coeff(inputIndex);
}
}
protected: protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
{ {
@ -264,6 +351,8 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
Index m_otherStride; Index m_otherStride;
Index m_patchStride; Index m_patchStride;
Index m_colStride; Index m_colStride;
Index m_row_strides;
Index m_col_strides;
internal::TensorIntDivisor<Index> m_fastOtherStride; internal::TensorIntDivisor<Index> m_fastOtherStride;
internal::TensorIntDivisor<Index> m_fastPatchStride; internal::TensorIntDivisor<Index> m_fastPatchStride;
internal::TensorIntDivisor<Index> m_fastColStride; internal::TensorIntDivisor<Index> m_fastColStride;
@ -275,10 +364,13 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
Index m_inputRows; Index m_inputRows;
Index m_inputCols; Index m_inputCols;
Index m_outputRows;
Index m_outputCols;
Index m_rowPaddingTop; Index m_rowPaddingTop;
Index m_colPaddingLeft; Index m_colPaddingLeft;
internal::TensorIntDivisor<Index> m_fastInputRows; internal::TensorIntDivisor<Index> m_fastOutputRows;
internal::TensorIntDivisor<Index> m_fastDimZero; internal::TensorIntDivisor<Index> m_fastDimZero;
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
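
The constructor above derives the number of patch positions and the implicit top/left padding from the padding mode and the user strides. A standalone arithmetic sketch of those formulas, with made-up sizes:

#include <cmath>
#include <iostream>

int main() {
  const int input_rows = 7, patch_rows = 3, row_stride = 2;

  // PADDING_VALID: only positions where the patch fits entirely inside the input.
  const int valid_rows = static_cast<int>(
      std::ceil((input_rows - patch_rows + 1.f) / static_cast<float>(row_stride)));

  // PADDING_SAME: the output covers the whole input, zero-padding as needed.
  const int same_rows = static_cast<int>(
      std::ceil(input_rows / static_cast<float>(row_stride)));

  // Implicit padding above the first row (the column formula is identical).
  const int pad_top = ((same_rows - 1) * row_stride + patch_rows - input_rows) / 2;

  std::cout << valid_rows << " " << same_rows << " " << pad_top << "\n";
  // Prints "3 4 1": ceil(5/2)=3, ceil(7/2)=4, (3*2 + 3 - 7)/2 = 1.
}
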
@ -24,11 +24,14 @@ template<typename NewDimensions, typename XprType>
struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType> struct traits<TensorReshapingOp<NewDimensions, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = array_size<NewDimensions>::value;
static const int Layout = XprTraits::Layout;
}; };
template<typename NewDimensions, typename XprType> template<typename NewDimensions, typename XprType>
@ -54,8 +57,8 @@ class TensorReshapingOp : public TensorBase<TensorReshapingOp<NewDimensions, Xpr
typedef typename Eigen::internal::traits<TensorReshapingOp>::Scalar Scalar; typedef typename Eigen::internal::traits<TensorReshapingOp>::Scalar Scalar;
typedef typename Eigen::internal::traits<TensorReshapingOp>::Packet Packet; typedef typename Eigen::internal::traits<TensorReshapingOp>::Packet Packet;
typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
typedef typename XprType::CoeffReturnType CoeffReturnType; typedef typename internal::remove_const<typename XprType::CoeffReturnType>::type CoeffReturnType;
typedef typename XprType::PacketReturnType PacketReturnType; typedef typename internal::remove_const<typename XprType::PacketReturnType>::type PacketReturnType;
typedef typename Eigen::internal::nested<TensorReshapingOp>::type Nested; typedef typename Eigen::internal::nested<TensorReshapingOp>::type Nested;
typedef typename Eigen::internal::traits<TensorReshapingOp>::StorageKind StorageKind; typedef typename Eigen::internal::traits<TensorReshapingOp>::StorageKind StorageKind;
typedef typename Eigen::internal::traits<TensorReshapingOp>::Index Index; typedef typename Eigen::internal::traits<TensorReshapingOp>::Index Index;
@ -96,11 +99,17 @@ struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
enum { enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned, IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device), m_dimensions(op.dimensions()) : m_impl(op.expression(), device), m_dimensions(op.dimensions())
{ } {
// The total size of the reshaped tensor must be equal to the total size
// of the input tensor.
eigen_assert(internal::array_prod(m_impl.dimensions()) == internal::array_prod(op.dimensions()));
}
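
The new assertion makes the reshape invariant explicit: the element count cannot change. A short usage sketch (it assumes the Tensor module's reshape() accessor; the shapes are made up):

#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>

int main() {
  Eigen::Tensor<float, 3> t(2, 3, 4);               // 24 coefficients
  t.setRandom();

  Eigen::array<Eigen::DenseIndex, 2> shape{{6, 4}}; // still 24 coefficients
  Eigen::Tensor<float, 2> r = t.reshape(shape);
  std::cout << r.dimension(0) << " x " << r.dimension(1) << "\n";  // 6 x 4

  // A shape such as {5, 4} would trip the eigen_assert above in debug builds.
  return 0;
}
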
typedef typename XprType::Index Index; typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
@ -109,7 +118,7 @@ struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
return m_impl.evalSubExprsIfNeeded(data); return m_impl.evalSubExprsIfNeeded(data);
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void cleanup() {
@ -127,7 +136,9 @@ struct TensorEvaluator<const TensorReshapingOp<NewDimensions, ArgType>, Device>
return m_impl.template packet<LoadMode>(index); return m_impl.template packet<LoadMode>(index);
} }
Scalar* data() const { return m_impl.data(); } CoeffReturnType* data() const { return m_impl.data(); }
const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }
protected: protected:
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
@ -148,6 +159,8 @@ template<typename NewDimensions, typename ArgType, typename Device>
enum { enum {
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned, IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -183,11 +196,14 @@ template<typename StartIndices, typename Sizes, typename XprType>
struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType> struct traits<TensorSlicingOp<StartIndices, Sizes, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = array_size<StartIndices>::value;
static const int Layout = XprTraits::Layout;
}; };
template<typename StartIndices, typename Sizes, typename XprType> template<typename StartIndices, typename Sizes, typename XprType>
@ -260,6 +276,8 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
// slice offsets and sizes. // slice offsets and sizes.
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false, IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -270,22 +288,30 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
} }
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
for (int i = 0; i < NumDims; ++i) { const Sizes& output_dims = op.sizes();
if (i > 0) { if (Layout == ColMajor) {
m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
} else {
m_inputStrides[0] = 1; m_inputStrides[0] = 1;
} for (int i = 1; i < NumDims; ++i) {
m_inputStrides[i] = m_inputStrides[i-1] * input_dims[i-1];
} }
const Sizes& output_dims = op.sizes();
for (int i = 0; i < NumDims; ++i) {
if (i > 0) {
m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
} else {
m_outputStrides[0] = 1; m_outputStrides[0] = 1;
m_fastOutputStrides[0] = 1; m_fastOutputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
m_outputStrides[i] = m_outputStrides[i-1] * output_dims[i-1];
m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
}
} else {
m_inputStrides[NumDims-1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
}
m_outputStrides[NumDims-1] = 1;
m_fastOutputStrides[NumDims-1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_outputStrides[i] = m_outputStrides[i+1] * output_dims[i+1];
m_fastOutputStrides[i] = internal::TensorIntDivisor<Index>(m_outputStrides[i]);
} }
} }
} }
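Editor's note: the two branches above differ only in the direction in which the strides accumulate; column-major strides grow from the first dimension, row-major strides from the last. A standalone sketch of the same arithmetic (plain C++, illustrative only):
#include <array>
#include <iostream>
int main() {
  // Strides of a 4 x 5 x 6 tensor under both layouts.
  const std::array<long, 3> dims = {4, 5, 6};
  std::array<long, 3> col_major, row_major;
  col_major[0] = 1;  // first index varies fastest
  for (int i = 1; i < 3; ++i) {
    col_major[i] = col_major[i - 1] * dims[i - 1];
  }
  row_major[2] = 1;  // last index varies fastest
  for (int i = 1; i >= 0; --i) {
    row_major[i] = row_major[i + 1] * dims[i + 1];
  }
  // Prints "col-major: 1 4 20, row-major: 30 6 1".
  std::cout << "col-major: " << col_major[0] << " " << col_major[1] << " " << col_major[2]
            << ", row-major: " << row_major[0] << " " << row_major[1] << " " << row_major[2]
            << std::endl;
  return 0;
}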
@ -299,16 +325,25 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data) { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(CoeffReturnType* data) {
m_impl.evalSubExprsIfNeeded(NULL); m_impl.evalSubExprsIfNeeded(NULL);
if (internal::is_arithmetic<Scalar>::value && data && m_impl.data()) { if (internal::is_arithmetic<Scalar>::value && data && m_impl.data()) {
Index contiguous_values = 1; Index contiguous_values = 1;
if (Layout == ColMajor) {
for (int i = 0; i < NumDims; ++i) { for (int i = 0; i < NumDims; ++i) {
contiguous_values *= dimensions()[i]; contiguous_values *= dimensions()[i];
if (dimensions()[i] != m_impl.dimensions()[i]) { if (dimensions()[i] != m_impl.dimensions()[i]) {
break; break;
} }
} }
} else {
for (int i = NumDims-1; i >= 0; --i) {
contiguous_values *= dimensions()[i];
if (dimensions()[i] != m_impl.dimensions()[i]) {
break;
}
}
}
// Use memcpy if it's going to be faster than using the regular evaluation. // Use memcpy if it's going to be faster than using the regular evaluation.
if (contiguous_values > 2 * m_device.numThreads()) { if (contiguous_values > 2 * m_device.numThreads()) {
Scalar* src = m_impl.data(); Scalar* src = m_impl.data();
@ -340,6 +375,7 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
Index inputIndices[] = {0, 0}; Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1}; Index indices[] = {index, index + packetSize - 1};
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx0 = indices[0] / m_fastOutputStrides[i]; const Index idx0 = indices[0] / m_fastOutputStrides[i];
const Index idx1 = indices[1] / m_fastOutputStrides[i]; const Index idx1 = indices[1] / m_fastOutputStrides[i];
@ -350,6 +386,18 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
} }
inputIndices[0] += (indices[0] + m_offsets[0]); inputIndices[0] += (indices[0] + m_offsets[0]);
inputIndices[1] += (indices[1] + m_offsets[0]); inputIndices[1] += (indices[1] + m_offsets[0]);
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx0 = indices[0] / m_fastOutputStrides[i];
const Index idx1 = indices[1] / m_fastOutputStrides[i];
inputIndices[0] += (idx0 + m_offsets[i]) * m_inputStrides[i];
inputIndices[1] += (idx1 + m_offsets[i]) * m_inputStrides[i];
indices[0] -= idx0 * m_outputStrides[i];
indices[1] -= idx1 * m_outputStrides[i];
}
inputIndices[0] += (indices[0] + m_offsets[NumDims-1]);
inputIndices[1] += (indices[1] + m_offsets[NumDims-1]);
}
if (inputIndices[1] - inputIndices[0] == packetSize - 1) { if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]); PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
return rslt; return rslt;
@ -366,10 +414,20 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
} }
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar* data() const { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords)
{
array<Index, NumDims> inputCoords;
for (int i = 0; i < NumDims; ++i) {
      inputCoords[i] = coords[i] + this->m_offsets[i];
}
return m_impl.coeff(inputCoords);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType* data() const {
Scalar* result = m_impl.data(); Scalar* result = m_impl.data();
if (result) { if (result) {
Index offset = 0; Index offset = 0;
if (Layout == ColMajor) {
for (int i = 0; i < NumDims; ++i) { for (int i = 0; i < NumDims; ++i) {
if (m_dimensions[i] != m_impl.dimensions()[i]) { if (m_dimensions[i] != m_impl.dimensions()[i]) {
offset += m_offsets[i] * m_inputStrides[i]; offset += m_offsets[i] * m_inputStrides[i];
@ -382,6 +440,20 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
break; break;
} }
} }
} else {
for (int i = NumDims - 1; i >= 0; --i) {
if (m_dimensions[i] != m_impl.dimensions()[i]) {
offset += m_offsets[i] * m_inputStrides[i];
for (int j = i-1; j >= 0; --j) {
if (m_dimensions[j] > 1) {
return NULL;
}
offset += m_offsets[j] * m_inputStrides[j];
}
break;
}
}
}
return result + offset; return result + offset;
} }
return NULL; return NULL;
@ -391,12 +463,21 @@ struct TensorEvaluator<const TensorSlicingOp<StartIndices, Sizes, ArgType>, Devi
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
{ {
Index inputIndex = 0; Index inputIndex = 0;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_fastOutputStrides[i]; const Index idx = index / m_fastOutputStrides[i];
inputIndex += (idx + m_offsets[i]) * m_inputStrides[i]; inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
index -= idx * m_outputStrides[i]; index -= idx * m_outputStrides[i];
} }
inputIndex += (index + m_offsets[0]); inputIndex += (index + m_offsets[0]);
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_fastOutputStrides[i];
inputIndex += (idx + m_offsets[i]) * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
inputIndex += (index + m_offsets[NumDims-1]);
}
return inputIndex; return inputIndex;
} }
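Editor's note: the remapping performed by srcCoeff can be checked by hand on a tiny case. A column-major sketch with hypothetical sizes (a 2x2 slice at offset {1, 1} of a 4x3 tensor; plain C++, not the Eigen code above):
#include <array>
#include <iostream>
int main() {
  const std::array<long, 2> input_dims = {4, 3};
  const std::array<long, 2> slice_dims = {2, 2};
  const std::array<long, 2> offsets = {1, 1};
  // Column-major strides for the input and for the slice.
  const std::array<long, 2> input_strides = {1, input_dims[0]};
  const std::array<long, 2> output_strides = {1, slice_dims[0]};
  for (long index = 0; index < slice_dims[0] * slice_dims[1]; ++index) {
    long remaining = index;
    long input_index = 0;
    // Peel off the outermost dimension first, exactly as srcCoeff does.
    const long idx = remaining / output_strides[1];
    input_index += (idx + offsets[1]) * input_strides[1];
    remaining -= idx * output_strides[1];
    input_index += remaining + offsets[0];
    std::cout << index << " -> " << input_index << "\n";  // 0->5, 1->6, 2->9, 3->10
  }
  return 0;
}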
@ -422,6 +503,8 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
enum { enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false, IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = TensorEvaluator<ArgType, Device>::CoordAccess,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -445,6 +528,7 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
const int packetSize = internal::unpacket_traits<PacketReturnType>::size; const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
Index inputIndices[] = {0, 0}; Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1}; Index indices[] = {index, index + packetSize - 1};
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx0 = indices[0] / this->m_fastOutputStrides[i]; const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
const Index idx1 = indices[1] / this->m_fastOutputStrides[i]; const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
@ -455,6 +539,18 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
} }
inputIndices[0] += (indices[0] + this->m_offsets[0]); inputIndices[0] += (indices[0] + this->m_offsets[0]);
inputIndices[1] += (indices[1] + this->m_offsets[0]); inputIndices[1] += (indices[1] + this->m_offsets[0]);
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx0 = indices[0] / this->m_fastOutputStrides[i];
const Index idx1 = indices[1] / this->m_fastOutputStrides[i];
inputIndices[0] += (idx0 + this->m_offsets[i]) * this->m_inputStrides[i];
inputIndices[1] += (idx1 + this->m_offsets[i]) * this->m_inputStrides[i];
indices[0] -= idx0 * this->m_outputStrides[i];
indices[1] -= idx1 * this->m_outputStrides[i];
}
inputIndices[0] += (indices[0] + this->m_offsets[NumDims-1]);
inputIndices[1] += (indices[1] + this->m_offsets[NumDims-1]);
}
if (inputIndices[1] - inputIndices[0] == packetSize - 1) { if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
this->m_impl.template writePacket<StoreMode>(inputIndices[0], x); this->m_impl.template writePacket<StoreMode>(inputIndices[0], x);
} }
@ -468,6 +564,15 @@ struct TensorEvaluator<TensorSlicingOp<StartIndices, Sizes, ArgType>, Device>
} }
} }
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType& coeffRef(const array<Index, NumDims>& coords)
{
array<Index, NumDims> inputCoords;
for (int i = 0; i < NumDims; ++i) {
      inputCoords[i] = coords[i] + this->m_offsets[i];
}
return this->m_impl.coeffRef(inputCoords);
}
}; };

View File

@ -24,11 +24,14 @@ template<typename PaddingDimensions, typename XprType>
struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType> struct traits<TensorPaddingOp<PaddingDimensions, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
}; };
template<typename PaddingDimensions, typename XprType> template<typename PaddingDimensions, typename XprType>
@ -88,6 +91,8 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = true,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -99,6 +104,7 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
m_dimensions[i] += m_padding[i].first + m_padding[i].second; m_dimensions[i] += m_padding[i].first + m_padding[i].second;
} }
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
if (Layout == ColMajor) {
m_inputStrides[0] = 1; m_inputStrides[0] = 1;
m_outputStrides[0] = 1; m_outputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) { for (int i = 1; i < NumDims; ++i) {
@ -106,6 +112,15 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1]; m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
} }
m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1]; m_outputStrides[NumDims] = m_outputStrides[NumDims-1] * m_dimensions[NumDims-1];
} else {
m_inputStrides[NumDims - 1] = 1;
m_outputStrides[NumDims] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
m_outputStrides[i+1] = m_outputStrides[i+2] * m_dimensions[i+1];
}
m_outputStrides[0] = m_outputStrides[1] * m_dimensions[0];
}
} }
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
@ -126,6 +141,7 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
{ {
eigen_assert(index < dimensions().TotalSize()); eigen_assert(index < dimensions().TotalSize());
Index inputIndex = 0; Index inputIndex = 0;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i]; const Index idx = index / m_outputStrides[i];
if (idx < m_padding[i].first || idx >= m_dimensions[i] - m_padding[i].second) { if (idx < m_padding[i].first || idx >= m_dimensions[i] - m_padding[i].second) {
@ -138,11 +154,71 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
return Scalar(0); return Scalar(0);
} }
inputIndex += (index - m_padding[0].first); inputIndex += (index - m_padding[0].first);
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i+1];
if (idx < m_padding[i].first || idx >= m_dimensions[i] - m_padding[i].second) {
return Scalar(0);
}
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
index -= idx * m_outputStrides[i+1];
}
if (index < m_padding[NumDims-1].first ||
index >= m_dimensions[NumDims-1] - m_padding[NumDims-1].second) {
return Scalar(0);
}
inputIndex += (index - m_padding[NumDims-1].first);
}
return m_impl.coeff(inputIndex); return m_impl.coeff(inputIndex);
} }
template<int LoadMode> template<int LoadMode>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
{
if (Layout == ColMajor) {
return packetColMajor(index);
}
return packetRowMajor(index);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords) const
{
Index inputIndex;
if (Layout == ColMajor) {
const Index idx = coords[0];
if (idx < m_padding[0].first || idx >= m_dimensions[0] - m_padding[0].second) {
return Scalar(0);
}
inputIndex = idx - m_padding[0].first;
for (int i = 1; i < NumDims; ++i) {
const Index idx = coords[i];
if (idx < m_padding[i].first || idx >= m_dimensions[i] - m_padding[i].second) {
return Scalar(0);
}
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
}
} else {
const Index idx = coords[NumDims-1];
if (idx < m_padding[NumDims-1].first || idx >= m_dimensions[NumDims-1] - m_padding[NumDims-1].second) {
return Scalar(0);
}
inputIndex = idx - m_padding[NumDims-1].first;
for (int i = NumDims - 2; i >= 0; --i) {
const Index idx = coords[i];
if (idx < m_padding[i].first || idx >= m_dimensions[i] - m_padding[i].second) {
return Scalar(0);
}
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
}
}
return m_impl.coeff(inputIndex);
}
Scalar* data() const { return NULL; }
protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetColMajor(Index index) const
{ {
const int packetSize = internal::unpacket_traits<PacketReturnType>::size; const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE) EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
@ -200,9 +276,64 @@ struct TensorEvaluator<const TensorPaddingOp<PaddingDimensions, ArgType>, Device
return packetWithPossibleZero(initialIndex); return packetWithPossibleZero(initialIndex);
} }
Scalar* data() const { return NULL; } EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetRowMajor(Index index) const
{
const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < dimensions().TotalSize());
protected: const Index initialIndex = index;
Index inputIndex = 0;
for (int i = 0; i < NumDims - 1; ++i) {
const Index first = index;
const Index last = index + packetSize - 1;
const Index lastPaddedLeft = m_padding[i].first * m_outputStrides[i+1];
const Index firstPaddedRight = (m_dimensions[i] - m_padding[i].second) * m_outputStrides[i+1];
const Index lastPaddedRight = m_outputStrides[i];
if (last < lastPaddedLeft) {
        // all the coefficients are in the padding zone.
return internal::pset1<PacketReturnType>(Scalar(0));
}
else if (first >= firstPaddedRight && last < lastPaddedRight) {
        // all the coefficients are in the padding zone.
return internal::pset1<PacketReturnType>(Scalar(0));
}
else if (first >= lastPaddedLeft && last < firstPaddedRight) {
        // all the coefficients are between the 2 padding zones.
const Index idx = index / m_outputStrides[i+1];
inputIndex += (idx - m_padding[i].first) * m_inputStrides[i];
index -= idx * m_outputStrides[i+1];
}
else {
// Every other case
return packetWithPossibleZero(initialIndex);
}
}
const Index last = index + packetSize - 1;
const Index first = index;
const Index lastPaddedLeft = m_padding[NumDims-1].first;
const Index firstPaddedRight = (m_dimensions[NumDims-1] - m_padding[NumDims-1].second);
const Index lastPaddedRight = m_outputStrides[NumDims-1];
if (last < lastPaddedLeft) {
      // all the coefficients are in the padding zone.
return internal::pset1<PacketReturnType>(Scalar(0));
}
else if (first >= firstPaddedRight && last < lastPaddedRight) {
      // all the coefficients are in the padding zone.
return internal::pset1<PacketReturnType>(Scalar(0));
}
else if (first >= lastPaddedLeft && last < firstPaddedRight) {
      // all the coefficients are between the 2 padding zones.
inputIndex += (index - m_padding[NumDims-1].first);
return m_impl.template packet<Unaligned>(inputIndex);
}
// Every other case
return packetWithPossibleZero(initialIndex);
}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
{ {

View File

@ -24,11 +24,14 @@ template<typename PatchDim, typename XprType>
struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType> struct traits<TensorPatchOp<PatchDim, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions + 1;
static const int Layout = XprTraits::Layout;
}; };
template<typename PatchDim, typename XprType> template<typename PatchDim, typename XprType>
@ -89,11 +92,16 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = true,
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: m_impl(op.expression(), device) : m_impl(op.expression(), device)
{ {
// Only column major tensors are supported for now.
EIGEN_STATIC_ASSERT((Layout == ColMajor), YOU_MADE_A_PROGRAMMING_MISTAKE);
Index num_patches = 1; Index num_patches = 1;
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
const PatchDim& patch_dims = op.patch_dims(); const PatchDim& patch_dims = op.patch_dims();
@ -195,6 +203,35 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
} }
} }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(const array<Index, NumDims>& coords) const
{
// Location of the first element of the patch.
    Index patchIndex = coords[NumDims - 1];
if (TensorEvaluator<ArgType, Device>::CoordAccess) {
array<Index, NumDims-1> inputCoords;
for (int i = NumDims - 2; i > 0; --i) {
const Index patchIdx = patchIndex / m_patchStrides[i];
patchIndex -= patchIdx * m_patchStrides[i];
        inputCoords[i] = coords[i] + patchIdx;
}
inputCoords[0] = (patchIndex + coords[0]);
return m_impl.coeff(inputCoords);
}
else {
Index inputIndex = 0;
for (int i = NumDims - 2; i > 0; --i) {
const Index patchIdx = patchIndex / m_patchStrides[i];
patchIndex -= patchIdx * m_patchStrides[i];
const Index offsetIdx = coords[i];
inputIndex += (patchIdx + offsetIdx) * m_inputStrides[i];
}
inputIndex += (patchIndex + coords[0]);
return m_impl.coeff(inputIndex);
}
}
Scalar* data() const { return NULL; } Scalar* data() const { return NULL; }
protected: protected:
@ -206,7 +243,6 @@ struct TensorEvaluator<const TensorPatchOp<PatchDim, ArgType>, Device>
TensorEvaluator<ArgType, Device> m_impl; TensorEvaluator<ArgType, Device> m_impl;
}; };
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H #endif // EIGEN_CXX11_TENSOR_TENSOR_PATCH_H

View File

@ -24,11 +24,14 @@ template<typename Shuffle, typename XprType>
struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType> struct traits<TensorShufflingOp<Shuffle, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
}; };
template<typename Shuffle, typename XprType> template<typename Shuffle, typename XprType>
@ -99,6 +102,8 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
enum { enum {
IsAligned = false, IsAligned = false,
PacketAccess = (internal::packet_traits<Scalar>::size > 1), PacketAccess = (internal::packet_traits<Scalar>::size > 1),
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -112,15 +117,22 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
array<Index, NumDims> inputStrides; array<Index, NumDims> inputStrides;
for (int i = 0; i < NumDims; ++i) { if (Layout == ColMajor) {
if (i > 0) {
inputStrides[i] = inputStrides[i-1] * input_dims[i-1];
m_outputStrides[i] = m_outputStrides[i-1] * m_dimensions[i-1];
} else {
inputStrides[0] = 1; inputStrides[0] = 1;
m_outputStrides[0] = 1; m_outputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) {
inputStrides[i] = inputStrides[i - 1] * input_dims[i - 1];
m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
}
} else {
inputStrides[NumDims - 1] = 1;
m_outputStrides[NumDims - 1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
inputStrides[i] = inputStrides[i + 1] * input_dims[i + 1];
m_outputStrides[i] = m_outputStrides[i + 1] * m_dimensions[i + 1];
} }
} }
for (int i = 0; i < NumDims; ++i) { for (int i = 0; i < NumDims; ++i) {
m_inputStrides[i] = inputStrides[shuffle[i]]; m_inputStrides[i] = inputStrides[shuffle[i]];
} }
@ -162,15 +174,23 @@ struct TensorEvaluator<const TensorShufflingOp<Shuffle, ArgType>, Device>
Scalar* data() const { return NULL; } Scalar* data() const { return NULL; }
protected: protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const {
{
Index inputIndex = 0; Index inputIndex = 0;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i]; const Index idx = index / m_outputStrides[i];
inputIndex += idx * m_inputStrides[i]; inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i]; index -= idx * m_outputStrides[i];
} }
return inputIndex + index * m_inputStrides[0]; return inputIndex + index * m_inputStrides[0];
} else {
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
return inputIndex + index * m_inputStrides[NumDims - 1];
}
} }
Dimensions m_dimensions; Dimensions m_dimensions;

View File

@ -24,11 +24,14 @@ template<typename Strides, typename XprType>
struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType> struct traits<TensorStridingOp<Strides, XprType> > : public traits<XprType>
{ {
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
typedef typename internal::packet_traits<Scalar>::type Packet; typedef traits<XprType> XprTraits;
typedef typename traits<XprType>::StorageKind StorageKind; typedef typename packet_traits<Scalar>::type Packet;
typedef typename traits<XprType>::Index Index; typedef typename XprTraits::StorageKind StorageKind;
typedef typename XprTraits::Index Index;
typedef typename XprType::Nested Nested; typedef typename XprType::Nested Nested;
typedef typename remove_reference<Nested>::type _Nested; typedef typename remove_reference<Nested>::type _Nested;
static const int NumDimensions = XprTraits::NumDimensions;
static const int Layout = XprTraits::Layout;
}; };
template<typename Strides, typename XprType> template<typename Strides, typename XprType>
@ -98,6 +101,8 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
enum { enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false, IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess, PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
}; };
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
@ -109,6 +114,7 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
} }
const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions(); const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();
if (Layout == ColMajor) {
m_outputStrides[0] = 1; m_outputStrides[0] = 1;
m_inputStrides[0] = 1; m_inputStrides[0] = 1;
for (int i = 1; i < NumDims; ++i) { for (int i = 1; i < NumDims; ++i) {
@ -117,6 +123,16 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
m_inputStrides[i-1] *= op.strides()[i-1]; m_inputStrides[i-1] *= op.strides()[i-1];
} }
m_inputStrides[NumDims-1] *= op.strides()[NumDims-1]; m_inputStrides[NumDims-1] *= op.strides()[NumDims-1];
} else { // RowMajor
m_outputStrides[NumDims-1] = 1;
m_inputStrides[NumDims-1] = 1;
for (int i = NumDims - 2; i >= 0; --i) {
m_outputStrides[i] = m_outputStrides[i+1] * m_dimensions[i+1];
m_inputStrides[i] = m_inputStrides[i+1] * input_dims[i+1];
m_inputStrides[i+1] *= op.strides()[i+1];
}
m_inputStrides[0] *= op.strides()[0];
}
} }
typedef typename XprType::Scalar Scalar; typedef typename XprType::Scalar Scalar;
@ -135,14 +151,7 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
{ {
Index inputIndex = 0; return m_impl.coeff(srcCoeff(index));
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
inputIndex += index * m_inputStrides[0];
return m_impl.coeff(inputIndex);
} }
template<int LoadMode> template<int LoadMode>
@ -154,6 +163,7 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
Index inputIndices[] = {0, 0}; Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1}; Index indices[] = {index, index + packetSize - 1};
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) { for (int i = NumDims - 1; i > 0; --i) {
const Index idx0 = indices[0] / m_outputStrides[i]; const Index idx0 = indices[0] / m_outputStrides[i];
const Index idx1 = indices[1] / m_outputStrides[i]; const Index idx1 = indices[1] / m_outputStrides[i];
@ -164,6 +174,18 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
} }
inputIndices[0] += indices[0] * m_inputStrides[0]; inputIndices[0] += indices[0] * m_inputStrides[0];
inputIndices[1] += indices[1] * m_inputStrides[0]; inputIndices[1] += indices[1] * m_inputStrides[0];
} else { // RowMajor
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx0 = indices[0] / m_outputStrides[i];
const Index idx1 = indices[1] / m_outputStrides[i];
inputIndices[0] += idx0 * m_inputStrides[i];
inputIndices[1] += idx1 * m_inputStrides[i];
indices[0] -= idx0 * m_outputStrides[i];
indices[1] -= idx1 * m_outputStrides[i];
}
inputIndices[0] += indices[0] * m_inputStrides[NumDims-1];
inputIndices[1] += indices[1] * m_inputStrides[NumDims-1];
}
if (inputIndices[1] - inputIndices[0] == packetSize - 1) { if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]); PacketReturnType rslt = m_impl.template packet<Unaligned>(inputIndices[0]);
return rslt; return rslt;
@ -183,6 +205,27 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
Scalar* data() const { return NULL; } Scalar* data() const { return NULL; }
protected: protected:
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
{
Index inputIndex = 0;
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx = index / m_outputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
inputIndex += index * m_inputStrides[0];
} else { // RowMajor
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx = index / m_outputStrides[i];
inputIndex += idx * m_inputStrides[i];
index -= idx * m_outputStrides[i];
}
inputIndex += index * m_inputStrides[NumDims-1];
}
return inputIndex;
}
Dimensions m_dimensions; Dimensions m_dimensions;
array<Index, NumDims> m_outputStrides; array<Index, NumDims> m_outputStrides;
array<Index, NumDims> m_inputStrides; array<Index, NumDims> m_inputStrides;
@ -190,6 +233,84 @@ struct TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
}; };
// Eval as lvalue
template<typename Strides, typename ArgType, typename Device>
struct TensorEvaluator<TensorStridingOp<Strides, ArgType>, Device>
: public TensorEvaluator<const TensorStridingOp<Strides, ArgType>, Device>
{
typedef TensorStridingOp<Strides, ArgType> XprType;
typedef TensorEvaluator<const XprType, Device> Base;
// typedef typename XprType::Index Index;
static const int NumDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
// typedef DSizes<Index, NumDims> Dimensions;
enum {
IsAligned = /*TensorEvaluator<ArgType, Device>::IsAligned*/false,
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
Layout = TensorEvaluator<ArgType, Device>::Layout,
CoordAccess = false, // to be implemented
};
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device)
: Base(op, device) { }
typedef typename XprType::Index Index;
typedef typename XprType::Scalar Scalar;
typedef typename XprType::PacketReturnType PacketReturnType;
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(Index index)
{
return this->m_impl.coeffRef(this->srcCoeff(index));
}
template <int StoreMode> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
void writePacket(Index index, const PacketReturnType& x)
{
const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
eigen_assert(index+packetSize-1 < this->dimensions().TotalSize());
Index inputIndices[] = {0, 0};
Index indices[] = {index, index + packetSize - 1};
if (Layout == ColMajor) {
for (int i = NumDims - 1; i > 0; --i) {
const Index idx0 = indices[0] / this->m_outputStrides[i];
const Index idx1 = indices[1] / this->m_outputStrides[i];
inputIndices[0] += idx0 * this->m_inputStrides[i];
inputIndices[1] += idx1 * this->m_inputStrides[i];
indices[0] -= idx0 * this->m_outputStrides[i];
indices[1] -= idx1 * this->m_outputStrides[i];
}
inputIndices[0] += indices[0] * this->m_inputStrides[0];
inputIndices[1] += indices[1] * this->m_inputStrides[0];
} else { // RowMajor
for (int i = 0; i < NumDims - 1; ++i) {
const Index idx0 = indices[0] / this->m_outputStrides[i];
const Index idx1 = indices[1] / this->m_outputStrides[i];
inputIndices[0] += idx0 * this->m_inputStrides[i];
inputIndices[1] += idx1 * this->m_inputStrides[i];
indices[0] -= idx0 * this->m_outputStrides[i];
indices[1] -= idx1 * this->m_outputStrides[i];
}
inputIndices[0] += indices[0] * this->m_inputStrides[NumDims-1];
inputIndices[1] += indices[1] * this->m_inputStrides[NumDims-1];
}
if (inputIndices[1] - inputIndices[0] == packetSize - 1) {
this->m_impl.template writePacket<Unaligned>(inputIndices[0], x);
}
else {
EIGEN_ALIGN_DEFAULT Scalar values[packetSize];
internal::pstore<Scalar, PacketReturnType>(values, x);
this->m_impl.coeffRef(inputIndices[0]) = values[0];
this->m_impl.coeffRef(inputIndices[1]) = values[packetSize-1];
for (int i = 1; i < packetSize-1; ++i) {
this->coeffRef(index+i) = values[i];
}
}
}
};
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H #endif // EIGEN_CXX11_TENSOR_TENSOR_STRIDING_H

View File

@ -50,6 +50,8 @@ struct traits<Tensor<Scalar_, NumIndices_, Options_> >
typedef Scalar_ Scalar; typedef Scalar_ Scalar;
typedef Dense StorageKind; typedef Dense StorageKind;
typedef DenseIndex Index; typedef DenseIndex Index;
static const int NumDimensions = NumIndices_;
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum { enum {
Options = Options_, Options = Options_,
Flags = compute_tensor_flags<Scalar_, Options_>::ret | LvalueBit, Flags = compute_tensor_flags<Scalar_, Options_>::ret | LvalueBit,
@ -63,6 +65,8 @@ struct traits<TensorFixedSize<Scalar_, Dimensions, Options_> >
typedef Scalar_ Scalar; typedef Scalar_ Scalar;
typedef Dense StorageKind; typedef Dense StorageKind;
typedef DenseIndex Index; typedef DenseIndex Index;
static const int NumDimensions = array_size<Dimensions>::value;
static const int Layout = Options_ & RowMajor ? RowMajor : ColMajor;
enum { enum {
Options = Options_, Options = Options_,
Flags = compute_tensor_flags<Scalar_, Options_>::ret | LvalueBit, Flags = compute_tensor_flags<Scalar_, Options_>::ret | LvalueBit,
@ -78,6 +82,8 @@ struct traits<TensorMap<PlainObjectType, Options_> >
typedef typename BaseTraits::Scalar Scalar; typedef typename BaseTraits::Scalar Scalar;
typedef typename BaseTraits::StorageKind StorageKind; typedef typename BaseTraits::StorageKind StorageKind;
typedef typename BaseTraits::Index Index; typedef typename BaseTraits::Index Index;
static const int NumDimensions = BaseTraits::NumDimensions;
static const int Layout = BaseTraits::Layout;
enum { enum {
Options = Options_, Options = Options_,
Flags = ((BaseTraits::Flags | LvalueBit) & ~AlignedBit) | (Options&Aligned ? AlignedBit : 0), Flags = ((BaseTraits::Flags | LvalueBit) & ~AlignedBit) | (Options&Aligned ? AlignedBit : 0),
@ -92,6 +98,8 @@ struct traits<TensorRef<PlainObjectType> >
typedef typename BaseTraits::Scalar Scalar; typedef typename BaseTraits::Scalar Scalar;
typedef typename BaseTraits::StorageKind StorageKind; typedef typename BaseTraits::StorageKind StorageKind;
typedef typename BaseTraits::Index Index; typedef typename BaseTraits::Index Index;
static const int NumDimensions = BaseTraits::NumDimensions;
static const int Layout = BaseTraits::Layout;
enum { enum {
Options = BaseTraits::Options, Options = BaseTraits::Options,
Flags = ((BaseTraits::Flags | LvalueBit) & ~AlignedBit) | (Options&Aligned ? AlignedBit : 0), Flags = ((BaseTraits::Flags | LvalueBit) & ~AlignedBit) | (Options&Aligned ? AlignedBit : 0),
@ -198,6 +206,51 @@ struct nested<const TensorRef<PlainObjectType>, 1, typename eval<TensorRef<Plain
}; };
} // end namespace internal } // end namespace internal
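Editor's note: the NumDimensions and Layout traits added above are what the evaluators in this commit branch on when choosing between their ColMajor and RowMajor code paths. A minimal usage sketch (assumes the unsupported CXX11 Tensor module header; illustrative only):
#include <unsupported/Eigen/CXX11/Tensor>
#include <iostream>
int main() {
  // The third template argument selects the layout; ColMajor is the default.
  Eigen::Tensor<float, 3, Eigen::RowMajor> row_major(2, 3, 4);
  Eigen::Tensor<float, 3> col_major(2, 3, 4);
  row_major.setZero();
  col_major.setZero();
  std::cout << "row_major Layout = "
            << int(Eigen::internal::traits<Eigen::Tensor<float, 3, Eigen::RowMajor> >::Layout)
            << ", col_major Layout = "
            << int(Eigen::internal::traits<Eigen::Tensor<float, 3> >::Layout)
            << std::endl;
  return 0;
}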
// Convolutional layers take in an input tensor of shape (D, R, C, B), or (D, C,
// R, B), and convolve it with a set of filters, which can also be presented as
// a tensor (D, K, K, M), where M is the number of filters, K is the filter
// size, and each 3-dimensional tensor of size (D, K, K) is a filter. For
// simplicity we assume that we always use square filters (which is usually the
// case in images), hence the two Ks in the tensor dimension. It also takes in
// a few additional parameters:
// Stride (S): The convolution stride is the offset between locations where we
// apply the filters. A larger stride means that the output will be
// spatially smaller.
// Padding (P): The padding we apply to the input tensor along the R and C
// dimensions. This is usually used to make sure that the spatial
// dimensions of the output matches our intention.
//
// Two types of padding are often used:
// SAME: The pad value is computed so that the output will have size
// R/S and C/S.
// VALID: no padding is carried out.
// When we do padding, the padded values at the padded locations are usually
// zero.
//
// The output dimensions for convolution, when given all the parameters above,
// are as follows:
// When Padding = SAME: the output size is (B, R', C', M), where
// R' = ceil(float(R) / float(S))
// C' = ceil(float(C) / float(S))
// where ceil is the ceiling function. The input tensor is padded with 0 as
// needed. The number of padded rows and columns is computed as:
// Pr = ((R' - 1) * S + K - R) / 2
// Pc = ((C' - 1) * S + K - C) / 2
// when the stride is 1, we have the simplified case R'=R, C'=C, Pr=Pc=(K-1)/2.
// This is where SAME comes from: the output has the same size as the input.
// When Padding = VALID: the output size is computed as
// R' = ceil(float(R - K + 1) / float(S))
// C' = ceil(float(C - K + 1) / float(S))
// and the number of padded rows and columns is computed in the same way as in
// the SAME case.
// When the stride is 1, we have the simplified case R'=R-K+1, C'=C-K+1, Pr=0,
// Pc=0.
typedef enum {
PADDING_VALID = 1,
PADDING_SAME = 2,
} PaddingType;
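Editor's note: the comment above fully determines the output size and padding amounts from R, C, K, S and the padding type. A standalone sketch of that arithmetic (the struct and function names are invented for illustration and are not part of the Eigen API):
#include <cmath>
#include <cstdio>
struct ConvGeometry { int out_rows, out_cols, pad_rows, pad_cols; };
// Computes R', C', Pr and Pc as described above for square filters of size K
// and stride S.
ConvGeometry convGeometry(int R, int C, int K, int S, bool same_padding) {
  ConvGeometry g;
  if (same_padding) {
    g.out_rows = static_cast<int>(std::ceil(float(R) / float(S)));
    g.out_cols = static_cast<int>(std::ceil(float(C) / float(S)));
  } else {  // VALID: no padding is carried out.
    g.out_rows = static_cast<int>(std::ceil(float(R - K + 1) / float(S)));
    g.out_cols = static_cast<int>(std::ceil(float(C - K + 1) / float(S)));
  }
  g.pad_rows = ((g.out_rows - 1) * S + K - R) / 2;
  g.pad_cols = ((g.out_cols - 1) * S + K - C) / 2;
  return g;
}
int main() {
  // SAME, stride 1: output matches the input and Pr = Pc = (K-1)/2 = 1.
  const ConvGeometry same = convGeometry(28, 28, 3, 1, true);
  // VALID, stride 1: output shrinks to (R-K+1) x (C-K+1) and Pr = Pc = 0.
  const ConvGeometry valid = convGeometry(28, 28, 3, 1, false);
  std::printf("SAME:  %dx%d, pad (%d, %d)\n", same.out_rows, same.out_cols, same.pad_rows, same.pad_cols);
  std::printf("VALID: %dx%d, pad (%d, %d)\n", valid.out_rows, valid.out_cols, valid.pad_rows, valid.pad_cols);
  return 0;
}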
} // end namespace Eigen } // end namespace Eigen
#endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H #endif // EIGEN_CXX11_TENSOR_TENSOR_TRAITS_H