mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-08-12 03:39:01 +08:00)
Silenced a few compilation warnings
This commit is contained in:
parent 410895a7e4
commit 114e863f08
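The recurring change in this commit replaces direct comparisons such as Layout == ColMajor with static_cast<int>(Layout) == static_cast<int>(ColMajor). The two operands come from different enumeration types (each evaluator declares its own anonymous Layout enum, while ColMajor is a separate Eigen enumerator), and compilers warn about mixed-enum comparisons (e.g. GCC/Clang under -Wenum-compare). Casting both sides to int silences the warning without changing the result. A minimal standalone sketch of the pattern; the enum names here are illustrative, not Eigen's actual declarations:

    #include <iostream>

    // Illustrative enums, not Eigen's actual declarations.
    enum StorageOrder { ColMajor = 0, RowMajor = 1 };

    struct Evaluator {
      enum { Layout = 0 };  // anonymous enum: a distinct type from StorageOrder

      bool isColMajor() const {
        // return Layout == ColMajor;   // mixed-enum comparison, may warn
        return static_cast<int>(Layout) == static_cast<int>(ColMajor);  // clean
      }
    };

    int main() {
      std::cout << Evaluator().isColMajor() << "\n";  // prints 1
    }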
@@ -106,7 +106,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
       m_dimensions[i] = input_dims[i] * broadcast[i];
     }

-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       m_inputStrides[0] = 1;
       m_outputStrides[0] = 1;
       for (int i = 1; i < NumDims; ++i) {
@@ -139,7 +139,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>

   EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const
   {
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       return coeffColMajor(index);
     } else {
       return coeffRowMajor(index);
@@ -210,7 +210,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
   template<int LoadMode>
   EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const
   {
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       return packetColMajor<LoadMode>(index);
     } else {
       return packetRowMajor<LoadMode>(index);
@@ -326,7 +326,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
   }


-  Scalar* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }

 protected:
   Dimensions m_dimensions;

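The other recurring change adds EIGEN_DEVICE_FUNC to the data() accessors. When compiling with nvcc, Eigen defines that macro as __host__ __device__, so the annotated accessors become callable from device code and nvcc stops warning about host functions being invoked from host/device contexts. A simplified sketch of the arrangement; the macro shown here is a reduced stand-in, not the full definition from Eigen's headers:

    // Reduced stand-in for Eigen's macro; the real definition lives in
    // Eigen's Macros.h and handles more compilers and corner cases.
    #if defined(__CUDACC__)
    #define EIGEN_DEVICE_FUNC __host__ __device__
    #else
    #define EIGEN_DEVICE_FUNC
    #endif

    struct EvaluatorSketch {
      // Annotated accessor: callable from host and device alike, so a
      // __device__ caller no longer triggers nvcc's cross-space warning.
      EIGEN_DEVICE_FUNC const float* data() const { return m_data; }
      const float* m_data = nullptr;
    };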
@@ -536,7 +536,7 @@ struct TensorContractionEvaluatorBase
     DSizes<Index, LDims> eval_left_dims;
     DSizes<Index, RDims> eval_right_dims;
     array<IndexPair<Index>, ContractDims> eval_op_indices;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       // For ColMajor, we keep using the existing dimensions
       for (int i = 0; i < LDims; i++) {
         eval_left_dims[i] = m_leftImpl.dimensions()[i];

@@ -167,7 +167,7 @@ struct TensorEvaluator<const Derived, Device>
 #endif
   }

-  const Scalar* data() const { return m_data; }
+  EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }

 protected:
   const Scalar* m_data;
@@ -218,7 +218,7 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
     return m_functor.packetOp(index);
   }

-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }

 private:
   const NullaryOp m_functor;
@@ -273,7 +273,7 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
     return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
   }

-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }

 private:
   const UnaryOp m_functor;
@@ -301,7 +301,7 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
       m_leftImpl(op.lhsExpression(), device),
       m_rightImpl(op.rhsExpression(), device)
   {
-    EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout == TensorEvaluator<RightArgType, Device>::Layout || internal::traits<XprType>::NumDimensions == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || internal::traits<XprType>::NumDimensions == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
     eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
   }

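The static assertion in this hunk compares the Layout constants of two different evaluator instantiations. Since each instantiation carries its own anonymous enum type, the same cast-to-int treatment applies inside the compile-time check as well. A small illustration, using C++11 static_assert in place of Eigen's EIGEN_STATIC_ASSERT macro and made-up names:

    // Each instantiation of Eval declares its own anonymous enum type, so
    // Eval<float>::Layout and Eval<double>::Layout have different types
    // even though both equal 0.
    template <typename T>
    struct Eval { enum { Layout = 0 }; };

    // Casting both operands to int keeps the compile-time layout check
    // while avoiding the mixed-enum comparison warning.
    static_assert(static_cast<int>(Eval<float>::Layout) ==
                  static_cast<int>(Eval<double>::Layout),
                  "evaluators must agree on storage order");

    int main() {}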
@@ -337,7 +337,7 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
     return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index), m_rightImpl.template packet<LoadMode>(index));
   }

-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }

 private:
   const BinaryOp m_functor;
@@ -413,7 +413,7 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
                                             m_elseImpl.template packet<LoadMode>(index));
   }

-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }

 private:
   TensorEvaluator<IfArgType, Device> m_condImpl;

@@ -245,7 +245,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     }

     // Precompute output strides.
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       m_outputStrides[0] = 1;
       for (int i = 1; i < NumOutputDims; ++i) {
         m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
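For context on the surrounding code: the column-major recurrence visible in this hunk (strides[0] = 1, then strides[i] = strides[i-1] * dims[i-1]) is what maps a multi-dimensional coordinate to a flat index. A tiny standalone check of that arithmetic:

    #include <cstdio>

    int main() {
      // Col-major strides for a 2x3x4 tensor, per the recurrence above.
      const int dims[3] = {2, 3, 4};
      int strides[3] = {1, 0, 0};
      for (int i = 1; i < 3; ++i) strides[i] = strides[i - 1] * dims[i - 1];
      // Prints "1 2 6": element (i,j,k) sits at flat offset i*1 + j*2 + k*6.
      std::printf("%d %d %d\n", strides[0], strides[1], strides[2]);
      return 0;
    }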
@@ -259,7 +259,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>

     // Precompute input strides.
     array<Index, NumInputDims> input_strides;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       input_strides[0] = 1;
       for (int i = 1; i < NumInputDims; ++i) {
         input_strides[i] = input_strides[i-1] * input_dims[i-1];
@@ -309,7 +309,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     Op reducer(m_reducer);
     if (ReducingInnerMostDims) {
       const Index num_values_to_reduce =
-        (Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
+        (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
       return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
                                                              num_values_to_reduce, reducer);
     } else {
@@ -330,7 +330,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
     if (ReducingInnerMostDims) {
       const Index num_values_to_reduce =
-        (Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
+        (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
       const Index firstIndex = firstInput(index);
       for (Index i = 0; i < packetSize; ++i) {
         Op reducer(m_reducer);
@@ -339,7 +339,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
       }
     } else if (PreservingInnerMostDims) {
       const Index firstIndex = firstInput(index);
-      const int innermost_dim = (Layout == ColMajor) ? 0 : NumOutputDims - 1;
+      const int innermost_dim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : NumOutputDims - 1;
       // TBD: extend this the the n innermost dimensions that we preserve.
       if (((firstIndex % m_dimensions[innermost_dim]) + packetSize - 1) < m_dimensions[innermost_dim]) {
         Op reducer(m_reducer);
@@ -371,7 +371,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
   // used to compute the reduction at output index "index".
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
     if (ReducingInnerMostDims) {
-      if (Layout == ColMajor) {
+      if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
         return index * m_preservedStrides[0];
       } else {
         return index * m_preservedStrides[NumOutputDims - 1];
@@ -379,7 +379,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     }
     // TBD: optimize the case where we preserve the innermost dimensions.
     Index startInput = 0;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       for (int i = NumOutputDims - 1; i > 0; --i) {
         // This is index_i in the output tensor.
         const Index idx = index / m_outputStrides[i];