Silenced a few compilation warnings

Benoit Steiner 2015-02-10 12:20:24 -08:00
parent 410895a7e4
commit 114e863f08
4 changed files with 18 additions and 18 deletions
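
Most of the diff below replaces direct comparisons of `Layout` against `ColMajor` with comparisons of their `int` casts. `Layout` is a member of an anonymous enum inside each evaluator, while `ColMajor` belongs to Eigen's `Options` enum; comparing values of two distinct enumeration types triggers gcc's -Wenum-compare. A minimal sketch of the pattern (hypothetical names, not Eigen code):

    // Comparing two distinct enumeration types triggers -Wenum-compare on gcc.
    enum Options { ColMajor = 0, RowMajor = 1 };

    struct Evaluator {
      enum { Layout = ColMajor };  // anonymous enum: a distinct type from Options

      int pick() const {
        // if (Layout == ColMajor) {   // warns: Options vs. anonymous enum
        // Casting both sides to int makes the comparison warning-free:
        if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
          return 0;
        }
        return 1;
      }
    };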

unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h

@@ -106,7 +106,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
       m_dimensions[i] = input_dims[i] * broadcast[i];
     }
 
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       m_inputStrides[0] = 1;
       m_outputStrides[0] = 1;
       for (int i = 1; i < NumDims; ++i) {
@@ -139,7 +139,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
   EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE CoeffReturnType coeff(Index index) const
   {
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       return coeffColMajor(index);
     } else {
       return coeffRowMajor(index);
@@ -210,7 +210,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
   template<int LoadMode>
   EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE PacketReturnType packet(Index index) const
   {
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       return packetColMajor<LoadMode>(index);
     } else {
       return packetRowMajor<LoadMode>(index);
@ -326,7 +326,7 @@ struct TensorEvaluator<const TensorBroadcastingOp<Broadcast, ArgType>, Device>
} }
Scalar* data() const { return NULL; } EIGEN_DEVICE_FUNC Scalar* data() const { return NULL; }
protected: protected:
Dimensions m_dimensions; Dimensions m_dimensions;
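
The other half of the commit, visible in the hunk above, tags `data()` accessors with EIGEN_DEVICE_FUNC. A rough sketch of the mechanism, assuming the macro expands to `__host__ __device__` under nvcc (the real definition lives in Eigen's core utility headers):

    // Assumed definition, for illustration only:
    #if defined(__CUDACC__)
      #define EIGEN_DEVICE_FUNC __host__ __device__
    #else
      #define EIGEN_DEVICE_FUNC
    #endif

    // Without the annotation, data() compiles as host-only, and nvcc warns
    // when device code (e.g. a CUDA evaluator kernel) calls it.
    struct Example {
      EIGEN_DEVICE_FUNC const float* data() const { return ptr; }
      const float* ptr;
    };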

unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h

@ -536,7 +536,7 @@ struct TensorContractionEvaluatorBase
DSizes<Index, LDims> eval_left_dims; DSizes<Index, LDims> eval_left_dims;
DSizes<Index, RDims> eval_right_dims; DSizes<Index, RDims> eval_right_dims;
array<IndexPair<Index>, ContractDims> eval_op_indices; array<IndexPair<Index>, ContractDims> eval_op_indices;
if (Layout == ColMajor) { if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
// For ColMajor, we keep using the existing dimensions // For ColMajor, we keep using the existing dimensions
for (int i = 0; i < LDims; i++) { for (int i = 0; i < LDims; i++) {
eval_left_dims[i] = m_leftImpl.dimensions()[i]; eval_left_dims[i] = m_leftImpl.dimensions()[i];

unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h

@ -167,7 +167,7 @@ struct TensorEvaluator<const Derived, Device>
#endif #endif
} }
const Scalar* data() const { return m_data; } EIGEN_DEVICE_FUNC const Scalar* data() const { return m_data; }
protected: protected:
const Scalar* m_data; const Scalar* m_data;
@@ -218,7 +218,7 @@ struct TensorEvaluator<const TensorCwiseNullaryOp<NullaryOp, ArgType>, Device>
     return m_functor.packetOp(index);
   }
 
-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
 
 private:
   const NullaryOp m_functor;
@@ -273,7 +273,7 @@ struct TensorEvaluator<const TensorCwiseUnaryOp<UnaryOp, ArgType>, Device>
     return m_functor.packetOp(m_argImpl.template packet<LoadMode>(index));
   }
 
-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
 
 private:
   const UnaryOp m_functor;
@@ -301,7 +301,7 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
       m_leftImpl(op.lhsExpression(), device),
       m_rightImpl(op.rhsExpression(), device)
   {
-    EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout == TensorEvaluator<RightArgType, Device>::Layout || internal::traits<XprType>::NumDimensions == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
+    EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout) || internal::traits<XprType>::NumDimensions == 1), YOU_MADE_A_PROGRAMMING_MISTAKE);
     eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_rightImpl.dimensions()));
   }
@@ -337,7 +337,7 @@ struct TensorEvaluator<const TensorCwiseBinaryOp<BinaryOp, LeftArgType, RightArg
     return m_functor.packetOp(m_leftImpl.template packet<LoadMode>(index), m_rightImpl.template packet<LoadMode>(index));
   }
 
-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
 
 private:
   const BinaryOp m_functor;
@@ -413,7 +413,7 @@ struct TensorEvaluator<const TensorSelectOp<IfArgType, ThenArgType, ElseArgType>
                                             m_elseImpl.template packet<LoadMode>(index));
   }
 
-  CoeffReturnType* data() const { return NULL; }
+  EIGEN_DEVICE_FUNC CoeffReturnType* data() const { return NULL; }
 
 private:
   TensorEvaluator<IfArgType, Device> m_condImpl;

unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h

@@ -245,7 +245,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     }
 
     // Precompute output strides.
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       m_outputStrides[0] = 1;
       for (int i = 1; i < NumOutputDims; ++i) {
         m_outputStrides[i] = m_outputStrides[i - 1] * m_dimensions[i - 1];
@@ -259,7 +259,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     // Precompute input strides.
     array<Index, NumInputDims> input_strides;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
       input_strides[0] = 1;
       for (int i = 1; i < NumInputDims; ++i) {
         input_strides[i] = input_strides[i-1] * input_dims[i-1];
@ -309,7 +309,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
Op reducer(m_reducer); Op reducer(m_reducer);
if (ReducingInnerMostDims) { if (ReducingInnerMostDims) {
const Index num_values_to_reduce = const Index num_values_to_reduce =
(Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1]; (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index), return internal::InnerMostDimReducer<Self, Op>::reduce(*this, firstInput(index),
num_values_to_reduce, reducer); num_values_to_reduce, reducer);
} else { } else {
@@ -330,7 +330,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
     if (ReducingInnerMostDims) {
       const Index num_values_to_reduce =
-        (Layout == ColMajor) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
+        (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? m_preservedStrides[0] : m_preservedStrides[NumOutputDims - 1];
       const Index firstIndex = firstInput(index);
       for (Index i = 0; i < packetSize; ++i) {
         Op reducer(m_reducer);
@@ -339,7 +339,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
       }
     } else if (PreservingInnerMostDims) {
       const Index firstIndex = firstInput(index);
-      const int innermost_dim = (Layout == ColMajor) ? 0 : NumOutputDims - 1;
+      const int innermost_dim = (static_cast<int>(Layout) == static_cast<int>(ColMajor)) ? 0 : NumOutputDims - 1;
       // TBD: extend this to the n innermost dimensions that we preserve.
       if (((firstIndex % m_dimensions[innermost_dim]) + packetSize - 1) < m_dimensions[innermost_dim]) {
         Op reducer(m_reducer);
@ -371,7 +371,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
// used to compute the reduction at output index "index". // used to compute the reduction at output index "index".
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const { EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index firstInput(Index index) const {
if (ReducingInnerMostDims) { if (ReducingInnerMostDims) {
if (Layout == ColMajor) { if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
return index * m_preservedStrides[0]; return index * m_preservedStrides[0];
} else { } else {
return index * m_preservedStrides[NumOutputDims - 1]; return index * m_preservedStrides[NumOutputDims - 1];
@@ -379,7 +379,7 @@ struct TensorEvaluator<const TensorReductionOp<Op, Dims, ArgType>, Device>
     }
     // TBD: optimize the case where we preserve the innermost dimensions.
     Index startInput = 0;
-    if (Layout == ColMajor) {
+    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      for (int i = NumOutputDims - 1; i > 0; --i) {
        // This is index_i in the output tensor.
        const Index idx = index / m_outputStrides[i];