mirror of
https://gitlab.com/libeigen/eigen.git
synced 2025-08-12 11:49:02 +08:00
Silenced several compilation warnings
This commit is contained in:
parent
4716c2c666
commit
410895a7e4
@ -103,7 +103,7 @@ struct TensorEvaluator<const TensorAssignOp<LeftArgType, RightArgType>, Device>
|
||||
m_leftImpl(op.lhsExpression(), device),
|
||||
m_rightImpl(op.rhsExpression(), device)
|
||||
{
|
||||
EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout == TensorEvaluator<RightArgType, Device>::Layout), YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) == static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)), YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
// The dimensions of the lhs and the rhs tensors should be equal to prevent
|
||||
// overflows and ensure the result is fully initialized.
|
||||
eigen_assert(dimensions_match(m_leftImpl.dimensions(), m_leftImpl.dimensions()));
|
||||
|
@ -257,13 +257,13 @@ struct TensorEvaluator<const TensorChippingOp<DimId, ArgType>, Device>
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index srcCoeff(Index index) const
|
||||
{
|
||||
Index inputIndex;
|
||||
if ((Layout == ColMajor && m_dim.actualDim() == 0) ||
|
||||
(Layout == RowMajor && m_dim.actualDim() == NumInputDims-1)) {
|
||||
if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == 0) ||
|
||||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == NumInputDims-1)) {
|
||||
// m_stride is equal to 1, so let's avoid the integer division.
|
||||
eigen_assert(m_stride == 1);
|
||||
inputIndex = index * m_inputStride + m_inputOffset;
|
||||
} else if ((Layout == ColMajor && m_dim.actualDim() == NumInputDims-1) ||
|
||||
(Layout == RowMajor && m_dim.actualDim() == 0)) {
|
||||
} else if ((static_cast<int>(Layout) == static_cast<int>(ColMajor) && m_dim.actualDim() == NumInputDims-1) ||
|
||||
(static_cast<int>(Layout) == static_cast<int>(RowMajor) && m_dim.actualDim() == 0)) {
|
||||
// m_stride is always greater than index, so let's avoid the integer division.
|
||||
eigen_assert(m_stride > index);
|
||||
inputIndex = index + m_inputOffset;
|
||||
@ -322,8 +322,8 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
|
||||
static const int packetSize = internal::unpacket_traits<PacketReturnType>::size;
|
||||
EIGEN_STATIC_ASSERT(packetSize > 1, YOU_MADE_A_PROGRAMMING_MISTAKE)
|
||||
|
||||
if ((this->Layout == ColMajor && this->m_dim.actualDim() == 0) ||
|
||||
(this->Layout == RowMajor && this->m_dim.actualDim() == NumInputDims-1)) {
|
||||
if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == 0) ||
|
||||
(static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == NumInputDims-1)) {
|
||||
// m_stride is equal to 1, so let's avoid the integer division.
|
||||
eigen_assert(this->m_stride == 1);
|
||||
EIGEN_ALIGN_DEFAULT typename internal::remove_const<CoeffReturnType>::type values[packetSize];
|
||||
@ -333,8 +333,8 @@ struct TensorEvaluator<TensorChippingOp<DimId, ArgType>, Device>
|
||||
this->m_impl.coeffRef(inputIndex) = values[i];
|
||||
inputIndex += this->m_inputStride;
|
||||
}
|
||||
} else if ((this->Layout == ColMajor && this->m_dim.actualDim() == NumInputDims-1) ||
|
||||
(this->Layout == RowMajor && this->m_dim.actualDim() == 0)) {
|
||||
} else if ((static_cast<int>(this->Layout) == static_cast<int>(ColMajor) && this->m_dim.actualDim() == NumInputDims-1) ||
|
||||
(static_cast<int>(this->Layout) == static_cast<int>(RowMajor) && this->m_dim.actualDim() == 0)) {
|
||||
// m_stride is always greater than index, so let's avoid the integer division.
|
||||
eigen_assert(this->m_stride > index);
|
||||
this->m_impl.template writePacket<StoreMode>(index + this->m_inputOffset, x);
|
||||
|
@ -499,9 +499,9 @@ struct TensorContractionEvaluatorBase
|
||||
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
|
||||
// will pretend B is LHS and A is RHS.
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
|
||||
static const int LDims =
|
||||
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
|
||||
@ -520,14 +520,14 @@ struct TensorContractionEvaluatorBase
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
|
||||
TensorContractionEvaluatorBase(const XprType& op, const Device& device)
|
||||
: m_leftImpl(choose(Cond<Layout == ColMajor>(),
|
||||
: m_leftImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
|
||||
op.lhsExpression(), op.rhsExpression()), device),
|
||||
m_rightImpl(choose(Cond<Layout == ColMajor>(),
|
||||
m_rightImpl(choose(Cond<static_cast<int>(Layout) == static_cast<int>(ColMajor)>(),
|
||||
op.rhsExpression(), op.lhsExpression()), device),
|
||||
m_device(device),
|
||||
m_result(NULL) {
|
||||
EIGEN_STATIC_ASSERT((TensorEvaluator<LeftArgType, Device>::Layout ==
|
||||
TensorEvaluator<RightArgType, Device>::Layout),
|
||||
EIGEN_STATIC_ASSERT((static_cast<int>(TensorEvaluator<LeftArgType, Device>::Layout) ==
|
||||
static_cast<int>(TensorEvaluator<RightArgType, Device>::Layout)),
|
||||
YOU_MADE_A_PROGRAMMING_MISTAKE);
|
||||
|
||||
eigen_assert((internal::array_size<contract_t>::value > 0) && "Must contract on some indices");
|
||||
@ -681,7 +681,7 @@ struct TensorContractionEvaluatorBase
|
||||
}
|
||||
|
||||
// If the layout is RowMajor, we need to reverse the m_dimensions
|
||||
if (Layout == RowMajor) {
|
||||
if (static_cast<int>(Layout) == static_cast<int>(RowMajor)) {
|
||||
for (int i = 0, j = NumDims - 1; i < j; i++, j--) {
|
||||
std::swap(m_dimensions[i], m_dimensions[j]);
|
||||
}
|
||||
@ -855,9 +855,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
|
||||
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
|
||||
// will pretend B is LHS and A is RHS.
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
|
||||
static const int LDims =
|
||||
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
|
||||
|
@ -79,9 +79,9 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
|
||||
// If we want to compute A * B = C, where A is LHS and B is RHS, the code
|
||||
// will pretend B is LHS and A is RHS.
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), LeftArgType, RightArgType>::type EvalLeftArgType;
|
||||
typedef typename internal::conditional<
|
||||
Layout == ColMajor, RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
static_cast<int>(Layout) == static_cast<int>(ColMajor), RightArgType, LeftArgType>::type EvalRightArgType;
|
||||
|
||||
static const int LDims =
|
||||
internal::array_size<typename TensorEvaluator<EvalLeftArgType, Device>::Dimensions>::value;
|
||||
|
@ -94,14 +94,14 @@ struct TensorEvaluator
|
||||
|
||||
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar& coeffRef(const array<DenseIndex, NumCoords>& coords) {
|
||||
eigen_assert(m_data);
|
||||
if (Layout == ColMajor) {
|
||||
if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
|
||||
return m_data[m_dims.IndexOfColMajor(coords)];
|
||||
} else {
|
||||
return m_data[m_dims.IndexOfRowMajor(coords)];
|
||||
}
|
||||
}
|
||||
|
||||
Scalar* data() const { return m_data; }
|
||||
EIGEN_DEVICE_FUNC Scalar* data() const { return m_data; }
|
||||
|
||||
protected:
|
||||
Scalar* m_data;
|
||||
|
@ -112,7 +112,7 @@ struct TensorEvaluator<const TensorLayoutSwapOp<ArgType>, Device>
|
||||
enum {
|
||||
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
|
||||
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
|
||||
Layout = (TensorEvaluator<ArgType, Device>::Layout == ColMajor) ? RowMajor : ColMajor,
|
||||
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
|
||||
CoordAccess = false, // to be implemented
|
||||
};
|
||||
|
||||
@ -169,7 +169,7 @@ template<typename ArgType, typename Device>
|
||||
enum {
|
||||
IsAligned = TensorEvaluator<ArgType, Device>::IsAligned,
|
||||
PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
|
||||
Layout = (TensorEvaluator<ArgType, Device>::Layout == ColMajor) ? RowMajor : ColMajor,
|
||||
Layout = (static_cast<int>(TensorEvaluator<ArgType, Device>::Layout) == static_cast<int>(ColMajor)) ? RowMajor : ColMajor,
|
||||
CoordAccess = false, // to be implemented
|
||||
};
|
||||
|
||||
|
Loading…
x
Reference in New Issue
Block a user