Optimized tensor casting for the case where the cast is a no-op (the source and target scalar types are identical), allowing evaluation directly into the destination buffer.

This commit is contained in:
Benoit Steiner 2016-02-21 11:16:15 -08:00
parent 203490017f
commit 96a24b05cc

View File

@ -195,8 +195,11 @@ struct TensorEvaluator<const TensorConversionOp<TargetType, ArgType>, Device>
// Returns the dimensions of the wrapped expression unchanged: a type
// conversion never alters the tensor's shape, only its scalar type.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_impl.dimensions(); }
// NOTE(review): the line below is the pre-change signature left over from the
// diff rendering (the old `/*data*/` form); the active definition follows it.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* /*data*/)
// Evaluates the sub-expression if it needs a materialization pass.
//
// Optimization introduced by this commit: when the conversion is a no-op
// (TargetType and SrcType are the same type), the destination buffer `data`
// is forwarded straight to the wrapped evaluator so it can evaluate directly
// into it, skipping the per-coefficient conversion pass entirely.
// NOTE(review): the (SrcType*) C-style cast is only reached when the two
// types are identical, so it is effectively a no-op reinterpretation;
// a named cast would be preferable per modern guidelines, but Eigen's
// device-compatible code of this era uses this form — confirm before changing.
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(Scalar* data)
{
if (internal::is_same<TargetType, SrcType>::value) {
return m_impl.evalSubExprsIfNeeded((SrcType*)data);
}
// Types differ: the wrapped expression evaluates into its own storage
// (NULL buffer), and this evaluator converts coefficients on access.
// Returning true signals the caller that this node still has work to do
// (the conversion itself) — presumably why m_impl's return is ignored.
m_impl.evalSubExprsIfNeeded(NULL);
return true;
}