diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index 78068be35..74b905329 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -90,14 +90,21 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
   static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
 
   enum {
-    IsAligned = true,
-    PacketAccess = (PacketType<CoeffReturnType, Device>::size > 1),
-    BlockAccess = false,
+    IsAligned         = true,
+    PacketAccess      = (PacketType<CoeffReturnType, Device>::size > 1),
+    BlockAccess       = internal::is_arithmetic<CoeffReturnType>::value,
     PreferBlockAccess = false,
-    Layout = TensorEvaluator<ArgType, Device>::Layout,
-    RawAccess = true
+    Layout            = TensorEvaluator<ArgType, Device>::Layout,
+    RawAccess         = true
   };
 
+  typedef typename internal::TensorBlock<
+      CoeffReturnType, Index, internal::traits<ArgType>::NumDimensions, Layout>
+      TensorBlock;
+  typedef typename internal::TensorBlockReader<
+      CoeffReturnType, Index, internal::traits<ArgType>::NumDimensions, Layout>
+      TensorBlockReader;
+
   EIGEN_DEVICE_FUNC TensorEvaluator(const XprType& op, const Device& device)
       /// op_ is used for sycl
       : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL)
@@ -139,6 +146,14 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device>
     return internal::ploadt<PacketReturnType, LoadMode>(m_buffer + index);
   }
 
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void getResourceRequirements(
+      std::vector<internal::TensorOpResourceRequirements>*) const {}
+
+  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void block(TensorBlock* block) const {
+    assert(m_buffer != NULL);
+    TensorBlockReader::Run(block, m_buffer);
+  }
+
   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost costPerCoeff(bool vectorized) const {
     return TensorOpCost(sizeof(CoeffReturnType), 0, 0, vectorized, PacketSize);
   }
diff --git a/unsupported/test/cxx11_tensor_executor.cpp b/unsupported/test/cxx11_tensor_executor.cpp
index 18c87b35e..608306613 100644
--- a/unsupported/test/cxx11_tensor_executor.cpp
+++ b/unsupported/test/cxx11_tensor_executor.cpp
@@ -452,6 +452,38 @@ static void test_execute_slice_lvalue(Device d)
   }
 }
 
+template <typename T, int NumDims, typename Device, bool Vectorizable,
+          bool Tileable, int Layout>
+static void test_execute_broadcasting_of_forced_eval(Device d)
+{
+  static constexpr int Options = 0 | Layout;
+
+  auto dims = RandomDims<NumDims>(1, 10);
+  Tensor<T, NumDims, Options, Index> src(dims);
+  src.setRandom();
+
+  const auto broadcasts = RandomDims<NumDims>(1, 7);
+  const auto expr = src.square().eval().broadcast(broadcasts);
+
+  // We assume that broadcasting on a default device is tested and correct, so
+  // we can rely on it to verify correctness of tensor executor and tiling.
+  Tensor<T, NumDims, Options, Index> golden;
+  golden = expr;
+
+  // Now do the broadcasting using configured tensor executor.
+  Tensor<T, NumDims, Options, Index> dst(golden.dimensions());
+
+  using Assign = TensorAssignOp<decltype(dst), const decltype(expr)>;
+  using Executor =
+      internal::TensorExecutor<const Assign, Device, Vectorizable, Tileable>;
+
+  Executor::run(Assign(dst, expr), d);
+
+  for (Index i = 0; i < dst.dimensions().TotalSize(); ++i) {
+    VERIFY_IS_EQUAL(dst.coeff(i), golden.coeff(i));
+  }
+}
+
 #define CALL_SUBTEST_PART(PART) \
   CALL_SUBTEST_##PART
 
@@ -528,8 +560,13 @@ EIGEN_DECLARE_TEST(cxx11_tensor_executor) {
   CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 4);
   CALL_SUBTEST_COMBINATIONS(11, test_execute_slice_lvalue, float, 5);
 
+  CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 2);
+  CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 3);
+  CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 4);
+  CALL_SUBTEST_COMBINATIONS(12, test_execute_broadcasting_of_forced_eval, float, 5);
+
   // Force CMake to split this test.
-  // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11
+  // EIGEN_SUFFIXES;1;2;3;4;5;6;7;8;9;10;11;12
 }
 
 #undef CALL_SUBTEST_COMBINATIONS