From 3026f1f2965dce7c7858e3a7f36e3bd1dda3f3fc Mon Sep 17 00:00:00 2001
From: Antonio Sánchez
Date: Mon, 8 Jan 2024 00:13:17 +0000
Subject: [PATCH] Fix various asan errors.

---
 Eigen/src/Eigenvalues/ComplexSchur.h          |  2 +-
 test/threads_non_blocking_thread_pool.cpp     | 85 ++++++++++---------
 .../Eigen/CXX11/src/Tensor/TensorForcedEval.h |  9 ++
 unsupported/Eigen/src/SparseExtra/MarketIO.h  |  1 +
 4 files changed, 55 insertions(+), 42 deletions(-)

diff --git a/Eigen/src/Eigenvalues/ComplexSchur.h b/Eigen/src/Eigenvalues/ComplexSchur.h
index 126b442a7..1ec8fb83d 100644
--- a/Eigen/src/Eigenvalues/ComplexSchur.h
+++ b/Eigen/src/Eigenvalues/ComplexSchur.h
@@ -275,7 +275,7 @@ inline bool ComplexSchur<MatrixType>::subdiagonalEntryIsNeglegible(Index i) {
 template <typename MatrixType>
 typename ComplexSchur<MatrixType>::ComplexScalar ComplexSchur<MatrixType>::computeShift(Index iu, Index iter) {
   using std::abs;
-  if (iter == 10 || iter == 20) {
+  if ((iter == 10 || iter == 20) && iu > 1) {
     // exceptional shift, taken from http://www.netlib.org/eispack/comqr.f
     return abs(numext::real(m_matT.coeff(iu, iu - 1))) + abs(numext::real(m_matT.coeff(iu - 1, iu - 2)));
   }
diff --git a/test/threads_non_blocking_thread_pool.cpp b/test/threads_non_blocking_thread_pool.cpp
index 2f0cf5860..e805cf2c4 100644
--- a/test/threads_non_blocking_thread_pool.cpp
+++ b/test/threads_non_blocking_thread_pool.cpp
@@ -112,53 +112,56 @@ static void test_cancel() {
 
 static void test_pool_partitions() {
   const int kThreads = 2;
-  ThreadPool tp(kThreads);
-
-  // Assign each thread to its own partition, so that stealing other work only
-  // occurs globally when a thread is idle.
-  std::vector<std::pair<unsigned, unsigned>> steal_partitions(kThreads);
-  for (int i = 0; i < kThreads; ++i) {
-    steal_partitions[i] = std::make_pair(i, i + 1);
-  }
-  tp.SetStealPartitions(steal_partitions);
 
   std::atomic<int> running(0);
   std::atomic<int> done(0);
   std::atomic<int> phase(0);
 
-  // Schedule kThreads tasks and ensure that they all are running.
-  for (int i = 0; i < kThreads; ++i) {
-    tp.Schedule([&]() {
-      const int thread_id = tp.CurrentThreadId();
-      VERIFY_GE(thread_id, 0);
-      VERIFY_LE(thread_id, kThreads - 1);
-      ++running;
-      while (phase < 1) {
-      }
-      ++done;
-    });
+  {
+    ThreadPool tp(kThreads);
+
+    // Assign each thread to its own partition, so that stealing other work only
+    // occurs globally when a thread is idle.
+    std::vector<std::pair<unsigned, unsigned>> steal_partitions(kThreads);
+    for (int i = 0; i < kThreads; ++i) {
+      steal_partitions[i] = std::make_pair(i, i + 1);
+    }
+    tp.SetStealPartitions(steal_partitions);
+
+    // Schedule kThreads tasks and ensure that they all are running.
+    for (int i = 0; i < kThreads; ++i) {
+      tp.Schedule([&]() {
+        const int thread_id = tp.CurrentThreadId();
+        VERIFY_GE(thread_id, 0);
+        VERIFY_LE(thread_id, kThreads - 1);
+        ++running;
+        while (phase < 1) {
+        }
+        ++done;
+      });
+    }
+    while (running != kThreads) {
+    }
+    // Schedule each closure to only run on thread 'i' and verify that it does.
+    for (int i = 0; i < kThreads; ++i) {
+      tp.ScheduleWithHint(
+          [&, i]() {
+            ++running;
+            const int thread_id = tp.CurrentThreadId();
+            VERIFY_IS_EQUAL(thread_id, i);
+            while (phase < 2) {
+            }
+            ++done;
+          },
+          i, i + 1);
+    }
+    running = 0;
+    phase = 1;
+    while (running != kThreads) {
+    }
+    running = 0;
+    phase = 2;
   }
-  while (running != kThreads) {
-  }
-  // Schedule each closure to only run on thread 'i' and verify that it does.
-  for (int i = 0; i < kThreads; ++i) {
-    tp.ScheduleWithHint(
-        [&, i]() {
-          ++running;
-          const int thread_id = tp.CurrentThreadId();
-          VERIFY_IS_EQUAL(thread_id, i);
-          while (phase < 2) {
-          }
-          ++done;
-        },
-        i, i + 1);
-  }
-  running = 0;
-  phase = 1;
-  while (running != kThreads) {
-  }
-  running = 0;
-  phase = 2;
 }
 
 EIGEN_DECLARE_TEST(cxx11_non_blocking_thread_pool) {
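Why the test moved its ThreadPool into a nested scope: C++ destroys locals in reverse order of declaration, so in the old code the atomics, declared after tp, were destroyed at function exit while pool threads could still be incrementing them, which is the stack-use-after-scope ASan reports. Declaring the shared state first and scoping the pool forces the pool destructor, which joins its worker threads, to run before that state dies. The following is a minimal sketch of the same pattern using std::thread in place of Eigen's ThreadPool; it is illustrative only and not part of the patch.

    #include <atomic>
    #include <thread>

    int main() {
      std::atomic<int> phase(0);  // declared first, so destroyed last
      std::atomic<int> done(0);
      {
        // The worker captures the atomics by reference; the nested scope
        // guarantees it is joined before they go out of scope.
        std::thread worker([&]() {
          while (phase < 1) {
          }
          ++done;  // safe: 'done' outlives the join below
        });
        phase = 1;
        worker.join();
      }  // worker has fully stopped here, before 'phase' and 'done' die
      return done == 1 ? 0 : 1;
    }

With the thread declared before the atomics and no nested scope, the increment could race with the destruction of a dead stack slot, exactly the pattern the patch removes from test_pool_partitions.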
diff --git a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
index c3a7ef413..0e87fac04 100644
--- a/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
+++ b/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h
@@ -126,10 +126,16 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device> {
   TensorEvaluator(const XprType& op, const Device& device)
       : m_impl(op.expression(), device), m_op(op.expression()), m_device(device), m_buffer(NULL) {}
 
+  ~TensorEvaluator() { cleanup(); }
+
   EIGEN_DEVICE_FUNC const Dimensions& dimensions() const { return m_impl.dimensions(); }
 
   EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType) {
     const Index numValues = internal::array_prod(m_impl.dimensions());
+
+    if (m_buffer != nullptr) {
+      m_device.deallocate_temp(m_buffer);
+    }
     m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));
 
     internal::non_integral_type_placement_new<Device, CoeffReturnType>()(numValues, m_buffer);
@@ -148,6 +154,9 @@ struct TensorEvaluator<const TensorForcedEvalOp<ArgType>, Device> {
   template <typename EvalSubExprsCallback>
   EIGEN_STRONG_INLINE void evalSubExprsIfNeededAsync(EvaluatorPointerType, EvalSubExprsCallback done) {
     const Index numValues = internal::array_prod(m_impl.dimensions());
+    if (m_buffer != nullptr) {
+      m_device.deallocate_temp(m_buffer);
+    }
     m_buffer = m_device.get((CoeffReturnType*)m_device.allocate_temp(numValues * sizeof(CoeffReturnType)));
     typedef TensorEvalToOp<const std::remove_const_t<ArgType>> EvalTo;
     EvalTo evalToTmp(m_device.get(m_buffer), m_op);
diff --git a/unsupported/Eigen/src/SparseExtra/MarketIO.h b/unsupported/Eigen/src/SparseExtra/MarketIO.h
index 5e65b26a2..f92622dca 100644
--- a/unsupported/Eigen/src/SparseExtra/MarketIO.h
+++ b/unsupported/Eigen/src/SparseExtra/MarketIO.h
@@ -309,6 +309,7 @@ bool saveMarket(const SparseMatrixType& mat, const std::string& filename, int sy
   out << header << std::endl;
   out << mat.rows() << " " << mat.cols() << " " << mat.nonZeros() << "\n";
   int count = 0;
+  EIGEN_UNUSED_VARIABLE(count);
   for (int j = 0; j < mat.outerSize(); ++j)
     for (typename SparseMatrixType::InnerIterator it(mat, j); it; ++it) {
       ++count;
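The TensorForcedEval change guards two related lifetime problems: evalSubExprsIfNeeded() and its async variant can run more than once on the same evaluator, and the old code simply overwrote m_buffer, leaking the previous allocation; the new destructor additionally routes through cleanup() so the buffer is released even when the owner never calls cleanup() explicitly. Below is a minimal sketch of the same two guards, using plain malloc/free in place of Eigen's device allocator; Evaluator and eval() are hypothetical names for illustration, not Eigen API.

    #include <cstddef>
    #include <cstdlib>

    struct Evaluator {
      float* buffer = nullptr;

      void eval(std::size_t n) {
        // Re-entry guard: release the previous allocation instead of
        // overwriting the pointer and leaking it.
        if (buffer != nullptr) {
          std::free(buffer);
        }
        buffer = static_cast<float*>(std::malloc(n * sizeof(float)));
      }

      // Mirrors the added ~TensorEvaluator(): the destructor releases
      // the buffer even when no explicit cleanup call is made.
      ~Evaluator() { std::free(buffer); }  // free(nullptr) is a no-op
    };

    int main() {
      Evaluator e;
      e.eval(16);  // first allocation
      e.eval(32);  // would have leaked the first buffer without the guard
      return 0;
    }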