Re-implement move assignments.

The original swap approach leads to potential undefined behavior (reading
uninitialized memory) and results in unnecessary copying of data for static
storage.

Here we forward the move assignment to the underlying storage: static
storage performs a one-way element copy, while dynamic storage swaps with
the source.
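
For illustration only, the storage-level behavior relied on here looks
roughly like the sketch below (hypothetical FixedStorageSketch /
DynamicStorageSketch types, not Eigen's actual DenseStorage or
TensorStorage code):

    #include <cstddef>
    #include <utility>

    // Fixed-size storage: elements live inline, so "moving" is a one-way
    // element copy; the destination's old contents are never read.
    template <typename T, int Size>
    struct FixedStorageSketch {
      T m_data[Size];
      FixedStorageSketch& operator=(FixedStorageSketch&& other) {
        for (int i = 0; i < Size; ++i) m_data[i] = std::move(other.m_data[i]);
        return *this;
      }
    };

    // Dynamic storage: steal the heap pointer by swapping, so the moved-from
    // object ends up owning the old buffer and releases it in its destructor.
    template <typename T>
    struct DynamicStorageSketch {
      T* m_data = nullptr;
      std::size_t m_size = 0;
      DynamicStorageSketch& operator=(DynamicStorageSketch&& other) {
        std::swap(m_data, other.m_data);
        std::swap(m_size, other.m_size);
        return *this;
      }
    };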

Modified the tests to no longer read from the moved-from matrix/tensor,
since that can lead to UB. Added a test to ensure we do not access
uninitialized memory in a move.
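
For illustration (not part of the patch), the new check boils down to
something like the following, using the SafeScalar wrapper introduced in
the rvalue_types test:

    // Hypothetical snippet: SafeScalar asserts whenever an element is read
    // before it has been written.
    typedef Eigen::Matrix<SafeScalar<float>, 1, 3> Vec3;
    Vec3 src = Vec3::Random();
    Vec3 dst;               // fixed-size, default-constructed: elements flagged uninitialized
    dst = std::move(src);   // old swap-based move read dst's elements and would assert;
                            // the new one-way copy only reads src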

Fixes: #2119
Author: Antonio Sanchez, 2021-03-05 12:54:26 -08:00 (committed by Rasmus Munk Larsen)
Parent: b8d1857f0d
Commit: 543e34ab9d
8 changed files with 62 additions and 17 deletions


@@ -157,7 +157,7 @@ class Array
EIGEN_DEVICE_FUNC
Array& operator=(Array&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
{
- other.swap(*this);
+ Base::operator=(std::move(other));
return *this;
}
#endif


@@ -278,7 +278,7 @@ class Matrix
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
Matrix& operator=(Matrix&& other) EIGEN_NOEXCEPT_IF(std::is_nothrow_move_assignable<Scalar>::value)
{
- other.swap(*this);
+ Base::operator=(std::move(other));
return *this;
}
#endif


@@ -508,8 +508,8 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
EIGEN_DEVICE_FUNC
PlainObjectBase& operator=(PlainObjectBase&& other) EIGEN_NOEXCEPT
{
- using std::swap;
- swap(m_storage, other.m_storage);
+ _check_template_params();
+ m_storage = std::move(other.m_storage);
return *this;
}
#endif


@@ -18,6 +18,36 @@
using internal::UIntPtr;
+ // A Scalar that asserts for uninitialized access.
+ template<typename T>
+ class SafeScalar {
+  public:
+   SafeScalar() : initialized_(false) {}
+   SafeScalar(const SafeScalar& other) {
+     *this = other;
+   }
+   SafeScalar& operator=(const SafeScalar& other) {
+     val_ = T(other);
+     initialized_ = true;
+     return *this;
+   }
+
+   SafeScalar(T val) : val_(val), initialized_(true) {}
+   SafeScalar& operator=(T val) {
+     val_ = val;
+     initialized_ = true;
+     return *this;
+   }
+
+   operator T() const {
+     VERIFY(initialized_ && "Uninitialized access.");
+     return val_;
+   }
+
+  private:
+   T val_;
+   bool initialized_;
+ };
#if EIGEN_HAS_RVALUE_REFERENCES
template <typename MatrixType>
void rvalue_copyassign(const MatrixType& m)
@@ -94,19 +124,24 @@ void rvalue_move(const MatrixType& m)
MatrixType d = m;
VERIFY_IS_EQUAL(d, m);
- // rvalue reference is moved
+ // rvalue reference is moved - copy constructor.
MatrixType e_src(m);
VERIFY_IS_EQUAL(e_src, m);
MatrixType e_dst(std::move(e_src));
- VERIFY_IS_EQUAL(e_src, MatrixType());
VERIFY_IS_EQUAL(e_dst, m);
- // rvalue reference is moved
+ // rvalue reference is moved - copy constructor.
MatrixType f_src(m);
VERIFY_IS_EQUAL(f_src, m);
MatrixType f_dst = std::move(f_src);
- VERIFY_IS_EQUAL(f_src, MatrixType());
VERIFY_IS_EQUAL(f_dst, m);
+ // rvalue reference is moved - copy assignment.
+ MatrixType g_src(m);
+ VERIFY_IS_EQUAL(g_src, m);
+ MatrixType g_dst;
+ g_dst = std::move(g_src);
+ VERIFY_IS_EQUAL(g_dst, m);
}
#else
template <typename MatrixType>
@@ -144,6 +179,8 @@ EIGEN_DECLARE_TEST(rvalue_types)
#if EIGEN_HAS_CXX11
CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<MovableScalar<float>,1,3>::Random().eval()));
+ CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<SafeScalar<float>,1,3>::Random().eval()));
+ CALL_SUBTEST_5(rvalue_move(Eigen::Matrix<SafeScalar<float>,Eigen::Dynamic,Eigen::Dynamic>::Random(1,3).eval()));
#endif
}
}


@@ -688,7 +688,7 @@ void big_sparse_triplet(Index rows, Index cols, double density) {
typedef Triplet<Scalar,Index> TripletType;
std::vector<TripletType> triplets;
double nelements = density * rows*cols;
- VERIFY(nelements>=0 && nelements < NumTraits<StorageIndex>::highest());
+ VERIFY(nelements>=0 && nelements < static_cast<double>(NumTraits<StorageIndex>::highest()));
Index ntriplets = Index(nelements);
triplets.reserve(ntriplets);
Scalar sum = Scalar(0);


@@ -402,14 +402,13 @@ class Tensor : public TensorBase<Tensor<Scalar_, NumIndices_, Options_, IndexTyp
#if EIGEN_HAS_RVALUE_REFERENCES
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor(Self&& other)
- : Tensor()
+ : m_storage(std::move(other.m_storage))
{
- m_storage.swap(other.m_storage);
}
EIGEN_DEVICE_FUNC
EIGEN_STRONG_INLINE Tensor& operator=(Self&& other)
{
- m_storage.swap(other.m_storage);
+ m_storage = std::move(other.m_storage);
return *this;
}
#endif


@@ -108,6 +108,20 @@ class TensorStorage<T, DSizes<IndexType, NumIndices_>, Options_>
return *this;
}
+ #if EIGEN_HAS_RVALUE_REFERENCES
+ EIGEN_DEVICE_FUNC TensorStorage(Self&& other) : TensorStorage()
+ {
+   *this = std::move(other);
+ }
+
+ EIGEN_DEVICE_FUNC Self& operator=(Self&& other)
+ {
+   numext::swap(m_data, other.m_data);
+   numext::swap(m_dimensions, other.m_dimensions);
+   return *this;
+ }
+ #endif
EIGEN_DEVICE_FUNC ~TensorStorage() { internal::conditional_aligned_delete_auto<T,(Options_&DontAlign)==0>(m_data, internal::array_prod(m_dimensions)); }
EIGEN_DEVICE_FUNC void swap(Self& other)
{ numext::swap(m_data,other.m_data); numext::swap(m_dimensions,other.m_dimensions); }


@@ -62,14 +62,9 @@ static void test_move()
moved_tensor3 = std::move(moved_tensor1);
moved_tensor4 = std::move(moved_tensor2);
- VERIFY_IS_EQUAL(moved_tensor1.size(), 8);
- VERIFY_IS_EQUAL(moved_tensor2.size(), 8);
for (int i = 0; i < 8; i++)
{
calc_indices(i, x, y, z);
- VERIFY_IS_EQUAL(moved_tensor1(x,y,z), 0);
- VERIFY_IS_EQUAL(moved_tensor2(x,y,z), 0);
VERIFY_IS_EQUAL(moved_tensor3(x,y,z), i);
VERIFY_IS_EQUAL(moved_tensor4(x,y,z), 2 * i);
}