Insert from triplets

Charles Schlosser 2023-04-12 20:01:48 +00:00 committed by Rasmus Munk Larsen
parent 990a282fc4
commit 0d12fcc34e
5 changed files with 679 additions and 110 deletions

View File

@@ -78,7 +78,7 @@ void assign_sparse_to_sparse(DstXprType &dst, const SrcXprType &src)
SrcEvaluatorType srcEvaluator(src);
const bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
constexpr bool transpose = (DstEvaluatorType::Flags & RowMajorBit) != (SrcEvaluatorType::Flags & RowMajorBit);
const Index outerEvaluationSize = (SrcEvaluatorType::Flags&RowMajorBit) ? src.rows() : src.cols();
Index reserveSize = 0;

View File

@@ -344,46 +344,77 @@ class SparseCompressedBase<Derived>::ReverseInnerIterator
namespace internal {
// modified from https://artificial-mind.net/blog/2020/11/28/std-sort-multiple-ranges
template <typename Scalar, typename StorageIndex>
class StorageVal;
template <typename Scalar, typename StorageIndex>
class StorageRef;
template <typename Scalar, typename StorageIndex>
class CompressedStorageIterator;
// wrapper class analogous to std::pair<StorageIndex&, Scalar&>
// class to hold an index/value pair
template <typename Scalar, typename StorageIndex>
class StorageVal
{
public:
StorageVal(const StorageIndex& innerIndex, const Scalar& value) : m_innerIndex(innerIndex), m_value(value) {}
StorageVal(const StorageVal& other) : m_innerIndex(other.m_innerIndex), m_value(other.m_value) {}
inline const StorageIndex& key() const { return m_innerIndex; }
inline StorageIndex& key() { return m_innerIndex; }
inline const Scalar& value() const { return m_value; }
inline Scalar& value() { return m_value; }
// enables StorageVal to be compared with respect to any type that is convertible to StorageIndex
inline operator StorageIndex() const { return m_innerIndex; }
protected:
StorageIndex m_innerIndex;
Scalar m_value;
private:
StorageVal() = delete;
};
// class to hold an index/value iterator pair
// used to define assignment, swap, and comparison operators for CompressedStorageIterator
template <typename Scalar, typename StorageIndex>
class StorageRef
{
public:
using value_type = std::pair<StorageIndex, Scalar>;
using value_type = StorageVal<Scalar, StorageIndex>;
inline StorageRef& operator=(const StorageRef& other) {
*m_innerIndexIterator = *other.m_innerIndexIterator;
*m_valueIterator = *other.m_valueIterator;
key() = other.key();
value() = other.value();
return *this;
}
inline StorageRef& operator=(const value_type& other) {
std::tie(*m_innerIndexIterator, *m_valueIterator) = other;
key() = other.key();
value() = other.value();
return *this;
}
inline operator value_type() const { return std::make_pair(*m_innerIndexIterator, *m_valueIterator); }
inline operator value_type() const { return value_type(key(), value()); }
inline friend void swap(const StorageRef& a, const StorageRef& b) {
std::iter_swap(a.m_innerIndexIterator, b.m_innerIndexIterator);
std::iter_swap(a.m_valueIterator, b.m_valueIterator);
std::iter_swap(a.keyPtr(), b.keyPtr());
std::iter_swap(a.valuePtr(), b.valuePtr());
}
inline static const StorageIndex& key(const StorageRef& a) { return *a.m_innerIndexIterator; }
inline static const StorageIndex& key(const value_type& a) { return a.first; }
#define REF_COMP_REF(OP) inline friend bool operator OP(const StorageRef& a, const StorageRef& b) { return key(a) OP key(b); };
#define REF_COMP_VAL(OP) inline friend bool operator OP(const StorageRef& a, const value_type& b) { return key(a) OP key(b); };
#define VAL_COMP_REF(OP) inline friend bool operator OP(const value_type& a, const StorageRef& b) { return key(a) OP key(b); };
#define MAKE_COMPS(OP) REF_COMP_REF(OP) REF_COMP_VAL(OP) VAL_COMP_REF(OP)
MAKE_COMPS(<) MAKE_COMPS(>) MAKE_COMPS(<=) MAKE_COMPS(>=) MAKE_COMPS(==) MAKE_COMPS(!=)
inline const StorageIndex& key() const { return *m_innerIndexIterator; }
inline StorageIndex& key() { return *m_innerIndexIterator; }
inline const Scalar& value() const { return *m_valueIterator; }
inline Scalar& value() { return *m_valueIterator; }
inline StorageIndex* keyPtr() const { return m_innerIndexIterator; }
inline Scalar* valuePtr() const { return m_valueIterator; }
// enables StorageRef to be compared with respect to any type that is convertible to StorageIndex
inline operator StorageIndex() const { return *m_innerIndexIterator; }
protected:
StorageIndex* m_innerIndexIterator;
Scalar* m_valueIterator;
private:
StorageRef() = delete;
// these constructors are only called by the CompressedStorageIterator constructors for convenience only
// these constructors are called by the CompressedStorageIterator constructors for convenience only
StorageRef(StorageIndex* innerIndexIterator, Scalar* valueIterator) : m_innerIndexIterator(innerIndexIterator), m_valueIterator(valueIterator) {}
StorageRef(const StorageRef& other) : m_innerIndexIterator(other.m_innerIndexIterator), m_valueIterator(other.m_valueIterator) {}
@@ -418,10 +449,16 @@ public:
inline CompressedStorageIterator& operator--() { --m_index; return *this; }
inline CompressedStorageIterator& operator+=(difference_type offset) { m_index += offset; return *this; }
inline CompressedStorageIterator& operator-=(difference_type offset) { m_index -= offset; return *this; }
inline reference operator*() const { return reference(m_data.m_innerIndexIterator + m_index, m_data.m_valueIterator + m_index); }
inline reference operator*() const { return reference(m_data.keyPtr() + m_index, m_data.valuePtr() + m_index); }
#define MAKE_COMP(OP) inline bool operator OP(const CompressedStorageIterator& other) const { return m_index OP other.m_index; }
MAKE_COMP(<) MAKE_COMP(>) MAKE_COMP(>=) MAKE_COMP(<=) MAKE_COMP(!=) MAKE_COMP(==)
MAKE_COMP(<)
MAKE_COMP(>)
MAKE_COMP(>=)
MAKE_COMP(<=)
MAKE_COMP(!=)
MAKE_COMP(==)
#undef MAKE_COMP
protected:
difference_type m_index;

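For context, these wrapper classes let std::sort operate on the parallel inner-index and value buffers of a compressed storage as if they were a single range of index/value pairs. Below is a minimal sketch of the intended use; the CompressedStorageIterator constructor of the form (offset, indexPtr, valuePtr) is assumed here, since the constructors are not shown in this hunk.
// sketch: sort one inner vector's indices and values in tandem (requires <algorithm>)
template <typename Scalar, typename StorageIndex>
void sort_inner_vector(StorageIndex* indices, Scalar* values, Eigen::Index size) {
using Iter = Eigen::internal::CompressedStorageIterator<Scalar, StorageIndex>;
Iter begin(0, indices, values); // assumed constructor: (offset, indexPtr, valuePtr)
Iter end(size, indices, values);
// comparisons use the inner index only; the swap defined on StorageRef keeps each value with its index
std::sort(begin, end);
}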
View File

@@ -51,6 +51,12 @@ class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
namespace internal {
// The default evaluator performs an "arithmetic" operation on two input arrays.
// Given input arrays 'lhs' and 'rhs' and binary functor 'func',
// the sparse destination array 'dst' is evaluated as follows:
// if lhs(i,j) and rhs(i,j) are present, dst(i,j) = func(lhs(i,j), rhs(i,j))
// if lhs(i,j) is present and rhs(i,j) is null, dst(i,j) = func(lhs(i,j), 0)
// if lhs(i,j) is null and rhs(i,j) is present, dst(i,j) = func(0, rhs(i,j))
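// For example (illustrative values only), with func(a,b) = a - b and a single column j:
// if lhs(:,j) stores { (0,j)=1, (1,j)=2 } and rhs(:,j) stores { (1,j)=3, (2,j)=4 },
// then dst(:,j) stores { (0,j)=func(1,0)=1, (1,j)=func(2,3)=-1, (2,j)=func(0,4)=-4 }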
// Generic "sparse OP sparse"
template<typename XprType> struct binary_sparse_evaluator;
@@ -72,7 +78,7 @@ public:
public:
EIGEN_STRONG_INLINE InnerIterator(const binary_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor)
: m_lhsIter(aEval.m_lhsImpl,outer), m_rhsIter(aEval.m_rhsImpl,outer), m_functor(aEval.m_functor), m_value(Scalar(0))
{
this->operator++();
}
@@ -100,7 +106,6 @@ public:
}
else
{
m_value = Scalar(0); // this is to avoid a compilation warning
m_id = -1;
}
return *this;
@@ -394,6 +399,13 @@ struct binary_evaluator<CwiseBinaryOp<scalar_boolean_and_op<bool>, Lhs, Rhs>, It
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
// The conjunction "^" evaluator performs a logical "and" or set "intersection" operation on two input arrays.
// Given input arrays 'lhs' and 'rhs' and binary functor 'func',
// the sparse destination array 'dst' is evaluated as follows:
// if lhs(i,j) and rhs(i,j) are present, dst(i,j) = func(lhs(i,j), rhs(i,j))
// if lhs(i,j) is present and rhs(i,j) is null, dst(i,j) is null
// if lhs(i,j) is null and rhs(i,j) is present, dst(i,j) is null
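// For example (illustrative values only), with func(a,b) = a * b and a single column j:
// if lhs(:,j) stores { (0,j)=1, (1,j)=2 } and rhs(:,j) stores { (1,j)=3, (2,j)=4 },
// then dst(:,j) stores only { (1,j)=func(2,3)=6 }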
// "sparse ^ sparse"
template<typename XprType>
struct sparse_conjunction_evaluator<XprType, IteratorBased, IteratorBased>
@@ -626,6 +638,273 @@ protected:
evaluator<RhsArg> m_rhsImpl;
};
template<typename T,
typename LhsKind = typename evaluator_traits<typename T::Lhs>::Kind,
typename RhsKind = typename evaluator_traits<typename T::Rhs>::Kind,
typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct sparse_disjunction_evaluator;
// The disjunction "v" evaluator performs a logical "or" or set "union" operation on two input arrays.
// Given input arrays 'lhs' and 'rhs' and binary functor 'func',
// the sparse destination array 'dst' is evaluated as follows:
// if lhs(i,j) and rhs(i,j) are present, dst(i,j) = func(lhs(i,j), rhs(i,j))
// if lhs(i,j) is present and rhs(i,j) is null, dst(i,j) = lhs(i,j)
// if lhs(i,j) is null and rhs(i,j) is present, dst(i,j) = rhs(i,j)
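// For example (illustrative values only), with func(a,b) = a + b and a single column j:
// if lhs(:,j) stores { (0,j)=1, (1,j)=2 } and rhs(:,j) stores { (1,j)=3, (2,j)=4 },
// then dst(:,j) stores { (0,j)=1, (1,j)=func(2,3)=5, (2,j)=4 }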
// "sparse v sparse"
template <typename XprType>
struct sparse_disjunction_evaluator<XprType, IteratorBased, IteratorBased> : evaluator_base<XprType> {
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator {
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_disjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl, outer),
m_rhsIter(aEval.m_rhsImpl, outer),
m_functor(aEval.m_functor),
m_value(Scalar(0)) {
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++() {
if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index())) {
m_id = m_lhsIter.index();
m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
++m_lhsIter;
++m_rhsIter;
} else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index()))) {
m_id = m_lhsIter.index();
m_value = m_lhsIter.value();
++m_lhsIter;
} else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index()))) {
m_id = m_rhsIter.index();
m_value = m_rhsIter.value();
++m_rhsIter;
} else {
m_id = -1;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return LhsArg::IsRowMajor ? m_lhsIter.row() : index(); }
EIGEN_STRONG_INLINE Index col() const { return LhsArg::IsRowMajor ? index() : m_lhsIter.col(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id >= 0; }
protected:
LhsIterator m_lhsIter;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
};
enum {
CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) +
int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
explicit sparse_disjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()), m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()) {
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const { return m_lhsImpl.nonZerosEstimate() + m_rhsImpl.nonZerosEstimate(); }
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
};
// "dense v sparse"
template <typename XprType>
struct sparse_disjunction_evaluator<XprType, IndexBased, IteratorBased> : evaluator_base<XprType> {
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef evaluator<LhsArg> LhsEvaluator;
typedef typename evaluator<RhsArg>::InnerIterator RhsIterator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator {
enum { IsRowMajor = (int(RhsArg::Flags) & RowMajorBit) == RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_disjunction_evaluator& aEval, Index outer)
: m_lhsEval(aEval.m_lhsImpl),
m_rhsIter(aEval.m_rhsImpl, outer),
m_functor(aEval.m_functor),
m_value(0),
m_id(-1),
m_innerSize(aEval.m_expr.rhs().innerSize()) {
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++() {
++m_id;
if (m_id < m_innerSize) {
Scalar lhsVal = m_lhsEval.coeff(IsRowMajor ? m_rhsIter.outer() : m_id, IsRowMajor ? m_id : m_rhsIter.outer());
if (m_rhsIter && m_rhsIter.index() == m_id) {
m_value = m_functor(lhsVal, m_rhsIter.value());
++m_rhsIter;
} else
m_value = lhsVal;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const {
eigen_internal_assert(m_id < m_innerSize);
return m_value;
}
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_rhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_rhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_rhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id < m_innerSize; }
protected:
const evaluator<LhsArg>& m_lhsEval;
RhsIterator m_rhsIter;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) +
int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
explicit sparse_disjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()), m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()), m_expr(xpr) {
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const { return m_expr.size(); }
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
const XprType& m_expr;
};
// "sparse v dense"
template <typename XprType>
struct sparse_disjunction_evaluator<XprType, IteratorBased, IndexBased> : evaluator_base<XprType> {
protected:
typedef typename XprType::Functor BinaryOp;
typedef typename XprType::Lhs LhsArg;
typedef typename XprType::Rhs RhsArg;
typedef typename evaluator<LhsArg>::InnerIterator LhsIterator;
typedef evaluator<RhsArg> RhsEvaluator;
typedef typename XprType::StorageIndex StorageIndex;
typedef typename traits<XprType>::Scalar Scalar;
public:
class InnerIterator {
enum { IsRowMajor = (int(LhsArg::Flags) & RowMajorBit) == RowMajorBit };
public:
EIGEN_STRONG_INLINE InnerIterator(const sparse_disjunction_evaluator& aEval, Index outer)
: m_lhsIter(aEval.m_lhsImpl, outer),
m_rhsEval(aEval.m_rhsImpl),
m_functor(aEval.m_functor),
m_value(0),
m_id(-1),
m_innerSize(aEval.m_expr.lhs().innerSize()) {
this->operator++();
}
EIGEN_STRONG_INLINE InnerIterator& operator++() {
++m_id;
if (m_id < m_innerSize) {
Scalar rhsVal = m_rhsEval.coeff(IsRowMajor ? m_lhsIter.outer() : m_id, IsRowMajor ? m_id : m_lhsIter.outer());
if (m_lhsIter && m_lhsIter.index() == m_id) {
m_value = m_functor(m_lhsIter.value(), rhsVal);
++m_lhsIter;
} else
m_value = rhsVal;
}
return *this;
}
EIGEN_STRONG_INLINE Scalar value() const {
eigen_internal_assert(m_id < m_innerSize);
return m_value;
}
EIGEN_STRONG_INLINE StorageIndex index() const { return m_id; }
EIGEN_STRONG_INLINE Index outer() const { return m_lhsIter.outer(); }
EIGEN_STRONG_INLINE Index row() const { return IsRowMajor ? m_lhsIter.outer() : m_id; }
EIGEN_STRONG_INLINE Index col() const { return IsRowMajor ? m_id : m_lhsIter.outer(); }
EIGEN_STRONG_INLINE operator bool() const { return m_id < m_innerSize; }
protected:
LhsIterator m_lhsIter;
const evaluator<RhsArg>& m_rhsEval;
const BinaryOp& m_functor;
Scalar m_value;
StorageIndex m_id;
StorageIndex m_innerSize;
};
enum {
CoeffReadCost = int(evaluator<LhsArg>::CoeffReadCost) + int(evaluator<RhsArg>::CoeffReadCost) +
int(functor_traits<BinaryOp>::Cost),
Flags = XprType::Flags
};
explicit sparse_disjunction_evaluator(const XprType& xpr)
: m_functor(xpr.functor()), m_lhsImpl(xpr.lhs()), m_rhsImpl(xpr.rhs()), m_expr(xpr) {
EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
}
inline Index nonZerosEstimate() const { return m_expr.size(); }
protected:
const BinaryOp m_functor;
evaluator<LhsArg> m_lhsImpl;
evaluator<RhsArg> m_rhsImpl;
const XprType& m_expr;
};
// when DupFunc is wrapped with scalar_disjunction_op, use the disjunction evaluator
template <typename T1, typename T2, typename DupFunc, typename Lhs, typename Rhs>
struct binary_evaluator<CwiseBinaryOp<scalar_disjunction_op<DupFunc, T1, T2>, Lhs, Rhs>, IteratorBased, IteratorBased>
: sparse_disjunction_evaluator<CwiseBinaryOp<scalar_disjunction_op<DupFunc, T1, T2>, Lhs, Rhs> > {
typedef CwiseBinaryOp<scalar_disjunction_op<DupFunc, T1, T2>, Lhs, Rhs> XprType;
typedef sparse_disjunction_evaluator<XprType> Base;
explicit binary_evaluator(const XprType& xpr) : Base(xpr) {}
};
}
/***************************************************************************

View File

@@ -151,7 +151,7 @@ class SparseMatrix
typedef typename Base::IndexVector IndexVector;
typedef typename Base::ScalarVector ScalarVector;
protected:
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),StorageIndex> TransposedSparseMatrix;
typedef SparseMatrix<Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex> TransposedSparseMatrix;
Index m_outerSize;
Index m_innerSize;
@@ -489,6 +489,18 @@ class SparseMatrix
template<typename InputIterators, typename DupFunctor>
void setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
template<typename InputIterators>
void insertFromTriplets(const InputIterators& begin, const InputIterators& end);
template<typename InputIterators, typename DupFunctor>
void insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
template<typename InputIterators>
void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end);
template<typename InputIterators, typename DupFunctor>
void insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
//---
/** \internal
@@ -1095,49 +1107,51 @@ namespace internal {
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
DupFunctor dup_func) {
constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
typedef typename VectorX<StorageIndex>::AlignedMapType IndexMap;
using StorageIndex = typename SparseMatrixType::StorageIndex;
using IndexMap = typename VectorX<StorageIndex>::AlignedMapType;
using TransposedSparseMatrix = SparseMatrix<typename SparseMatrixType::Scalar, IsRowMajor ? ColMajor : RowMajor, StorageIndex>;
if (begin == end) return;
// free innerNonZeroPtr (if present) and zero outerIndexPtr
mat.resize(mat.rows(), mat.cols());
// allocate temporary storage for nonzero insertion (outer size) and duplicate removal (inner size)
ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
// There are two strategies to consider for constructing a matrix from unordered triplets:
// A) construct the 'mat' in its native storage order and sort in-place (less memory); or,
// B) construct the transposed matrix and use an implicit sort upon assignment to `mat` (less time).
// This routine uses B) for faster execution time.
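// e.g. if 'mat' is column-major, 'trmat' is row-major: the triplets are binned by row below, and the
// final assignment 'mat = trmat' performs the change of storage order, which leaves each column of
// 'mat' with sorted row indices.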
TransposedSparseMatrix trmat(mat.rows(), mat.cols());
// scan triplets to determine allocation size before constructing matrix
IndexMap outerIndexMap(mat.outerIndexPtr(), mat.outerSize() + 1);
Index nonZeros = 0;
for (InputIterator it(begin); it != end; ++it) {
eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
outerIndexMap.coeffRef(j + 1)++;
StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
trmat.outerIndexPtr()[j + 1]++;
nonZeros++;
}
// finalize outer indices and allocate memory
std::partial_sum(outerIndexMap.begin(), outerIndexMap.end(), outerIndexMap.begin());
eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
mat.resizeNonZeros(nonZeros);
std::partial_sum(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize() + 1, trmat.outerIndexPtr());
eigen_assert(nonZeros == trmat.outerIndexPtr()[trmat.outerSize()]);
trmat.resizeNonZeros(nonZeros);
// use tmp to track nonzero insertions
IndexMap back(tmp, mat.outerSize());
back = outerIndexMap.head(mat.outerSize());
// construct temporary array to track insertions (outer size) and collapse duplicates (inner size)
ei_declare_aligned_stack_constructed_variable(StorageIndex, tmp, numext::maxi(mat.innerSize(), mat.outerSize()), 0);
smart_copy(trmat.outerIndexPtr(), trmat.outerIndexPtr() + trmat.outerSize(), tmp);
// push triplets to back of each inner vector
// push triplets to back of each vector
for (InputIterator it(begin); it != end; ++it) {
StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
mat.data().index(back.coeff(j)) = i;
mat.data().value(back.coeff(j)) = it->value();
back.coeffRef(j)++;
StorageIndex j = convert_index<StorageIndex>(IsRowMajor ? it->col() : it->row());
StorageIndex i = convert_index<StorageIndex>(IsRowMajor ? it->row() : it->col());
StorageIndex k = tmp[j];
trmat.data().index(k) = i;
trmat.data().value(k) = it->value();
tmp[j]++;
}
// use tmp to collapse duplicates
IndexMap wi(tmp, mat.innerSize());
mat.collapseDuplicates(wi, dup_func);
mat.sortInnerIndices();
IndexMap wi(tmp, trmat.innerSize());
trmat.collapseDuplicates(wi, dup_func);
// implicit sorting
mat = trmat;
}
// Creates a compressed sparse matrix from a sorted range of triplets
@@ -1145,8 +1159,8 @@ template <typename InputIterator, typename SparseMatrixType, typename DupFunctor
void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
DupFunctor dup_func) {
constexpr bool IsRowMajor = SparseMatrixType::IsRowMajor;
typedef typename SparseMatrixType::StorageIndex StorageIndex;
typedef typename VectorX<StorageIndex>::AlignedMapType IndexMap;
using StorageIndex = typename SparseMatrixType::StorageIndex;
if (begin == end) return;
constexpr StorageIndex kEmptyIndexValue(-1);
@@ -1156,7 +1170,6 @@ void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& e
StorageIndex previous_j = kEmptyIndexValue;
StorageIndex previous_i = kEmptyIndexValue;
// scan triplets to determine allocation size before constructing matrix
IndexMap outerIndexMap(mat.outerIndexPtr(), mat.outerSize() + 1);
Index nonZeros = 0;
for (InputIterator it(begin); it != end; ++it) {
eigen_assert(it->row() >= 0 && it->row() < mat.rows() && it->col() >= 0 && it->col() < mat.cols());
@@ -1166,16 +1179,16 @@ void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& e
// identify duplicates by examining previous location
bool duplicate = (previous_j == j) && (previous_i == i);
if (!duplicate) {
outerIndexMap.coeffRef(j + 1)++;
if (nonZeros == NumTraits<StorageIndex>::highest()) internal::throw_std_bad_alloc();
nonZeros++;
mat.outerIndexPtr()[j + 1]++;
previous_j = j;
previous_i = i;
}
previous_j = j;
previous_i = i;
}
// finalize outer indices and allocate memory
std::partial_sum(outerIndexMap.begin(), outerIndexMap.end(), outerIndexMap.begin());
std::partial_sum(mat.outerIndexPtr(), mat.outerIndexPtr() + mat.outerSize() + 1, mat.outerIndexPtr());
eigen_assert(nonZeros == mat.outerIndexPtr()[mat.outerSize()]);
mat.resizeNonZeros(nonZeros);
@@ -1197,27 +1210,73 @@ void set_from_triplets_sorted(const InputIterator& begin, const InputIterator& e
back++;
}
}
eigen_assert(back == nonZeros);
// matrix is finalized
}
// thin wrapper around a generic binary functor to use the sparse disjunction evaluator instead of the default "arithmetic" evaluator
template<typename DupFunctor, typename LhsScalar, typename RhsScalar = LhsScalar>
struct scalar_disjunction_op
{
using result_type = typename result_of<DupFunctor(LhsScalar, RhsScalar)>::type;
scalar_disjunction_op(const DupFunctor& op) : m_functor(op) {}
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE result_type operator() (const LhsScalar& a, const RhsScalar& b) const { return m_functor(a, b); }
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const DupFunctor& functor() const { return m_functor; }
const DupFunctor& m_functor;
};
template <typename DupFunctor, typename LhsScalar, typename RhsScalar>
struct functor_traits<scalar_disjunction_op<DupFunctor, LhsScalar, RhsScalar>> : public functor_traits<DupFunctor> {};
// Creates a compressed sparse matrix from its existing entries and those from an unsorted range of triplets
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
DupFunctor dup_func) {
using Scalar = typename SparseMatrixType::Scalar;
using SrcXprType = CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
// set_from_triplets is necessary to sort the inner indices and remove the duplicate entries
SparseMatrixType trips(mat.rows(), mat.cols());
set_from_triplets(begin, end, trips, dup_func);
SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
// the sparse assignment procedure creates a temporary matrix and swaps the final result
assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}
// Creates a compressed sparse matrix from its existing entries and those from a sorted range of triplets
template <typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void insert_from_triplets_sorted(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat,
DupFunctor dup_func) {
using Scalar = typename SparseMatrixType::Scalar;
using SrcXprType = CwiseBinaryOp<scalar_disjunction_op<DupFunctor, Scalar>, const SparseMatrixType, const SparseMatrixType>;
/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
// TODO: process triplets without making a copy
SparseMatrixType trips(mat.rows(), mat.cols());
set_from_triplets_sorted(begin, end, trips, dup_func);
SrcXprType src = mat.binaryExpr(trips, scalar_disjunction_op<DupFunctor, Scalar>(dup_func));
// the sparse assignment procedure creates a temporary matrix and swaps the final result
assign_sparse_to_sparse<SparseMatrixType, SrcXprType>(mat, src);
}
} // namespace internal
/** Fill the matrix \c *this with the list of \em triplets defined in the half-open range from \a begin to \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
* The input list of triplets does not have to be sorted, and can contains duplicated elements.
* The input list of triplets does not have to be sorted, and may contain duplicated elements.
* In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
* This is a \em O(n) operation, with \em n the number of triplet elements.
* The initial contents of \c *this is destroyed.
* The initial contents of \c *this are destroyed.
* The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
* or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
*
* The \a InputIterators value_type must provide the following interface:
* \code
* Scalar value() const; // the value
* Scalar row() const; // the row index i
* Scalar col() const; // the column index j
* IndexType row() const; // the row index i
* IndexType col() const; // the column index j
* \endcode
* See for instance the Eigen::Triplet template class.
*
@@ -1247,20 +1306,6 @@ void SparseMatrix<Scalar,Options_,StorageIndex_>::setFromTriplets(const InputIte
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,Options_,StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}
/** The same as setFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage.
* Two triplets `a` and `b` are appropriately ordered if:
* \code
ColMajor: ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row()))
RowMajor: ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col()))
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
* \code
* value = dup_func(OldValue, NewValue)
@@ -1274,7 +1319,21 @@ template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
internal::set_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
/** The same as setFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage.
* Two triplets `a` and `b` are appropriately ordered if:
* \code
ColMajor: ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row()))
RowMajor: ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col()))
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
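For illustration, a sketch (with placeholder names 'mat' and 'triplets') of pre-sorting a triplet list to satisfy the column-major ordering documented above before calling setFromSortedTriplets:
// assumes a column-major SparseMatrix<double> 'mat' and a std::vector<Eigen::Triplet<double>> 'triplets'
std::sort(triplets.begin(), triplets.end(), [](const Eigen::Triplet<double>& a, const Eigen::Triplet<double>& b) {
return a.col() != b.col() ? a.col() < b.col() : a.row() < b.row();
});
mat.setFromSortedTriplets(triplets.begin(), triplets.end());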
/** The same as setFromSortedTriplets but when duplicates are met the functor \a dup_func is applied:
@@ -1283,52 +1342,147 @@ void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromTriplets(const InputI
* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* mat.setFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::setFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
internal::set_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
/** Insert a batch of elements into the matrix \c *this from the list of \em triplets defined in the half-open range from \a begin to \a end.
*
* A \em triplet is a tuple (i,j,value) defining a non-zero element.
* The input list of triplets does not have to be sorted, and may contain duplicated elements.
* In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
* This is a \em O(n) operation, with \em n the number of triplet elements.
* The initial contents of \c *this are preserved (except for the summation of duplicate elements).
* The matrix \c *this must be properly sized beforehand. The sizes are not extracted from the triplet list.
*
* The \a InputIterators value_type must provide the following interface:
* \code
* Scalar value() const; // the value
* IndexType row() const; // the row index i
* IndexType col() const; // the column index j
* \endcode
* See for instance the Eigen::Triplet template class.
*
* Here is a typical usage example:
* \code
SparseMatrixType m(rows,cols); // m contains nonzero entries
typedef Triplet<double> T;
std::vector<T> tripletList;
tripletList.reserve(estimation_of_entries);
for(...)
{
// ...
tripletList.push_back(T(i,j,v_ij));
}
m.insertFromTriplets(tripletList.begin(), tripletList.end());
// m is ready to go!
* \endcode
*
* \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
* be explicitly stored into a std::vector for instance.
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
/** The same as insertFromTriplets but when duplicates are met the functor \a dup_func is applied:
* \code
* value = dup_func(OldValue, NewValue)
* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.insertFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::insert_from_triplets<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
/** The same as insertFromTriplets but triplets are assumed to be pre-sorted. This is faster and requires less temporary storage.
* Two triplets `a` and `b` are appropriately ordered if:
* \code
ColMajor: ((a.col() != b.col()) ? (a.col() < b.col()) : (a.row() < b.row()))
RowMajor: ((a.row() != b.row()) ? (a.row() < b.row()) : (a.col() < b.col()))
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end)
{
internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_> >(begin, end, *this, internal::scalar_sum_op<Scalar, Scalar>());
}
/** The same as insertFromSortedTriplets but when duplicates are met the functor \a dup_func is applied:
* \code
* value = dup_func(OldValue, NewValue)
* \endcode
* Here is a C++11 example keeping the latest entry only:
* \code
* mat.insertFromSortedTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
* \endcode
*/
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::insertFromSortedTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
internal::insert_from_triplets_sorted<InputIterators, SparseMatrix<Scalar, Options_, StorageIndex_>, DupFunctor>(begin, end, *this, dup_func);
}
/** \internal */
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename Derived, typename DupFunctor>
void SparseMatrix<Scalar, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func)
{
eigen_assert(wi.size() >= m_innerSize);
template <typename Scalar_, int Options_, typename StorageIndex_>
template <typename Derived, typename DupFunctor>
void SparseMatrix<Scalar_, Options_, StorageIndex_>::collapseDuplicates(DenseBase<Derived>& wi, DupFunctor dup_func) {
// removes duplicate entries and compresses the matrix
// the excess allocated memory is not released
// the inner indices do not need to be sorted, nor is the matrix returned in a sorted state
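// e.g. an outer vector holding inner indices [2, 0, 2] with values [a, b, c]
// collapses to inner indices [2, 0] with values [dup_func(a, c), b]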
eigen_assert(wi.size() == m_innerSize);
constexpr StorageIndex kEmptyIndexValue(-1);
wi.setConstant(kEmptyIndexValue);
StorageIndex count = 0;
const bool is_compressed = isCompressed();
// for each inner vector, wi[inner_index] will hold the position of the first element with that inner index in the index/value buffers
for (Index j = 0; j < m_outerSize; ++j) {
StorageIndex start = count;
StorageIndex oldEnd = isCompressed() ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
for (StorageIndex k = m_outerIndex[j]; k < oldEnd; ++k) {
const StorageIndex newBegin = count;
const StorageIndex end = is_compressed ? m_outerIndex[j + 1] : m_outerIndex[j] + m_innerNonZeros[j];
for (StorageIndex k = m_outerIndex[j]; k < end; ++k) {
StorageIndex i = m_data.index(k);
if (wi(i) >= start) {
// we already meet this entry => accumulate it
if (wi(i) >= newBegin) {
// entry at k is a duplicate
// accumulate it into the primary entry located at wi(i)
m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
} else {
// k is the primary entry in j with inner index i
// shift it to the left and record its location at wi(i)
m_data.index(count) = i;
m_data.value(count) = m_data.value(k);
m_data.index(count) = m_data.index(k);
wi(i) = count;
++count;
}
}
m_outerIndex[j] = start;
m_outerIndex[j] = newBegin;
}
m_outerIndex[m_outerSize] = count;
// turn the matrix into compressed form
m_data.resize(count);
// turn the matrix into compressed form (if it is not already)
internal::conditional_aligned_delete_auto<StorageIndex, true>(m_innerNonZeros, m_outerSize);
m_innerNonZeros = 0;
m_data.resize(m_outerIndex[m_outerSize]);
}
/** \internal */
template<typename Scalar, int Options_, typename StorageIndex_>
template<typename OtherDerived>

View File

@@ -453,43 +453,106 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
VERIFY_IS_APPROX(m2, refM2);
}
// test setFromTriplets
// test setFromTriplets / insertFromTriplets
{
typedef Triplet<Scalar,StorageIndex> TripletType;
Index ntriplets = rows * cols;
std::vector<TripletType> triplets;
Index ntriplets = rows*cols;
triplets.reserve(ntriplets);
DenseMatrix refMat_sum = DenseMatrix::Zero(rows,cols);
DenseMatrix refMat_prod = DenseMatrix::Zero(rows,cols);
DenseMatrix refMat_last = DenseMatrix::Zero(rows,cols);
for(Index i=0;i<ntriplets;++i)
{
StorageIndex r = internal::random<StorageIndex>(0,StorageIndex(rows-1));
StorageIndex c = internal::random<StorageIndex>(0,StorageIndex(cols-1));
for (Index i = 0; i < ntriplets; ++i) {
StorageIndex r = internal::random<StorageIndex>(0, StorageIndex(rows - 1));
StorageIndex c = internal::random<StorageIndex>(0, StorageIndex(cols - 1));
Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v));
refMat_sum(r,c) += v;
if(std::abs(refMat_prod(r,c))==0)
refMat_prod(r,c) = v;
triplets.push_back(TripletType(r, c, v));
refMat_sum(r, c) += v;
if (std::abs(refMat_prod(r, c)) == 0)
refMat_prod(r, c) = v;
else
refMat_prod(r,c) *= v;
refMat_last(r,c) = v;
refMat_prod(r, c) *= v;
refMat_last(r, c) = v;
}
std::vector<TripletType> moreTriplets;
moreTriplets.reserve(ntriplets);
DenseMatrix refMat_sum_more = refMat_sum;
DenseMatrix refMat_prod_more = refMat_prod;
DenseMatrix refMat_last_more = refMat_last;
for (Index i = 0; i < ntriplets; ++i) {
StorageIndex r = internal::random<StorageIndex>(0, StorageIndex(rows - 1));
StorageIndex c = internal::random<StorageIndex>(0, StorageIndex(cols - 1));
Scalar v = internal::random<Scalar>();
moreTriplets.push_back(TripletType(r, c, v));
refMat_sum_more(r, c) += v;
if (std::abs(refMat_prod_more(r, c)) == 0)
refMat_prod_more(r, c) = v;
else
refMat_prod_more(r, c) *= v;
refMat_last_more(r, c) = v;
}
SparseMatrixType m(rows,cols);
// test setFromTriplets / insertFromTriplets
m.setFromTriplets(triplets.begin(), triplets.end());
VERIFY_IS_APPROX(m, refMat_sum);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
VERIFY(m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end());
VERIFY_IS_APPROX(m, refMat_sum_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
VERIFY(m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromTriplets(triplets.begin(), triplets.end(), [] (Scalar,Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY(m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
// test setFromSortedTriplets
// insert into an uncompressed matrix
VectorXi reserveSizes(m.outerSize());
for (Index i = 0; i < m.outerSize(); i++) reserveSizes[i] = internal::random<int>(1, 7);
m.setFromTriplets(triplets.begin(), triplets.end());
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end());
VERIFY_IS_APPROX(m, refMat_sum_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
// test setFromSortedTriplets / insertFromSortedTriplets
struct triplet_comp {
inline bool operator()(const TripletType& a, const TripletType& b) {
@@ -501,20 +564,56 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
// stable_sort is only necessary when the reduction functor is dependent on the order of the triplets
// this is the case with refMat_last
// for most cases, std::sort is sufficient and preferred
std::stable_sort(triplets.begin(), triplets.end(), triplet_comp());
m.setZero();
std::stable_sort(triplets.begin(), triplets.end(), triplet_comp());
std::stable_sort(moreTriplets.begin(), moreTriplets.end(), triplet_comp());
m.setFromSortedTriplets(triplets.begin(), triplets.end());
VERIFY_IS_APPROX(m, refMat_sum);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
VERIFY(m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end());
VERIFY_IS_APPROX(m, refMat_sum_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromSortedTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
VERIFY(m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromSortedTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
VERIFY(m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
// insert into an uncompressed matrix
m.setFromSortedTriplets(triplets.begin(), triplets.end());
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end());
VERIFY_IS_APPROX(m, refMat_sum_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromSortedTriplets(triplets.begin(), triplets.end(), std::multiplies<Scalar>());
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), std::multiplies<Scalar>());
VERIFY_IS_APPROX(m, refMat_prod_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
m.setFromSortedTriplets(triplets.begin(), triplets.end(), [](Scalar, Scalar b) { return b; });
m.reserve(reserveSizes);
VERIFY(!m.isCompressed());
m.insertFromSortedTriplets(moreTriplets.begin(), moreTriplets.end(), [](Scalar, Scalar b) { return b; });
VERIFY_IS_APPROX(m, refMat_last_more);
VERIFY_IS_EQUAL(m.innerIndicesAreSorted(), m.outerSize());
}
// test Map