Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-06-04 18:54:00 +08:00)
Fix many long to int conversion warnings:
- fix usage of Index (API) versus StorageIndex (when multiple indexes are stored)
- use StorageIndex(val) when the input has already been checked
- use internal::convert_index<StorageIndex>(val) when val is potentially unsafe (directly comes from user input)
This commit is contained in:
parent fc202bab39
commit aa6c516ec1
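The following sketch illustrates the convention described in the commit message; it is not part of the commit. The type aliases and the checked_convert_index helper are assumptions chosen for illustration only: checked_convert_index is a simplified stand-in for Eigen's internal::convert_index<StorageIndex>(val), not the library's actual implementation.

#include <cassert>
#include <cstddef>
#include <limits>

// Assumed aliases for illustration: Index is the wide API-level type,
// StorageIndex is the (possibly narrower) type stored in index arrays.
typedef std::ptrdiff_t Index;
typedef int            StorageIndex;

// Simplified stand-in for Eigen's internal::convert_index: a narrowing
// conversion guarded by a runtime check, for values of unknown range.
template <typename NewIndex, typename OldIndex>
NewIndex checked_convert_index(OldIndex value)
{
  assert(value >= OldIndex(0) &&
         value <= static_cast<OldIndex>(std::numeric_limits<NewIndex>::max()) &&
         "index value does not fit in the storage index type");
  return static_cast<NewIndex>(value);
}

// Hypothetical call sites showing where each form of conversion applies.
void convert_examples(Index userSuppliedSize, Index alreadyValidatedSize)
{
  // Value coming directly from user input: its range is unknown,
  // so use the checked conversion.
  StorageIndex n = checked_convert_index<StorageIndex>(userSuppliedSize);

  // Value already validated earlier (e.g. the dimension of an existing
  // matrix): a plain cast documents the intent without re-checking.
  StorageIndex m = StorageIndex(alreadyValidatedSize);

  (void)n; (void)m;
}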
@@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal
       : m_xpr(xpr), m_functor(func) {}

   EIGEN_DEVICE_FUNC
-  EIGEN_STRONG_INLINE StorageIndex rows() const { return m_xpr.rows(); }
+  EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
   EIGEN_DEVICE_FUNC
-  EIGEN_STRONG_INLINE StorageIndex cols() const { return m_xpr.cols(); }
+  EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }

   /** \returns the functor representing the unary operation */
   EIGEN_DEVICE_FUNC
@@ -147,7 +147,8 @@ class PermutationBase : public EigenBase<Derived>
   /** Sets *this to be the identity permutation matrix */
   void setIdentity()
   {
-    for(Index i = 0; i < size(); ++i)
+    StorageIndex n = StorageIndex(size());
+    for(StorageIndex i = 0; i < n; ++i)
       indices().coeffRef(i) = i;
   }

@@ -27,7 +27,7 @@ namespace internal {
   */
 template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
 bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
-              const Preconditioner& precond, int& iters,
+              const Preconditioner& precond, Index& iters,
               typename Dest::RealScalar& tol_error)
 {
   using std::sqrt;
@@ -36,9 +36,9 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
   typedef typename Dest::Scalar Scalar;
   typedef Matrix<Scalar,Dynamic,1> VectorType;
   RealScalar tol = tol_error;
-  int maxIters = iters;
+  Index maxIters = iters;

-  int n = mat.cols();
+  Index n = mat.cols();
   VectorType r = rhs - mat * x;
   VectorType r0 = r;

@@ -61,8 +61,8 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,

   RealScalar tol2 = tol*tol;
   RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
-  int i = 0;
-  int restarts = 0;
+  Index i = 0;
+  Index restarts = 0;

   while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
   {
@@ -182,7 +182,7 @@ public:
   void _solve_with_guess_impl(const Rhs& b, Dest& x) const
   {
     bool failed = false;
-    for(int j=0; j<b.cols(); ++j)
+    for(Index j=0; j<b.cols(); ++j)
     {
       m_iterations = Base::maxIterations();
       m_error = Base::m_tolerance;
@@ -26,7 +26,7 @@ namespace internal {
 template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
 EIGEN_DONT_INLINE
 void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
-                        const Preconditioner& precond, int& iters,
+                        const Preconditioner& precond, Index& iters,
                         typename Dest::RealScalar& tol_error)
 {
   using std::sqrt;
@@ -36,9 +36,9 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
   typedef Matrix<Scalar,Dynamic,1> VectorType;

   RealScalar tol = tol_error;
-  int maxIters = iters;
+  Index maxIters = iters;

-  int n = mat.cols();
+  Index n = mat.cols();

   VectorType residual = rhs - mat * x; //initial residual

@@ -64,7 +64,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,

   VectorType z(n), tmp(n);
   RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
-  int i = 0;
+  Index i = 0;
   while(i < maxIters)
   {
     tmp.noalias() = mat * p; // the bottleneck of the algorithm
@@ -190,7 +190,7 @@ public:
     m_iterations = Base::maxIterations();
     m_error = Base::m_tolerance;

-    for(int j=0; j<b.cols(); ++j)
+    for(Index j=0; j<b.cols(); ++j)
     {
       m_iterations = Base::maxIterations();
       m_error = Base::m_tolerance;
@@ -145,7 +145,7 @@ public:
     * It is either the value setted by setMaxIterations or, by default,
     * twice the number of columns of the matrix.
     */
-  int maxIterations() const
+  Index maxIterations() const
   {
     return (m_maxIterations<0) ? 2*mp_matrix.cols() : m_maxIterations;
   }
@@ -153,14 +153,14 @@ public:
   /** Sets the max number of iterations.
     * Default is twice the number of columns of the matrix.
     */
-  Derived& setMaxIterations(int maxIters)
+  Derived& setMaxIterations(Index maxIters)
   {
     m_maxIterations = maxIters;
     return derived();
   }

   /** \returns the number of iterations performed during the last solve */
-  int iterations() const
+  Index iterations() const
   {
     eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
     return m_iterations;
@@ -200,11 +200,11 @@ public:
   {
     eigen_assert(rows()==b.rows());

-    int rhsCols = b.cols();
-    int size = b.rows();
+    Index rhsCols = b.cols();
+    Index size = b.rows();
     Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
     Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
-    for(int k=0; k<rhsCols; ++k)
+    for(Index k=0; k<rhsCols; ++k)
     {
       tb = b.col(k);
       tx = derived().solve(tb);
@@ -233,11 +233,11 @@ protected:
   Ref<const MatrixType> mp_matrix;
   Preconditioner m_preconditioner;

-  int m_maxIterations;
+  Index m_maxIterations;
   RealScalar m_tolerance;

   mutable RealScalar m_error;
-  mutable int m_iterations;
+  mutable Index m_iterations;
   mutable ComputationInfo m_info;
   mutable bool m_analysisIsOk, m_factorizationIsOk;
 };
@@ -41,10 +41,10 @@ template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1&
 template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); }

 /* clear w */
-template<typename Index>
-static Index cs_wclear (Index mark, Index lemax, Index *w, Index n)
+template<typename StorageIndex>
+static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
 {
-  Index k;
+  StorageIndex k;
   if(mark < 2 || (mark + lemax < 0))
   {
     for(k = 0; k < n; k++)
@@ -56,10 +56,10 @@ static Index cs_wclear (Index mark, Index lemax, Index *w, Index n)
 }

 /* depth-first search and postorder of a tree rooted at node j */
-template<typename Index>
-Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
+template<typename StorageIndex>
+StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
 {
-  Index i, p, top = 0;
+  StorageIndex i, p, top = 0;
   if(!head || !next || !post || !stack) return (-1); /* check inputs */
   stack[0] = j; /* place j on the stack */
   while (top >= 0) /* while (stack is not empty) */
@@ -87,41 +87,39 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind
  * \returns the permutation P reducing the fill-in of the input matrix \a C
  * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
  * On exit the values of C are destroyed */
-template<typename Scalar, typename Index>
-void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
+template<typename Scalar, typename StorageIndex>
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
 {
   using std::sqrt;

-  Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
-        k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
-        ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
+  StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
+        k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
+        ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;

-  std::size_t h;
+  StorageIndex n = StorageIndex(C.cols());
+  dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */
+  dense = (std::min)(n-2, dense);

-  Index n = C.cols();
-  dense = std::max<Index> (16, Index(10 * sqrt(double(n)))); /* find dense threshold */
-  dense = std::min<Index> (n-2, dense);

-  Index cnz = C.nonZeros();
+  StorageIndex cnz = StorageIndex(C.nonZeros());
   perm.resize(n+1);
   t = cnz + cnz/5 + 2*n; /* add elbow room to C */
   C.resizeNonZeros(t);

   // get workspace
-  ei_declare_aligned_stack_constructed_variable(Index,W,8*(n+1),0);
-  Index* len = W;
-  Index* nv = W + (n+1);
-  Index* next = W + 2*(n+1);
-  Index* head = W + 3*(n+1);
-  Index* elen = W + 4*(n+1);
-  Index* degree = W + 5*(n+1);
-  Index* w = W + 6*(n+1);
-  Index* hhead = W + 7*(n+1);
-  Index* last = perm.indices().data(); /* use P as workspace for last */
+  ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
+  StorageIndex* len = W;
+  StorageIndex* nv = W + (n+1);
+  StorageIndex* next = W + 2*(n+1);
+  StorageIndex* head = W + 3*(n+1);
+  StorageIndex* elen = W + 4*(n+1);
+  StorageIndex* degree = W + 5*(n+1);
+  StorageIndex* w = W + 6*(n+1);
+  StorageIndex* hhead = W + 7*(n+1);
+  StorageIndex* last = perm.indices().data(); /* use P as workspace for last */

   /* --- Initialize quotient graph ---------------------------------------- */
-  Index* Cp = C.outerIndexPtr();
-  Index* Ci = C.innerIndexPtr();
+  StorageIndex* Cp = C.outerIndexPtr();
+  StorageIndex* Ci = C.innerIndexPtr();
   for(k = 0; k < n; k++)
     len[k] = Cp[k+1] - Cp[k];
   len[n] = 0;
@@ -138,7 +136,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
     elen[i] = 0; // Ek of node i is empty
     degree[i] = len[i]; // degree of node i
   }
-  mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
+  mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */
   elen[n] = -2; /* n is a dead element */
   Cp[n] = -1; /* n is a root of assembly tree */
   w[n] = 0; /* n is a dead element */
@@ -253,7 +251,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
     elen[k] = -2; /* k is now an element */

     /* --- Find set differences ----------------------------------------- */
-    mark = internal::cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
+    mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */
     for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
     {
       i = Ci[pk];
@@ -323,7 +321,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
       }
       else
       {
-        degree[i] = std::min<Index> (degree[i], d); /* update degree(i) */
+        degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */
         Ci[pn] = Ci[p3]; /* move first node to end */
         Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
         Ci[p1] = k; /* add k as 1st element in of Ei */
@@ -331,12 +329,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
         h %= n; /* finalize hash of i */
         next[i] = hhead[h]; /* place i in hash bucket */
         hhead[h] = i;
-        last[i] = Index(h); /* save hash of i in last[i] */
+        last[i] = h; /* save hash of i in last[i] */
       }
     } /* scan2 is done */
     degree[k] = dk; /* finalize |Lk| */
-    lemax = std::max<Index>(lemax, dk);
-    mark = internal::cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
+    lemax = std::max<StorageIndex>(lemax, dk);
+    mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */

     /* --- Supernode detection ------------------------------------------ */
     for(pk = pk1; pk < pk2; pk++)
@@ -384,12 +382,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
       if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
       nv[i] = nvi; /* restore nv[i] */
       d = degree[i] + dk - nvi; /* compute external degree(i) */
-      d = std::min<Index> (d, n - nel - nvi);
+      d = std::min<StorageIndex> (d, n - nel - nvi);
       if(head[d] != -1) last[head[d]] = i;
       next[i] = head[d]; /* put i back in degree list */
       last[i] = -1;
       head[d] = i;
-      mindeg = std::min<Index> (mindeg, d); /* find new minimum degree */
+      mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */
       degree[i] = d;
       Ci[p++] = i; /* place i in Lk */
     }
@@ -422,7 +420,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
   }
   for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
   {
-    if(Cp[i] == -1) k = internal::cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
+    if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
   }

   perm.indices().conservativeResize(n);
@ -135,54 +135,54 @@ namespace internal {
|
||||
/* ========================================================================== */
|
||||
|
||||
// == Row and Column structures ==
|
||||
template <typename Index>
|
||||
template <typename IndexType>
|
||||
struct colamd_col
|
||||
{
|
||||
Index start ; /* index for A of first row in this column, or DEAD */
|
||||
IndexType start ; /* index for A of first row in this column, or DEAD */
|
||||
/* if column is dead */
|
||||
Index length ; /* number of rows in this column */
|
||||
IndexType length ; /* number of rows in this column */
|
||||
union
|
||||
{
|
||||
Index thickness ; /* number of original columns represented by this */
|
||||
IndexType thickness ; /* number of original columns represented by this */
|
||||
/* col, if the column is alive */
|
||||
Index parent ; /* parent in parent tree super-column structure, if */
|
||||
IndexType parent ; /* parent in parent tree super-column structure, if */
|
||||
/* the column is dead */
|
||||
} shared1 ;
|
||||
union
|
||||
{
|
||||
Index score ; /* the score used to maintain heap, if col is alive */
|
||||
Index order ; /* pivot ordering of this column, if col is dead */
|
||||
IndexType score ; /* the score used to maintain heap, if col is alive */
|
||||
IndexType order ; /* pivot ordering of this column, if col is dead */
|
||||
} shared2 ;
|
||||
union
|
||||
{
|
||||
Index headhash ; /* head of a hash bucket, if col is at the head of */
|
||||
IndexType headhash ; /* head of a hash bucket, if col is at the head of */
|
||||
/* a degree list */
|
||||
Index hash ; /* hash value, if col is not in a degree list */
|
||||
Index prev ; /* previous column in degree list, if col is in a */
|
||||
IndexType hash ; /* hash value, if col is not in a degree list */
|
||||
IndexType prev ; /* previous column in degree list, if col is in a */
|
||||
/* degree list (but not at the head of a degree list) */
|
||||
} shared3 ;
|
||||
union
|
||||
{
|
||||
Index degree_next ; /* next column, if col is in a degree list */
|
||||
Index hash_next ; /* next column, if col is in a hash list */
|
||||
IndexType degree_next ; /* next column, if col is in a degree list */
|
||||
IndexType hash_next ; /* next column, if col is in a hash list */
|
||||
} shared4 ;
|
||||
|
||||
};
|
||||
|
||||
template <typename Index>
|
||||
template <typename IndexType>
|
||||
struct Colamd_Row
|
||||
{
|
||||
Index start ; /* index for A of first col in this row */
|
||||
Index length ; /* number of principal columns in this row */
|
||||
IndexType start ; /* index for A of first col in this row */
|
||||
IndexType length ; /* number of principal columns in this row */
|
||||
union
|
||||
{
|
||||
Index degree ; /* number of principal & non-principal columns in row */
|
||||
Index p ; /* used as a row pointer in init_rows_cols () */
|
||||
IndexType degree ; /* number of principal & non-principal columns in row */
|
||||
IndexType p ; /* used as a row pointer in init_rows_cols () */
|
||||
} shared1 ;
|
||||
union
|
||||
{
|
||||
Index mark ; /* for computing set differences and marking dead rows*/
|
||||
Index first_column ;/* first column in row (used in garbage collection) */
|
||||
IndexType mark ; /* for computing set differences and marking dead rows*/
|
||||
IndexType first_column ;/* first column in row (used in garbage collection) */
|
||||
} shared2 ;
|
||||
|
||||
};
|
||||
@ -202,38 +202,38 @@ struct Colamd_Row
|
||||
|
||||
This macro is not needed when using symamd.
|
||||
|
||||
Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
|
||||
Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid
|
||||
gcc -pedantic warning messages.
|
||||
*/
|
||||
template <typename Index>
|
||||
inline Index colamd_c(Index n_col)
|
||||
{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
|
||||
template <typename IndexType>
|
||||
inline IndexType colamd_c(IndexType n_col)
|
||||
{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }
|
||||
|
||||
template <typename Index>
|
||||
inline Index colamd_r(Index n_row)
|
||||
{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
|
||||
template <typename IndexType>
|
||||
inline IndexType colamd_r(IndexType n_row)
|
||||
{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }
|
||||
|
||||
// Prototypes of non-user callable routines
|
||||
template <typename Index>
|
||||
static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> col [], Index A [], Index p [], Index stats[COLAMD_STATS] );
|
||||
template <typename IndexType>
|
||||
static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] );
|
||||
|
||||
template <typename Index>
|
||||
static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
|
||||
template <typename IndexType>
|
||||
static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
|
||||
|
||||
template <typename Index>
|
||||
static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
|
||||
template <typename IndexType>
|
||||
static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
|
||||
|
||||
template <typename Index>
|
||||
static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
|
||||
template <typename IndexType>
|
||||
static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);
|
||||
|
||||
template <typename Index>
|
||||
static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
|
||||
template <typename IndexType>
|
||||
static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
|
||||
|
||||
template <typename Index>
|
||||
static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
|
||||
template <typename IndexType>
|
||||
static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;
|
||||
|
||||
template <typename Index>
|
||||
static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
|
||||
template <typename IndexType>
|
||||
static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;
|
||||
|
||||
/* === No debugging ========================================================= */
|
||||
|
||||
@ -260,8 +260,8 @@ static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
|
||||
* \param n_col number of columns in A
|
||||
* \return recommended value of Alen for use by colamd
|
||||
*/
|
||||
template <typename Index>
|
||||
inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
|
||||
template <typename IndexType>
|
||||
inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
|
||||
{
|
||||
if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
|
||||
return (-1);
|
||||
@ -325,22 +325,22 @@ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
|
||||
* \param knobs parameter settings for colamd
|
||||
* \param stats colamd output statistics and error codes
|
||||
*/
|
||||
template <typename Index>
|
||||
static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
|
||||
template <typename IndexType>
|
||||
static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index i ; /* loop index */
|
||||
Index nnz ; /* nonzeros in A */
|
||||
Index Row_size ; /* size of Row [], in integers */
|
||||
Index Col_size ; /* size of Col [], in integers */
|
||||
Index need ; /* minimum required length of A */
|
||||
Colamd_Row<Index> *Row ; /* pointer into A of Row [0..n_row] array */
|
||||
colamd_col<Index> *Col ; /* pointer into A of Col [0..n_col] array */
|
||||
Index n_col2 ; /* number of non-dense, non-empty columns */
|
||||
Index n_row2 ; /* number of non-dense, non-empty rows */
|
||||
Index ngarbage ; /* number of garbage collections performed */
|
||||
Index max_deg ; /* maximum row degree */
|
||||
IndexType i ; /* loop index */
|
||||
IndexType nnz ; /* nonzeros in A */
|
||||
IndexType Row_size ; /* size of Row [], in integers */
|
||||
IndexType Col_size ; /* size of Col [], in integers */
|
||||
IndexType need ; /* minimum required length of A */
|
||||
Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
|
||||
colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
|
||||
IndexType n_col2 ; /* number of non-dense, non-empty columns */
|
||||
IndexType n_row2 ; /* number of non-dense, non-empty rows */
|
||||
IndexType ngarbage ; /* number of garbage collections performed */
|
||||
IndexType max_deg ; /* maximum row degree */
|
||||
double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
|
||||
|
||||
|
||||
@ -431,8 +431,8 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
|
||||
}
|
||||
|
||||
Alen -= Col_size + Row_size ;
|
||||
Col = (colamd_col<Index> *) &A [Alen] ;
|
||||
Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
|
||||
Col = (colamd_col<IndexType> *) &A [Alen] ;
|
||||
Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;
|
||||
|
||||
/* === Construct the row and column data structures ===================== */
|
||||
|
||||
@ -485,29 +485,29 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
|
||||
column form of the matrix. Returns false if the matrix is invalid,
|
||||
true otherwise. Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
static Index init_rows_cols /* returns true if OK, or false otherwise */
|
||||
template <typename IndexType>
|
||||
static IndexType init_rows_cols /* returns true if OK, or false otherwise */
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_row, /* number of rows of A */
|
||||
Index n_col, /* number of columns of A */
|
||||
Colamd_Row<Index> Row [], /* of size n_row+1 */
|
||||
colamd_col<Index> Col [], /* of size n_col+1 */
|
||||
Index A [], /* row indices of A, of size Alen */
|
||||
Index p [], /* pointers to columns in A, of size n_col+1 */
|
||||
Index stats [COLAMD_STATS] /* colamd statistics */
|
||||
IndexType n_row, /* number of rows of A */
|
||||
IndexType n_col, /* number of columns of A */
|
||||
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
|
||||
colamd_col<IndexType> Col [], /* of size n_col+1 */
|
||||
IndexType A [], /* row indices of A, of size Alen */
|
||||
IndexType p [], /* pointers to columns in A, of size n_col+1 */
|
||||
IndexType stats [COLAMD_STATS] /* colamd statistics */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index col ; /* a column index */
|
||||
Index row ; /* a row index */
|
||||
Index *cp ; /* a column pointer */
|
||||
Index *cp_end ; /* a pointer to the end of a column */
|
||||
Index *rp ; /* a row pointer */
|
||||
Index *rp_end ; /* a pointer to the end of a row */
|
||||
Index last_row ; /* previous row */
|
||||
IndexType col ; /* a column index */
|
||||
IndexType row ; /* a row index */
|
||||
IndexType *cp ; /* a column pointer */
|
||||
IndexType *cp_end ; /* a pointer to the end of a column */
|
||||
IndexType *rp ; /* a row pointer */
|
||||
IndexType *rp_end ; /* a pointer to the end of a row */
|
||||
IndexType last_row ; /* previous row */
|
||||
|
||||
/* === Initialize columns, and check column pointers ==================== */
|
||||
|
||||
@ -701,40 +701,40 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */
|
||||
Kills dense or empty columns and rows, calculates an initial score for
|
||||
each column, and places all columns in the degree lists. Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
template <typename IndexType>
|
||||
static void init_scoring
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_row, /* number of rows of A */
|
||||
Index n_col, /* number of columns of A */
|
||||
Colamd_Row<Index> Row [], /* of size n_row+1 */
|
||||
colamd_col<Index> Col [], /* of size n_col+1 */
|
||||
Index A [], /* column form and row form of A */
|
||||
Index head [], /* of size n_col+1 */
|
||||
IndexType n_row, /* number of rows of A */
|
||||
IndexType n_col, /* number of columns of A */
|
||||
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
|
||||
colamd_col<IndexType> Col [], /* of size n_col+1 */
|
||||
IndexType A [], /* column form and row form of A */
|
||||
IndexType head [], /* of size n_col+1 */
|
||||
double knobs [COLAMD_KNOBS],/* parameters */
|
||||
Index *p_n_row2, /* number of non-dense, non-empty rows */
|
||||
Index *p_n_col2, /* number of non-dense, non-empty columns */
|
||||
Index *p_max_deg /* maximum row degree */
|
||||
IndexType *p_n_row2, /* number of non-dense, non-empty rows */
|
||||
IndexType *p_n_col2, /* number of non-dense, non-empty columns */
|
||||
IndexType *p_max_deg /* maximum row degree */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index c ; /* a column index */
|
||||
Index r, row ; /* a row index */
|
||||
Index *cp ; /* a column pointer */
|
||||
Index deg ; /* degree of a row or column */
|
||||
Index *cp_end ; /* a pointer to the end of a column */
|
||||
Index *new_cp ; /* new column pointer */
|
||||
Index col_length ; /* length of pruned column */
|
||||
Index score ; /* current column score */
|
||||
Index n_col2 ; /* number of non-dense, non-empty columns */
|
||||
Index n_row2 ; /* number of non-dense, non-empty rows */
|
||||
Index dense_row_count ; /* remove rows with more entries than this */
|
||||
Index dense_col_count ; /* remove cols with more entries than this */
|
||||
Index min_score ; /* smallest column score */
|
||||
Index max_deg ; /* maximum row degree */
|
||||
Index next_col ; /* Used to add to degree list.*/
|
||||
IndexType c ; /* a column index */
|
||||
IndexType r, row ; /* a row index */
|
||||
IndexType *cp ; /* a column pointer */
|
||||
IndexType deg ; /* degree of a row or column */
|
||||
IndexType *cp_end ; /* a pointer to the end of a column */
|
||||
IndexType *new_cp ; /* new column pointer */
|
||||
IndexType col_length ; /* length of pruned column */
|
||||
IndexType score ; /* current column score */
|
||||
IndexType n_col2 ; /* number of non-dense, non-empty columns */
|
||||
IndexType n_row2 ; /* number of non-dense, non-empty rows */
|
||||
IndexType dense_row_count ; /* remove rows with more entries than this */
|
||||
IndexType dense_col_count ; /* remove cols with more entries than this */
|
||||
IndexType min_score ; /* smallest column score */
|
||||
IndexType max_deg ; /* maximum row degree */
|
||||
IndexType next_col ; /* Used to add to degree list.*/
|
||||
|
||||
|
||||
/* === Extract knobs ==================================================== */
|
||||
@ -845,7 +845,7 @@ static void init_scoring
|
||||
score = COLAMD_MIN (score, n_col) ;
|
||||
}
|
||||
/* determine pruned column length */
|
||||
col_length = (Index) (new_cp - &A [Col [c].start]) ;
|
||||
col_length = (IndexType) (new_cp - &A [Col [c].start]) ;
|
||||
if (col_length == 0)
|
||||
{
|
||||
/* a newly-made null column (all rows in this col are "dense" */
|
||||
@ -938,56 +938,56 @@ static void init_scoring
|
||||
(no supercolumns on input). Uses a minimum approximate column minimum
|
||||
degree ordering method. Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
static Index find_ordering /* return the number of garbage collections */
|
||||
template <typename IndexType>
|
||||
static IndexType find_ordering /* return the number of garbage collections */
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_row, /* number of rows of A */
|
||||
Index n_col, /* number of columns of A */
|
||||
Index Alen, /* size of A, 2*nnz + n_col or larger */
|
||||
Colamd_Row<Index> Row [], /* of size n_row+1 */
|
||||
colamd_col<Index> Col [], /* of size n_col+1 */
|
||||
Index A [], /* column form and row form of A */
|
||||
Index head [], /* of size n_col+1 */
|
||||
Index n_col2, /* Remaining columns to order */
|
||||
Index max_deg, /* Maximum row degree */
|
||||
Index pfree /* index of first free slot (2*nnz on entry) */
|
||||
IndexType n_row, /* number of rows of A */
|
||||
IndexType n_col, /* number of columns of A */
|
||||
IndexType Alen, /* size of A, 2*nnz + n_col or larger */
|
||||
Colamd_Row<IndexType> Row [], /* of size n_row+1 */
|
||||
colamd_col<IndexType> Col [], /* of size n_col+1 */
|
||||
IndexType A [], /* column form and row form of A */
|
||||
IndexType head [], /* of size n_col+1 */
|
||||
IndexType n_col2, /* Remaining columns to order */
|
||||
IndexType max_deg, /* Maximum row degree */
|
||||
IndexType pfree /* index of first free slot (2*nnz on entry) */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index k ; /* current pivot ordering step */
|
||||
Index pivot_col ; /* current pivot column */
|
||||
Index *cp ; /* a column pointer */
|
||||
Index *rp ; /* a row pointer */
|
||||
Index pivot_row ; /* current pivot row */
|
||||
Index *new_cp ; /* modified column pointer */
|
||||
Index *new_rp ; /* modified row pointer */
|
||||
Index pivot_row_start ; /* pointer to start of pivot row */
|
||||
Index pivot_row_degree ; /* number of columns in pivot row */
|
||||
Index pivot_row_length ; /* number of supercolumns in pivot row */
|
||||
Index pivot_col_score ; /* score of pivot column */
|
||||
Index needed_memory ; /* free space needed for pivot row */
|
||||
Index *cp_end ; /* pointer to the end of a column */
|
||||
Index *rp_end ; /* pointer to the end of a row */
|
||||
Index row ; /* a row index */
|
||||
Index col ; /* a column index */
|
||||
Index max_score ; /* maximum possible score */
|
||||
Index cur_score ; /* score of current column */
|
||||
IndexType k ; /* current pivot ordering step */
|
||||
IndexType pivot_col ; /* current pivot column */
|
||||
IndexType *cp ; /* a column pointer */
|
||||
IndexType *rp ; /* a row pointer */
|
||||
IndexType pivot_row ; /* current pivot row */
|
||||
IndexType *new_cp ; /* modified column pointer */
|
||||
IndexType *new_rp ; /* modified row pointer */
|
||||
IndexType pivot_row_start ; /* pointer to start of pivot row */
|
||||
IndexType pivot_row_degree ; /* number of columns in pivot row */
|
||||
IndexType pivot_row_length ; /* number of supercolumns in pivot row */
|
||||
IndexType pivot_col_score ; /* score of pivot column */
|
||||
IndexType needed_memory ; /* free space needed for pivot row */
|
||||
IndexType *cp_end ; /* pointer to the end of a column */
|
||||
IndexType *rp_end ; /* pointer to the end of a row */
|
||||
IndexType row ; /* a row index */
|
||||
IndexType col ; /* a column index */
|
||||
IndexType max_score ; /* maximum possible score */
|
||||
IndexType cur_score ; /* score of current column */
|
||||
unsigned int hash ; /* hash value for supernode detection */
|
||||
Index head_column ; /* head of hash bucket */
|
||||
Index first_col ; /* first column in hash bucket */
|
||||
Index tag_mark ; /* marker value for mark array */
|
||||
Index row_mark ; /* Row [row].shared2.mark */
|
||||
Index set_difference ; /* set difference size of row with pivot row */
|
||||
Index min_score ; /* smallest column score */
|
||||
Index col_thickness ; /* "thickness" (no. of columns in a supercol) */
|
||||
Index max_mark ; /* maximum value of tag_mark */
|
||||
Index pivot_col_thickness ; /* number of columns represented by pivot col */
|
||||
Index prev_col ; /* Used by Dlist operations. */
|
||||
Index next_col ; /* Used by Dlist operations. */
|
||||
Index ngarbage ; /* number of garbage collections performed */
|
||||
IndexType head_column ; /* head of hash bucket */
|
||||
IndexType first_col ; /* first column in hash bucket */
|
||||
IndexType tag_mark ; /* marker value for mark array */
|
||||
IndexType row_mark ; /* Row [row].shared2.mark */
|
||||
IndexType set_difference ; /* set difference size of row with pivot row */
|
||||
IndexType min_score ; /* smallest column score */
|
||||
IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */
|
||||
IndexType max_mark ; /* maximum value of tag_mark */
|
||||
IndexType pivot_col_thickness ; /* number of columns represented by pivot col */
|
||||
IndexType prev_col ; /* Used by Dlist operations. */
|
||||
IndexType next_col ; /* Used by Dlist operations. */
|
||||
IndexType ngarbage ; /* number of garbage collections performed */
|
||||
|
||||
|
||||
/* === Initialization and clear mark ==================================== */
|
||||
@ -1277,7 +1277,7 @@ static Index find_ordering /* return the number of garbage collections */
|
||||
}
|
||||
|
||||
/* recompute the column's length */
|
||||
Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
|
||||
Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ;
|
||||
|
||||
/* === Further mass elimination ================================= */
|
||||
|
||||
@ -1325,7 +1325,7 @@ static Index find_ordering /* return the number of garbage collections */
|
||||
Col [col].shared4.hash_next = first_col ;
|
||||
|
||||
/* save hash function in Col [col].shared3.hash */
|
||||
Col [col].shared3.hash = (Index) hash ;
|
||||
Col [col].shared3.hash = (IndexType) hash ;
|
||||
COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
|
||||
}
|
||||
}
|
||||
@ -1420,7 +1420,7 @@ static Index find_ordering /* return the number of garbage collections */
|
||||
/* update pivot row length to reflect any cols that were killed */
|
||||
/* during super-col detection and mass elimination */
|
||||
Row [pivot_row].start = pivot_row_start ;
|
||||
Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
|
||||
Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ;
|
||||
Row [pivot_row].shared1.degree = pivot_row_degree ;
|
||||
Row [pivot_row].shared2.mark = 0 ;
|
||||
/* pivot row is no longer dead */
|
||||
@ -1449,22 +1449,22 @@ static Index find_ordering /* return the number of garbage collections */
|
||||
taken by this routine is O (n_col), that is, linear in the number of
|
||||
columns. Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
template <typename IndexType>
|
||||
static inline void order_children
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_col, /* number of columns of A */
|
||||
colamd_col<Index> Col [], /* of size n_col+1 */
|
||||
Index p [] /* p [0 ... n_col-1] is the column permutation*/
|
||||
IndexType n_col, /* number of columns of A */
|
||||
colamd_col<IndexType> Col [], /* of size n_col+1 */
|
||||
IndexType p [] /* p [0 ... n_col-1] is the column permutation*/
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index i ; /* loop counter for all columns */
|
||||
Index c ; /* column index */
|
||||
Index parent ; /* index of column's parent */
|
||||
Index order ; /* column's order */
|
||||
IndexType i ; /* loop counter for all columns */
|
||||
IndexType c ; /* column index */
|
||||
IndexType parent ; /* index of column's parent */
|
||||
IndexType order ; /* column's order */
|
||||
|
||||
/* === Order each non-principal column ================================== */
|
||||
|
||||
@ -1550,33 +1550,33 @@ static inline void order_children
|
||||
just been computed in the approximate degree computation.
|
||||
Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
template <typename IndexType>
|
||||
static void detect_super_cols
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
colamd_col<Index> Col [], /* of size n_col+1 */
|
||||
Index A [], /* row indices of A */
|
||||
Index head [], /* head of degree lists and hash buckets */
|
||||
Index row_start, /* pointer to set of columns to check */
|
||||
Index row_length /* number of columns to check */
|
||||
colamd_col<IndexType> Col [], /* of size n_col+1 */
|
||||
IndexType A [], /* row indices of A */
|
||||
IndexType head [], /* head of degree lists and hash buckets */
|
||||
IndexType row_start, /* pointer to set of columns to check */
|
||||
IndexType row_length /* number of columns to check */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index hash ; /* hash value for a column */
|
||||
Index *rp ; /* pointer to a row */
|
||||
Index c ; /* a column index */
|
||||
Index super_c ; /* column index of the column to absorb into */
|
||||
Index *cp1 ; /* column pointer for column super_c */
|
||||
Index *cp2 ; /* column pointer for column c */
|
||||
Index length ; /* length of column super_c */
|
||||
Index prev_c ; /* column preceding c in hash bucket */
|
||||
Index i ; /* loop counter */
|
||||
Index *rp_end ; /* pointer to the end of the row */
|
||||
Index col ; /* a column index in the row to check */
|
||||
Index head_column ; /* first column in hash bucket or degree list */
|
||||
Index first_col ; /* first column in hash bucket */
|
||||
IndexType hash ; /* hash value for a column */
|
||||
IndexType *rp ; /* pointer to a row */
|
||||
IndexType c ; /* a column index */
|
||||
IndexType super_c ; /* column index of the column to absorb into */
|
||||
IndexType *cp1 ; /* column pointer for column super_c */
|
||||
IndexType *cp2 ; /* column pointer for column c */
|
||||
IndexType length ; /* length of column super_c */
|
||||
IndexType prev_c ; /* column preceding c in hash bucket */
|
||||
IndexType i ; /* loop counter */
|
||||
IndexType *rp_end ; /* pointer to the end of the row */
|
||||
IndexType col ; /* a column index in the row to check */
|
||||
IndexType head_column ; /* first column in hash bucket or degree list */
|
||||
IndexType first_col ; /* first column in hash bucket */
|
||||
|
||||
/* === Consider each column in the row ================================== */
|
||||
|
||||
@ -1701,27 +1701,27 @@ static void detect_super_cols
|
||||
itself linear in the number of nonzeros in the input matrix.
|
||||
Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
static Index garbage_collection /* returns the new value of pfree */
|
||||
template <typename IndexType>
|
||||
static IndexType garbage_collection /* returns the new value of pfree */
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_row, /* number of rows */
|
||||
Index n_col, /* number of columns */
|
||||
Colamd_Row<Index> Row [], /* row info */
|
||||
colamd_col<Index> Col [], /* column info */
|
||||
Index A [], /* A [0 ... Alen-1] holds the matrix */
|
||||
Index *pfree /* &A [0] ... pfree is in use */
|
||||
IndexType n_row, /* number of rows */
|
||||
IndexType n_col, /* number of columns */
|
||||
Colamd_Row<IndexType> Row [], /* row info */
|
||||
colamd_col<IndexType> Col [], /* column info */
|
||||
IndexType A [], /* A [0 ... Alen-1] holds the matrix */
|
||||
IndexType *pfree /* &A [0] ... pfree is in use */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index *psrc ; /* source pointer */
|
||||
Index *pdest ; /* destination pointer */
|
||||
Index j ; /* counter */
|
||||
Index r ; /* a row index */
|
||||
Index c ; /* a column index */
|
||||
Index length ; /* length of a row or column */
|
||||
IndexType *psrc ; /* source pointer */
|
||||
IndexType *pdest ; /* destination pointer */
|
||||
IndexType j ; /* counter */
|
||||
IndexType r ; /* a row index */
|
||||
IndexType c ; /* a column index */
|
||||
IndexType length ; /* length of a row or column */
|
||||
|
||||
/* === Defragment the columns =========================================== */
|
||||
|
||||
@ -1734,7 +1734,7 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
|
||||
/* move and compact the column */
|
||||
COLAMD_ASSERT (pdest <= psrc) ;
|
||||
Col [c].start = (Index) (pdest - &A [0]) ;
|
||||
Col [c].start = (IndexType) (pdest - &A [0]) ;
|
||||
length = Col [c].length ;
|
||||
for (j = 0 ; j < length ; j++)
|
||||
{
|
||||
@ -1744,7 +1744,7 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
*pdest++ = r ;
|
||||
}
|
||||
}
|
||||
Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
|
||||
Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1791,7 +1791,7 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
|
||||
/* move and compact the row */
|
||||
COLAMD_ASSERT (pdest <= psrc) ;
|
||||
Row [r].start = (Index) (pdest - &A [0]) ;
|
||||
Row [r].start = (IndexType) (pdest - &A [0]) ;
|
||||
length = Row [r].length ;
|
||||
for (j = 0 ; j < length ; j++)
|
||||
{
|
||||
@ -1801,7 +1801,7 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
*pdest++ = c ;
|
||||
}
|
||||
}
|
||||
Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
|
||||
Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ;
|
||||
|
||||
}
|
||||
}
|
||||
@ -1810,7 +1810,7 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
|
||||
/* === Return the new value of pfree ==================================== */
|
||||
|
||||
return ((Index) (pdest - &A [0])) ;
|
||||
return ((IndexType) (pdest - &A [0])) ;
|
||||
}
|
||||
|
||||
|
||||
@ -1822,18 +1822,18 @@ static Index garbage_collection /* returns the new value of pfree */
|
||||
Clears the Row [].shared2.mark array, and returns the new tag_mark.
|
||||
Return value is the new tag_mark. Not user-callable.
|
||||
*/
|
||||
template <typename Index>
|
||||
static inline Index clear_mark /* return the new value for tag_mark */
|
||||
template <typename IndexType>
|
||||
static inline IndexType clear_mark /* return the new value for tag_mark */
|
||||
(
|
||||
/* === Parameters ======================================================= */
|
||||
|
||||
Index n_row, /* number of rows in A */
|
||||
Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
|
||||
IndexType n_row, /* number of rows in A */
|
||||
Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
|
||||
)
|
||||
{
|
||||
/* === Local variables ================================================== */
|
||||
|
||||
Index r ;
|
||||
IndexType r ;
|
||||
|
||||
for (r = 0 ; r < n_row ; r++)
|
||||
{
|
||||
|
@ -111,12 +111,12 @@ class NaturalOrdering
|
||||
* Functor computing the \em column \em approximate \em minimum \em degree ordering
|
||||
* The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
|
||||
*/
|
||||
template<typename Index>
|
||||
template<typename StorageIndex>
|
||||
class COLAMDOrdering
|
||||
{
|
||||
public:
|
||||
typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
|
||||
typedef Matrix<Index, Dynamic, 1> IndexVector;
|
||||
typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
|
||||
typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
|
||||
|
||||
/** Compute the permutation vector \a perm form the sparse matrix \a mat
|
||||
* \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
|
||||
@ -126,26 +126,26 @@ class COLAMDOrdering
|
||||
{
|
||||
eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
|
||||
|
||||
Index m = mat.rows();
|
||||
Index n = mat.cols();
|
||||
Index nnz = mat.nonZeros();
|
||||
StorageIndex m = StorageIndex(mat.rows());
|
||||
StorageIndex n = StorageIndex(mat.cols());
|
||||
StorageIndex nnz = StorageIndex(mat.nonZeros());
|
||||
// Get the recommended value of Alen to be used by colamd
|
||||
Index Alen = internal::colamd_recommended(nnz, m, n);
|
||||
StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
|
||||
// Set the default parameters
|
||||
double knobs [COLAMD_KNOBS];
|
||||
Index stats [COLAMD_STATS];
|
||||
StorageIndex stats [COLAMD_STATS];
|
||||
internal::colamd_set_defaults(knobs);
|
||||
|
||||
IndexVector p(n+1), A(Alen);
|
||||
for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
|
||||
for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
|
||||
for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
|
||||
for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
|
||||
// Call Colamd routine to compute the ordering
|
||||
Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
|
||||
StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
|
||||
EIGEN_UNUSED_VARIABLE(info);
|
||||
eigen_assert( info && "COLAMD failed " );
|
||||
|
||||
perm.resize(n);
|
||||
for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
|
||||
for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -308,7 +308,7 @@ void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
|
||||
if(m_size>0)
|
||||
clean();
|
||||
|
||||
m_size = mat.rows();
|
||||
m_size = internal::convert_index<int>(mat.rows());
|
||||
m_perm.resize(m_size);
|
||||
m_invp.resize(m_size);
|
||||
|
||||
@ -337,7 +337,7 @@ void PastixBase<Derived>::factorize(ColSpMatrix& mat)
|
||||
eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
|
||||
m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
|
||||
m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
|
||||
m_size = mat.rows();
|
||||
m_size = internal::convert_index<int>(mat.rows());
|
||||
|
||||
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
|
||||
mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
|
||||
@ -373,7 +373,7 @@ bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x
|
||||
m_iparm[IPARM_START_TASK] = API_TASK_SOLVE;
|
||||
m_iparm[IPARM_END_TASK] = API_TASK_REFINE;
|
||||
|
||||
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0,
|
||||
internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
|
||||
m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
|
||||
}
|
||||
|
||||
|
@ -202,7 +202,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
|
||||
void factorize(const MatrixType& a)
|
||||
{
|
||||
eigen_assert(a.rows()==a.cols());
|
||||
int size = a.cols();
|
||||
Index size = a.cols();
|
||||
CholMatrixType tmp(size,size);
|
||||
ConstCholMatrixPtr pmat;
|
||||
|
||||
@ -226,7 +226,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
|
||||
void analyzePattern(const MatrixType& a, bool doLDLT)
|
||||
{
|
||||
eigen_assert(a.rows()==a.cols());
|
||||
int size = a.cols();
|
||||
Index size = a.cols();
|
||||
CholMatrixType tmp(size,size);
|
||||
ConstCholMatrixPtr pmat;
|
||||
ordering(a, pmat, tmp);
|
||||
|
@ -50,14 +50,14 @@ namespace Eigen {
|
||||
template<typename Derived>
|
||||
void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
|
||||
{
|
||||
const Index size = ap.rows();
|
||||
const StorageIndex size = StorageIndex(ap.rows());
|
||||
m_matrix.resize(size, size);
|
||||
m_parent.resize(size);
|
||||
m_nonZerosPerCol.resize(size);
|
||||
|
||||
ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0);
|
||||
ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
|
||||
|
||||
for(Index k = 0; k < size; ++k)
|
||||
for(StorageIndex k = 0; k < size; ++k)
|
||||
{
|
||||
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
|
||||
m_parent[k] = -1; /* parent of k is not yet known */
|
||||
@ -65,7 +65,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix
|
||||
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
|
||||
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
|
||||
{
|
||||
Index i = it.index();
|
||||
StorageIndex i = it.index();
|
||||
if(i < k)
|
||||
{
|
||||
/* follow path from i to root of etree, stop at flagged node */
|
||||
@ -84,7 +84,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix
|
||||
/* construct Lp index array from m_nonZerosPerCol column counts */
|
||||
StorageIndex* Lp = m_matrix.outerIndexPtr();
|
||||
Lp[0] = 0;
|
||||
for(Index k = 0; k < size; ++k)
|
||||
for(StorageIndex k = 0; k < size; ++k)
|
||||
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
|
||||
|
||||
m_matrix.resizeNonZeros(Lp[size]);
|
||||
@ -104,10 +104,10 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
|
||||
|
||||
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
|
||||
eigen_assert(ap.rows()==ap.cols());
|
||||
const Index size = ap.rows();
|
||||
eigen_assert(m_parent.size()==size);
|
||||
eigen_assert(m_nonZerosPerCol.size()==size);
|
||||
eigen_assert(m_parent.size()==ap.rows());
|
||||
eigen_assert(m_nonZerosPerCol.size()==ap.rows());
|
||||
|
||||
const StorageIndex size = StorageIndex(ap.rows());
|
||||
const StorageIndex* Lp = m_matrix.outerIndexPtr();
|
||||
StorageIndex* Li = m_matrix.innerIndexPtr();
|
||||
Scalar* Lx = m_matrix.valuePtr();
|
||||
@ -119,16 +119,16 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
|
||||
bool ok = true;
|
||||
m_diag.resize(DoLDLT ? size : 0);
|
||||
|
||||
for(Index k = 0; k < size; ++k)
|
||||
for(StorageIndex k = 0; k < size; ++k)
|
||||
{
|
||||
// compute nonzero pattern of kth row of L, in topological order
|
||||
y[k] = 0.0; // Y(0:k) is now all zero
|
||||
Index top = size; // stack for pattern is empty
|
||||
StorageIndex top = size; // stack for pattern is empty
|
||||
tags[k] = k; // mark node k as visited
|
||||
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
|
||||
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
|
||||
{
|
||||
Index i = it.index();
|
||||
StorageIndex i = it.index();
|
||||
if(i <= k)
|
||||
{
|
||||
y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
|
||||
|
@ -131,7 +131,7 @@ class CompressedStorage
|
||||
|
||||
/** \returns the stored value at index \a key
|
||||
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */
|
||||
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
|
||||
inline Scalar at(StorageIndex key, const Scalar& defaultValue = Scalar(0)) const
|
||||
{
|
||||
if (m_size==0)
|
||||
return defaultValue;
|
||||
@ -144,7 +144,7 @@ class CompressedStorage
|
||||
}
|
||||
|
||||
/** Like at(), but the search is performed in the range [start,end) */
|
||||
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const
|
||||
inline Scalar atInRange(Index start, Index end, StorageIndex key, const Scalar &defaultValue = Scalar(0)) const
|
||||
{
|
||||
if (start>=end)
|
||||
return defaultValue;
|
||||
@ -159,7 +159,7 @@ class CompressedStorage
|
||||
/** \returns a reference to the value at index \a key
|
||||
* If the value does not exist, then the value \a defaultValue is inserted
|
||||
* such that the keys are sorted. */
|
||||
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
|
||||
inline Scalar& atWithInsertion(StorageIndex key, const Scalar& defaultValue = Scalar(0))
|
||||
{
|
||||
Index id = searchLowerIndex(0,m_size,key);
|
||||
if (id>=m_size || m_indices[id]!=key)
|
||||
@ -189,7 +189,7 @@ class CompressedStorage
|
||||
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
|
||||
}
|
||||
m_size++;
|
||||
m_indices[id] = convert_index<StorageIndex>(key);
|
||||
m_indices[id] = key;
|
||||
m_values[id] = defaultValue;
|
||||
}
|
||||
return m_values[id];
|
||||
|
@ -149,10 +149,10 @@ public:
|
||||
// update innerNonZeros
|
||||
if(!m_matrix.isCompressed())
|
||||
for(Index j=0; j<m_outerSize.value(); ++j)
|
||||
matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();
|
||||
matrix.innerNonZeroPtr()[m_outerStart+j] = StorageIndex(tmp.innerVector(j).nonZeros());
|
||||
|
||||
// update outer index pointers
|
||||
StorageIndex p = start;
|
||||
StorageIndex p = StorageIndex(start);
|
||||
for(Index k=0; k<m_outerSize.value(); ++k)
|
||||
{
|
||||
matrix.outerIndexPtr()[m_outerStart+k] = p;
|
||||
|
@ -61,27 +61,26 @@ template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{
typedef typename MatrixType::StorageIndex StorageIndex;
Index nc = mat.cols(); // Number of columns
Index m = mat.rows();
Index diagSize = (std::min)(nc,m);
StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
StorageIndex m = convert_index<StorageIndex>(mat.rows());
StorageIndex diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree
root.setZero();
IndexVector pp(nc); // disjoint sets
pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols());
//Compute first nonzero column in each row
StorageIndex row,col;
firstRowElt.resize(m);
firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag;
for (col = 0; col < nc; col++)
for (StorageIndex col = 0; col < nc; col++)
{
Index pcol = col;
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
{
row = it.row();
Index row = it.row();
firstRowElt(row) = (std::min)(firstRowElt(row), col);
}
}
@ -89,8 +88,8 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */
Index rset, cset, rroot;
for (col = 0; col < nc; col++)
StorageIndex rset, cset, rroot;
for (StorageIndex col = 0; col < nc; col++)
{
found_diag = col>=m;
pp(col) = col;
@ -99,7 +98,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
parent(col) = nc;
/* The diagonal element is treated here even if it does not exist in the matrix
* hence the loop is executed once more */
Index pcol = col;
StorageIndex pcol = col;
if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed
@ -107,7 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
if(it) i = it.index();
if (i == col) found_diag = true;

row = firstRowElt(i);
StorageIndex row = firstRowElt(i);
if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row
rroot = root(rset);
@ -128,9 +127,10 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/
template <typename IndexVector>
void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{
Index current = n, first, next;
typedef typename IndexVector::Scalar StorageIndex;
StorageIndex current = n, first, next;
while (postnum != n)
{
// No kid for the current node
@ -175,21 +175,21 @@ void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector
* \param post postordered tree
*/
template <typename IndexVector>
void treePostorder(Index n, IndexVector& parent, IndexVector& post)
void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{
typedef typename IndexVector::Scalar StorageIndex;
IndexVector first_kid, next_kid; // Linked list of children
Index postnum;
StorageIndex postnum;
// Allocate storage for working arrays and results
first_kid.resize(n+1);
next_kid.setZero(n+1);
post.setZero(n+1);

// Set up structure describing children
Index v, dad;
first_kid.setConstant(-1);
for (v = n-1; v >= 0; v--)
for (StorageIndex v = n-1; v >= 0; v--)
{
dad = parent(v);
StorageIndex dad = parent(v);
next_kid(v) = first_kid(dad);
first_kid(dad) = v;
}
@ -188,7 +188,7 @@ class SparseMatrix
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
return m_data.atInRange(m_outerIndex[outer], end, inner);
return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
}

/** \returns a non-const reference to the value of the matrix at position \a i, \a j
@ -211,7 +211,7 @@ class SparseMatrix
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start)
return insert(row,col);
const Index p = m_data.searchLowerIndex(start,end-1,inner);
const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner))
return m_data.value(p);
else
@ -390,7 +390,7 @@ class SparseMatrix
* \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
@ -714,9 +714,9 @@ class SparseMatrix
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1));
Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
}
inline SparseMatrix& operator=(const SparseMatrix& other)
{
@ -61,7 +61,7 @@ struct permut_sparsematrix_product_retval
for(Index j=0; j<m_matrix.outerSize(); ++j)
{
Index jp = m_permutation.indices().coeff(j);
sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros();
sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
}
tmp.reserve(sizes);
for(Index j=0; j<m_matrix.outerSize(); ++j)
@ -452,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
};

StorageIndex size = mat.rows();
Index size = mat.rows();
VectorI count(size);
count.setZero();
dest.resize(size,size);
@ -24,16 +24,16 @@ void solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs,
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Dest::Scalar DestScalar;
// we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
static const int NbColsAtOnce = 4;
int rhsCols = rhs.cols();
int size = rhs.rows();
static const Index NbColsAtOnce = 4;
Index rhsCols = rhs.cols();
Index size = rhs.rows();
// the temporary matrices do not need more columns than NbColsAtOnce:
int tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
for(int k=0; k<rhsCols; k+=NbColsAtOnce)
for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
{
int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce);
Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
@ -103,7 +103,7 @@ class SparseVector
inline Scalar coeff(Index i) const
{
eigen_assert(i>=0 && i<m_size);
return m_data.at(i);
return m_data.at(StorageIndex(i));
}

inline Scalar& coeffRef(Index row, Index col)
@ -121,7 +121,7 @@ class SparseVector
inline Scalar& coeffRef(Index i)
{
eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(i);
return m_data.atWithInsertion(StorageIndex(i));
}

public:
@ -397,7 +397,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
if (!m_symmetricmode) {
IndexVector post, iwork;
// Post order etree
internal::treePostorder(m_mat.cols(), m_etree, post);
internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);

// Renumber etree in postorder
@ -479,7 +479,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
else
{ //FIXME This should not be needed if the empty permutation is handled transparently
m_perm_c.resize(matrix.cols());
for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
}

Index m = m_mat.rows();
@ -40,7 +40,7 @@ class SparseLUImpl
Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
template <typename Traits>
void dfs_kernel(const Index jj, IndexVector& perm_r,
void dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);
@ -178,11 +178,11 @@ class MappedSuperNodalMatrix
* \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L
*
*/
template<typename Scalar, typename Index>
class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator
template<typename Scalar, typename StorageIndex>
class MappedSuperNodalMatrix<Scalar,StorageIndex>::InnerIterator
{
public:
InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer)
InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
: m_matrix(mat),
m_outer(outer),
m_supno(mat.colToSup()[outer]),
@ -53,7 +53,7 @@ void SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector&
{
Index fsupc, i, j, k, jstart;

Index nextl = 0;
StorageIndex nextl = 0;
Index nsuper = (glu.supno)(n);

// For each supernode
@ -138,7 +138,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Ind
glu.lusup.segment(nextlu,offset).setZero();
nextlu += offset;
}
glu.xlusup(jcol + 1) = nextlu; // close L\U(*,jcol);
glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol);

/* For more updates within the panel (also within the current supernode),
* should start from the first column of the panel, or the first column
@ -112,13 +112,13 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// krow was visited before, go to the next nonz;
if (kmark == jcol) continue;

dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
xplore, glu, nextl, krow, traits);
} // for each nonzero ...

Index fsupc, jptr, jm1ptr, ito, ifrom, istop;
Index nsuper = glu.supno(jcol);
Index jcolp1 = jcol + 1;
Index fsupc;
StorageIndex nsuper = glu.supno(jcol);
StorageIndex jcolp1 = StorageIndex(jcol) + 1;
Index jcolm1 = jcol - 1;

// check to see if j belongs in the same supernode as j-1
@ -129,8 +129,8 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
else
{
fsupc = glu.xsup(nsuper);
jptr = glu.xlsub(jcol); // Not yet compressed
jm1ptr = glu.xlsub(jcolm1);
StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed
StorageIndex jm1ptr = glu.xlsub(jcolm1);

// Use supernodes of type T2 : see SuperLU paper
if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
@ -148,13 +148,13 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
{ // starts a new supernode
if ( (fsupc < jcolm1-1) )
{ // >= 3 columns in nsuper
ito = glu.xlsub(fsupc+1);
StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito;
istop = ito + jptr - jm1ptr;
StorageIndex istop = ito + jptr - jm1ptr;
xprune(jcolm1) = istop; // intialize xprune(jcol-1)
glu.xlsub(jcol) = istop;

for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
glu.lsub(ito) = glu.lsub(ifrom);
nextl = ito; // = istop + length(jcol)
}
@ -166,8 +166,8 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper;
xprune(jcol) = nextl; // Intialize upper bound for pruning
glu.xlsub(jcolp1) = nextl;
xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
glu.xlsub(jcolp1) = StorageIndex(nextl);

return 0;
}
@ -56,7 +56,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::copy_to_ucol(const Index jcol, const In

// For each nonzero supernode segment of U[*,j] in topological order
Index k = nseg - 1, i;
Index nextu = glu.xusub(jcol);
StorageIndex nextu = glu.xusub(jcol);
Index kfnz, isub, segsize;
Index new_next,irow;
Index fsupc, mem;
@ -48,15 +48,14 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe

// The etree may not be postordered, but its heap ordered
IndexVector post;
internal::treePostorder(n, et, post); // Post order etree
internal::treePostorder(StorageIndex(n), et, post); // Post order etree
IndexVector inv_post(n+1);
Index i;
for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???

// Renumber etree in postorder
IndexVector iwork(n);
IndexVector et_save(n+1);
for (i = 0; i < n; ++i)
for (Index i = 0; i < n; ++i)
{
iwork(post(i)) = post(et(i));
}
@ -78,7 +77,7 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
StorageIndex k;
Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
Index nsuper_et = 0; // Number of relaxed snodes in the original etree
Index l;
StorageIndex l;
for (j = 0; j < n; )
{
parent = et(j);
@ -90,8 +89,8 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
}
// Found a supernode in postordered etree, j is the last column
++nsuper_et_post;
k = n;
for (i = snode_start; i <= j; ++i)
k = StorageIndex(n);
for (Index i = snode_start; i <= j; ++i)
k = (std::min)(k, inv_post(i));
l = inv_post(j);
if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode
@ -102,7 +101,7 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
}
else
{
for (i = snode_start; i <= j; ++i)
for (Index i = snode_start; i <= j; ++i)
{
l = inv_post(i);
if (descendants(i) == 0)
@ -41,7 +41,7 @@ struct panel_dfs_traits
panel_dfs_traits(Index jcol, StorageIndex* marker)
: m_jcol(jcol), m_marker(marker)
{}
bool update_segrep(Index krep, Index jj)
bool update_segrep(Index krep, StorageIndex jj)
{
if(m_marker[krep]<m_jcol)
{
@ -59,7 +59,7 @@ struct panel_dfs_traits

template <typename Scalar, typename StorageIndex>
template <typename Traits>
void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector& perm_r,
void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu,
@ -67,14 +67,14 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
)
{

Index kmark = marker(krow);
StorageIndex kmark = marker(krow);

// For each unmarked krow of jj
marker(krow) = jj;
Index kperm = perm_r(krow);
StorageIndex kperm = perm_r(krow);
if (kperm == emptyIdxLU ) {
// krow is in L : place it in structure of L(*, jj)
panel_lsub(nextl_col++) = krow; // krow is indexed into A
panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A

traits.mem_expand(panel_lsub, nextl_col, kmark);
}
@ -83,9 +83,9 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
// krow is in U : if its supernode-representative krep
// has been explored, update repfnz(*)
// krep = supernode representative of the current row
Index krep = glu.xsup(glu.supno(kperm)+1) - 1;
StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1;
// First nonzero element in the current column:
Index myfnz = repfnz_col(krep);
StorageIndex myfnz = repfnz_col(krep);

if (myfnz != emptyIdxLU )
{
@ -96,26 +96,26 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
else
{
// Otherwise, perform dfs starting at krep
Index oldrep = emptyIdxLU;
StorageIndex oldrep = emptyIdxLU;
parent(krep) = oldrep;
repfnz_col(krep) = kperm;
Index xdfs = glu.xlsub(krep);
StorageIndex xdfs = glu.xlsub(krep);
Index maxdfs = xprune(krep);

Index kpar;
StorageIndex kpar;
do
{
// For each unmarked kchild of krep
while (xdfs < maxdfs)
{
Index kchild = glu.lsub(xdfs);
StorageIndex kchild = glu.lsub(xdfs);
xdfs++;
Index chmark = marker(kchild);
StorageIndex chmark = marker(kchild);

if (chmark != jj )
{
marker(kchild) = jj;
Index chperm = perm_r(kchild);
StorageIndex chperm = perm_r(kchild);

if (chperm == emptyIdxLU)
{
@ -128,7 +128,7 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
// case kchild is in U :
// chrep = its supernode-rep. If its rep has been explored,
// update its repfnz(*)
Index chrep = glu.xsup(glu.supno(chperm)+1) - 1;
StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1;
myfnz = repfnz_col(chrep);

if (myfnz != emptyIdxLU)
@ -227,7 +227,7 @@ void SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w,
panel_dfs_traits<IndexVector> traits(jcol, marker1.data());

// For each column in the panel
for (Index jj = jcol; jj < jcol + w; jj++)
for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++)
{
nextl_col = (jj - jcol) * m;

@ -241,7 +241,7 @@ void SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w,
Index krow = it.row();
dense_col(krow) = it.value();

Index kmark = marker(krow);
StorageIndex kmark = marker(krow);
if (kmark == jj)
continue; // krow visited before, go to the next nonzero
@ -89,7 +89,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScal
// Test for singularity
if ( pivmax == 0.0 ) {
pivrow = lsub_ptr[pivptr];
perm_r(pivrow) = jcol;
perm_r(pivrow) = StorageIndex(jcol);
return (jcol+1);
}

@ -110,7 +110,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScal
}

// Record pivot row
perm_r(pivrow) = jcol;
perm_r(pivrow) = StorageIndex(jcol);
// Interchange row subscripts
if (pivptr != nsupc )
{
@ -124,7 +124,7 @@ void SparseLUImpl<Scalar,StorageIndex>::pruneL(const Index jcol, const IndexVect
}
} // end while

xprune(irep) = kmin; //Pruning
xprune(irep) = StorageIndex(kmin); //Pruning
} // end if do_prune
} // end pruning
} // End for each U-segment
@ -48,10 +48,10 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
{

// compute the number of descendants of each node in the etree
Index j, parent;
Index parent;
relax_end.setConstant(emptyIdxLU);
descendants.setZero();
for (j = 0; j < n; j++)
for (Index j = 0; j < n; j++)
{
parent = et(j);
if (parent != n) // not the dummy root
@ -59,7 +59,7 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
}
// Identify the relaxed supernodes by postorder traversal of the etree
Index snode_start; // beginning of a snode
for (j = 0; j < n; )
for (Index j = 0; j < n; )
{
parent = et(j);
snode_start = j;
@ -69,7 +69,7 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
parent = et(j);
}
// Found a supernode in postordered etree, j is the last column
relax_end(snode_start) = j; // Record last column
relax_end(snode_start) = StorageIndex(j); // Record last column
j++;
// Search for a new leaf
while (descendants(j) != 0 && j < n) j++;
@ -296,7 +296,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
if (!m_perm_c.size())
{
m_perm_c.resize(n);
m_perm_c.indices().setLinSpaced(n, 0,n-1);
m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));
}

// Compute the column elimination tree of the permuted matrix
@ -327,8 +327,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
using std::abs;

eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
StorageIndex m = mat.rows();
StorageIndex n = mat.cols();
StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = StorageIndex(mat.cols());
StorageIndex diagSize = (std::min)(m,n);
IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes
IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
@ -406,7 +406,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{
StorageIndex curIdx = nonzeroCol;
if(itp) curIdx = itp.row();
if(itp) curIdx = StorageIndex(itp.row());
if(curIdx == nonzeroCol) found_diag = true;

// Get the nonzeros indexes of the current column of R
@ -467,7 +467,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
{
StorageIndex iQ = itq.row();
StorageIndex iQ = StorageIndex(itq.row());
if (mark(iQ) != col)
{
Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q,
@ -313,8 +313,9 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
void analyzePattern_impl()
{
int errorCode = 0;
errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr,
&m_symbolic, 0, 0);
errorCode = umfpack_symbolic(internal::convert_index<int>(m_copyMatrix.rows()),
internal::convert_index<int>(m_copyMatrix.cols()),
m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, &m_symbolic, 0, 0);

m_isInitialized = true;
m_info = errorCode ? InvalidInput : Success;
@ -398,8 +398,8 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat.setZero();
for(Index i=0;i<ntriplets;++i)
{
StorageIndex r = internal::random<StorageIndex>(0,rows-1);
StorageIndex c = internal::random<StorageIndex>(0,cols-1);
StorageIndex r = internal::random<StorageIndex>(0,StorageIndex(rows-1));
StorageIndex c = internal::random<StorageIndex>(0,StorageIndex(cols-1));
Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v));
refMat(r,c) += v;
@ -37,7 +37,7 @@ template<typename Scalar> void test_spqr_scalar()
SPQR<MatrixType> solver;
generate_sparse_rectangular_problem(A,dA);

int m = A.rows();
Index m = A.rows();
b = DenseVector::Random(m);
solver.compute(A);
if (solver.info() != Success)
@ -54,7 +54,7 @@ namespace internal {
*/
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
int &iters, const int &restart, typename Dest::RealScalar & tol_error) {
Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {

using std::sqrt;
using std::abs;
@ -65,10 +65,10 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;

RealScalar tol = tol_error;
const int maxIters = iters;
const Index maxIters = iters;
iters = 0;

const int m = mat.rows();
const Index m = mat.rows();

// residual and preconditioned residual
const VectorType p0 = rhs - mat*x;
@ -97,14 +97,14 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
w(0)=(Scalar) beta;
H.bottomLeftCorner(m - 1, 1) = e;

for (int k = 1; k <= restart; ++k) {
for (Index k = 1; k <= restart; ++k) {

++iters;

VectorType v = VectorType::Unit(m, k - 1), workspace(m);

// apply Householder reflections H_{1} ... H_{k-1} to v
for (int i = k - 1; i >= 0; --i) {
for (Index i = k - 1; i >= 0; --i) {
v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
}

@ -113,7 +113,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
v=precond.solve(t);

// apply Householder reflections H_{k-1} ... H_{1} to v
for (int i = 0; i < k; ++i) {
for (Index i = 0; i < k; ++i) {
v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
}

@ -133,7 +133,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
}

if (k > 1) {
for (int i = 0; i < k - 1; ++i) {
for (Index i = 0; i < k - 1; ++i) {
// apply old Givens rotations to v
v.applyOnTheLeft(i, i + 1, G[i].adjoint());
}
@ -166,7 +166,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
// apply Householder reflection H_{k} to x_new
x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());

for (int i = k - 2; i >= 0; --i) {
for (Index i = k - 2; i >= 0; --i) {
x_new += y(i) * VectorType::Unit(m, i);
// apply Householder reflection H_{i} to x_new
x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
@ -265,7 +265,7 @@ class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
using Base::m_isInitialized;

private:
int m_restart;
Index m_restart;

public:
using Base::_solve_impl;
@ -295,19 +295,19 @@ public:

/** Get the number of iterations after that a restart is performed.
*/
int get_restart() { return m_restart; }
Index get_restart() { return m_restart; }

/** Set the number of iterations after that a restart is performed.
* \param restart number of iterations for a restarti, default is 30.
*/
void set_restart(const int restart) { m_restart=restart; }
void set_restart(const Index restart) { m_restart=restart; }

/** \internal */
template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{
bool failed = false;
for(int j=0; j<b.cols(); ++j)
for(Index j=0; j<b.cols(); ++j)
{
m_iterations = Base::maxIterations();
m_error = Base::m_tolerance;
@ -29,7 +29,7 @@ namespace Eigen {
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE
void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, int& iters,
const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error)
{
using std::sqrt;
@ -48,8 +48,8 @@ namespace Eigen {
}

// initialize
const int maxIters(iters); // initialize maxIters to iters
const int N(mat.cols()); // the size of the matrix
const Index maxIters(iters); // initialize maxIters to iters
const Index N(mat.cols()); // the size of the matrix
const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)

// Initialize preconditioned Lanczos
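
Note: the hunks above repeatedly swap int counters for Index, narrow stored values to StorageIndex, and route conversions through StorageIndex(...) or internal::convert_index<...>(...). The following standalone C++ sketch only illustrates that narrowing pattern; the type aliases and the checked_narrow helper are hypothetical and are not Eigen's own implementation.

#include <cassert>
#include <cstddef>
#include <cstdint>

using Index = std::ptrdiff_t;      // assumption: wide API-level index type
using StorageIndex = std::int32_t; // assumption: narrow type stored per nonzero

// Hypothetical checked narrowing, analogous in intent to a convert_index-style helper.
inline StorageIndex checked_narrow(Index value)
{
  assert(value >= 0 && value <= static_cast<Index>(INT32_MAX) && "index overflows StorageIndex");
  return static_cast<StorageIndex>(value);
}

void fill_identity_permutation(StorageIndex* indices, Index size)
{
  // Narrow the bound once, then use a StorageIndex counter so each
  // assignment needs no further long-to-int conversion.
  const StorageIndex n = checked_narrow(size);
  for (StorageIndex i = 0; i < n; ++i)
    indices[i] = i;
}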