Fix many long-to-int conversion warnings:

- fix usage of Index (API) versus StorageIndex (when multiple indices are stored)
- use StorageIndex(val) when the input has already been checked
- use internal::convert_index<StorageIndex>(val) when val is potentially unsafe (i.e. it comes directly from user input)
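As a quick illustration of the two conversion styles named above (this sketch is not code from the commit; the function names are invented, and it assumes only Eigen's API-level Index type and the internal::convert_index<> helper mentioned in the message):

    #include <Eigen/Core>

    // StorageIndex: the (possibly narrower) type used to store indices, e.g. int.
    // Eigen::Index:  the wider API-level index type (typically std::ptrdiff_t).
    template<typename StorageIndex>
    StorageIndex from_checked_value(Eigen::Index val)
    {
      // The caller already knows 'val' fits in StorageIndex (e.g. it was read back
      // from a container that stores StorageIndex), so a plain cast suffices:
      return StorageIndex(val);
    }

    template<typename StorageIndex>
    StorageIndex from_user_value(Eigen::Index val)
    {
      // 'val' comes directly from user input and may not fit in StorageIndex, so
      // route it through Eigen's conversion helper, which can check the range:
      return Eigen::internal::convert_index<StorageIndex>(val);
    }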
Gael Guennebaud 2015-02-16 13:19:05 +01:00
parent fc202bab39
commit aa6c516ec1
37 changed files with 397 additions and 398 deletions

View File

@@ -66,9 +66,9 @@ class CwiseUnaryOp : public CwiseUnaryOpImpl<UnaryOp, XprType, typename internal
 : m_xpr(xpr), m_functor(func) {}
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE StorageIndex rows() const { return m_xpr.rows(); }
+EIGEN_STRONG_INLINE Index rows() const { return m_xpr.rows(); }
 EIGEN_DEVICE_FUNC
-EIGEN_STRONG_INLINE StorageIndex cols() const { return m_xpr.cols(); }
+EIGEN_STRONG_INLINE Index cols() const { return m_xpr.cols(); }
 /** \returns the functor representing the unary operation */
 EIGEN_DEVICE_FUNC

View File

@@ -147,7 +147,8 @@ class PermutationBase : public EigenBase<Derived>
 /** Sets *this to be the identity permutation matrix */
 void setIdentity()
 {
-for(Index i = 0; i < size(); ++i)
+StorageIndex n = StorageIndex(size());
+for(StorageIndex i = 0; i < n; ++i)
 indices().coeffRef(i) = i;
 }

View File

@@ -27,7 +27,7 @@ namespace internal {
 */
 template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
 bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
-const Preconditioner& precond, int& iters,
+const Preconditioner& precond, Index& iters,
 typename Dest::RealScalar& tol_error)
 {
 using std::sqrt;
@@ -36,9 +36,9 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
 typedef typename Dest::Scalar Scalar;
 typedef Matrix<Scalar,Dynamic,1> VectorType;
 RealScalar tol = tol_error;
-int maxIters = iters;
+Index maxIters = iters;
-int n = mat.cols();
+Index n = mat.cols();
 VectorType r = rhs - mat * x;
 VectorType r0 = r;
@@ -61,8 +61,8 @@ bool bicgstab(const MatrixType& mat, const Rhs& rhs, Dest& x,
 RealScalar tol2 = tol*tol;
 RealScalar eps2 = NumTraits<Scalar>::epsilon()*NumTraits<Scalar>::epsilon();
-int i = 0;
+Index i = 0;
-int restarts = 0;
+Index restarts = 0;
 while ( r.squaredNorm()/rhs_sqnorm > tol2 && i<maxIters )
 {
@@ -182,7 +182,7 @@ public:
 void _solve_with_guess_impl(const Rhs& b, Dest& x) const
 {
 bool failed = false;
-for(int j=0; j<b.cols(); ++j)
+for(Index j=0; j<b.cols(); ++j)
 {
 m_iterations = Base::maxIterations();
 m_error = Base::m_tolerance;

View File

@@ -26,7 +26,7 @@ namespace internal {
 template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
 EIGEN_DONT_INLINE
 void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
-const Preconditioner& precond, int& iters,
+const Preconditioner& precond, Index& iters,
 typename Dest::RealScalar& tol_error)
 {
 using std::sqrt;
@@ -36,9 +36,9 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
 typedef Matrix<Scalar,Dynamic,1> VectorType;
 RealScalar tol = tol_error;
-int maxIters = iters;
+Index maxIters = iters;
-int n = mat.cols();
+Index n = mat.cols();
 VectorType residual = rhs - mat * x; //initial residual
@@ -64,7 +64,7 @@ void conjugate_gradient(const MatrixType& mat, const Rhs& rhs, Dest& x,
 VectorType z(n), tmp(n);
 RealScalar absNew = numext::real(residual.dot(p)); // the square of the absolute value of r scaled by invM
-int i = 0;
+Index i = 0;
 while(i < maxIters)
 {
 tmp.noalias() = mat * p; // the bottleneck of the algorithm
@@ -190,7 +190,7 @@ public:
 m_iterations = Base::maxIterations();
 m_error = Base::m_tolerance;
-for(int j=0; j<b.cols(); ++j)
+for(Index j=0; j<b.cols(); ++j)
 {
 m_iterations = Base::maxIterations();
 m_error = Base::m_tolerance;

View File

@@ -145,7 +145,7 @@ public:
 * It is either the value setted by setMaxIterations or, by default,
 * twice the number of columns of the matrix.
 */
-int maxIterations() const
+Index maxIterations() const
 {
 return (m_maxIterations<0) ? 2*mp_matrix.cols() : m_maxIterations;
 }
@@ -153,14 +153,14 @@ public:
 /** Sets the max number of iterations.
 * Default is twice the number of columns of the matrix.
 */
-Derived& setMaxIterations(int maxIters)
+Derived& setMaxIterations(Index maxIters)
 {
 m_maxIterations = maxIters;
 return derived();
 }
 /** \returns the number of iterations performed during the last solve */
-int iterations() const
+Index iterations() const
 {
 eigen_assert(m_isInitialized && "ConjugateGradient is not initialized.");
 return m_iterations;
@@ -200,11 +200,11 @@ public:
 {
 eigen_assert(rows()==b.rows());
-int rhsCols = b.cols();
+Index rhsCols = b.cols();
-int size = b.rows();
+Index size = b.rows();
 Eigen::Matrix<DestScalar,Dynamic,1> tb(size);
 Eigen::Matrix<DestScalar,Dynamic,1> tx(size);
-for(int k=0; k<rhsCols; ++k)
+for(Index k=0; k<rhsCols; ++k)
 {
 tb = b.col(k);
 tx = derived().solve(tb);
@@ -233,11 +233,11 @@ protected:
 Ref<const MatrixType> mp_matrix;
 Preconditioner m_preconditioner;
-int m_maxIterations;
+Index m_maxIterations;
 RealScalar m_tolerance;
 mutable RealScalar m_error;
-mutable int m_iterations;
+mutable Index m_iterations;
 mutable ComputationInfo m_info;
 mutable bool m_analysisIsOk, m_factorizationIsOk;
 };

View File

@@ -41,10 +41,10 @@ template<typename T0, typename T1> inline bool amd_marked(const T0* w, const T1&
 template<typename T0, typename T1> inline void amd_mark(const T0* w, const T1& j) { return w[j] = amd_flip(w[j]); }
 /* clear w */
-template<typename Index>
+template<typename StorageIndex>
-static Index cs_wclear (Index mark, Index lemax, Index *w, Index n)
+static StorageIndex cs_wclear (StorageIndex mark, StorageIndex lemax, StorageIndex *w, StorageIndex n)
 {
-Index k;
+StorageIndex k;
 if(mark < 2 || (mark + lemax < 0))
 {
 for(k = 0; k < n; k++)
@@ -56,10 +56,10 @@ static Index cs_wclear (Index mark, Index lemax, Index *w, Index n)
 }
 /* depth-first search and postorder of a tree rooted at node j */
-template<typename Index>
+template<typename StorageIndex>
-Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Index *stack)
+StorageIndex cs_tdfs(StorageIndex j, StorageIndex k, StorageIndex *head, const StorageIndex *next, StorageIndex *post, StorageIndex *stack)
 {
-Index i, p, top = 0;
+StorageIndex i, p, top = 0;
 if(!head || !next || !post || !stack) return (-1); /* check inputs */
 stack[0] = j; /* place j on the stack */
 while (top >= 0) /* while (stack is not empty) */
@@ -87,41 +87,39 @@ Index cs_tdfs(Index j, Index k, Index *head, const Index *next, Index *post, Ind
 * \returns the permutation P reducing the fill-in of the input matrix \a C
 * The input matrix \a C must be a selfadjoint compressed column major SparseMatrix object. Both the upper and lower parts have to be stored, but the diagonal entries are optional.
 * On exit the values of C are destroyed */
-template<typename Scalar, typename Index>
+template<typename Scalar, typename StorageIndex>
-void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, PermutationMatrix<Dynamic,Dynamic,Index>& perm)
+void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,StorageIndex>& C, PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm)
 {
 using std::sqrt;
-Index d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
+StorageIndex d, dk, dext, lemax = 0, e, elenk, eln, i, j, k, k1,
 k2, k3, jlast, ln, dense, nzmax, mindeg = 0, nvi, nvj, nvk, mark, wnvi,
-ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t;
-std::size_t h;
-Index n = C.cols();
-dense = std::max<Index> (16, Index(10 * sqrt(double(n)))); /* find dense threshold */
-dense = std::min<Index> (n-2, dense);
-Index cnz = C.nonZeros();
+ok, nel = 0, p, p1, p2, p3, p4, pj, pk, pk1, pk2, pn, q, t, h;
+StorageIndex n = StorageIndex(C.cols());
+dense = std::max<StorageIndex> (16, StorageIndex(10 * sqrt(double(n)))); /* find dense threshold */
+dense = (std::min)(n-2, dense);
+StorageIndex cnz = StorageIndex(C.nonZeros());
 perm.resize(n+1);
 t = cnz + cnz/5 + 2*n; /* add elbow room to C */
 C.resizeNonZeros(t);
 // get workspace
-ei_declare_aligned_stack_constructed_variable(Index,W,8*(n+1),0);
+ei_declare_aligned_stack_constructed_variable(StorageIndex,W,8*(n+1),0);
-Index* len = W;
+StorageIndex* len = W;
-Index* nv = W + (n+1);
+StorageIndex* nv = W + (n+1);
-Index* next = W + 2*(n+1);
+StorageIndex* next = W + 2*(n+1);
-Index* head = W + 3*(n+1);
+StorageIndex* head = W + 3*(n+1);
-Index* elen = W + 4*(n+1);
+StorageIndex* elen = W + 4*(n+1);
-Index* degree = W + 5*(n+1);
+StorageIndex* degree = W + 5*(n+1);
-Index* w = W + 6*(n+1);
+StorageIndex* w = W + 6*(n+1);
-Index* hhead = W + 7*(n+1);
+StorageIndex* hhead = W + 7*(n+1);
-Index* last = perm.indices().data(); /* use P as workspace for last */
+StorageIndex* last = perm.indices().data(); /* use P as workspace for last */
 /* --- Initialize quotient graph ---------------------------------------- */
-Index* Cp = C.outerIndexPtr();
+StorageIndex* Cp = C.outerIndexPtr();
-Index* Ci = C.innerIndexPtr();
+StorageIndex* Ci = C.innerIndexPtr();
 for(k = 0; k < n; k++)
 len[k] = Cp[k+1] - Cp[k];
 len[n] = 0;
@@ -138,7 +136,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 elen[i] = 0; // Ek of node i is empty
 degree[i] = len[i]; // degree of node i
 }
-mark = internal::cs_wclear<Index>(0, 0, w, n); /* clear w */
+mark = internal::cs_wclear<StorageIndex>(0, 0, w, n); /* clear w */
 elen[n] = -2; /* n is a dead element */
 Cp[n] = -1; /* n is a root of assembly tree */
 w[n] = 0; /* n is a dead element */
@@ -253,7 +251,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 elen[k] = -2; /* k is now an element */
 /* --- Find set differences ----------------------------------------- */
-mark = internal::cs_wclear<Index>(mark, lemax, w, n); /* clear w if necessary */
+mark = internal::cs_wclear<StorageIndex>(mark, lemax, w, n); /* clear w if necessary */
 for(pk = pk1; pk < pk2; pk++) /* scan 1: find |Le\Lk| */
 {
 i = Ci[pk];
@@ -323,7 +321,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 }
 else
 {
-degree[i] = std::min<Index> (degree[i], d); /* update degree(i) */
+degree[i] = std::min<StorageIndex> (degree[i], d); /* update degree(i) */
 Ci[pn] = Ci[p3]; /* move first node to end */
 Ci[p3] = Ci[p1]; /* move 1st el. to end of Ei */
 Ci[p1] = k; /* add k as 1st element in of Ei */
@@ -331,12 +329,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 h %= n; /* finalize hash of i */
 next[i] = hhead[h]; /* place i in hash bucket */
 hhead[h] = i;
-last[i] = Index(h); /* save hash of i in last[i] */
+last[i] = h; /* save hash of i in last[i] */
 }
 } /* scan2 is done */
 degree[k] = dk; /* finalize |Lk| */
-lemax = std::max<Index>(lemax, dk);
+lemax = std::max<StorageIndex>(lemax, dk);
-mark = internal::cs_wclear<Index>(mark+lemax, lemax, w, n); /* clear w */
+mark = internal::cs_wclear<StorageIndex>(mark+lemax, lemax, w, n); /* clear w */
 /* --- Supernode detection ------------------------------------------ */
 for(pk = pk1; pk < pk2; pk++)
@@ -384,12 +382,12 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 if((nvi = -nv[i]) <= 0) continue;/* skip if i is dead */
 nv[i] = nvi; /* restore nv[i] */
 d = degree[i] + dk - nvi; /* compute external degree(i) */
-d = std::min<Index> (d, n - nel - nvi);
+d = std::min<StorageIndex> (d, n - nel - nvi);
 if(head[d] != -1) last[head[d]] = i;
 next[i] = head[d]; /* put i back in degree list */
 last[i] = -1;
 head[d] = i;
-mindeg = std::min<Index> (mindeg, d); /* find new minimum degree */
+mindeg = std::min<StorageIndex> (mindeg, d); /* find new minimum degree */
 degree[i] = d;
 Ci[p++] = i; /* place i in Lk */
 }
@@ -422,7 +420,7 @@ void minimum_degree_ordering(SparseMatrix<Scalar,ColMajor,Index>& C, Permutation
 }
 for(k = 0, i = 0; i <= n; i++) /* postorder the assembly tree */
 {
-if(Cp[i] == -1) k = internal::cs_tdfs<Index>(i, k, head, next, perm.indices().data(), w);
+if(Cp[i] == -1) k = internal::cs_tdfs<StorageIndex>(i, k, head, next, perm.indices().data(), w);
 }
 perm.indices().conservativeResize(n);

View File

@@ -135,54 +135,54 @@ namespace internal {
 /* ========================================================================== */
 // == Row and Column structures ==
-template <typename Index>
+template <typename IndexType>
 struct colamd_col
 {
-Index start ; /* index for A of first row in this column, or DEAD */
+IndexType start ; /* index for A of first row in this column, or DEAD */
 /* if column is dead */
-Index length ; /* number of rows in this column */
+IndexType length ; /* number of rows in this column */
 union
 {
-Index thickness ; /* number of original columns represented by this */
+IndexType thickness ; /* number of original columns represented by this */
 /* col, if the column is alive */
-Index parent ; /* parent in parent tree super-column structure, if */
+IndexType parent ; /* parent in parent tree super-column structure, if */
 /* the column is dead */
 } shared1 ;
 union
 {
-Index score ; /* the score used to maintain heap, if col is alive */
+IndexType score ; /* the score used to maintain heap, if col is alive */
-Index order ; /* pivot ordering of this column, if col is dead */
+IndexType order ; /* pivot ordering of this column, if col is dead */
 } shared2 ;
 union
 {
-Index headhash ; /* head of a hash bucket, if col is at the head of */
+IndexType headhash ; /* head of a hash bucket, if col is at the head of */
 /* a degree list */
-Index hash ; /* hash value, if col is not in a degree list */
+IndexType hash ; /* hash value, if col is not in a degree list */
-Index prev ; /* previous column in degree list, if col is in a */
+IndexType prev ; /* previous column in degree list, if col is in a */
 /* degree list (but not at the head of a degree list) */
 } shared3 ;
 union
 {
-Index degree_next ; /* next column, if col is in a degree list */
+IndexType degree_next ; /* next column, if col is in a degree list */
-Index hash_next ; /* next column, if col is in a hash list */
+IndexType hash_next ; /* next column, if col is in a hash list */
 } shared4 ;
 };
-template <typename Index>
+template <typename IndexType>
 struct Colamd_Row
 {
-Index start ; /* index for A of first col in this row */
+IndexType start ; /* index for A of first col in this row */
-Index length ; /* number of principal columns in this row */
+IndexType length ; /* number of principal columns in this row */
 union
 {
-Index degree ; /* number of principal & non-principal columns in row */
+IndexType degree ; /* number of principal & non-principal columns in row */
-Index p ; /* used as a row pointer in init_rows_cols () */
+IndexType p ; /* used as a row pointer in init_rows_cols () */
 } shared1 ;
 union
 {
-Index mark ; /* for computing set differences and marking dead rows*/
+IndexType mark ; /* for computing set differences and marking dead rows*/
-Index first_column ;/* first column in row (used in garbage collection) */
+IndexType first_column ;/* first column in row (used in garbage collection) */
 } shared2 ;
 };
@@ -202,38 +202,38 @@ struct Colamd_Row
 This macro is not needed when using symamd.
-Explicit typecast to Index added Sept. 23, 2002, COLAMD version 2.2, to avoid
+Explicit typecast to IndexType added Sept. 23, 2002, COLAMD version 2.2, to avoid
 gcc -pedantic warning messages.
 */
-template <typename Index>
+template <typename IndexType>
-inline Index colamd_c(Index n_col)
+inline IndexType colamd_c(IndexType n_col)
-{ return Index( ((n_col) + 1) * sizeof (colamd_col<Index>) / sizeof (Index) ) ; }
+{ return IndexType( ((n_col) + 1) * sizeof (colamd_col<IndexType>) / sizeof (IndexType) ) ; }
-template <typename Index>
+template <typename IndexType>
-inline Index colamd_r(Index n_row)
+inline IndexType colamd_r(IndexType n_row)
-{ return Index(((n_row) + 1) * sizeof (Colamd_Row<Index>) / sizeof (Index)); }
+{ return IndexType(((n_row) + 1) * sizeof (Colamd_Row<IndexType>) / sizeof (IndexType)); }
 // Prototypes of non-user callable routines
-template <typename Index>
+template <typename IndexType>
-static Index init_rows_cols (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> col [], Index A [], Index p [], Index stats[COLAMD_STATS] );
+static IndexType init_rows_cols (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> col [], IndexType A [], IndexType p [], IndexType stats[COLAMD_STATS] );
-template <typename Index>
+template <typename IndexType>
-static void init_scoring (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], double knobs[COLAMD_KNOBS], Index *p_n_row2, Index *p_n_col2, Index *p_max_deg);
+static void init_scoring (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], double knobs[COLAMD_KNOBS], IndexType *p_n_row2, IndexType *p_n_col2, IndexType *p_max_deg);
-template <typename Index>
+template <typename IndexType>
-static Index find_ordering (Index n_row, Index n_col, Index Alen, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index head [], Index n_col2, Index max_deg, Index pfree);
+static IndexType find_ordering (IndexType n_row, IndexType n_col, IndexType Alen, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType n_col2, IndexType max_deg, IndexType pfree);
-template <typename Index>
+template <typename IndexType>
-static void order_children (Index n_col, colamd_col<Index> Col [], Index p []);
+static void order_children (IndexType n_col, colamd_col<IndexType> Col [], IndexType p []);
-template <typename Index>
+template <typename IndexType>
-static void detect_super_cols (colamd_col<Index> Col [], Index A [], Index head [], Index row_start, Index row_length ) ;
+static void detect_super_cols (colamd_col<IndexType> Col [], IndexType A [], IndexType head [], IndexType row_start, IndexType row_length ) ;
-template <typename Index>
+template <typename IndexType>
-static Index garbage_collection (Index n_row, Index n_col, Colamd_Row<Index> Row [], colamd_col<Index> Col [], Index A [], Index *pfree) ;
+static IndexType garbage_collection (IndexType n_row, IndexType n_col, Colamd_Row<IndexType> Row [], colamd_col<IndexType> Col [], IndexType A [], IndexType *pfree) ;
-template <typename Index>
+template <typename IndexType>
-static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
+static inline IndexType clear_mark (IndexType n_row, Colamd_Row<IndexType> Row [] ) ;
 /* === No debugging ========================================================= */
@@ -260,8 +260,8 @@ static inline Index clear_mark (Index n_row, Colamd_Row<Index> Row [] ) ;
 * \param n_col number of columns in A
 * \return recommended value of Alen for use by colamd
 */
-template <typename Index>
+template <typename IndexType>
-inline Index colamd_recommended ( Index nnz, Index n_row, Index n_col)
+inline IndexType colamd_recommended ( IndexType nnz, IndexType n_row, IndexType n_col)
 {
 if ((nnz) < 0 || (n_row) < 0 || (n_col) < 0)
 return (-1);
@@ -325,22 +325,22 @@ static inline void colamd_set_defaults(double knobs[COLAMD_KNOBS])
 * \param knobs parameter settings for colamd
 * \param stats colamd output statistics and error codes
 */
-template <typename Index>
+template <typename IndexType>
-static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, double knobs[COLAMD_KNOBS], Index stats[COLAMD_STATS])
+static bool colamd(IndexType n_row, IndexType n_col, IndexType Alen, IndexType *A, IndexType *p, double knobs[COLAMD_KNOBS], IndexType stats[COLAMD_STATS])
 {
 /* === Local variables ================================================== */
-Index i ; /* loop index */
+IndexType i ; /* loop index */
-Index nnz ; /* nonzeros in A */
+IndexType nnz ; /* nonzeros in A */
-Index Row_size ; /* size of Row [], in integers */
+IndexType Row_size ; /* size of Row [], in integers */
-Index Col_size ; /* size of Col [], in integers */
+IndexType Col_size ; /* size of Col [], in integers */
-Index need ; /* minimum required length of A */
+IndexType need ; /* minimum required length of A */
-Colamd_Row<Index> *Row ; /* pointer into A of Row [0..n_row] array */
+Colamd_Row<IndexType> *Row ; /* pointer into A of Row [0..n_row] array */
-colamd_col<Index> *Col ; /* pointer into A of Col [0..n_col] array */
+colamd_col<IndexType> *Col ; /* pointer into A of Col [0..n_col] array */
-Index n_col2 ; /* number of non-dense, non-empty columns */
+IndexType n_col2 ; /* number of non-dense, non-empty columns */
-Index n_row2 ; /* number of non-dense, non-empty rows */
+IndexType n_row2 ; /* number of non-dense, non-empty rows */
-Index ngarbage ; /* number of garbage collections performed */
+IndexType ngarbage ; /* number of garbage collections performed */
-Index max_deg ; /* maximum row degree */
+IndexType max_deg ; /* maximum row degree */
 double default_knobs [COLAMD_KNOBS] ; /* default knobs array */
@@ -431,8 +431,8 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
 }
 Alen -= Col_size + Row_size ;
-Col = (colamd_col<Index> *) &A [Alen] ;
+Col = (colamd_col<IndexType> *) &A [Alen] ;
-Row = (Colamd_Row<Index> *) &A [Alen + Col_size] ;
+Row = (Colamd_Row<IndexType> *) &A [Alen + Col_size] ;
 /* === Construct the row and column data structures ===================== */
@@ -485,29 +485,29 @@ static bool colamd(Index n_row, Index n_col, Index Alen, Index *A, Index *p, dou
 column form of the matrix. Returns false if the matrix is invalid,
 true otherwise. Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
-static Index init_rows_cols /* returns true if OK, or false otherwise */
+static IndexType init_rows_cols /* returns true if OK, or false otherwise */
 (
 /* === Parameters ======================================================= */
-Index n_row, /* number of rows of A */
+IndexType n_row, /* number of rows of A */
-Index n_col, /* number of columns of A */
+IndexType n_col, /* number of columns of A */
-Colamd_Row<Index> Row [], /* of size n_row+1 */
+Colamd_Row<IndexType> Row [], /* of size n_row+1 */
-colamd_col<Index> Col [], /* of size n_col+1 */
+colamd_col<IndexType> Col [], /* of size n_col+1 */
-Index A [], /* row indices of A, of size Alen */
+IndexType A [], /* row indices of A, of size Alen */
-Index p [], /* pointers to columns in A, of size n_col+1 */
+IndexType p [], /* pointers to columns in A, of size n_col+1 */
-Index stats [COLAMD_STATS] /* colamd statistics */
+IndexType stats [COLAMD_STATS] /* colamd statistics */
 )
 {
 /* === Local variables ================================================== */
-Index col ; /* a column index */
+IndexType col ; /* a column index */
-Index row ; /* a row index */
+IndexType row ; /* a row index */
-Index *cp ; /* a column pointer */
+IndexType *cp ; /* a column pointer */
-Index *cp_end ; /* a pointer to the end of a column */
+IndexType *cp_end ; /* a pointer to the end of a column */
-Index *rp ; /* a row pointer */
+IndexType *rp ; /* a row pointer */
-Index *rp_end ; /* a pointer to the end of a row */
+IndexType *rp_end ; /* a pointer to the end of a row */
-Index last_row ; /* previous row */
+IndexType last_row ; /* previous row */
 /* === Initialize columns, and check column pointers ==================== */
@@ -701,40 +701,40 @@ static Index init_rows_cols /* returns true if OK, or false otherwise */
 Kills dense or empty columns and rows, calculates an initial score for
 each column, and places all columns in the degree lists. Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
 static void init_scoring
 (
 /* === Parameters ======================================================= */
-Index n_row, /* number of rows of A */
+IndexType n_row, /* number of rows of A */
-Index n_col, /* number of columns of A */
+IndexType n_col, /* number of columns of A */
-Colamd_Row<Index> Row [], /* of size n_row+1 */
+Colamd_Row<IndexType> Row [], /* of size n_row+1 */
-colamd_col<Index> Col [], /* of size n_col+1 */
+colamd_col<IndexType> Col [], /* of size n_col+1 */
-Index A [], /* column form and row form of A */
+IndexType A [], /* column form and row form of A */
-Index head [], /* of size n_col+1 */
+IndexType head [], /* of size n_col+1 */
 double knobs [COLAMD_KNOBS],/* parameters */
-Index *p_n_row2, /* number of non-dense, non-empty rows */
+IndexType *p_n_row2, /* number of non-dense, non-empty rows */
-Index *p_n_col2, /* number of non-dense, non-empty columns */
+IndexType *p_n_col2, /* number of non-dense, non-empty columns */
-Index *p_max_deg /* maximum row degree */
+IndexType *p_max_deg /* maximum row degree */
 )
 {
 /* === Local variables ================================================== */
-Index c ; /* a column index */
+IndexType c ; /* a column index */
-Index r, row ; /* a row index */
+IndexType r, row ; /* a row index */
-Index *cp ; /* a column pointer */
+IndexType *cp ; /* a column pointer */
-Index deg ; /* degree of a row or column */
+IndexType deg ; /* degree of a row or column */
-Index *cp_end ; /* a pointer to the end of a column */
+IndexType *cp_end ; /* a pointer to the end of a column */
-Index *new_cp ; /* new column pointer */
+IndexType *new_cp ; /* new column pointer */
-Index col_length ; /* length of pruned column */
+IndexType col_length ; /* length of pruned column */
-Index score ; /* current column score */
+IndexType score ; /* current column score */
-Index n_col2 ; /* number of non-dense, non-empty columns */
+IndexType n_col2 ; /* number of non-dense, non-empty columns */
-Index n_row2 ; /* number of non-dense, non-empty rows */
+IndexType n_row2 ; /* number of non-dense, non-empty rows */
-Index dense_row_count ; /* remove rows with more entries than this */
+IndexType dense_row_count ; /* remove rows with more entries than this */
-Index dense_col_count ; /* remove cols with more entries than this */
+IndexType dense_col_count ; /* remove cols with more entries than this */
-Index min_score ; /* smallest column score */
+IndexType min_score ; /* smallest column score */
-Index max_deg ; /* maximum row degree */
+IndexType max_deg ; /* maximum row degree */
-Index next_col ; /* Used to add to degree list.*/
+IndexType next_col ; /* Used to add to degree list.*/
 /* === Extract knobs ==================================================== */
@@ -845,7 +845,7 @@ static void init_scoring
 score = COLAMD_MIN (score, n_col) ;
 }
 /* determine pruned column length */
-col_length = (Index) (new_cp - &A [Col [c].start]) ;
+col_length = (IndexType) (new_cp - &A [Col [c].start]) ;
 if (col_length == 0)
 {
 /* a newly-made null column (all rows in this col are "dense" */
@@ -938,56 +938,56 @@ static void init_scoring
 (no supercolumns on input). Uses a minimum approximate column minimum
 degree ordering method. Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
-static Index find_ordering /* return the number of garbage collections */
+static IndexType find_ordering /* return the number of garbage collections */
 (
 /* === Parameters ======================================================= */
-Index n_row, /* number of rows of A */
+IndexType n_row, /* number of rows of A */
-Index n_col, /* number of columns of A */
+IndexType n_col, /* number of columns of A */
-Index Alen, /* size of A, 2*nnz + n_col or larger */
+IndexType Alen, /* size of A, 2*nnz + n_col or larger */
-Colamd_Row<Index> Row [], /* of size n_row+1 */
+Colamd_Row<IndexType> Row [], /* of size n_row+1 */
-colamd_col<Index> Col [], /* of size n_col+1 */
+colamd_col<IndexType> Col [], /* of size n_col+1 */
-Index A [], /* column form and row form of A */
+IndexType A [], /* column form and row form of A */
-Index head [], /* of size n_col+1 */
+IndexType head [], /* of size n_col+1 */
-Index n_col2, /* Remaining columns to order */
+IndexType n_col2, /* Remaining columns to order */
-Index max_deg, /* Maximum row degree */
+IndexType max_deg, /* Maximum row degree */
-Index pfree /* index of first free slot (2*nnz on entry) */
+IndexType pfree /* index of first free slot (2*nnz on entry) */
 )
 {
 /* === Local variables ================================================== */
-Index k ; /* current pivot ordering step */
+IndexType k ; /* current pivot ordering step */
-Index pivot_col ; /* current pivot column */
+IndexType pivot_col ; /* current pivot column */
-Index *cp ; /* a column pointer */
+IndexType *cp ; /* a column pointer */
-Index *rp ; /* a row pointer */
+IndexType *rp ; /* a row pointer */
-Index pivot_row ; /* current pivot row */
+IndexType pivot_row ; /* current pivot row */
-Index *new_cp ; /* modified column pointer */
+IndexType *new_cp ; /* modified column pointer */
-Index *new_rp ; /* modified row pointer */
+IndexType *new_rp ; /* modified row pointer */
-Index pivot_row_start ; /* pointer to start of pivot row */
+IndexType pivot_row_start ; /* pointer to start of pivot row */
-Index pivot_row_degree ; /* number of columns in pivot row */
+IndexType pivot_row_degree ; /* number of columns in pivot row */
-Index pivot_row_length ; /* number of supercolumns in pivot row */
+IndexType pivot_row_length ; /* number of supercolumns in pivot row */
-Index pivot_col_score ; /* score of pivot column */
+IndexType pivot_col_score ; /* score of pivot column */
-Index needed_memory ; /* free space needed for pivot row */
+IndexType needed_memory ; /* free space needed for pivot row */
-Index *cp_end ; /* pointer to the end of a column */
+IndexType *cp_end ; /* pointer to the end of a column */
-Index *rp_end ; /* pointer to the end of a row */
+IndexType *rp_end ; /* pointer to the end of a row */
-Index row ; /* a row index */
+IndexType row ; /* a row index */
-Index col ; /* a column index */
+IndexType col ; /* a column index */
-Index max_score ; /* maximum possible score */
+IndexType max_score ; /* maximum possible score */
-Index cur_score ; /* score of current column */
+IndexType cur_score ; /* score of current column */
 unsigned int hash ; /* hash value for supernode detection */
-Index head_column ; /* head of hash bucket */
+IndexType head_column ; /* head of hash bucket */
-Index first_col ; /* first column in hash bucket */
+IndexType first_col ; /* first column in hash bucket */
-Index tag_mark ; /* marker value for mark array */
+IndexType tag_mark ; /* marker value for mark array */
-Index row_mark ; /* Row [row].shared2.mark */
+IndexType row_mark ; /* Row [row].shared2.mark */
-Index set_difference ; /* set difference size of row with pivot row */
+IndexType set_difference ; /* set difference size of row with pivot row */
-Index min_score ; /* smallest column score */
+IndexType min_score ; /* smallest column score */
-Index col_thickness ; /* "thickness" (no. of columns in a supercol) */
+IndexType col_thickness ; /* "thickness" (no. of columns in a supercol) */
-Index max_mark ; /* maximum value of tag_mark */
+IndexType max_mark ; /* maximum value of tag_mark */
-Index pivot_col_thickness ; /* number of columns represented by pivot col */
+IndexType pivot_col_thickness ; /* number of columns represented by pivot col */
-Index prev_col ; /* Used by Dlist operations. */
+IndexType prev_col ; /* Used by Dlist operations. */
-Index next_col ; /* Used by Dlist operations. */
+IndexType next_col ; /* Used by Dlist operations. */
-Index ngarbage ; /* number of garbage collections performed */
+IndexType ngarbage ; /* number of garbage collections performed */
 /* === Initialization and clear mark ==================================== */
@@ -1277,7 +1277,7 @@ static Index find_ordering /* return the number of garbage collections */
 }
 /* recompute the column's length */
-Col [col].length = (Index) (new_cp - &A [Col [col].start]) ;
+Col [col].length = (IndexType) (new_cp - &A [Col [col].start]) ;
 /* === Further mass elimination ================================= */
@@ -1325,7 +1325,7 @@ static Index find_ordering /* return the number of garbage collections */
 Col [col].shared4.hash_next = first_col ;
 /* save hash function in Col [col].shared3.hash */
-Col [col].shared3.hash = (Index) hash ;
+Col [col].shared3.hash = (IndexType) hash ;
 COLAMD_ASSERT (COL_IS_ALIVE (col)) ;
 }
 }
@@ -1420,7 +1420,7 @@ static Index find_ordering /* return the number of garbage collections */
 /* update pivot row length to reflect any cols that were killed */
 /* during super-col detection and mass elimination */
 Row [pivot_row].start = pivot_row_start ;
-Row [pivot_row].length = (Index) (new_rp - &A[pivot_row_start]) ;
+Row [pivot_row].length = (IndexType) (new_rp - &A[pivot_row_start]) ;
 Row [pivot_row].shared1.degree = pivot_row_degree ;
 Row [pivot_row].shared2.mark = 0 ;
 /* pivot row is no longer dead */
@@ -1449,22 +1449,22 @@ static Index find_ordering /* return the number of garbage collections */
 taken by this routine is O (n_col), that is, linear in the number of
 columns. Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
 static inline void order_children
 (
 /* === Parameters ======================================================= */
-Index n_col, /* number of columns of A */
+IndexType n_col, /* number of columns of A */
-colamd_col<Index> Col [], /* of size n_col+1 */
+colamd_col<IndexType> Col [], /* of size n_col+1 */
-Index p [] /* p [0 ... n_col-1] is the column permutation*/
+IndexType p [] /* p [0 ... n_col-1] is the column permutation*/
 )
 {
 /* === Local variables ================================================== */
-Index i ; /* loop counter for all columns */
+IndexType i ; /* loop counter for all columns */
-Index c ; /* column index */
+IndexType c ; /* column index */
-Index parent ; /* index of column's parent */
+IndexType parent ; /* index of column's parent */
-Index order ; /* column's order */
+IndexType order ; /* column's order */
 /* === Order each non-principal column ================================== */
@@ -1550,33 +1550,33 @@ static inline void order_children
 just been computed in the approximate degree computation.
 Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
 static void detect_super_cols
 (
 /* === Parameters ======================================================= */
-colamd_col<Index> Col [], /* of size n_col+1 */
+colamd_col<IndexType> Col [], /* of size n_col+1 */
-Index A [], /* row indices of A */
+IndexType A [], /* row indices of A */
-Index head [], /* head of degree lists and hash buckets */
+IndexType head [], /* head of degree lists and hash buckets */
-Index row_start, /* pointer to set of columns to check */
+IndexType row_start, /* pointer to set of columns to check */
-Index row_length /* number of columns to check */
+IndexType row_length /* number of columns to check */
 )
 {
 /* === Local variables ================================================== */
-Index hash ; /* hash value for a column */
+IndexType hash ; /* hash value for a column */
-Index *rp ; /* pointer to a row */
+IndexType *rp ; /* pointer to a row */
-Index c ; /* a column index */
+IndexType c ; /* a column index */
-Index super_c ; /* column index of the column to absorb into */
+IndexType super_c ; /* column index of the column to absorb into */
-Index *cp1 ; /* column pointer for column super_c */
+IndexType *cp1 ; /* column pointer for column super_c */
-Index *cp2 ; /* column pointer for column c */
+IndexType *cp2 ; /* column pointer for column c */
-Index length ; /* length of column super_c */
+IndexType length ; /* length of column super_c */
-Index prev_c ; /* column preceding c in hash bucket */
+IndexType prev_c ; /* column preceding c in hash bucket */
-Index i ; /* loop counter */
+IndexType i ; /* loop counter */
-Index *rp_end ; /* pointer to the end of the row */
+IndexType *rp_end ; /* pointer to the end of the row */
-Index col ; /* a column index in the row to check */
+IndexType col ; /* a column index in the row to check */
-Index head_column ; /* first column in hash bucket or degree list */
+IndexType head_column ; /* first column in hash bucket or degree list */
-Index first_col ; /* first column in hash bucket */
+IndexType first_col ; /* first column in hash bucket */
 /* === Consider each column in the row ================================== */
@@ -1701,27 +1701,27 @@ static void detect_super_cols
 itself linear in the number of nonzeros in the input matrix.
 Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
-static Index garbage_collection /* returns the new value of pfree */
+static IndexType garbage_collection /* returns the new value of pfree */
 (
 /* === Parameters ======================================================= */
-Index n_row, /* number of rows */
+IndexType n_row, /* number of rows */
-Index n_col, /* number of columns */
+IndexType n_col, /* number of columns */
-Colamd_Row<Index> Row [], /* row info */
+Colamd_Row<IndexType> Row [], /* row info */
-colamd_col<Index> Col [], /* column info */
+colamd_col<IndexType> Col [], /* column info */
-Index A [], /* A [0 ... Alen-1] holds the matrix */
+IndexType A [], /* A [0 ... Alen-1] holds the matrix */
-Index *pfree /* &A [0] ... pfree is in use */
+IndexType *pfree /* &A [0] ... pfree is in use */
 )
 {
 /* === Local variables ================================================== */
-Index *psrc ; /* source pointer */
+IndexType *psrc ; /* source pointer */
-Index *pdest ; /* destination pointer */
+IndexType *pdest ; /* destination pointer */
-Index j ; /* counter */
+IndexType j ; /* counter */
-Index r ; /* a row index */
+IndexType r ; /* a row index */
-Index c ; /* a column index */
+IndexType c ; /* a column index */
-Index length ; /* length of a row or column */
+IndexType length ; /* length of a row or column */
 /* === Defragment the columns =========================================== */
@@ -1734,7 +1734,7 @@ static Index garbage_collection /* returns the new value of pfree */
 /* move and compact the column */
 COLAMD_ASSERT (pdest <= psrc) ;
-Col [c].start = (Index) (pdest - &A [0]) ;
+Col [c].start = (IndexType) (pdest - &A [0]) ;
 length = Col [c].length ;
 for (j = 0 ; j < length ; j++)
 {
@@ -1744,7 +1744,7 @@ static Index garbage_collection /* returns the new value of pfree */
 *pdest++ = r ;
 }
 }
-Col [c].length = (Index) (pdest - &A [Col [c].start]) ;
+Col [c].length = (IndexType) (pdest - &A [Col [c].start]) ;
 }
 }
@@ -1791,7 +1791,7 @@ static Index garbage_collection /* returns the new value of pfree */
 /* move and compact the row */
 COLAMD_ASSERT (pdest <= psrc) ;
-Row [r].start = (Index) (pdest - &A [0]) ;
+Row [r].start = (IndexType) (pdest - &A [0]) ;
 length = Row [r].length ;
 for (j = 0 ; j < length ; j++)
 {
@@ -1801,7 +1801,7 @@ static Index garbage_collection /* returns the new value of pfree */
 *pdest++ = c ;
 }
 }
-Row [r].length = (Index) (pdest - &A [Row [r].start]) ;
+Row [r].length = (IndexType) (pdest - &A [Row [r].start]) ;
 }
 }
@@ -1810,7 +1810,7 @@ static Index garbage_collection /* returns the new value of pfree */
 /* === Return the new value of pfree ==================================== */
-return ((Index) (pdest - &A [0])) ;
+return ((IndexType) (pdest - &A [0])) ;
 }
@@ -1822,18 +1822,18 @@ static Index garbage_collection /* returns the new value of pfree */
 Clears the Row [].shared2.mark array, and returns the new tag_mark.
 Return value is the new tag_mark. Not user-callable.
 */
-template <typename Index>
+template <typename IndexType>
-static inline Index clear_mark /* return the new value for tag_mark */
+static inline IndexType clear_mark /* return the new value for tag_mark */
 (
 /* === Parameters ======================================================= */
-Index n_row, /* number of rows in A */
+IndexType n_row, /* number of rows in A */
-Colamd_Row<Index> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
+Colamd_Row<IndexType> Row [] /* Row [0 ... n_row-1].shared2.mark is set to zero */
 )
 {
 /* === Local variables ================================================== */
-Index r ;
+IndexType r ;
 for (r = 0 ; r < n_row ; r++)
 {

View File

@@ -111,12 +111,12 @@ class NaturalOrdering
 * Functor computing the \em column \em approximate \em minimum \em degree ordering
 * The matrix should be in column-major and \b compressed format (see SparseMatrix::makeCompressed()).
 */
-template<typename Index>
+template<typename StorageIndex>
 class COLAMDOrdering
 {
 public:
-typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
+typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;
-typedef Matrix<Index, Dynamic, 1> IndexVector;
+typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
 /** Compute the permutation vector \a perm form the sparse matrix \a mat
 * \warning The input sparse matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
@@ -126,26 +126,26 @@ class COLAMDOrdering
 {
 eigen_assert(mat.isCompressed() && "COLAMDOrdering requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to COLAMDOrdering");
-Index m = mat.rows();
+StorageIndex m = StorageIndex(mat.rows());
-Index n = mat.cols();
+StorageIndex n = StorageIndex(mat.cols());
-Index nnz = mat.nonZeros();
+StorageIndex nnz = StorageIndex(mat.nonZeros());
 // Get the recommended value of Alen to be used by colamd
-Index Alen = internal::colamd_recommended(nnz, m, n);
+StorageIndex Alen = internal::colamd_recommended(nnz, m, n);
 // Set the default parameters
 double knobs [COLAMD_KNOBS];
-Index stats [COLAMD_STATS];
+StorageIndex stats [COLAMD_STATS];
 internal::colamd_set_defaults(knobs);
 IndexVector p(n+1), A(Alen);
-for(Index i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
+for(StorageIndex i=0; i <= n; i++) p(i) = mat.outerIndexPtr()[i];
-for(Index i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
+for(StorageIndex i=0; i < nnz; i++) A(i) = mat.innerIndexPtr()[i];
 // Call Colamd routine to compute the ordering
-Index info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
+StorageIndex info = internal::colamd(m, n, Alen, A.data(), p.data(), knobs, stats);
 EIGEN_UNUSED_VARIABLE(info);
 eigen_assert( info && "COLAMD failed " );
 perm.resize(n);
-for (Index i = 0; i < n; i++) perm.indices()(p(i)) = i;
+for (StorageIndex i = 0; i < n; i++) perm.indices()(p(i)) = i;
 }
 };

View File

@@ -308,7 +308,7 @@ void PastixBase<Derived>::analyzePattern(ColSpMatrix& mat)
 if(m_size>0)
 clean();
-m_size = mat.rows();
+m_size = internal::convert_index<int>(mat.rows());
 m_perm.resize(m_size);
 m_invp.resize(m_size);
@@ -337,7 +337,7 @@ void PastixBase<Derived>::factorize(ColSpMatrix& mat)
 eigen_assert(m_analysisIsOk && "The analysis phase should be called before the factorization phase");
 m_iparm(IPARM_START_TASK) = API_TASK_NUMFACT;
 m_iparm(IPARM_END_TASK) = API_TASK_NUMFACT;
-m_size = mat.rows();
+m_size = internal::convert_index<int>(mat.rows());
 internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, m_size, mat.outerIndexPtr(), mat.innerIndexPtr(),
 mat.valuePtr(), m_perm.data(), m_invp.data(), 0, 0, m_iparm.data(), m_dparm.data());
@@ -373,7 +373,7 @@ bool PastixBase<Base>::_solve_impl(const MatrixBase<Rhs> &b, MatrixBase<Dest> &x
 m_iparm[IPARM_START_TASK] = API_TASK_SOLVE;
 m_iparm[IPARM_END_TASK] = API_TASK_REFINE;
-internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, x.rows(), 0, 0, 0,
+internal::eigen_pastix(&m_pastixdata, MPI_COMM_WORLD, internal::convert_index<int>(x.rows()), 0, 0, 0,
 m_perm.data(), m_invp.data(), &x(0, i), rhs, m_iparm.data(), m_dparm.data());
 }

View File

@@ -202,7 +202,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
 void factorize(const MatrixType& a)
 {
 eigen_assert(a.rows()==a.cols());
-int size = a.cols();
+Index size = a.cols();
 CholMatrixType tmp(size,size);
 ConstCholMatrixPtr pmat;
@@ -226,7 +226,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
 void analyzePattern(const MatrixType& a, bool doLDLT)
 {
 eigen_assert(a.rows()==a.cols());
-int size = a.cols();
+Index size = a.cols();
 CholMatrixType tmp(size,size);
 ConstCholMatrixPtr pmat;
 ordering(a, pmat, tmp);

View File

@ -50,14 +50,14 @@ namespace Eigen {
template<typename Derived> template<typename Derived>
void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT) void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrixType& ap, bool doLDLT)
{ {
const Index size = ap.rows(); const StorageIndex size = StorageIndex(ap.rows());
m_matrix.resize(size, size); m_matrix.resize(size, size);
m_parent.resize(size); m_parent.resize(size);
m_nonZerosPerCol.resize(size); m_nonZerosPerCol.resize(size);
ei_declare_aligned_stack_constructed_variable(Index, tags, size, 0); ei_declare_aligned_stack_constructed_variable(StorageIndex, tags, size, 0);
for(Index k = 0; k < size; ++k) for(StorageIndex k = 0; k < size; ++k)
{ {
/* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */ /* L(k,:) pattern: all nodes reachable in etree from nz in A(0:k-1,k) */
m_parent[k] = -1; /* parent of k is not yet known */ m_parent[k] = -1; /* parent of k is not yet known */
@ -65,7 +65,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix
m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */ m_nonZerosPerCol[k] = 0; /* count of nonzeros in column k of L */
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{ {
Index i = it.index(); StorageIndex i = it.index();
if(i < k) if(i < k)
{ {
/* follow path from i to root of etree, stop at flagged node */ /* follow path from i to root of etree, stop at flagged node */
@ -84,7 +84,7 @@ void SimplicialCholeskyBase<Derived>::analyzePattern_preordered(const CholMatrix
/* construct Lp index array from m_nonZerosPerCol column counts */ /* construct Lp index array from m_nonZerosPerCol column counts */
StorageIndex* Lp = m_matrix.outerIndexPtr(); StorageIndex* Lp = m_matrix.outerIndexPtr();
Lp[0] = 0; Lp[0] = 0;
for(Index k = 0; k < size; ++k) for(StorageIndex k = 0; k < size; ++k)
Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1); Lp[k+1] = Lp[k] + m_nonZerosPerCol[k] + (doLDLT ? 0 : 1);
m_matrix.resizeNonZeros(Lp[size]); m_matrix.resizeNonZeros(Lp[size]);
@ -104,10 +104,10 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
eigen_assert(m_analysisIsOk && "You must first call analyzePattern()"); eigen_assert(m_analysisIsOk && "You must first call analyzePattern()");
eigen_assert(ap.rows()==ap.cols()); eigen_assert(ap.rows()==ap.cols());
const Index size = ap.rows(); eigen_assert(m_parent.size()==ap.rows());
eigen_assert(m_parent.size()==size); eigen_assert(m_nonZerosPerCol.size()==ap.rows());
eigen_assert(m_nonZerosPerCol.size()==size);
const StorageIndex size = StorageIndex(ap.rows());
const StorageIndex* Lp = m_matrix.outerIndexPtr(); const StorageIndex* Lp = m_matrix.outerIndexPtr();
StorageIndex* Li = m_matrix.innerIndexPtr(); StorageIndex* Li = m_matrix.innerIndexPtr();
Scalar* Lx = m_matrix.valuePtr(); Scalar* Lx = m_matrix.valuePtr();
@ -119,16 +119,16 @@ void SimplicialCholeskyBase<Derived>::factorize_preordered(const CholMatrixType&
bool ok = true; bool ok = true;
m_diag.resize(DoLDLT ? size : 0); m_diag.resize(DoLDLT ? size : 0);
for(Index k = 0; k < size; ++k) for(StorageIndex k = 0; k < size; ++k)
{ {
// compute nonzero pattern of kth row of L, in topological order // compute nonzero pattern of kth row of L, in topological order
y[k] = 0.0; // Y(0:k) is now all zero y[k] = 0.0; // Y(0:k) is now all zero
Index top = size; // stack for pattern is empty StorageIndex top = size; // stack for pattern is empty
tags[k] = k; // mark node k as visited tags[k] = k; // mark node k as visited
m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L m_nonZerosPerCol[k] = 0; // count of nonzeros in column k of L
for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it) for(typename CholMatrixType::InnerIterator it(ap,k); it; ++it)
{ {
Index i = it.index(); StorageIndex i = it.index();
if(i <= k) if(i <= k)
{ {
y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */ y[i] += numext::conj(it.value()); /* scatter A(i,k) into Y (sum duplicates) */
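
The symbolic pass above walks the elimination tree to determine the pattern and per-column nonzero counts of L. As a standalone illustration of the elimination-tree idea only, here is the textbook path-compression algorithm on plain CSC arrays; the function name is ours and this is not Eigen's internal code.

    #include <vector>

    // Compute the elimination tree of a sparse symmetric matrix whose upper
    // triangular pattern is given in CSC form (Ap: column pointers, Ai: row
    // indices). parent[k] == -1 marks a root.
    std::vector<int> elimination_tree(int n, const std::vector<int>& Ap,
                                      const std::vector<int>& Ai)
    {
      std::vector<int> parent(n, -1), ancestor(n, -1);
      for (int k = 0; k < n; ++k)
      {
        for (int p = Ap[k]; p < Ap[k + 1]; ++p)
        {
          int i = Ai[p];
          while (i != -1 && i < k)          // walk from i toward the root
          {
            int next = ancestor[i];
            ancestor[i] = k;                // path compression
            if (next == -1) parent[i] = k;  // i had no parent yet
            i = next;
          }
        }
      }
      return parent;
    }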

View File

@ -131,7 +131,7 @@ class CompressedStorage
/** \returns the stored value at index \a key /** \returns the stored value at index \a key
* If the value does not exist, then the value \a defaultValue is returned without any insertion. */ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const inline Scalar at(StorageIndex key, const Scalar& defaultValue = Scalar(0)) const
{ {
if (m_size==0) if (m_size==0)
return defaultValue; return defaultValue;
@ -144,7 +144,7 @@ class CompressedStorage
} }
/** Like at(), but the search is performed in the range [start,end) */ /** Like at(), but the search is performed in the range [start,end) */
inline Scalar atInRange(Index start, Index end, Index key, const Scalar &defaultValue = Scalar(0)) const inline Scalar atInRange(Index start, Index end, StorageIndex key, const Scalar &defaultValue = Scalar(0)) const
{ {
if (start>=end) if (start>=end)
return defaultValue; return defaultValue;
@ -159,7 +159,7 @@ class CompressedStorage
/** \returns a reference to the value at index \a key /** \returns a reference to the value at index \a key
* If the value does not exist, then the value \a defaultValue is inserted * If the value does not exist, then the value \a defaultValue is inserted
* such that the keys are sorted. */ * such that the keys are sorted. */
inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0)) inline Scalar& atWithInsertion(StorageIndex key, const Scalar& defaultValue = Scalar(0))
{ {
Index id = searchLowerIndex(0,m_size,key); Index id = searchLowerIndex(0,m_size,key);
if (id>=m_size || m_indices[id]!=key) if (id>=m_size || m_indices[id]!=key)
@ -189,7 +189,7 @@ class CompressedStorage
internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1); internal::smart_memmove(m_indices+id, m_indices+m_size, m_indices+id+1);
} }
m_size++; m_size++;
m_indices[id] = convert_index<StorageIndex>(key); m_indices[id] = key;
m_values[id] = defaultValue; m_values[id] = defaultValue;
} }
return m_values[id]; return m_values[id];
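
CompressedStorage keeps parallel, sorted index and value arrays, so at() amounts to a binary search on the index array. A rough standalone equivalent of that lookup, with a hypothetical function name:

    #include <algorithm>
    #include <vector>

    // Hypothetical stand-in for CompressedStorage::at(): binary-search a sorted
    // index array and return the matching value, or a default when absent.
    double value_at(const std::vector<int>& indices,
                    const std::vector<double>& values,
                    int key, double defaultValue = 0.0)
    {
      std::vector<int>::const_iterator it =
          std::lower_bound(indices.begin(), indices.end(), key);
      if (it != indices.end() && *it == key)
        return values[it - indices.begin()];
      return defaultValue;
    }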

View File

@ -149,10 +149,10 @@ public:
// update innerNonZeros // update innerNonZeros
if(!m_matrix.isCompressed()) if(!m_matrix.isCompressed())
for(Index j=0; j<m_outerSize.value(); ++j) for(Index j=0; j<m_outerSize.value(); ++j)
matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros(); matrix.innerNonZeroPtr()[m_outerStart+j] = StorageIndex(tmp.innerVector(j).nonZeros());
// update outer index pointers // update outer index pointers
StorageIndex p = start; StorageIndex p = StorageIndex(start);
for(Index k=0; k<m_outerSize.value(); ++k) for(Index k=0; k<m_outerSize.value(); ++k)
{ {
matrix.outerIndexPtr()[m_outerStart+k] = p; matrix.outerIndexPtr()[m_outerStart+k] = p;

View File

@ -61,27 +61,26 @@ template <typename MatrixType, typename IndexVector>
int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0) int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::StorageIndex *perm=0)
{ {
typedef typename MatrixType::StorageIndex StorageIndex; typedef typename MatrixType::StorageIndex StorageIndex;
Index nc = mat.cols(); // Number of columns StorageIndex nc = convert_index<StorageIndex>(mat.cols()); // Number of columns
Index m = mat.rows(); StorageIndex m = convert_index<StorageIndex>(mat.rows());
Index diagSize = (std::min)(nc,m); StorageIndex diagSize = (std::min)(nc,m);
IndexVector root(nc); // root of subtree of etree IndexVector root(nc); // root of subtree of etree
root.setZero(); root.setZero();
IndexVector pp(nc); // disjoint sets IndexVector pp(nc); // disjoint sets
pp.setZero(); // Initialize disjoint sets pp.setZero(); // Initialize disjoint sets
parent.resize(mat.cols()); parent.resize(mat.cols());
//Compute first nonzero column in each row //Compute first nonzero column in each row
StorageIndex row,col;
firstRowElt.resize(m); firstRowElt.resize(m);
firstRowElt.setConstant(nc); firstRowElt.setConstant(nc);
firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1); firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
bool found_diag; bool found_diag;
for (col = 0; col < nc; col++) for (StorageIndex col = 0; col < nc; col++)
{ {
Index pcol = col; StorageIndex pcol = col;
if(perm) pcol = perm[col]; if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it) for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
{ {
row = it.row(); Index row = it.row();
firstRowElt(row) = (std::min)(firstRowElt(row), col); firstRowElt(row) = (std::min)(firstRowElt(row), col);
} }
} }
@ -89,8 +88,8 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
except use (firstRowElt[r],c) in place of an edge (r,c) of A. except use (firstRowElt[r],c) in place of an edge (r,c) of A.
Thus each row clique in A'*A is replaced by a star Thus each row clique in A'*A is replaced by a star
centered at its first vertex, which has the same fill. */ centered at its first vertex, which has the same fill. */
Index rset, cset, rroot; StorageIndex rset, cset, rroot;
for (col = 0; col < nc; col++) for (StorageIndex col = 0; col < nc; col++)
{ {
found_diag = col>=m; found_diag = col>=m;
pp(col) = col; pp(col) = col;
@ -99,7 +98,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
parent(col) = nc; parent(col) = nc;
/* The diagonal element is treated here even if it does not exist in the matrix /* The diagonal element is treated here even if it does not exist in the matrix
* hence the loop is executed once more */ * hence the loop is executed once more */
Index pcol = col; StorageIndex pcol = col;
if(perm) pcol = perm[col]; if(perm) pcol = perm[col];
for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it) for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
{ // A sequence of interleaved find and union is performed { // A sequence of interleaved find and union is performed
@ -107,7 +106,7 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
if(it) i = it.index(); if(it) i = it.index();
if (i == col) found_diag = true; if (i == col) found_diag = true;
row = firstRowElt(i); StorageIndex row = firstRowElt(i);
if (row >= col) continue; if (row >= col) continue;
rset = internal::etree_find(row, pp); // Find the name of the set containing row rset = internal::etree_find(row, pp); // Find the name of the set containing row
rroot = root(rset); rroot = root(rset);
@ -128,9 +127,10 @@ int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowEl
* This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France. * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
*/ */
template <typename IndexVector> template <typename IndexVector>
void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum) void nr_etdfs (typename IndexVector::Scalar n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, typename IndexVector::Scalar postnum)
{ {
Index current = n, first, next; typedef typename IndexVector::Scalar StorageIndex;
StorageIndex current = n, first, next;
while (postnum != n) while (postnum != n)
{ {
// No kid for the current node // No kid for the current node
@ -175,21 +175,21 @@ void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector
* \param post postordered tree * \param post postordered tree
*/ */
template <typename IndexVector> template <typename IndexVector>
void treePostorder(Index n, IndexVector& parent, IndexVector& post) void treePostorder(typename IndexVector::Scalar n, IndexVector& parent, IndexVector& post)
{ {
typedef typename IndexVector::Scalar StorageIndex;
IndexVector first_kid, next_kid; // Linked list of children IndexVector first_kid, next_kid; // Linked list of children
Index postnum; StorageIndex postnum;
// Allocate storage for working arrays and results // Allocate storage for working arrays and results
first_kid.resize(n+1); first_kid.resize(n+1);
next_kid.setZero(n+1); next_kid.setZero(n+1);
post.setZero(n+1); post.setZero(n+1);
// Set up structure describing children // Set up structure describing children
Index v, dad;
first_kid.setConstant(-1); first_kid.setConstant(-1);
for (v = n-1; v >= 0; v--) for (StorageIndex v = n-1; v >= 0; v--)
{ {
dad = parent(v); StorageIndex dad = parent(v);
next_kid(v) = first_kid(dad); next_kid(v) = first_kid(dad);
first_kid(dad) = v; first_kid(dad) = v;
} }
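
treePostorder above links children via first_kid/next_kid and then numbers the nodes with a non-recursive depth-first search. The following is a compact standalone sketch of the same postordering on a parent array (parent[v] == n marks the dummy root); it is written for illustration and is not the nr_etdfs routine above.

    #include <vector>

    // Postorder the forest encoded by 'parent'. Children are linked youngest
    // first, then an explicit-stack DFS emits postorder numbers.
    std::vector<int> tree_postorder(int n, const std::vector<int>& parent)
    {
      std::vector<int> first_kid(n + 1, -1), next_kid(n + 1, -1), post(n + 1, 0);
      for (int v = n - 1; v >= 0; --v)   // build per-node child lists
      {
        int dad = parent[v];
        next_kid[v] = first_kid[dad];
        first_kid[dad] = v;
      }
      int postnum = 0;
      std::vector<int> stack;
      stack.push_back(n);                // start at the dummy root
      while (!stack.empty())
      {
        int u = stack.back();
        int kid = first_kid[u];
        if (kid == -1)                   // no unvisited child left: emit u
        {
          post[u] = postnum++;
          stack.pop_back();
          if (!stack.empty())            // move the parent on to u's next sibling
            first_kid[stack.back()] = next_kid[u];
        }
        else
        {
          stack.push_back(kid);
        }
      }
      return post;
    }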

View File

@ -188,7 +188,7 @@ class SparseMatrix
const Index outer = IsRowMajor ? row : col; const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row; const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1]; Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
return m_data.atInRange(m_outerIndex[outer], end, inner); return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
} }
/** \returns a non-const reference to the value of the matrix at position \a i, \a j /** \returns a non-const reference to the value of the matrix at position \a i, \a j
@ -211,7 +211,7 @@ class SparseMatrix
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix"); eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start) if(end<=start)
return insert(row,col); return insert(row,col);
const Index p = m_data.searchLowerIndex(start,end-1,inner); const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner)) if((p<end) && (m_data.index(p)==inner))
return m_data.value(p); return m_data.value(p);
else else
@ -390,7 +390,7 @@ class SparseMatrix
* \sa insertBack, startVec */ * \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner) inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{ {
eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)"); eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)"); eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1]; Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1]; ++m_outerIndex[outer+1];
@ -714,9 +714,9 @@ class SparseMatrix
{ {
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows()); this->m_data.resize(rows());
Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); Eigen::Map<IndexVector>(&this->m_data.index(0), rows()).setLinSpaced(0, StorageIndex(rows()-1));
Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes(); Eigen::Map<ScalarVector>(&this->m_data.value(0), rows()).setOnes();
Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
} }
inline SparseMatrix& operator=(const SparseMatrix& other) inline SparseMatrix& operator=(const SparseMatrix& other)
{ {
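
The setIdentity() shown in this hunk fills the outer index, inner index and value arrays directly with linearly spaced indices. From the user side, those members are reached through the ordinary public API, for instance:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> I(4, 4);
      I.setIdentity();                 // writes the index/value arrays as in the hunk above

      Eigen::SparseMatrix<double> A(4, 4);
      A.coeffRef(0, 0) = 2.0;          // insert-or-update through coeffRef
      A.coeffRef(1, 2) = -1.0;
      A.makeCompressed();

      Eigen::SparseMatrix<double> B = I * A;
      std::cout << "A(1,2) = " << A.coeff(1, 2)
                << ", nnz(B) = " << B.nonZeros() << "\n";
      return 0;
    }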

View File

@ -61,7 +61,7 @@ struct permut_sparsematrix_product_retval
for(Index j=0; j<m_matrix.outerSize(); ++j) for(Index j=0; j<m_matrix.outerSize(); ++j)
{ {
Index jp = m_permutation.indices().coeff(j); Index jp = m_permutation.indices().coeff(j);
sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros(); sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = StorageIndex(m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).nonZeros());
} }
tmp.reserve(sizes); tmp.reserve(sizes);
for(Index j=0; j<m_matrix.outerSize(); ++j) for(Index j=0; j<m_matrix.outerSize(); ++j)

View File

@ -452,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp
SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
}; };
StorageIndex size = mat.rows(); Index size = mat.rows();
VectorI count(size); VectorI count(size);
count.setZero(); count.setZero();
dest.resize(size,size); dest.resize(size,size);

View File

@ -24,16 +24,16 @@ void solve_sparse_through_dense_panels(const Decomposition &dec, const Rhs& rhs,
EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES); EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
typedef typename Dest::Scalar DestScalar; typedef typename Dest::Scalar DestScalar;
// we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix. // we process the sparse rhs per block of NbColsAtOnce columns temporarily stored into a dense matrix.
static const int NbColsAtOnce = 4; static const Index NbColsAtOnce = 4;
int rhsCols = rhs.cols(); Index rhsCols = rhs.cols();
int size = rhs.rows(); Index size = rhs.rows();
// the temporary matrices do not need more columns than NbColsAtOnce: // the temporary matrices do not need more columns than NbColsAtOnce:
int tmpCols = (std::min)(rhsCols, NbColsAtOnce); Index tmpCols = (std::min)(rhsCols, NbColsAtOnce);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols); Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmp(size,tmpCols);
Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols); Eigen::Matrix<DestScalar,Dynamic,Dynamic> tmpX(size,tmpCols);
for(int k=0; k<rhsCols; k+=NbColsAtOnce) for(Index k=0; k<rhsCols; k+=NbColsAtOnce)
{ {
int actualCols = std::min<int>(rhsCols-k, NbColsAtOnce); Index actualCols = std::min<Index>(rhsCols-k, NbColsAtOnce);
tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols); tmp.leftCols(actualCols) = rhs.middleCols(k,actualCols);
tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols)); tmpX.leftCols(actualCols) = dec.solve(tmp.leftCols(actualCols));
dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView(); dest.middleCols(k,actualCols) = tmpX.leftCols(actualCols).sparseView();
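
The helper above answers a sparse right-hand side by densifying it in panels of a few columns, solving each dense panel, and sparsifying the result. A rough sketch of the same strategy written against the public API; SparseLU is picked arbitrarily as the decomposition, the function name is ours, and the panel width of 4 matches the constant above.

    #include <Eigen/Sparse>
    #include <algorithm>

    // Solve A X = B with sparse B and sparse X, by dense panels of up to 4 columns.
    Eigen::SparseMatrix<double>
    solve_by_panels(const Eigen::SparseLU<Eigen::SparseMatrix<double> >& lu,
                    const Eigen::SparseMatrix<double>& B)
    {
      const Eigen::Index panel = 4;
      Eigen::SparseMatrix<double> X(B.rows(), B.cols());
      Eigen::MatrixXd tmp(B.rows(), panel), tmpX(B.rows(), panel);
      for (Eigen::Index k = 0; k < B.cols(); k += panel)
      {
        Eigen::Index cols = std::min<Eigen::Index>(panel, B.cols() - k);
        tmp.leftCols(cols)  = B.middleCols(k, cols);              // densify the panel
        tmpX.leftCols(cols) = lu.solve(tmp.leftCols(cols));       // dense solve
        X.middleCols(k, cols) = tmpX.leftCols(cols).sparseView(); // sparsify back
      }
      return X;
    }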

View File

@ -103,7 +103,7 @@ class SparseVector
inline Scalar coeff(Index i) const inline Scalar coeff(Index i) const
{ {
eigen_assert(i>=0 && i<m_size); eigen_assert(i>=0 && i<m_size);
return m_data.at(i); return m_data.at(StorageIndex(i));
} }
inline Scalar& coeffRef(Index row, Index col) inline Scalar& coeffRef(Index row, Index col)
@ -121,7 +121,7 @@ class SparseVector
inline Scalar& coeffRef(Index i) inline Scalar& coeffRef(Index i)
{ {
eigen_assert(i>=0 && i<m_size); eigen_assert(i>=0 && i<m_size);
return m_data.atWithInsertion(i); return m_data.atWithInsertion(StorageIndex(i));
} }
public: public:

View File

@ -397,7 +397,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
if (!m_symmetricmode) { if (!m_symmetricmode) {
IndexVector post, iwork; IndexVector post, iwork;
// Post order etree // Post order etree
internal::treePostorder(m_mat.cols(), m_etree, post); internal::treePostorder(StorageIndex(m_mat.cols()), m_etree, post);
// Renumber etree in postorder // Renumber etree in postorder
@ -479,7 +479,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
else else
{ //FIXME This should not be needed if the empty permutation is handled transparently { //FIXME This should not be needed if the empty permutation is handled transparently
m_perm_c.resize(matrix.cols()); m_perm_c.resize(matrix.cols());
for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i; for(StorageIndex i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
} }
Index m = m_mat.rows(); Index m = m_mat.rows();
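
analyzePattern() and factorize() above are the two halves of SparseLU::compute(); the branch shown simply builds an identity column permutation when none was computed. Typical use of the public interface, with a made-up upper-triangular system:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(3, 3);
      A.insert(0, 0) = 4; A.insert(1, 1) = 3; A.insert(2, 2) = 2;
      A.insert(0, 2) = 1;
      A.makeCompressed();

      Eigen::SparseLU<SpMat, Eigen::COLAMDOrdering<int> > lu;
      lu.analyzePattern(A);   // column permutation + column elimination tree
      lu.factorize(A);        // numeric factorization
      if (lu.info() != Eigen::Success) return 1;

      Eigen::VectorXd b(3); b << 1, 2, 3;
      Eigen::VectorXd x = lu.solve(b);
      std::cout << x.transpose() << "\n";
      return 0;
    }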

View File

@ -40,7 +40,7 @@ class SparseLUImpl
Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu); Index snode_bmod (const Index jcol, const Index fsupc, ScalarVector& dense, GlobalLU_t& glu);
Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu); Index pivotL(const Index jcol, const RealScalar& diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, Index& pivrow, GlobalLU_t& glu);
template <typename Traits> template <typename Traits>
void dfs_kernel(const Index jj, IndexVector& perm_r, void dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent, Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits); IndexVector& xplore, GlobalLU_t& glu, Index& nextl_col, Index krow, Traits& traits);

View File

@ -178,11 +178,11 @@ class MappedSuperNodalMatrix
* \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L * \brief InnerIterator class to iterate over nonzero values of the current column in the supernodal matrix L
* *
*/ */
template<typename Scalar, typename Index> template<typename Scalar, typename StorageIndex>
class MappedSuperNodalMatrix<Scalar,Index>::InnerIterator class MappedSuperNodalMatrix<Scalar,StorageIndex>::InnerIterator
{ {
public: public:
InnerIterator(const MappedSuperNodalMatrix& mat, Eigen::Index outer) InnerIterator(const MappedSuperNodalMatrix& mat, Index outer)
: m_matrix(mat), : m_matrix(mat),
m_outer(outer), m_outer(outer),
m_supno(mat.colToSup()[outer]), m_supno(mat.colToSup()[outer]),
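
The supernodal InnerIterator above follows the same protocol as the iterator of a plain SparseMatrix, which is used like this:

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      Eigen::SparseMatrix<double> A(3, 3);
      A.insert(0, 1) = 2.5;
      A.insert(2, 1) = -1.0;
      A.makeCompressed();

      // Walk the nonzeros of outer vector 1 (column 1 for a column-major matrix).
      for (Eigen::SparseMatrix<double>::InnerIterator it(A, 1); it; ++it)
        std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";
      return 0;
    }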

View File

@ -53,7 +53,7 @@ void SparseLUImpl<Scalar,StorageIndex>::fixupL(const Index n, const IndexVector&
{ {
Index fsupc, i, j, k, jstart; Index fsupc, i, j, k, jstart;
Index nextl = 0; StorageIndex nextl = 0;
Index nsuper = (glu.supno)(n); Index nsuper = (glu.supno)(n);
// For each supernode // For each supernode

View File

@ -138,7 +138,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_bmod(const Index jcol, const Ind
glu.lusup.segment(nextlu,offset).setZero(); glu.lusup.segment(nextlu,offset).setZero();
nextlu += offset; nextlu += offset;
} }
glu.xlusup(jcol + 1) = nextlu; // close L\U(*,jcol); glu.xlusup(jcol + 1) = StorageIndex(nextlu); // close L\U(*,jcol);
/* For more updates within the panel (also within the current supernode), /* For more updates within the panel (also within the current supernode),
* should start from the first column of the panel, or the first column * should start from the first column of the panel, or the first column

View File

@ -112,13 +112,13 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// krow was visited before, go to the next nonz; // krow was visited before, go to the next nonz;
if (kmark == jcol) continue; if (kmark == jcol) continue;
dfs_kernel(jcol, perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent, dfs_kernel(StorageIndex(jcol), perm_r, nseg, glu.lsub, segrep, repfnz, xprune, marker2, parent,
xplore, glu, nextl, krow, traits); xplore, glu, nextl, krow, traits);
} // for each nonzero ... } // for each nonzero ...
Index fsupc, jptr, jm1ptr, ito, ifrom, istop; Index fsupc;
Index nsuper = glu.supno(jcol); StorageIndex nsuper = glu.supno(jcol);
Index jcolp1 = jcol + 1; StorageIndex jcolp1 = StorageIndex(jcol) + 1;
Index jcolm1 = jcol - 1; Index jcolm1 = jcol - 1;
// check to see if j belongs in the same supernode as j-1 // check to see if j belongs in the same supernode as j-1
@ -129,8 +129,8 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
else else
{ {
fsupc = glu.xsup(nsuper); fsupc = glu.xsup(nsuper);
jptr = glu.xlsub(jcol); // Not yet compressed StorageIndex jptr = glu.xlsub(jcol); // Not yet compressed
jm1ptr = glu.xlsub(jcolm1); StorageIndex jm1ptr = glu.xlsub(jcolm1);
// Use supernodes of type T2 : see SuperLU paper // Use supernodes of type T2 : see SuperLU paper
if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU; if ( (nextl-jptr != jptr-jm1ptr-1) ) jsuper = emptyIdxLU;
@ -148,13 +148,13 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
{ // starts a new supernode { // starts a new supernode
if ( (fsupc < jcolm1-1) ) if ( (fsupc < jcolm1-1) )
{ // >= 3 columns in nsuper { // >= 3 columns in nsuper
ito = glu.xlsub(fsupc+1); StorageIndex ito = glu.xlsub(fsupc+1);
glu.xlsub(jcolm1) = ito; glu.xlsub(jcolm1) = ito;
istop = ito + jptr - jm1ptr; StorageIndex istop = ito + jptr - jm1ptr;
xprune(jcolm1) = istop; // initialize xprune(jcol-1) xprune(jcolm1) = istop; // initialize xprune(jcol-1)
xprune(jcolm1) = istop; // initialize xprune(jcol-1) xprune(jcolm1) = istop; // initialize xprune(jcol-1)
glu.xlsub(jcol) = istop; glu.xlsub(jcol) = istop;
for (ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito) for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
glu.lsub(ito) = glu.lsub(ifrom); glu.lsub(ito) = glu.lsub(ifrom);
nextl = ito; // = istop + length(jcol) nextl = ito; // = istop + length(jcol)
} }
@ -166,8 +166,8 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
// Tidy up the pointers before exit // Tidy up the pointers before exit
glu.xsup(nsuper+1) = jcolp1; glu.xsup(nsuper+1) = jcolp1;
glu.supno(jcolp1) = nsuper; glu.supno(jcolp1) = nsuper;
xprune(jcol) = nextl; // Initialize upper bound for pruning xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
glu.xlsub(jcolp1) = nextl; glu.xlsub(jcolp1) = StorageIndex(nextl);
return 0; return 0;
} }

View File

@ -56,7 +56,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::copy_to_ucol(const Index jcol, const In
// For each nonzero supernode segment of U[*,j] in topological order // For each nonzero supernode segment of U[*,j] in topological order
Index k = nseg - 1, i; Index k = nseg - 1, i;
Index nextu = glu.xusub(jcol); StorageIndex nextu = glu.xusub(jcol);
Index kfnz, isub, segsize; Index kfnz, isub, segsize;
Index new_next,irow; Index new_next,irow;
Index fsupc, mem; Index fsupc, mem;

View File

@ -48,15 +48,14 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
// The etree may not be postordered, but it is heap ordered // The etree may not be postordered, but it is heap ordered
IndexVector post; IndexVector post;
internal::treePostorder(n, et, post); // Post order etree internal::treePostorder(StorageIndex(n), et, post); // Post order etree
IndexVector inv_post(n+1); IndexVector inv_post(n+1);
Index i; for (StorageIndex i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
// Renumber etree in postorder // Renumber etree in postorder
IndexVector iwork(n); IndexVector iwork(n);
IndexVector et_save(n+1); IndexVector et_save(n+1);
for (i = 0; i < n; ++i) for (Index i = 0; i < n; ++i)
{ {
iwork(post(i)) = post(et(i)); iwork(post(i)) = post(et(i));
} }
@ -78,7 +77,7 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
StorageIndex k; StorageIndex k;
Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree Index nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
Index nsuper_et = 0; // Number of relaxed snodes in the original etree Index nsuper_et = 0; // Number of relaxed snodes in the original etree
Index l; StorageIndex l;
for (j = 0; j < n; ) for (j = 0; j < n; )
{ {
parent = et(j); parent = et(j);
@ -90,8 +89,8 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
} }
// Found a supernode in postordered etree, j is the last column // Found a supernode in postordered etree, j is the last column
++nsuper_et_post; ++nsuper_et_post;
k = n; k = StorageIndex(n);
for (i = snode_start; i <= j; ++i) for (Index i = snode_start; i <= j; ++i)
k = (std::min)(k, inv_post(i)); k = (std::min)(k, inv_post(i));
l = inv_post(j); l = inv_post(j);
if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode if ( (l - k) == (j - snode_start) ) // Same number of columns in the snode
@ -102,7 +101,7 @@ void SparseLUImpl<Scalar,StorageIndex>::heap_relax_snode (const Index n, IndexVe
} }
else else
{ {
for (i = snode_start; i <= j; ++i) for (Index i = snode_start; i <= j; ++i)
{ {
l = inv_post(i); l = inv_post(i);
if (descendants(i) == 0) if (descendants(i) == 0)

View File

@ -41,7 +41,7 @@ struct panel_dfs_traits
panel_dfs_traits(Index jcol, StorageIndex* marker) panel_dfs_traits(Index jcol, StorageIndex* marker)
: m_jcol(jcol), m_marker(marker) : m_jcol(jcol), m_marker(marker)
{} {}
bool update_segrep(Index krep, Index jj) bool update_segrep(Index krep, StorageIndex jj)
{ {
if(m_marker[krep]<m_jcol) if(m_marker[krep]<m_jcol)
{ {
@ -59,7 +59,7 @@ struct panel_dfs_traits
template <typename Scalar, typename StorageIndex> template <typename Scalar, typename StorageIndex>
template <typename Traits> template <typename Traits>
void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector& perm_r, void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const StorageIndex jj, IndexVector& perm_r,
Index& nseg, IndexVector& panel_lsub, IndexVector& segrep, Index& nseg, IndexVector& panel_lsub, IndexVector& segrep,
Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent, Ref<IndexVector> repfnz_col, IndexVector& xprune, Ref<IndexVector> marker, IndexVector& parent,
IndexVector& xplore, GlobalLU_t& glu, IndexVector& xplore, GlobalLU_t& glu,
@ -67,14 +67,14 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
) )
{ {
Index kmark = marker(krow); StorageIndex kmark = marker(krow);
// For each unmarked krow of jj // For each unmarked krow of jj
marker(krow) = jj; marker(krow) = jj;
Index kperm = perm_r(krow); StorageIndex kperm = perm_r(krow);
if (kperm == emptyIdxLU ) { if (kperm == emptyIdxLU ) {
// krow is in L : place it in structure of L(*, jj) // krow is in L : place it in structure of L(*, jj)
panel_lsub(nextl_col++) = krow; // krow is indexed into A panel_lsub(nextl_col++) = StorageIndex(krow); // krow is indexed into A
traits.mem_expand(panel_lsub, nextl_col, kmark); traits.mem_expand(panel_lsub, nextl_col, kmark);
} }
@ -83,9 +83,9 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
// krow is in U : if its supernode-representative krep // krow is in U : if its supernode-representative krep
// has been explored, update repfnz(*) // has been explored, update repfnz(*)
// krep = supernode representative of the current row // krep = supernode representative of the current row
Index krep = glu.xsup(glu.supno(kperm)+1) - 1; StorageIndex krep = glu.xsup(glu.supno(kperm)+1) - 1;
// First nonzero element in the current column: // First nonzero element in the current column:
Index myfnz = repfnz_col(krep); StorageIndex myfnz = repfnz_col(krep);
if (myfnz != emptyIdxLU ) if (myfnz != emptyIdxLU )
{ {
@ -96,26 +96,26 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
else else
{ {
// Otherwise, perform dfs starting at krep // Otherwise, perform dfs starting at krep
Index oldrep = emptyIdxLU; StorageIndex oldrep = emptyIdxLU;
parent(krep) = oldrep; parent(krep) = oldrep;
repfnz_col(krep) = kperm; repfnz_col(krep) = kperm;
Index xdfs = glu.xlsub(krep); StorageIndex xdfs = glu.xlsub(krep);
Index maxdfs = xprune(krep); Index maxdfs = xprune(krep);
Index kpar; StorageIndex kpar;
do do
{ {
// For each unmarked kchild of krep // For each unmarked kchild of krep
while (xdfs < maxdfs) while (xdfs < maxdfs)
{ {
Index kchild = glu.lsub(xdfs); StorageIndex kchild = glu.lsub(xdfs);
xdfs++; xdfs++;
Index chmark = marker(kchild); StorageIndex chmark = marker(kchild);
if (chmark != jj ) if (chmark != jj )
{ {
marker(kchild) = jj; marker(kchild) = jj;
Index chperm = perm_r(kchild); StorageIndex chperm = perm_r(kchild);
if (chperm == emptyIdxLU) if (chperm == emptyIdxLU)
{ {
@ -128,7 +128,7 @@ void SparseLUImpl<Scalar,StorageIndex>::dfs_kernel(const Index jj, IndexVector&
// case kchild is in U : // case kchild is in U :
// chrep = its supernode-rep. If its rep has been explored, // chrep = its supernode-rep. If its rep has been explored,
// update its repfnz(*) // update its repfnz(*)
Index chrep = glu.xsup(glu.supno(chperm)+1) - 1; StorageIndex chrep = glu.xsup(glu.supno(chperm)+1) - 1;
myfnz = repfnz_col(chrep); myfnz = repfnz_col(chrep);
if (myfnz != emptyIdxLU) if (myfnz != emptyIdxLU)
@ -227,7 +227,7 @@ void SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w,
panel_dfs_traits<IndexVector> traits(jcol, marker1.data()); panel_dfs_traits<IndexVector> traits(jcol, marker1.data());
// For each column in the panel // For each column in the panel
for (Index jj = jcol; jj < jcol + w; jj++) for (StorageIndex jj = StorageIndex(jcol); jj < jcol + w; jj++)
{ {
nextl_col = (jj - jcol) * m; nextl_col = (jj - jcol) * m;
@ -241,7 +241,7 @@ void SparseLUImpl<Scalar,StorageIndex>::panel_dfs(const Index m, const Index w,
Index krow = it.row(); Index krow = it.row();
dense_col(krow) = it.value(); dense_col(krow) = it.value();
Index kmark = marker(krow); StorageIndex kmark = marker(krow);
if (kmark == jj) if (kmark == jj)
continue; // krow visited before, go to the next nonzero continue; // krow visited before, go to the next nonzero

View File

@ -89,7 +89,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScal
// Test for singularity // Test for singularity
if ( pivmax == 0.0 ) { if ( pivmax == 0.0 ) {
pivrow = lsub_ptr[pivptr]; pivrow = lsub_ptr[pivptr];
perm_r(pivrow) = jcol; perm_r(pivrow) = StorageIndex(jcol);
return (jcol+1); return (jcol+1);
} }
@ -110,7 +110,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::pivotL(const Index jcol, const RealScal
} }
// Record pivot row // Record pivot row
perm_r(pivrow) = jcol; perm_r(pivrow) = StorageIndex(jcol);
// Interchange row subscripts // Interchange row subscripts
if (pivptr != nsupc ) if (pivptr != nsupc )
{ {

View File

@ -124,7 +124,7 @@ void SparseLUImpl<Scalar,StorageIndex>::pruneL(const Index jcol, const IndexVect
} }
} // end while } // end while
xprune(irep) = kmin; //Pruning xprune(irep) = StorageIndex(kmin); //Pruning
} // end if do_prune } // end if do_prune
} // end pruning } // end pruning
} // End for each U-segment } // End for each U-segment

View File

@ -48,10 +48,10 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
{ {
// compute the number of descendants of each node in the etree // compute the number of descendants of each node in the etree
Index j, parent; Index parent;
relax_end.setConstant(emptyIdxLU); relax_end.setConstant(emptyIdxLU);
descendants.setZero(); descendants.setZero();
for (j = 0; j < n; j++) for (Index j = 0; j < n; j++)
{ {
parent = et(j); parent = et(j);
if (parent != n) // not the dummy root if (parent != n) // not the dummy root
@ -59,7 +59,7 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
} }
// Identify the relaxed supernodes by postorder traversal of the etree // Identify the relaxed supernodes by postorder traversal of the etree
Index snode_start; // beginning of a snode Index snode_start; // beginning of a snode
for (j = 0; j < n; ) for (Index j = 0; j < n; )
{ {
parent = et(j); parent = et(j);
snode_start = j; snode_start = j;
@ -69,7 +69,7 @@ void SparseLUImpl<Scalar,StorageIndex>::relax_snode (const Index n, IndexVector&
parent = et(j); parent = et(j);
} }
// Found a supernode in postordered etree, j is the last column // Found a supernode in postordered etree, j is the last column
relax_end(snode_start) = j; // Record last column relax_end(snode_start) = StorageIndex(j); // Record last column
j++; j++;
// Search for a new leaf // Search for a new leaf
while (descendants(j) != 0 && j < n) j++; while (descendants(j) != 0 && j < n) j++;

View File

@ -296,7 +296,7 @@ void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
if (!m_perm_c.size()) if (!m_perm_c.size())
{ {
m_perm_c.resize(n); m_perm_c.resize(n);
m_perm_c.indices().setLinSpaced(n, 0,n-1); m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));
} }
// Compute the column elimination tree of the permuted matrix // Compute the column elimination tree of the permuted matrix
@ -327,8 +327,8 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
using std::abs; using std::abs;
eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step"); eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
StorageIndex m = mat.rows(); StorageIndex m = StorageIndex(mat.rows());
StorageIndex n = mat.cols(); StorageIndex n = StorageIndex(mat.cols());
StorageIndex diagSize = (std::min)(m,n); StorageIndex diagSize = (std::min)(m,n);
IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes IndexVector mark((std::max)(m,n)); mark.setConstant(-1); // Record the visited nodes
IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q IndexVector Ridx(n), Qidx(m); // Store temporarily the row indexes for the current column of R and Q
@ -406,7 +406,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp) for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
{ {
StorageIndex curIdx = nonzeroCol; StorageIndex curIdx = nonzeroCol;
if(itp) curIdx = itp.row(); if(itp) curIdx = StorageIndex(itp.row());
if(curIdx == nonzeroCol) found_diag = true; if(curIdx == nonzeroCol) found_diag = true;
// Get the nonzeros indexes of the current column of R // Get the nonzeros indexes of the current column of R
@ -467,7 +467,7 @@ void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{ {
for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq) for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
{ {
StorageIndex iQ = itq.row(); StorageIndex iQ = StorageIndex(itq.row());
if (mark(iQ) != col) if (mark(iQ) != col)
{ {
Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q, Qidx(nzcolQ++) = iQ; // Add this row to the pattern of Q,
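
On the user side, the SparseQR factorization and its column ordering are driven like this (standard API, small rectangular matrix made up for illustration):

    #include <Eigen/Sparse>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      SpMat A(4, 3);                    // a tall least-squares system
      A.insert(0, 0) = 1; A.insert(1, 1) = 2;
      A.insert(2, 2) = 3; A.insert(3, 0) = 1;
      A.makeCompressed();               // switch to compressed (CSC) storage

      Eigen::SparseQR<SpMat, Eigen::COLAMDOrdering<int> > qr(A);
      if (qr.info() != Eigen::Success) return 1;
      Eigen::VectorXd b = Eigen::VectorXd::Ones(4);
      Eigen::VectorXd x = qr.solve(b);  // least-squares solution
      std::cout << "rank = " << qr.rank() << ", x = " << x.transpose() << "\n";
      return 0;
    }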

View File

@ -313,8 +313,9 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
void analyzePattern_impl() void analyzePattern_impl()
{ {
int errorCode = 0; int errorCode = 0;
errorCode = umfpack_symbolic(m_copyMatrix.rows(), m_copyMatrix.cols(), m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, errorCode = umfpack_symbolic(internal::convert_index<int>(m_copyMatrix.rows()),
&m_symbolic, 0, 0); internal::convert_index<int>(m_copyMatrix.cols()),
m_outerIndexPtr, m_innerIndexPtr, m_valuePtr, &m_symbolic, 0, 0);
m_isInitialized = true; m_isInitialized = true;
m_info = errorCode ? InvalidInput : Success; m_info = errorCode ? InvalidInput : Success;

View File

@ -398,8 +398,8 @@ template<typename SparseMatrixType> void sparse_basic(const SparseMatrixType& re
refMat.setZero(); refMat.setZero();
for(Index i=0;i<ntriplets;++i) for(Index i=0;i<ntriplets;++i)
{ {
StorageIndex r = internal::random<StorageIndex>(0,rows-1); StorageIndex r = internal::random<StorageIndex>(0,StorageIndex(rows-1));
StorageIndex c = internal::random<StorageIndex>(0,cols-1); StorageIndex c = internal::random<StorageIndex>(0,StorageIndex(cols-1));
Scalar v = internal::random<Scalar>(); Scalar v = internal::random<Scalar>();
triplets.push_back(TripletType(r,c,v)); triplets.push_back(TripletType(r,c,v));
refMat(r,c) += v; refMat(r,c) += v;
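
The test above fills random triplets and mirrors them into a dense reference matrix. In user code the same construction path is setFromTriplets(), where duplicate entries are summed:

    #include <Eigen/Sparse>
    #include <iostream>
    #include <vector>

    int main()
    {
      typedef Eigen::Triplet<double> T;
      std::vector<T> triplets;
      triplets.push_back(T(0, 0, 1.0));
      triplets.push_back(T(2, 1, 3.0));
      triplets.push_back(T(2, 1, 0.5));        // duplicate entry: summed into 3.5

      Eigen::SparseMatrix<double> A(3, 3);
      A.setFromTriplets(triplets.begin(), triplets.end());
      std::cout << "A(2,1) = " << A.coeff(2, 1) << "\n";
      return 0;
    }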

View File

@ -37,7 +37,7 @@ template<typename Scalar> void test_spqr_scalar()
SPQR<MatrixType> solver; SPQR<MatrixType> solver;
generate_sparse_rectangular_problem(A,dA); generate_sparse_rectangular_problem(A,dA);
int m = A.rows(); Index m = A.rows();
b = DenseVector::Random(m); b = DenseVector::Random(m);
solver.compute(A); solver.compute(A);
if (solver.info() != Success) if (solver.info() != Success)

View File

@ -54,7 +54,7 @@ namespace internal {
*/ */
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond, bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Preconditioner & precond,
int &iters, const int &restart, typename Dest::RealScalar & tol_error) { Index &iters, const Index &restart, typename Dest::RealScalar & tol_error) {
using std::sqrt; using std::sqrt;
using std::abs; using std::abs;
@ -65,10 +65,10 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType; typedef Matrix < Scalar, Dynamic, Dynamic > FMatrixType;
RealScalar tol = tol_error; RealScalar tol = tol_error;
const int maxIters = iters; const Index maxIters = iters;
iters = 0; iters = 0;
const int m = mat.rows(); const Index m = mat.rows();
// residual and preconditioned residual // residual and preconditioned residual
const VectorType p0 = rhs - mat*x; const VectorType p0 = rhs - mat*x;
@ -97,14 +97,14 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
w(0)=(Scalar) beta; w(0)=(Scalar) beta;
H.bottomLeftCorner(m - 1, 1) = e; H.bottomLeftCorner(m - 1, 1) = e;
for (int k = 1; k <= restart; ++k) { for (Index k = 1; k <= restart; ++k) {
++iters; ++iters;
VectorType v = VectorType::Unit(m, k - 1), workspace(m); VectorType v = VectorType::Unit(m, k - 1), workspace(m);
// apply Householder reflections H_{1} ... H_{k-1} to v // apply Householder reflections H_{1} ... H_{k-1} to v
for (int i = k - 1; i >= 0; --i) { for (Index i = k - 1; i >= 0; --i) {
v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
} }
@ -113,7 +113,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
v=precond.solve(t); v=precond.solve(t);
// apply Householder reflections H_{k-1} ... H_{1} to v // apply Householder reflections H_{k-1} ... H_{1} to v
for (int i = 0; i < k; ++i) { for (Index i = 0; i < k; ++i) {
v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); v.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
} }
@ -133,7 +133,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
} }
if (k > 1) { if (k > 1) {
for (int i = 0; i < k - 1; ++i) { for (Index i = 0; i < k - 1; ++i) {
// apply old Givens rotations to v // apply old Givens rotations to v
v.applyOnTheLeft(i, i + 1, G[i].adjoint()); v.applyOnTheLeft(i, i + 1, G[i].adjoint());
} }
@ -166,7 +166,7 @@ bool gmres(const MatrixType & mat, const Rhs & rhs, Dest & x, const Precondition
// apply Householder reflection H_{k} to x_new // apply Householder reflection H_{k} to x_new
x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data()); x_new.tail(m - k + 1).applyHouseholderOnTheLeft(H.col(k - 1).tail(m - k), tau.coeffRef(k - 1), workspace.data());
for (int i = k - 2; i >= 0; --i) { for (Index i = k - 2; i >= 0; --i) {
x_new += y(i) * VectorType::Unit(m, i); x_new += y(i) * VectorType::Unit(m, i);
// apply Householder reflection H_{i} to x_new // apply Householder reflection H_{i} to x_new
x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data()); x_new.tail(m - i).applyHouseholderOnTheLeft(H.col(i).tail(m - i - 1), tau.coeffRef(i), workspace.data());
@ -265,7 +265,7 @@ class GMRES : public IterativeSolverBase<GMRES<_MatrixType,_Preconditioner> >
using Base::m_isInitialized; using Base::m_isInitialized;
private: private:
int m_restart; Index m_restart;
public: public:
using Base::_solve_impl; using Base::_solve_impl;
@ -295,19 +295,19 @@ public:
/** Get the number of iterations after which a restart is performed. /** Get the number of iterations after which a restart is performed.
*/ */
int get_restart() { return m_restart; } Index get_restart() { return m_restart; }
/** Set the number of iterations after which a restart is performed. /** Set the number of iterations after which a restart is performed.
* \param restart number of iterations for a restart, default is 30. * \param restart number of iterations for a restart, default is 30.
*/ */
void set_restart(const int restart) { m_restart=restart; } void set_restart(const Index restart) { m_restart=restart; }
/** \internal */ /** \internal */
template<typename Rhs,typename Dest> template<typename Rhs,typename Dest>
void _solve_with_guess_impl(const Rhs& b, Dest& x) const void _solve_with_guess_impl(const Rhs& b, Dest& x) const
{ {
bool failed = false; bool failed = false;
for(int j=0; j<b.cols(); ++j) for(Index j=0; j<b.cols(); ++j)
{ {
m_iterations = Base::maxIterations(); m_iterations = Base::maxIterations();
m_error = Base::m_tolerance; m_error = Base::m_tolerance;
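
GMRES, including the restart length held in m_restart above, lives in the unsupported IterativeSolvers module. A minimal usage sketch on a made-up tridiagonal system (the matrix values are arbitrary):

    #include <Eigen/Sparse>
    #include <unsupported/Eigen/IterativeSolvers>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      const int n = 100;
      SpMat A(n, n);
      for (int i = 0; i < n; ++i)        // simple diagonally dominant tridiagonal matrix
      {
        A.insert(i, i) = 4.0;
        if (i + 1 < n) A.insert(i, i + 1) = -1.0;
        if (i > 0)     A.insert(i, i - 1) = -1.0;
      }
      A.makeCompressed();
      Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

      Eigen::GMRES<SpMat> gmres(A);
      gmres.set_restart(30);             // restart length, as handled by m_restart above
      gmres.setMaxIterations(500);
      gmres.setTolerance(1e-10);
      Eigen::VectorXd x = gmres.solve(b);
      std::cout << "iters: " << gmres.iterations()
                << " error: " << gmres.error() << "\n";
      return 0;
    }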

View File

@ -29,7 +29,7 @@ namespace Eigen {
template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner> template<typename MatrixType, typename Rhs, typename Dest, typename Preconditioner>
EIGEN_DONT_INLINE EIGEN_DONT_INLINE
void minres(const MatrixType& mat, const Rhs& rhs, Dest& x, void minres(const MatrixType& mat, const Rhs& rhs, Dest& x,
const Preconditioner& precond, int& iters, const Preconditioner& precond, Index& iters,
typename Dest::RealScalar& tol_error) typename Dest::RealScalar& tol_error)
{ {
using std::sqrt; using std::sqrt;
@ -48,8 +48,8 @@ namespace Eigen {
} }
// initialize // initialize
const int maxIters(iters); // initialize maxIters to iters const Index maxIters(iters); // initialize maxIters to iters
const int N(mat.cols()); // the size of the matrix const Index N(mat.cols()); // the size of the matrix
const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2) const RealScalar threshold2(tol_error*tol_error*rhsNorm2); // convergence threshold (compared to residualNorm2)
// Initialize preconditioned Lanczos // Initialize preconditioned Lanczos