Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-09-26 00:03:14 +08:00)

Commit 0c9b08e46e ("build complete... almost"), parent f8a0745cb0.
@@ -32,9 +32,8 @@ template<class Derived>
 class OrderingBase
 {
 public:
-typedef typename internal::traits<Derived>::MatrixType MatrixType;
-typedef typename MatrixType::Scalar Scalar;
-typedef typename MatrixType::Index Index;
+typedef typename internal::traits<Derived>::Scalar Scalar;
+typedef typename internal::traits<Derived>::Index Index;
 typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;

 public:
@@ -42,10 +41,12 @@ class OrderingBase
 {

 }
+template<typename MatrixType>
 OrderingBase(const MatrixType& mat):OrderingBase()
 {
 compute(mat);
 }
+template<typename MatrixType>
 Derived& compute(const MatrixType& mat)
 {
 return derived().compute(mat);
@@ -61,9 +62,9 @@ class OrderingBase
 /**
 * Get the permutation vector
 */
-PermutationType& get_perm(const MatrixType& mat)
+PermutationType& get_perm()
 {
-if (m_isInitialized = true) return m_P;
+if (m_isInitialized == true) return m_P;
 else abort(); // FIXME Should find a smoother way to exit with error code
 }

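
Note on the second change above: `if (m_isInitialized = true)` uses the assignment operator, so the condition is always true regardless of the flag; `==` is the intended comparison. A minimal standalone illustration of the difference (hypothetical variable names, not Eigen code):

    #include <iostream>

    int main()
    {
      bool initialized = false;
      if (initialized = true)        // bug: assigns true, so the branch is always taken
        std::cout << "branch taken even though the flag started out false\n";
      initialized = false;
      if (initialized == true)       // fix: compares, branch correctly skipped
        std::cout << "this line is never printed\n";
      return 0;
    }
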
@@ -101,7 +102,6 @@ class OrderingBase
 mutable bool m_isInitialized;
 SparseMatrix<Scalar,ColMajor,Index> m_mat; // Stores the (symmetrized) matrix to permute
 };
-
 /**
 * Get the approximate minimum degree ordering
 * If the matrix is not structurally symmetric, an ordering of A^T+A is computed
@@ -161,6 +161,15 @@ class AMDOrdering : public OrderingBase<AMDOrdering<Scalar, Index> >
 };

+
+namespace internal {
+template <typename _Scalar, typename _Index>
+struct traits<AMDOrdering<_Scalar, _Index> >
+{
+typedef _Scalar Scalar;
+typedef _Index Index;
+};
+}

 /**
 * Get the column approximate minimum degree ordering
 * The matrix should be in column-major format
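
For context, the `internal::traits` specialization added above is what lets the CRTP base class pull `Scalar` and `Index` out of the derived ordering type, as the first hunk of this commit now does. A reduced sketch of the pattern with hypothetical names (outside Eigen):

    #include <iostream>

    namespace internal {
      template<typename Derived> struct traits;               // primary template, only specializations are used
    }

    template<class Derived>
    class OrderingBaseSketch {
    public:
      typedef typename internal::traits<Derived>::Scalar Scalar;   // resolved through the specialization
      typedef typename internal::traits<Derived>::Index  Index;
    };

    template<typename _Scalar, typename _Index> class AMDOrderingSketch;

    namespace internal {
      template<typename _Scalar, typename _Index>
      struct traits<AMDOrderingSketch<_Scalar, _Index> > {
        typedef _Scalar Scalar;
        typedef _Index  Index;
      };
    }

    template<typename _Scalar, typename _Index>
    class AMDOrderingSketch : public OrderingBaseSketch<AMDOrderingSketch<_Scalar, _Index> > {};

    int main()
    {
      AMDOrderingSketch<double, int>::Scalar s = 1.5;   // double, deduced through traits
      std::cout << s << "\n";
      return 0;
    }
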
@@ -54,15 +54,15 @@ class SparseLU
 typedef SuperNodalMatrix<Scalar, Index> SCMatrix;
 typedef Matrix<Scalar,Dynamic,1> ScalarVector;
 typedef Matrix<Index,Dynamic,1> IndexVector;
-// typedef GlobalLU_t<ScalarVector, IndexVector> LU_GlobalLU_t;
 typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
 public:
-SparseLU():m_isInitialized(true),m_symmetricmode(false),m_diagpivotthresh(1.0)
+SparseLU():m_isInitialized(true),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
 {
 initperfvalues();
 }
-SparseLU(const MatrixType& matrix):SparseLU()
+SparseLU(const MatrixType& matrix):m_isInitialized(true),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0)
 {
+initperfvalues();
 compute(matrix);
 }

@@ -114,6 +114,21 @@ class SparseLU
 // return solve_retval<SparseLU, Rhs>(*this, B.derived());
 // }

+
+/** \brief Reports whether previous computation was successful.
+*
+* \returns \c Success if computation was succesful,
+* \c NumericalIssue if the PaStiX reports a problem
+* \c InvalidInput if the input matrix is invalid
+*
+* \sa iparm()
+*/
+ComputationInfo info() const
+{
+eigen_assert(m_isInitialized && "Decomposition is not initialized.");
+return m_info;
+}
+
 template<typename Rhs, typename Dest>
 bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X) const
 {
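
A typical call site for the newly added info() accessor would check the returned ComputationInfo before using the factors; a hedged usage sketch (the solver type and function name are assumptions, the enum values are Eigen's):

    #include <iostream>
    #include <Eigen/Core>   // Eigen::ComputationInfo, Eigen::Success, ...

    // 'Solver' stands for any decomposition exposing info(), such as the SparseLU above.
    template <typename Solver>
    bool factorizationSucceeded(const Solver& solver)
    {
      switch (solver.info())
      {
        case Eigen::Success:        return true;
        case Eigen::NumericalIssue: std::cerr << "numerical problem during factorization\n"; return false;
        case Eigen::InvalidInput:   std::cerr << "the input matrix is invalid\n";            return false;
        default:                    std::cerr << "other status (e.g. not computed)\n";        return false;
      }
    }
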
@@ -141,7 +156,7 @@ class SparseLU
 const Scalar * Lval = m_Lstore.valuePtr(); // Nonzero values
 Matrix<Scalar,Dynamic,Dynamic> work(n, nrhs); // working vector
 work.setZero();
-int j, k, i, icol,jcol;
+int j, k, i,jcol;
 for (k = 0; k <= m_Lstore.nsuper(); k ++)
 {
 fsupc = m_Lstore.supToCol()[k];
@@ -168,13 +183,12 @@ class SparseLU
 // The supernode has more than one column

 // Triangular solve
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(nsupr) );
-// Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride > u( &(X(fsupc,0)), nsupc, nrhs, OuterStride<>(X.rows()) );
-Matrix<Scalar,Dynamic,Dynamic>& U = X.block(fsupc, 0, nsupc, nrhs); //FIXME Check this
+Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(nsupr) );
+Block<Dest > U(X, fsupc, 0, nsupc, nrhs); //FIXME TODO Consider more RHS
 U = A.template triangularView<Lower>().solve(U);

 // Matrix-vector product
-new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(nsupr) );
+new (&A) Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(Lval[luptr+nsupc]), nrow, nsupc, OuterStride<>(nsupr) );
 work.block(0, 0, nrow, nrhs) = A * U;

 //Begin Scatter
@@ -210,8 +224,8 @@ class SparseLU
 }
 else
 {
-Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(nsupr) );
-Matrix<Scalar,Dynamic,Dynamic>& U = X.block(fsupc, 0, nsupc, nrhs);
+Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(Lval[luptr]), nsupc, nsupc, OuterStride<>(nsupr) );
+Block<const Dest> U(X, fsupc, 0, nsupc, nrhs);
 U = A.template triangularView<Upper>().solve(U);
 }

@@ -221,8 +235,8 @@ class SparseLU
 {
 for (i = m_Ustore.outerIndexPtr()[jcol]; i < m_Ustore.outerIndexPtr()[jcol]; i++)
 {
-irow = m_Ustore.InnerIndices()[i];
-X(irow, j) -= X(jcol, j) * m_Ustore.Values()[i];
+irow = m_Ustore.innerIndexPtr()[i];
+X(irow, j) -= X(jcol, j) * m_Ustore.valuePtr()[i];
 }
 }
 }
@@ -254,12 +268,12 @@ class SparseLU
 bool m_analysisIsOk;
 NCMatrix m_mat; // The input (permuted ) matrix
 SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
-NCMatrix m_Ustore; // The upper triangular matrix
+MappedSparseMatrix<Scalar> m_Ustore; // The upper triangular matrix
 PermutationType m_perm_c; // Column permutation
 PermutationType m_perm_r ; // Row permutation
 IndexVector m_etree; // Column elimination tree

-static LU_GlobalLU_t<IndexVector, ScalarVector> m_glu; // persistent data to facilitate multiple factors
+LU_GlobalLU_t<IndexVector, ScalarVector> m_glu; // persistent data to facilitate multiple factors
 // FIXME All fields of this struct can be defined separately as class members

 // SuperLU/SparseLU options
@@ -332,9 +346,11 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
 m_etree = iwork;

 // Postmultiply A*Pc by post, i.e reorder the matrix according to the postorder of the etree
-PermutationType post_perm(post);
-//m_mat = m_mat * post_perm; // FIXME This should surely be in factorize()

+PermutationType post_perm(m);;
+for (int i = 0; i < m; i++)
+post_perm.indices()(i) = post(i);
+//m_mat = m_mat * post_perm; // FIXME This should surely be in factorize()
 // Composition of the two permutations
 m_perm_c = m_perm_c * post_perm;
 } // end postordering
@@ -357,6 +373,7 @@ void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
 #include "SparseLU_pruneL.h"
 #include "SparseLU_Utils.h"

+
 /**
 * - Numerical factorization
 * - Interleaved with the symbolic factorization
@@ -380,9 +397,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
 eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");

+typedef typename IndexVector::Scalar Index;

-ScalarVector work; // Scalar work vector
-IndexVector iwork; //Index work vector

 // Apply the column permutation computed in analyzepattern()
 m_mat = matrix * m_perm_c;
@@ -394,7 +410,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 int maxpanel = m_panel_size * m;
 // Allocate storage common to the factor routines
 int lwork = 0;
-int info = LUMemInit(m, n, nnz, work, iwork, lwork, m_fillfactor, m_panel_size, m_maxsuper, m_rowblk, m_glu);
+int info = LUMemInit(m, n, nnz, lwork, m_fillfactor, m_panel_size, m_glu);
 if (info)
 {
 std::cerr << "UNABLE TO ALLOCATE WORKING MEMORY\n\n" ;
@@ -404,29 +420,37 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)


 // Set up pointers for integer working arrays
-int idx = 0;
-VectorBlock<IndexVector> segrep(iwork, idx, m);
-idx += m;
-VectorBlock<IndexVector> parent(iwork, idx, m);
-idx += m;
-VectorBlock<IndexVector> xplore(iwork, idx, m);
-idx += m;
-VectorBlock<IndexVector> repfnz(iwork, idx, maxpanel);
-idx += maxpanel;
-VectorBlock<IndexVector> panel_lsub(iwork, idx, maxpanel);
-idx += maxpanel;
-VectorBlock<IndexVector> xprune(iwork, idx, n);
-idx += n;
-VectorBlock<IndexVector> marker(iwork, idx, m * LU_NO_MARKER);
+// int idx = 0;
+// VectorBlock<IndexVector> segrep(iwork, idx, m);
+// idx += m;
+// VectorBlock<IndexVector> parent(iwork, idx, m);
+// idx += m;
+// VectorBlock<IndexVector> xplore(iwork, idx, m);
+// idx += m;
+// VectorBlock<IndexVector> repfnz(iwork, idx, maxpanel);
+// idx += maxpanel;
+// VectorBlock<IndexVector> panel_lsub(iwork, idx, maxpanel);
+// idx += maxpanel;
+// VectorBlock<IndexVector> xprune(iwork, idx, n);
+// idx += n;
+// VectorBlock<IndexVector> marker(iwork, idx, m * LU_NO_MARKER);
+// Set up pointers for integer working arrays
+IndexVector segrep(m);
+IndexVector parent(m);
+IndexVector xplore(m);
+IndexVector repfnz(maxpanel);
+IndexVector panel_lsub(maxpanel);
+IndexVector xprune(n);
+IndexVector marker(m*LU_NO_MARKER);

 repfnz.setConstant(-1);
 panel_lsub.setConstant(-1);

 // Set up pointers for scalar working arrays
-VectorBlock<ScalarVector> dense(work, 0, maxpanel);
-dense.setZero();
-VectorBlock<ScalarVector> tempv(work, maxpanel, LU_NUM_TEMPV(m, m_panel_size, m_maxsuper, m_rowblk) );
-tempv.setZero();
+ScalarVector dense;
+dense.setZero(maxpanel);
+ScalarVector tempv;
+tempv.setZero(LU_NUM_TEMPV(m, m_panel_size, m_maxsuper, m_rowblk) );

 // Setup Permutation vectors
 // Compute the inverse of perm_c
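
The work arrays above now use Eigen's resize-and-fill overloads: setZero(n) resizes a dynamic vector to n entries and zeroes it, while setConstant(-1) fills an already-sized vector in place, the same pattern applied to the integer arrays. A small self-contained illustration:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::VectorXd dense;        // starts empty, like the ScalarVector above
      dense.setZero(8);             // resize to 8 entries and set them all to zero
      Eigen::VectorXi marker(4);
      marker.setConstant(-1);       // fill an already-sized vector in place
      std::cout << dense.size() << " | " << marker.transpose() << "\n";
      return 0;
    }
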
@@ -434,12 +458,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)

 // Identify initial relaxed snodes
 IndexVector relax_end(n);
-if ( m_symmetricmode = true )
-LU_heap_relax_snode(n, m_etree, m_relax, marker, relax_end);
+if ( m_symmetricmode == true )
+LU_heap_relax_snode<IndexVector>(n, m_etree, m_relax, marker, relax_end);
 else
-LU_relax_snode(n, m_etree, m_relax, marker, relax_end);
+LU_relax_snode<IndexVector>(n, m_etree, m_relax, marker, relax_end);

-m_perm_r.setConstant(-1);
+m_perm_r.resize(m);
+m_perm_r.indices().setConstant(-1); //FIXME
 marker.setConstant(-1);

 IndexVector& xsup = m_glu.xsup;
@@ -451,19 +476,19 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 Index& nzlumax = m_glu.nzlumax;

 supno(0) = IND_EMPTY;
-xsup(0) = xlsub(0) = xusub(0) = xlusup(0) = 0;
+xsup(0) = xlsub(0) = xusub(0) = xlusup(0) = Index(0);

 // Work on one 'panel' at a time. A panel is one of the following :
 // (a) a relaxed supernode at the bottom of the etree, or
 // (b) panel_size contiguous columns, <panel_size> defined by the user
-register int jcol,kcol;
+int jcol,kcol;
 IndexVector panel_histo(n);
 Index nextu, nextlu, jsupno, fsupc, new_next;
 Index pivrow; // Pivotal row number in the original row matrix
 int nseg1; // Number of segments in U-column above panel row jcol
 int nseg; // Number of segments in each U-column
-int irep,ir, icol;
-int i, k, jj,j;
+int irep, icol;
+int i, k, jj;
 for (jcol = 0; jcol < n; )
 {
 if (relax_end(jcol) != IND_EMPTY)
@@ -472,7 +497,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)

 // Factorize the relaxed supernode(jcol:kcol)
 // First, determine the union of the row structure of the snode
-info = LU_snode_dfs(jcol, kcol, m_mat.innerIndexPtr(), m_mat.outerIndexPtr(), xprune, marker);
+info = LU_snode_dfs(jcol, kcol, m_mat.innerIndexPtr(), m_mat.outerIndexPtr(), xprune, marker, m_glu);
 if ( info )
 {
 std::cerr << "MEMORY ALLOCATION FAILED IN SNODE_DFS() \n";
@@ -488,7 +513,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 int mem;
 while (new_next > nzlumax )
 {
-mem = LUMemXpand<Scalar>(lusup, nzlumax, nextlu, LUSUP, m_glu.num_expansions);
+mem = LUMemXpand(lusup, nzlumax, nextlu, LUSUP, m_glu.num_expansions);
 if (mem)
 {
 std::cerr << "MEMORY ALLOCATION FAILED FOR L FACTOR \n";
@@ -502,13 +527,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 xusub(icol+1) = nextu;
 // Scatter into SPA dense(*)
 for (typename MatrixType::InnerIterator it(m_mat, icol); it; ++it)
-dense(it.row()) = it.val();
+dense(it.row()) = it.value();

 // Numeric update within the snode
-LU_snode_bmod(icol, jsupno, fsupc, dense, m_glu);
+LU_snode_bmod(icol, fsupc, dense, m_glu);

 // Eliminate the current column
-info = LU_pivotL(icol, m_diagpivotthresh, m_perm_r, iperm_c, pivrow, m_glu);
+info = LU_pivotL(icol, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
 if ( info )
 {
 m_info = NumericalIssue;
@@ -536,13 +561,13 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 panel_size = n - jcol;

 // Symbolic outer factorization on a panel of columns
-LU_panel_dfs(m, panel_size, jcol, m_mat, m_perm_r, nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);
+LU_panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);

 // Numeric sup-panel updates in topological order
 LU_panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);

 // Sparse LU within the panel, and below the panel diagonal
-for ( jj = jcol; j< jcol + panel_size; jj++)
+for ( jj = jcol; jj< jcol + panel_size; jj++)
 {
 k = (jj - jcol) * m; // Column index for w-wide arrays

@@ -550,7 +575,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 //Depth-first-search for the current column
 VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
 VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
-info = LU_column_dfs(m, jj, m_perm_r, m_maxsuper, nseg, panel_lsub(k), segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
+info = LU_column_dfs(m, jj, m_perm_r.indices(), m_maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
 if ( !info )
 {
 std::cerr << "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() \n";
@@ -559,7 +584,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 return;
 }
 // Numeric updates to this column
-VectorBlock<IndexVector> dense_k(dense, k, m);
+VectorBlock<ScalarVector> dense_k(dense, k, m);
 VectorBlock<IndexVector> segrep_k(segrep, nseg1, m);
 info = LU_column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
 if ( info )
@@ -571,7 +596,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 }

 // Copy the U-segments to ucol(*)
-info = LU_copy_to_col(jj, nseg, segrep, repfnz_k, m_perm_r, dense_k, m_glu);
+info = LU_copy_to_ucol(jj, nseg, segrep, repfnz_k ,m_perm_r.indices(), dense_k, m_glu);
 if ( info )
 {
 std::cerr << "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() \n";
@@ -581,7 +606,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 }

 // Form the L-segment
-info = LU_pivotL(jj, m_diagpivotthresh, m_perm_r, iperm_c, pivrow, m_glu);
+info = LU_pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
 if ( info )
 {
 std::cerr<< "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT " << info <<std::endl;
@@ -591,7 +616,7 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 }

 // Prune columns (0:jj-1) using column jj
-LU_pruneL(jj, m_perm_r, pivrow, nseg, segrep, repfnz_k, xprune, m_glu);
+LU_pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

 // Reset repfnz for this column
 for (i = 0; i < nseg; i++)
@@ -604,23 +629,10 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 } // end else
 } // end for -- end elimination

-// Adjust row permutation in the case of rectangular matrices... Deprecated
-if (m > n )
-{
-k = 0;
-for (i = 0; i < m; ++i)
-{
-if ( m_perm_r(i) == IND_EMPTY )
-{
-m_perm_r(i) = n + k;
-++k;
-}
-}
-}
 // Count the number of nonzeros in factors
-LU_countnz(n, xprune, m_nnzL, m_nnzU, m_glu);
+LU_countnz(n, m_nnzL, m_nnzU, m_glu);
 // Apply permutation to the L subscripts
-LU_fixupL(n, m_perm_r, m_glu);
+LU_fixupL/*<IndexVector, ScalarVector>*/(n, m_perm_r.indices(), m_glu);



@@ -628,8 +640,8 @@ void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
 m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
 // Create the column major upper sparse matrix U;
 // it is assumed here that MatrixType = SparseMatrix<Scalar,ColumnMajor>
-new (&m_Ustore) Map<MatrixType > ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
-this.m_Ustore = m_Ustore; //FIXME Is it necessary
+new (&m_Ustore) MappedSparseMatrix<Scalar> ( m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data() );
+//this.m_Ustore = m_Ustore; //FIXME Is it necessary

 m_info = Success;
 m_factorizationIsOk = true;
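
The placement-new on m_Ustore above follows Eigen's documented idiom for re-seating a map-like object onto freshly allocated buffers without copying; a reduced sketch using a plain Map and hypothetical data:

    #include <iostream>
    #include <new>
    #include <Eigen/Dense>

    int main()
    {
      double buf1[4] = {1, 2, 3, 4};
      double buf2[4] = {5, 6, 7, 8};
      Eigen::Map<Eigen::VectorXd> view(buf1, 4);          // initially maps buf1
      std::cout << view.sum() << "\n";                    // 10
      new (&view) Eigen::Map<Eigen::VectorXd>(buf2, 4);   // re-seat the same object onto buf2
      std::cout << view.sum() << "\n";                    // 26
      return 0;
    }
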
@@ -188,7 +188,6 @@ void LU_TreePostorder(int n, IndexVector& parent, IndexVector& post)
 // Depth-first search from dummy root vertex #n
 postnum = 0;
 LU_nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
-return post;
 }

 #endif
@@ -47,13 +47,15 @@ class SuperNodalMatrix
 public:
 typedef _Scalar Scalar;
 typedef _Index Index;
+typedef Matrix<Index,Dynamic,1> IndexVector;
+typedef Matrix<Scalar,Dynamic,1> ScalarVector;
 public:
 SuperNodalMatrix()
 {

 }
-SuperNodalMatrix(Index m, Index n, Scalar *nzval, Index* nzval_colptr, Index* rowind,
-Index* rowind_colptr, Index* col_to_sup, Index* sup_to_col )
+SuperNodalMatrix(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
 {
 setInfos(m, n, nzval, nzval_colptr, rowind, rowind_colptr, col_to_sup, sup_to_col);
 }
@@ -68,17 +70,17 @@ class SuperNodalMatrix
 * FIXME This class will be modified such that it can be use in the course
 * of the factorization.
 */
-void setInfos(Index m, Index n, Scalar *nzval, Index* nzval_colptr, Index* rowind,
-Index* rowind_colptr, Index* col_to_sup, Index* sup_to_col )
+void setInfos(int m, int n, ScalarVector& nzval, IndexVector& nzval_colptr, IndexVector& rowind,
+IndexVector& rowind_colptr, IndexVector& col_to_sup, IndexVector& sup_to_col )
 {
 m_row = m;
 m_col = n;
-m_nzval = nzval;
-m_nzval_colptr = nzval_colptr;
-m_rowind = rowind;
-m_rowind_colptr = rowind_colptr;
-m_col_to_sup = col_to_sup;
-m_sup_to_col = sup_to_col;
+m_nzval = nzval.data();
+m_nzval_colptr = nzval_colptr.data();
+m_rowind = rowind.data();
+m_rowind_colptr = rowind_colptr.data();
+m_col_to_sup = col_to_sup.data();
+m_sup_to_col = sup_to_col.data();

 }

@@ -108,6 +110,10 @@ class SuperNodalMatrix
 return m_nzval;
 }

+const Scalar* valuePtr() const
+{
+return m_nzval;
+}
 /**
 * Return the pointers to the beginning of each column in \ref valuePtr()
 */
@@ -116,6 +122,11 @@ class SuperNodalMatrix
 return m_nzval_colptr;
 }

+const Index* colIndexPtr() const
+{
+return m_nzval_colptr;
+}
+
 /**
 * Return the array of compressed row indices of all supernodes
 */
@@ -123,6 +134,12 @@ class SuperNodalMatrix
 {
 return m_rowind;
 }
+
+const Index* rowIndex() const
+{
+return m_rowind;
+}
+
 /**
 * Return the location in \em rowvaluePtr() which starts each column
 */
@@ -130,17 +147,33 @@ class SuperNodalMatrix
 {
 return m_rowind_colptr;
 }
+
+const Index* rowIndexPtr() const
+{
+return m_rowind_colptr;
+}
+
 /**
 * Return the array of column-to-supernode mapping
 */
-Index colToSup()
+Index* colToSup()
+{
+return m_col_to_sup;
+}
+
+const Index* colToSup() const
 {
 return m_col_to_sup;
 }
 /**
 * Return the array of supernode-to-column mapping
 */
-Index supToCol()
+Index* supToCol()
+{
+return m_sup_to_col;
+}
+
+const Index* supToCol() const
 {
 return m_sup_to_col;
 }
@@ -148,7 +181,7 @@ class SuperNodalMatrix
 /**
 * Return the number of supernodes
 */
-int nsuper()
+int nsuper() const
 {
 return m_nsuper;
 }
@@ -61,11 +61,11 @@
 * \param vec Valid pointer to the vector to allocate or expand
 * \param [in,out]length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
 * \param [in]len_to_copy Current number of elements in the factors
-* \param keep_prev true: use length and do not expand the vector; false: compute new_len and expand
+* \param keep_prev 1: use length and do not expand the vector; 0: compute new_len and expand
 * \param [in,out]num_expansions Number of times the memory has been expanded
 */
 template <typename VectorType >
-int expand(VectorType& vec, int& length, int len_to_copy, bool keep_prev, int& num_expansions)
+int expand(VectorType& vec, int& length, int len_to_copy, int keep_prev, int& num_expansions)
 {

 float alpha = 1.5; // Ratio of the memory increase
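
As documented just above, expand() grows a factor array by the fixed ratio alpha = 1.5 while preserving the elements already in use; a stripped-down sketch of that growth policy (a hypothetical helper, not the routine itself):

    #include <Eigen/Dense>

    // keep_prev != 0 reuses the requested length; otherwise grow geometrically by alpha.
    template <typename VectorType>
    void growVector(VectorType& vec, int& length, int len_to_copy, int keep_prev, int& num_expansions)
    {
      const float alpha = 1.5f;                        // ratio of the memory increase
      int new_len = keep_prev ? length : int(alpha * length);
      VectorType tmp(new_len);
      tmp.head(len_to_copy) = vec.head(len_to_copy);   // keep the elements already in the factors
      vec.swap(tmp);
      length = new_len;
      ++num_expansions;
    }
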
@@ -120,18 +120,16 @@ int expand(VectorType& vec, int& length, int len_to_copy, bool keep_prev, int&
 * \param m number of rows of the input matrix
 * \param n number of columns
 * \param annz number of initial nonzeros in the matrix
-* \param work scalar working space needed by all factor routines
-* \param iwork Integer working space
 * \param lwork if lwork=-1, this routine returns an estimated size of the required memory
 * \param glu persistent data to facilitate multiple factors : will be deleted later ??
 * \return an estimated size of the required memory if lwork = -1; otherwise, return the size of actually allocated when memory allocation failed
 * NOTE Unlike SuperLU, this routine does not support successive factorization with the same pattern and the row permutation
 */
-template <typename ScalarVector,typename IndexVector>
-int LUMemInit(int m, int n, int annz, ScalarVector& work, IndexVector& iwork, int lwork, int fillratio, int panel_size, int maxsuper, int rowblk, LU_GlobalLU_t<ScalarVector, IndexVector>& glu)
+template <typename IndexVector,typename ScalarVector>
+int LUMemInit(int m, int n, int annz, int lwork, int fillratio, int panel_size, LU_GlobalLU_t<IndexVector,ScalarVector>& glu)
 {
 typedef typename ScalarVector::Scalar Scalar;
-typedef typename IndexVector::Index Index;
+typedef typename IndexVector::Scalar Index;

 int& num_expansions = glu.num_expansions; //No memory expansions so far
 num_expansions = 0;
@@ -177,17 +175,12 @@ int LUMemInit(int m, int n, int annz, ScalarVector& work, IndexVector& iwork, in

 if (nzlumax < annz ) return nzlumax;

-expand<ScalarVector>(glu.lsup, nzlumax, 0, 0, num_expansions);
+expand<ScalarVector>(glu.lusup, nzlumax, 0, 0, num_expansions);
 expand<ScalarVector>(glu.ucol, nzumax, 0, 0, num_expansions);
 expand<IndexVector>(glu.lsub, nzlmax, 0, 0, num_expansions);
 expand<IndexVector>(glu.usub, nzumax, 0, 1, num_expansions);
 }

-// LUWorkInit : Now, allocate known working storage
-int isize = (2 * panel_size + 3 + LU_NO_MARKER) * m + n;
-int dsize = m * panel_size + LU_NUM_TEMPV(m, panel_size, maxsuper, rowblk);
-iwork.resize(isize);
-work.resize(isize);

 ++num_expansions;
 return 0;
@@ -87,7 +87,7 @@ typedef enum {LUSUP, UCOL, LSUB, USUB, LLVL, ULVL} LU_MemType;

 template <typename IndexVector, typename ScalarVector>
 struct LU_GlobalLU_t {
-typedef typename IndexVector::Index Index;
+typedef typename IndexVector::Scalar Index;
 IndexVector xsup; //First supernode column ... xsup(s) points to the beginning of the s-th supernode
 IndexVector supno; // Supernode number corresponding to this column (column to supernode mapping)
 ScalarVector lusup; // nonzero values of L ordered by columns
@@ -22,20 +22,21 @@
 // License and a copy of the GNU General Public License along with
 // Eigen. If not, see <http://www.gnu.org/licenses/>.

-#ifdef EIGEN_SPARSELU_UTILS_H
+#ifndef EIGEN_SPARSELU_UTILS_H
 #define EIGEN_SPARSELU_UTILS_H


-template <typename IndexVector>
-void SparseLU::LU_countnz(const int n, IndexVector& xprune, int& nnzL, int& nnzU, GlobalLU_t& Glu)
-{
-IndexVector& xsup = Glu.xsup;
-IndexVector& xlsub = Glu.xlsub;
-nnzL = 0;
-nnzU = (Glu.xusub)(n);
-int nsuper = (Glu.supno)(n);
-int jlen, irep;

+template <typename IndexVector, typename ScalarVector>
+void LU_countnz(const int n, int& nnzL, int& nnzU, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
+{
+IndexVector& xsup = glu.xsup;
+IndexVector& xlsub = glu.xlsub;
+nnzL = 0;
+nnzU = (glu.xusub)(n);
+int nsuper = (glu.supno)(n);
+int jlen;
+int i, j, fsupc;
 if (n <= 0 ) return;
 // For each supernode
 for (i = 0; i <= nsuper; i++)
@@ -46,10 +47,9 @@ void SparseLU::LU_countnz(const int n, IndexVector& xprune, int& nnzL, int& nnzU
 for (j = fsupc; j < xsup(i+1); j++)
 {
 nnzL += jlen;
-nnzLU += j - fsupc + 1;
+nnzU += j - fsupc + 1;
 jlen--;
 }
-irep = xsup(i+1) - 1;
 }

 }
@@ -60,16 +60,16 @@ void SparseLU::LU_countnz(const int n, IndexVector& xprune, int& nnzL, int& nnzU
 * and applies permutation to the remaining subscripts
 *
 */
-template <typename IndexVector>
-void SparseLU::LU_fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& Glu)
+template <typename IndexVector, typename ScalarVector>
+void LU_fixupL(const int n, const IndexVector& perm_r, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
 {
-int nsuper, fsupc, i, j, k, jstart;
-IndexVector& xsup = GLu.xsup;
-IndexVector& lsub = Glu.lsub;
-IndexVector& xlsub = Glu.xlsub;
+int fsupc, i, j, k, jstart;
+IndexVector& xsup = glu.xsup;
+IndexVector& lsub = glu.lsub;
+IndexVector& xlsub = glu.xlsub;

 int nextl = 0;
-int nsuper = (Glu.supno)(n);
+int nsuper = (glu.supno)(n);

 // For each supernode
 for (i = 0; i <= nsuper; i++)
@@ -80,7 +80,7 @@ void SparseLU::LU_fixupL(const int n, const IndexVector& perm_r, GlobalLU_t& Glu
 for (j = jstart; j < xlsub(fsupc + 1); j++)
 {
 lsub(nextl) = perm_r(lsub(j)); // Now indexed into P*A
-nextl++
+nextl++;
 }
 for (k = fsupc+1; k < xsup(i+1); k++)
 xlsub(k) = nextl; // other columns in supernode i
@@ -60,12 +60,12 @@
 * > 0 - number of bytes allocated when run out of space
 *
 */
-template <typename IndexVector, typename ScalarVector>
-int LU_column_bmod(const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, int fpanelc, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
+template <typename IndexVector, typename ScalarVector, typename BlockIndexVector, typename BlockScalarVector>
+int LU_column_bmod(const int jcol, const int nseg, BlockScalarVector& dense, ScalarVector& tempv, BlockIndexVector& segrep, BlockIndexVector& repfnz, int fpanelc, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
 {
-typedef typename IndexVector::Index Index;
+typedef typename IndexVector::Scalar Index;
 typedef typename ScalarVector::Scalar Scalar;
-int jsupno, k, ksub, krep, krep_ind, ksupno;
+int jsupno, k, ksub, krep, ksupno;
 int lptr, nrow, isub, i, irow, nextlu, new_next, ufirst;
 int fsupc, nsupc, nsupr, luptr, kfnz, no_zeros;
 /* krep = representative of current k-th supernode
@@ -115,7 +115,6 @@ int LU_column_bmod(const int jcol, const int nseg, ScalarVector& dense, ScalarVe
 nsupc = krep - fst_col + 1;
 nsupr = xlsub(fsupc+1) - xlsub(fsupc);
 nrow = nsupr - d_fsupc - nsupc;
-krep_ind = lptr + nsupc - 1;

 // NOTE Unlike the original implementation in SuperLU, the only feature
 // available here is a sup-col update.
@@ -213,7 +212,7 @@ int LU_column_bmod(const int jcol, const int nseg, ScalarVector& dense, ScalarVe
 ufirst = xlusup(jcol) + d_fsupc;
 Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(lusup.data()[luptr]), nsupc, nsupc, OuterStride<>(nsupr) );
 VectorBlock<ScalarVector> u(lusup, ufirst, nsupc);
-u = A.template triangularView().solve(u);
+u = A.template triangularView<Lower>().solve(u);

 new (&A) Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > ( &(lusup.data()[luptr+nsupc]), nrow, nsupc, OuterStride<>(nsupr) );
 VectorBlock<ScalarVector> l(lusup, ufirst+nsupc, nrow);
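
The fix above supplies the missing <Lower> template argument to triangularView; for reference, a minimal dense example of the same solve call, independent of the LU data structures:

    #include <iostream>
    #include <Eigen/Dense>

    int main()
    {
      Eigen::Matrix3d A;
      A << 2, 0, 0,
           1, 3, 0,
           4, 5, 6;
      Eigen::Vector3d b(2, 4, 21);
      // Solve L*x = b using only the lower triangular part of A.
      Eigen::Vector3d x = A.triangularView<Eigen::Lower>().solve(b);
      std::cout << x.transpose() << "\n";   // expected: 1 1 2
      return 0;
    }
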
@ -72,13 +72,13 @@
|
|||||||
* > 0 number of bytes allocated when run out of space
|
* > 0 number of bytes allocated when run out of space
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
template <typename IndexVector, typename ScalarVector>
|
template <typename IndexVector, typename ScalarVector, typename BlockIndexVector>
|
||||||
int LU_column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, IndexVector& nseg, IndexVector& lsub_col, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
int LU_column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper, int& nseg, BlockIndexVector& lsub_col, IndexVector& segrep, BlockIndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
||||||
{
|
{
|
||||||
typedef typename IndexVector::Index Index;
|
typedef typename IndexVector::Scalar Index;
|
||||||
typedef typename ScalarVector::Scalar Scalar;
|
typedef typename ScalarVector::Scalar Scalar;
|
||||||
|
|
||||||
int jcolp1, jcolm1, jsuper, nsuper, nextl;
|
int jsuper, nsuper, nextl;
|
||||||
int krow; // Row index of the current element
|
int krow; // Row index of the current element
|
||||||
int kperm; // permuted row index
|
int kperm; // permuted row index
|
||||||
int krep; // Supernode reprentative of the current row
|
int krep; // Supernode reprentative of the current row
|
||||||
@ -92,8 +92,10 @@ int LU_column_dfs(const int m, const int jcol, IndexVector& perm_r, int maxsuper
|
|||||||
IndexVector& supno = glu.supno;
|
IndexVector& supno = glu.supno;
|
||||||
IndexVector& lsub = glu.lsub;
|
IndexVector& lsub = glu.lsub;
|
||||||
IndexVector& xlsub = glu.xlsub;
|
IndexVector& xlsub = glu.xlsub;
|
||||||
IndexVector& nzlmax = glu.nzlmax;
|
Index& nzlmax = glu.nzlmax;
|
||||||
|
|
||||||
|
int jcolm1 = jcol - 1;
|
||||||
|
int jcolp1 = jcol + 1;
|
||||||
nsuper = supno(jcol);
|
nsuper = supno(jcol);
|
||||||
jsuper = nsuper;
|
jsuper = nsuper;
|
||||||
nextl = xlsub(jcol);
|
nextl = xlsub(jcol);
|
||||||
|
@ -59,10 +59,10 @@
|
|||||||
* > 0 - number of bytes allocated when run out of space
|
* > 0 - number of bytes allocated when run out of space
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
template < typename IndexVector, typename ScalarVector>
|
template <typename IndexVector, typename ScalarVector, typename SegRepType, typename RepfnzType, typename DenseType>
|
||||||
int LU_copy_to_ucol(const int jcol, const int nseg, IndexVector& segrep, IndexVector& repfnz, IndexVector& perm_r, ScalarVector& dense, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
int LU_copy_to_ucol(const int jcol, const int nseg, SegRepType& segrep, RepfnzType& repfnz ,IndexVector& perm_r, DenseType& dense, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
||||||
{
|
{
|
||||||
typedef typename IndexVector::Index Index;
|
typedef typename IndexVector::Scalar Index;
|
||||||
typedef typename ScalarVector::Scalar Scalar;
|
typedef typename ScalarVector::Scalar Scalar;
|
||||||
Index ksub, krep, ksupno;
|
Index ksub, krep, ksupno;
|
||||||
|
|
||||||
|
@ -59,9 +59,9 @@ void LU_heap_relax_snode (const int n, IndexVector& et, const int relax_columns,
|
|||||||
|
|
||||||
// The etree may not be postordered, but its heap ordered
|
// The etree may not be postordered, but its heap ordered
|
||||||
IndexVector post;
|
IndexVector post;
|
||||||
TreePostorder(n, et, post); // Post order etree
|
LU_TreePostorder(n, et, post); // Post order etree
|
||||||
IndexVector inv_post(n+1);
|
IndexVector inv_post(n+1);
|
||||||
register int i;
|
int i;
|
||||||
for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
|
for (i = 0; i < n+1; ++i) inv_post(post(i)) = i; // inv_post = post.inverse()???
|
||||||
|
|
||||||
// Renumber etree in postorder
|
// Renumber etree in postorder
|
||||||
@ -76,7 +76,7 @@ void LU_heap_relax_snode (const int n, IndexVector& et, const int relax_columns,
|
|||||||
|
|
||||||
// compute the number of descendants of each node in the etree
|
// compute the number of descendants of each node in the etree
|
||||||
relax_end.setConstant(IND_EMPTY);
|
relax_end.setConstant(IND_EMPTY);
|
||||||
register int j, parent;
|
int j, parent;
|
||||||
descendants.setZero();
|
descendants.setZero();
|
||||||
for (j = 0; j < n; j++)
|
for (j = 0; j < n; j++)
|
||||||
{
|
{
|
||||||
@ -85,8 +85,8 @@ void LU_heap_relax_snode (const int n, IndexVector& et, const int relax_columns,
|
|||||||
descendants(parent) += descendants(j) + 1;
|
descendants(parent) += descendants(j) + 1;
|
||||||
}
|
}
|
||||||
// Identify the relaxed supernodes by postorder traversal of the etree
|
// Identify the relaxed supernodes by postorder traversal of the etree
|
||||||
register int snode_start; // beginning of a snode
|
int snode_start; // beginning of a snode
|
||||||
register int k;
|
int k;
|
||||||
int nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
|
int nsuper_et_post = 0; // Number of relaxed snodes in postordered etree
|
||||||
int nsuper_et = 0; // Number of relaxed snodes in the original etree
|
int nsuper_et = 0; // Number of relaxed snodes in the original etree
|
||||||
int l;
|
int l;
|
||||||
|
@ -62,8 +62,8 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
template <typename IndexVector, typename ScalarVector>
|
template <typename DenseIndexBlock, typename IndexVector, typename ScalarVector>
|
||||||
void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, IndexVector& segrep, IndexVector& repfnz, LU_GlobalLU_t<IndexVector,ScalarVector>& glu)
|
void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, ScalarVector& dense, ScalarVector& tempv, DenseIndexBlock& segrep, DenseIndexBlock& repfnz, LU_GlobalLU_t<IndexVector,ScalarVector>& glu)
|
||||||
{
|
{
|
||||||
typedef typename ScalarVector::Scalar Scalar;
|
typedef typename ScalarVector::Scalar Scalar;
|
||||||
IndexVector& xsup = glu.xsup;
|
IndexVector& xsup = glu.xsup;
|
||||||
@ -75,7 +75,7 @@ void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, Sca
|
|||||||
|
|
||||||
int i,ksub,jj,nextl_col,irow;
|
int i,ksub,jj,nextl_col,irow;
|
||||||
int fsupc, nsupc, nsupr, nrow;
|
int fsupc, nsupc, nsupr, nrow;
|
||||||
int krep, krep_ind, kfnz;
|
int krep, kfnz;
|
||||||
int lptr; // points to the row subscripts of a supernode
|
int lptr; // points to the row subscripts of a supernode
|
||||||
int luptr; // ...
|
int luptr; // ...
|
||||||
int segsize,no_zeros,isub ;
|
int segsize,no_zeros,isub ;
|
||||||
@ -95,8 +95,6 @@ void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, Sca
|
|||||||
nsupr = xlsub(fsupc+1) - xlsub(fsupc);
|
nsupr = xlsub(fsupc+1) - xlsub(fsupc);
|
||||||
nrow = nsupr - nsupc;
|
nrow = nsupr - nsupc;
|
||||||
lptr = xlsub(fsupc);
|
lptr = xlsub(fsupc);
|
||||||
krep_ind = lptr + nsupc - 1;
|
|
||||||
|
|
||||||
// NOTE : Unlike the original implementation in SuperLU, the present implementation
|
// NOTE : Unlike the original implementation in SuperLU, the present implementation
|
||||||
// does not include a 2-D block update.
|
// does not include a 2-D block update.
|
||||||
|
|
||||||
@ -104,8 +102,8 @@ void LU_panel_bmod(const int m, const int w, const int jcol, const int nseg, Sca
|
|||||||
for (jj = jcol; jj < jcol + w; jj++)
|
for (jj = jcol; jj < jcol + w; jj++)
|
||||||
{
|
{
|
||||||
nextl_col = (jj-jcol) * m;
|
nextl_col = (jj-jcol) * m;
|
||||||
VectorBlock<IndexVector> repfnz_col(repfnz.segment(nextl_col, m)); // First nonzero column index for each row
|
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero column index for each row
|
||||||
VectorBlock<IndexVector> dense_col(dense.segment(nextl_col, m)); // Scatter/gather entire matrix column from/to here
|
VectorBlock<ScalarVector> dense_col(dense, nextl_col, m); // Scatter/gather entire matrix column from/to here
|
||||||
|
|
||||||
kfnz = repfnz_col(krep);
|
kfnz = repfnz_col(krep);
|
||||||
if ( kfnz == IND_EMPTY )
|
if ( kfnz == IND_EMPTY )
|
||||||
|
@ -77,8 +77,8 @@
|
|||||||
*
|
*
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
template <typename MatrixType, typename IndexVector, typename ScalarVector>
|
template <typename MatrixType, typename ScalarVector, typename IndexVector>
|
||||||
void LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
void LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, IndexVector& perm_r, int& nseg, ScalarVector& dense, IndexVector& panel_lsub, IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, IndexVector& marker, IndexVector& parent, IndexVector& xplore, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
||||||
{
|
{
|
||||||
|
|
||||||
int jj; // Index through each column in the panel
|
int jj; // Index through each column in the panel
|
||||||
@ -105,14 +105,14 @@ void LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, Index
|
|||||||
nextl_col = (jj - jcol) * m;
|
nextl_col = (jj - jcol) * m;
|
||||||
|
|
||||||
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
|
VectorBlock<IndexVector> repfnz_col(repfnz, nextl_col, m); // First nonzero location in each row
|
||||||
VectorBlock<IndexVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
|
VectorBlock<ScalarVector> dense_col(dense,nextl_col, m); // Accumulate a column vector here
|
||||||
|
|
||||||
|
|
||||||
// For each nnz in A[*, jj] do depth first search
|
// For each nnz in A[*, jj] do depth first search
|
||||||
for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
|
for (typename MatrixType::InnerIterator it(A, jj); it; ++it)
|
||||||
{
|
{
|
||||||
krow = it.row();
|
krow = it.row();
|
||||||
dense_col(krow) = it.val();
|
dense_col(krow) = it.value();
|
||||||
kmark = marker(krow);
|
kmark = marker(krow);
|
||||||
if (kmark == jj)
|
if (kmark == jj)
|
||||||
continue; // krow visited before, go to the next nonzero
|
continue; // krow visited before, go to the next nonzero
|
||||||
@ -126,7 +126,7 @@ void LU_panel_dfs(const int m, const int w, const int jcol, MatrixType& A, Index
|
|||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// krow is in U : if its supernode-representative krep
|
// krow is in U : if its sup²ernode-representative krep
|
||||||
// has been explored, update repfnz(*)
|
// has been explored, update repfnz(*)
|
||||||
krep = xsup(supno(kperm)+1) - 1;
|
krep = xsup(supno(kperm)+1) - 1;
|
||||||
myfnz = repfnz_col(krep);
|
myfnz = repfnz_col(krep);
|
||||||
|
@ -70,7 +70,7 @@
|
|||||||
template <typename IndexVector, typename ScalarVector>
|
template <typename IndexVector, typename ScalarVector>
|
||||||
int LU_pivotL(const int jcol, const typename ScalarVector::RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
int LU_pivotL(const int jcol, const typename ScalarVector::RealScalar diagpivotthresh, IndexVector& perm_r, IndexVector& iperm_c, int& pivrow, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
|
||||||
{
|
{
|
||||||
typedef typename IndexVector::Index Index;
|
typedef typename IndexVector::Scalar Index;
|
||||||
typedef typename ScalarVector::Scalar Scalar;
|
typedef typename ScalarVector::Scalar Scalar;
|
||||||
// Initialize pointers
|
// Initialize pointers
|
||||||
IndexVector& lsub = glu.lsub; // Compressed row subscripts of L rectangular supernodes.
|
IndexVector& lsub = glu.lsub; // Compressed row subscripts of L rectangular supernodes.
|
||||||
@ -91,7 +91,6 @@ int LU_pivotL(const int jcol, const typename ScalarVector::RealScalar diagpivott
|
|||||||
Scalar pivmax = 0.0;
|
Scalar pivmax = 0.0;
|
||||||
Index pivptr = nsupc;
|
Index pivptr = nsupc;
|
||||||
Index diag = IND_EMPTY;
|
Index diag = IND_EMPTY;
|
||||||
Index old_pivptr = nsupc;
|
|
||||||
Scalar rtemp;
|
Scalar rtemp;
|
||||||
Index isub, icol, itemp, k;
|
Index isub, icol, itemp, k;
|
||||||
for (isub = nsupc; isub < nsupr; ++isub) {
|
for (isub = nsupc; isub < nsupr; ++isub) {
|
||||||
|
@ -61,10 +61,10 @@
 * \param glu Global LU data
 *
 */
-template <typename IndexVector, typename ScalarVector>
+template <typename IndexVector, typename ScalarVector, typename BlockIndexVector>
-void LU_pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, IndexVector& repfnz, IndexVector& xprune, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
+void LU_pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, const int nseg, const IndexVector& segrep, BlockIndexVector& repfnz, IndexVector& xprune, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
 {
-typedef typename IndexVector::Index Index;
+typedef typename IndexVector::Scalar Index;
 typedef typename ScalarVector::Scalar Scalar;
 // Initialize pointers
 IndexVector& xsup = glu.xsup;
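A plausible reading of the new BlockIndexVector parameter, sketched below with illustrative panel sizes (not taken from the commit): the panel routines slice repfnz with VectorBlock, and VectorBlock<IndexVector> is a different type from IndexVector, so LU_pruneL now has to accept the repfnz argument generically.

#include <Eigen/Dense>

typedef Eigen::Matrix<int, Eigen::Dynamic, 1> IndexVector;

void prune_argument_sketch()
{
  const int m = 100, w = 4;            // illustrative panel dimensions
  IndexVector repfnz(m * w);           // first-nonzero positions for the whole panel
  // One column of the panel; its type is VectorBlock<IndexVector>, not IndexVector,
  // hence the extra BlockIndexVector template parameter on LU_pruneL.
  Eigen::VectorBlock<IndexVector> repfnz_col = repfnz.segment(0, m);
  (void)repfnz_col;
}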
@ -78,7 +78,7 @@ void LU_pruneL(const int jcol, const IndexVector& perm_r, const int pivrow, cons
 int jsupno = supno(jcol);
 int i,irep,irep1;
 bool movnum, do_prune = false;
-Index kmin, kmax, ktemp, minloc, maxloc,krow;
+Index kmin, kmax, minloc, maxloc,krow;
 for (i = 0; i < nseg; i++)
 {
 irep = segrep(i);
@ -45,8 +45,7 @@
 #ifndef SPARSELU_SNODE_BMOD_H
 #define SPARSELU_SNODE_BMOD_H
 template <typename IndexVector, typename ScalarVector>
-int LU_snode_bmod (const int jcol, const int jsupno, const int fsupc,
-ScalarVector& dense, LU_GlobalLU_t<IndexVector,ScalarVector>& glu)
+int LU_snode_bmod (const int jcol, const int fsupc, ScalarVector& dense, LU_GlobalLU_t<IndexVector,ScalarVector>& glu)
 {
 typedef typename ScalarVector::Scalar Scalar;
 IndexVector& lsub = glu.lsub; // Compressed row subscripts of ( rectangular supernodes ??)
@ -42,7 +42,7 @@
 * granted, provided the above notices are retained, and a notice that
 * the code was modified is included with the above copyright notice.
 */
-#ifdef SPARSELU_SNODE_DFS_H
+#ifndef SPARSELU_SNODE_DFS_H
 #define SPARSELU_SNODE_DFS_H
 /**
 * \brief Determine the union of the row structures of those columns within the relaxed snode.
@ -58,9 +58,9 @@
 * \return 0 on success, > 0 size of the memory when memory allocation failed
 */
 template <typename IndexVector, typename ScalarVector>
-int LU_snode_dfs(const int jcol, const int kcol, const IndexVector* asub, const IndexVector* colptr, IndexVector& xprune, IndexVector& marker, LU_GlobalLU_t& glu)
+int LU_snode_dfs(const int jcol, const int kcol, const typename IndexVector::Scalar* asub, const typename IndexVector::Scalar* colptr, IndexVector& xprune, IndexVector& marker, LU_GlobalLU_t<IndexVector, ScalarVector>& glu)
 {
-typedef typename IndexVector::Index;
+typedef typename IndexVector::Scalar Index;
 IndexVector& xsup = glu.xsup;
 IndexVector& supno = glu.supno; // Supernode number corresponding to this column
 IndexVector& lsub = glu.lsub;
@ -74,9 +74,9 @@
 for (i = jcol; i <=kcol; i++)
 {
 // For each nonzero in A(*,i)
-for (k = colptr(i); k < colptr(i+1); k++)
+for (k = colptr[i]; k < colptr[i+1]; k++)
 {
-krow = asub(k);
+krow = asub[k];
 kmark = marker(krow);
 if ( kmark != kcol )
 {
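One way to read the new raw-pointer signature and the switch to bracket indexing above (an assumption, since the call site is not part of this hunk): a compressed column-major SparseMatrix already exposes exactly such arrays, which could be handed to LU_snode_dfs directly.

#include <Eigen/SparseCore>

// For a compressed column-major sparse matrix, Eigen exposes the CCS arrays
// as plain pointers; they are naturally addressed with [] as in the hunk above.
void ccs_arrays(Eigen::SparseMatrix<double, Eigen::ColMajor, int>& A)
{
  A.makeCompressed();
  const int* asub   = A.innerIndexPtr();  // row index of each stored entry: asub[k]
  const int* colptr = A.outerIndexPtr();  // start of each column: colptr[i] .. colptr[i+1]
  (void)asub; (void)colptr;
}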
bench/spbench/test_sparseLU.cpp (new file, 64 lines)
@ -0,0 +1,64 @@
// Small bench routine for Eigen available in Eigen
// (C) Desire NUENTSA WAKAM, INRIA

#include <iostream>
#include <fstream>
#include <iomanip>
#include <unsupported/Eigen/SparseExtra>
#include <Eigen/SparseLU>

using namespace std;
using namespace Eigen;

int main(int argc, char **args)
{
  SparseMatrix<double, ColMajor> A;
  typedef SparseMatrix<double, ColMajor>::Index Index;
  typedef Matrix<double, Dynamic, Dynamic> DenseMatrix;
  typedef Matrix<double, Dynamic, 1> DenseRhs;
  VectorXd b, x, tmp;
  SparseLU<SparseMatrix<double, ColMajor>, AMDOrdering<double, int> > solver;
  ifstream matrix_file;
  string line;
  int n;

  // Set parameters
  /* Fill the matrix with sparse matrix stored in Matrix-Market coordinate column-oriented format */
  if (argc < 2) assert(false && "please, give the matrix market file ");
  loadMarket(A, args[1]);
  cout << "End charging matrix " << endl;
  bool iscomplex=false, isvector=false;
  int sym;
  getMarketHeader(args[1], sym, iscomplex, isvector);
  if (iscomplex) { cout<< " Not for complex matrices \n"; return -1; }
  if (isvector) { cout << "The provided file is not a matrix file\n"; return -1;}
  if (sym != 0) { // symmetric matrices, only the lower part is stored
    SparseMatrix<double, ColMajor> temp;
    temp = A;
    A = temp.selfadjointView<Lower>();
  }
  n = A.cols();
  /* Fill the right hand side */

  if (argc > 2)
    loadMarketVector(b, args[2]);
  else
  {
    b.resize(n);
    tmp.resize(n);
    // tmp.setRandom();
    for (int i = 0; i < n; i++) tmp(i) = i;
    b = A * tmp ;
  }

  /* Compute the factorization */
  solver.compute(A);

  solver._solve(b, x);
  /* Check the accuracy */
  VectorXd tmp2 = b - A*x;
  double tempNorm = tmp2.norm()/b.norm();
  cout << "Relative norm of the computed solution : " << tempNorm <<"\n";

  return 0;
}
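A hypothetical way to run the bench above (file names are placeholders, not from the commit), reconstructed from its argument handling:

// Hypothetical usage:
//   ./test_sparseLU  A.mtx          // rhs synthesized as b = A * [0, 1, ..., n-1]^T
//   ./test_sparseLU  A.mtx  b.mtx   // rhs read from a Matrix-Market vector file
// The program factorizes A with SparseLU and prints the relative residual ||b - A x|| / ||b||.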