implement a more optimistic heuristic to predict the nnz of a sparse*sparse product

Gael Guennebaud 2011-12-16 15:59:44 +01:00
parent 40c0f3af57
commit 732a50d043
2 changed files with 15 additions and 11 deletions


@@ -43,12 +43,15 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
   Matrix<Index,Dynamic,1> indices(rows);
 
   // estimate the number of non zero entries
-  float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
-  float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
-  float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+  // given a rhs column containing Y non zeros, we assume that the respective Y columns
+  // of the lhs differ on average by one non zero, thus the number of non zeros for
+  // the product of a rhs column with the lhs is X+Y where X is the average number
+  // of non zeros per column of the lhs.
+  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
 
   res.setZero();
-  res.reserve(Index(ratioRes*rows*cols));
+  res.reserve(Index(estimated_nnz_prod));
   // we compute each column of the result, one after the other
   for (Index j=0; j<cols; ++j)
   {
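For illustration, here is a minimal standalone sketch (not part of this changeset) that compares the old ratio-based reservation with the new nnz(lhs) + nnz(rhs) estimate on two randomly filled matrices; the sizes, the fill loop, and the use of Eigen::SparseMatrix at the user level are assumptions made purely for the example.

#include <Eigen/Sparse>
#include <algorithm>
#include <cstdlib>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // column-major by default
  const int rows = 1000, cols = 1000;

  // fill two matrices with roughly 5 non zeros per column
  SpMat lhs(rows, cols), rhs(rows, cols);
  for (int j = 0; j < cols; ++j)
    for (int k = 0; k < 5; ++k)
    {
      lhs.coeffRef(std::rand() % rows, j) = 1.0;
      rhs.coeffRef(std::rand() % rows, j) = 1.0;
    }
  lhs.makeCompressed();
  rhs.makeCompressed();

  // old heuristic: density(lhs) * average nnz per rhs column, capped at 1
  float ratioLhs = float(lhs.nonZeros()) / (float(rows) * float(cols));
  float avgNnzPerRhsColumn = float(rhs.nonZeros()) / float(cols);
  float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
  std::cout << "old reserve estimate: " << long(ratioRes * rows * cols) << "\n";

  // new heuristic from this commit: nnz(lhs*rhs) ~= nnz(lhs) + nnz(rhs)
  std::cout << "new reserve estimate: " << lhs.nonZeros() + rhs.nonZeros() << "\n";

  // actual product, for comparison
  SpMat res = lhs * rhs;
  std::cout << "actual nnz:           " << res.nonZeros() << "\n";
  return 0;
}

Note that the new estimate is deliberately optimistic: res.reserve() only pre-allocates memory, and the result still grows as needed when a column turns out to be denser than predicted.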


@@ -47,9 +47,12 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
   AmbiVector<Scalar,Index> tempVector(rows);
 
   // estimate the number of non zero entries
-  float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
-  float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
-  float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+  // given a rhs column containing Y non zeros, we assume that the respective Y columns
+  // of the lhs differ on average by one non zero, thus the number of non zeros for
+  // the product of a rhs column with the lhs is X+Y where X is the average number
+  // of non zeros per column of the lhs.
+  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+  Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
 
   // mimics a resizeByInnerOuter:
   if(ResultType::IsRowMajor)
@@ -57,13 +60,11 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r
   else
     res.resize(rows, cols);
 
-  res.reserve(Index(ratioRes*rows*cols));
+  res.reserve(estimated_nnz_prod);
   for (Index j=0; j<cols; ++j)
   {
     // let's do a more accurate determination of the nnz ratio for the current column j of res
-    //float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
-    // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
-    float ratioColRes = ratioRes;
+    double ratioColRes = (double(rhs.col(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
     tempVector.init(ratioColRes);
     tempVector.setZero();
     for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
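In the same spirit, here is a small standalone sketch (again not from the changeset) of the per-column density estimate now passed to tempVector.init(): the expected fill ratio of column j of the result is taken as (nnz(rhs.col(j)) + average nnz per lhs column) / rows. The matrices, their sizes, and their entries below are illustrative assumptions.

#include <Eigen/Sparse>
#include <iostream>

int main()
{
  typedef Eigen::SparseMatrix<double> SpMat;   // column-major
  SpMat lhs(100, 100), rhs(100, 100);
  lhs.coeffRef(3, 7)  = 1.0;  lhs.coeffRef(42, 7) = 2.0;
  rhs.coeffRef(7, 0)  = 1.0;  rhs.coeffRef(9, 0)  = 3.0;
  lhs.makeCompressed();
  rhs.makeCompressed();

  for (int j = 0; j < rhs.cols(); ++j)
  {
    // same formula as in the patched sparse_sparse_product_with_pruning_impl:
    // expected nnz of res.col(j) is nnz(rhs.col(j)) + avg nnz per lhs column,
    // expressed as a fraction of the number of rows
    double ratioColRes = (double(rhs.col(j).nonZeros())
                          + double(lhs.nonZeros()) / double(lhs.cols()))
                         / double(lhs.rows());
    std::cout << "column " << j << ": expected density " << ratioColRes << "\n";
  }
  return 0;
}

This per-column estimate only drives how the temporary AmbiVector sizes its internal storage (dense versus sparse mode), so an underestimate degrades speed rather than correctness.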