Add support for Apple's Accelerate sparse matrix solvers

This commit is contained in:
John Mather 2022-03-08 00:09:18 +00:00 committed by Rasmus Munk Larsen
parent 008ff3483a
commit 3a9d404d76
8 changed files with 702 additions and 1 deletion

Eigen/AccelerateSupport (new file)

@@ -0,0 +1,53 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
#ifndef EIGEN_ACCELERATESUPPORT_MODULE_H
#define EIGEN_ACCELERATESUPPORT_MODULE_H
#include "SparseCore"
#include "src/Core/util/DisableStupidWarnings.h"
/** \ingroup Support_modules
* \defgroup AccelerateSupport_Module AccelerateSupport module
*
* This module provides an interface to the Apple Accelerate library.
* It provides the following seven factorization classes:
* - class AccelerateLLT: a Cholesky (LL^T) factorization
* - class AccelerateLDLT: the default LDL^T factorization
* - class AccelerateLDLTUnpivoted: a Cholesky-like LDL^T factorization with only 1x1 pivots and no pivoting
* - class AccelerateLDLTSBK: an LDL^T factorization with Supernode Bunch-Kaufman and static pivoting
* - class AccelerateLDLTTPP: an LDL^T factorization with full threshold partial pivoting
* - class AccelerateQR: a QR factorization
* - class AccelerateCholeskyAtA: a QR factorization without storing Q (equivalent to A^T A = R^T R)
*
* \code
* #include <Eigen/AccelerateSupport>
* \endcode
*
* In order to use this module, the Accelerate headers must be accessible from
* the include paths, and your binary must be linked to the Accelerate framework.
* The Accelerate library is only available on Apple hardware.
*
* Note that many of the algorithms require additional information about your
* matrices. This can be provided by setting the UpLo template argument when
* defining the factorization class. For example, the following creates an
* LDL^T factorization for a symmetric matrix, reading only its lower
* triangle:
*
* \code
* AccelerateLDLT<SparseMatrix<float>, Lower | Symmetric> ldlt;
* \endcode
*
* Failure to do so may result in your application crashing.
*/
#include "src/AccelerateSupport/AccelerateSupport.h"
#include "src/Core/util/ReenableStupidWarnings.h"
#endif // EIGEN_ACCELERATESUPPORT_MODULE_H
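
For readers unfamiliar with Eigen's sparse solver API, here is a minimal usage sketch (not part of this commit): it assembles a small SPD matrix, factorizes it with AccelerateLLT, and solves a right-hand side. It assumes an Apple toolchain with the Accelerate framework linked (e.g. -framework Accelerate); all names in the snippet are illustrative.

```cpp
// Minimal usage sketch (not part of this commit): solve a small SPD sparse
// system with AccelerateLLT. Assumes macOS with the Accelerate framework
// available and linked.
#include <Eigen/Sparse>
#include <Eigen/AccelerateSupport>
#include <iostream>
#include <vector>

int main() {
  using SpMat = Eigen::SparseMatrix<double>;

  // Assemble a small symmetric positive definite tridiagonal matrix.
  const int n = 5;
  std::vector<Eigen::Triplet<double>> triplets;
  for (int i = 0; i < n; ++i) {
    triplets.emplace_back(i, i, 2.0);
    if (i + 1 < n) {
      triplets.emplace_back(i + 1, i, -1.0);
      triplets.emplace_back(i, i + 1, -1.0);
    }
  }
  SpMat A(n, n);
  A.setFromTriplets(triplets.begin(), triplets.end());
  A.makeCompressed();

  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  // Declare the matrix symmetric, stored column-compressed; Lower | Symmetric
  // tells Accelerate which triangle to read.
  Eigen::AccelerateLLT<SpMat, Eigen::Lower | Eigen::Symmetric> llt(A);
  if (llt.info() != Eigen::Success) {
    std::cerr << "Accelerate LLT factorization failed\n";
    return 1;
  }
  Eigen::VectorXd x = llt.solve(b);
  std::cout << "residual norm: " << (A * x - b).norm() << "\n";
  return 0;
}
```

As in the unit test added by this commit, the full symmetric matrix is stored while the Lower | Symmetric template argument declares which triangle Accelerate should use.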

@@ -27,7 +27,7 @@
* - DiagonalPreconditioner - also called Jacobi preconditioner, works very well on diagonally dominant matrices.
* - IncompleteLUT - incomplete LU factorization with dual thresholding
*
* Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport.
* Such problems can also be solved using the direct sparse decomposition modules: SparseCholesky, CholmodSupport, UmfPackSupport, SuperLUSupport, AccelerateSupport.
*
\code
#include <Eigen/IterativeLinearSolvers>

Eigen/src/AccelerateSupport/AccelerateSupport.h (new file)

@@ -0,0 +1,423 @@
#ifndef EIGEN_ACCELERATESUPPORT_H
#define EIGEN_ACCELERATESUPPORT_H
#include <Accelerate/Accelerate.h>
#include <Eigen/Sparse>
namespace Eigen {
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
class AccelerateImpl;
/** \ingroup AccelerateSupport_Module
* \class AccelerateLLT
* \brief A direct Cholesky (LLT) factorization and solver based on Accelerate
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is Lower | Symmetric.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateLLT
*/
template <typename MatrixType, int UpLo = Lower | Symmetric>
using AccelerateLLT = AccelerateImpl<MatrixType, UpLo, SparseFactorizationCholesky, true>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateLDLT
* \brief The default Cholesky (LDLT) factorization and solver based on Accelerate
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is Lower | Symmetric.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLT
*/
template <typename MatrixType, int UpLo = Lower | Symmetric>
using AccelerateLDLT = AccelerateImpl<MatrixType, UpLo, SparseFactorizationLDLT, true>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateLDLTUnpivoted
* \brief A direct Cholesky-like LDL^T factorization and solver based on Accelerate with only 1x1 pivots and no pivoting
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is Lower | Symmetric.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTUnpivoted
*/
template <typename MatrixType, int UpLo = Lower | Symmetric>
using AccelerateLDLTUnpivoted = AccelerateImpl<MatrixType, UpLo, SparseFactorizationLDLTUnpivoted, true>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateLDLTSBK
* \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with Supernode Bunch-Kaufman and static pivoting
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is Lower | Symmetric.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTSBK
*/
template <typename MatrixType, int UpLo = Lower | Symmetric>
using AccelerateLDLTSBK = AccelerateImpl<MatrixType, UpLo, SparseFactorizationLDLTSBK, true>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateLDLTTPP
* \brief A direct Cholesky (LDLT) factorization and solver based on Accelerate with full threshold partial pivoting
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is Lower | Symmetric.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateLDLTTPP
*/
template <typename MatrixType, int UpLo = Lower | Symmetric>
using AccelerateLDLTTPP = AccelerateImpl<MatrixType, UpLo, SparseFactorizationLDLTTPP, true>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateQR
* \brief A QR factorization and solver based on Accelerate
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is 0.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateQR
*/
template <typename MatrixType, int UpLo = 0>
using AccelerateQR = AccelerateImpl<MatrixType, UpLo, SparseFactorizationQR, false>;
/** \ingroup AccelerateSupport_Module
* \class AccelerateCholeskyAtA
* \brief A QR factorization and solver based on Accelerate without storing Q (equivalent to A^TA = R^T R)
*
* \warning Only single and double precision real scalar types are supported by Accelerate
*
* \tparam MatrixType the type of the sparse matrix A; it must be a SparseMatrix<>
* \tparam UpLo additional information about the matrix structure. Default is 0.
*
* \sa \ref TutorialSparseSolverConcept, class AccelerateCholeskyAtA
*/
template <typename MatrixType, int UpLo = 0>
using AccelerateCholeskyAtA = AccelerateImpl<MatrixType, UpLo, SparseFactorizationCholeskyAtA, false>;
namespace internal {
template <typename T>
struct AccelFactorizationDeleter {
void operator()(T* sym) {
if (sym) {
SparseCleanup(*sym);
delete sym;
sym = nullptr;
}
}
};
template <typename DenseVecT, typename DenseMatT, typename SparseMatT, typename NumFactT>
struct SparseTypesTraitBase {
typedef DenseVecT AccelDenseVector;
typedef DenseMatT AccelDenseMatrix;
typedef SparseMatT AccelSparseMatrix;
typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
typedef NumFactT NumericFactorization;
typedef AccelFactorizationDeleter<SymbolicFactorization> SymbolicFactorizationDeleter;
typedef AccelFactorizationDeleter<NumericFactorization> NumericFactorizationDeleter;
};
template <typename Scalar>
struct SparseTypesTrait {};
template <>
struct SparseTypesTrait<double> : SparseTypesTraitBase<DenseVector_Double, DenseMatrix_Double, SparseMatrix_Double,
SparseOpaqueFactorization_Double> {};
template <>
struct SparseTypesTrait<float>
: SparseTypesTraitBase<DenseVector_Float, DenseMatrix_Float, SparseMatrix_Float, SparseOpaqueFactorization_Float> {
};
} // end namespace internal
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
class AccelerateImpl : public SparseSolverBase<AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_> > {
protected:
using Base = SparseSolverBase<AccelerateImpl>;
using Base::derived;
using Base::m_isInitialized;
public:
using Base::_solve_impl;
typedef MatrixType_ MatrixType;
typedef typename MatrixType::Scalar Scalar;
typedef typename MatrixType::StorageIndex StorageIndex;
enum { ColsAtCompileTime = Dynamic, MaxColsAtCompileTime = Dynamic };
enum { UpLo = UpLo_ };
using AccelDenseVector = typename internal::SparseTypesTrait<Scalar>::AccelDenseVector;
using AccelDenseMatrix = typename internal::SparseTypesTrait<Scalar>::AccelDenseMatrix;
using AccelSparseMatrix = typename internal::SparseTypesTrait<Scalar>::AccelSparseMatrix;
using SymbolicFactorization = typename internal::SparseTypesTrait<Scalar>::SymbolicFactorization;
using NumericFactorization = typename internal::SparseTypesTrait<Scalar>::NumericFactorization;
using SymbolicFactorizationDeleter = typename internal::SparseTypesTrait<Scalar>::SymbolicFactorizationDeleter;
using NumericFactorizationDeleter = typename internal::SparseTypesTrait<Scalar>::NumericFactorizationDeleter;
AccelerateImpl() {
m_isInitialized = false;
auto check_flag_set = [](int value, int flag) { return ((value & flag) == flag); };
if (check_flag_set(UpLo_, Symmetric)) {
m_sparseKind = SparseSymmetric;
m_triType = (UpLo_ & Lower) ? SparseLowerTriangle : SparseUpperTriangle;
} else if (check_flag_set(UpLo_, UnitLower)) {
m_sparseKind = SparseUnitTriangular;
m_triType = SparseLowerTriangle;
} else if (check_flag_set(UpLo_, UnitUpper)) {
m_sparseKind = SparseUnitTriangular;
m_triType = SparseUpperTriangle;
} else if (check_flag_set(UpLo_, StrictlyLower)) {
m_sparseKind = SparseTriangular;
m_triType = SparseLowerTriangle;
} else if (check_flag_set(UpLo_, StrictlyUpper)) {
m_sparseKind = SparseTriangular;
m_triType = SparseUpperTriangle;
} else if (check_flag_set(UpLo_, Lower)) {
m_sparseKind = SparseTriangular;
m_triType = SparseLowerTriangle;
} else if (check_flag_set(UpLo_, Upper)) {
m_sparseKind = SparseTriangular;
m_triType = SparseUpperTriangle;
} else {
m_sparseKind = SparseOrdinary;
m_triType = (UpLo_ & Lower) ? SparseLowerTriangle : SparseUpperTriangle;
}
m_order = SparseOrderDefault;
}
explicit AccelerateImpl(const MatrixType& matrix) : AccelerateImpl() { compute(matrix); }
~AccelerateImpl() {}
inline Index cols() const { return m_nCols; }
inline Index rows() const { return m_nRows; }
ComputationInfo info() const {
eigen_assert(m_isInitialized && "Decomposition is not initialized.");
return m_info;
}
void analyzePattern(const MatrixType& matrix);
void factorize(const MatrixType& matrix);
void compute(const MatrixType& matrix);
template <typename Rhs, typename Dest>
void _solve_impl(const MatrixBase<Rhs>& b, MatrixBase<Dest>& dest) const;
/** Sets the ordering algorithm to use. */
void setOrder(SparseOrder_t order) { m_order = order; }
private:
template <typename T>
void buildAccelSparseMatrix(const SparseMatrix<T>& a, AccelSparseMatrix& A, std::vector<long>& columnStarts) {
const Index nColumnsStarts = a.cols() + 1;
columnStarts.resize(nColumnsStarts);
for (Index i = 0; i < nColumnsStarts; i++) columnStarts[i] = a.outerIndexPtr()[i];
SparseAttributes_t attributes{};
attributes.transpose = false;
attributes.triangle = m_triType;
attributes.kind = m_sparseKind;
SparseMatrixStructure structure{};
structure.attributes = attributes;
structure.rowCount = static_cast<int>(a.rows());
structure.columnCount = static_cast<int>(a.cols());
structure.blockSize = 1;
structure.columnStarts = columnStarts.data();
structure.rowIndices = const_cast<int*>(a.innerIndexPtr());
A.structure = structure;
A.data = const_cast<T*>(a.valuePtr());
}
void doAnalysis(AccelSparseMatrix& A) {
m_numericFactorization.reset(nullptr);
SparseSymbolicFactorOptions opts{};
opts.control = SparseDefaultControl;
opts.orderMethod = m_order;
opts.order = nullptr;
opts.ignoreRowsAndColumns = nullptr;
opts.malloc = malloc;
opts.free = free;
opts.reportError = nullptr;
m_symbolicFactorization.reset(new SymbolicFactorization(SparseFactor(Solver_, A.structure, opts)));
SparseStatus_t status = m_symbolicFactorization->status;
updateInfoStatus(status);
if (status != SparseStatusOK) m_symbolicFactorization.reset(nullptr);
}
void doFactorization(AccelSparseMatrix& A) {
SparseStatus_t status = SparseStatusReleased;
if (m_symbolicFactorization) {
m_numericFactorization.reset(new NumericFactorization(SparseFactor(*m_symbolicFactorization, A)));
status = m_numericFactorization->status;
if (status != SparseStatusOK) m_numericFactorization.reset(nullptr);
}
updateInfoStatus(status);
}
protected:
void updateInfoStatus(SparseStatus_t status) const {
switch (status) {
case SparseStatusOK:
m_info = Success;
break;
case SparseFactorizationFailed:
case SparseMatrixIsSingular:
m_info = NumericalIssue;
break;
case SparseInternalError:
case SparseParameterError:
case SparseStatusReleased:
default:
m_info = InvalidInput;
break;
}
}
mutable ComputationInfo m_info;
Index m_nRows, m_nCols;
std::unique_ptr<SymbolicFactorization, SymbolicFactorizationDeleter> m_symbolicFactorization;
std::unique_ptr<NumericFactorization, NumericFactorizationDeleter> m_numericFactorization;
SparseKind_t m_sparseKind;
SparseTriangle_t m_triType;
SparseOrder_t m_order;
};
/** Computes the symbolic and numeric decomposition of matrix \a a */
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::compute(const MatrixType& a) {
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
m_nRows = a.rows();
m_nCols = a.cols();
AccelSparseMatrix A{};
std::vector<long> columnStarts;
buildAccelSparseMatrix(a, A, columnStarts);
doAnalysis(A);
if (m_symbolicFactorization) doFactorization(A);
m_isInitialized = true;
}
/** Performs a symbolic decomposition on the sparsity pattern of matrix \a a.
*
* This function is particularly useful when solving several problems having the same structure.
*
* \sa factorize()
*/
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::analyzePattern(const MatrixType& a) {
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
m_nRows = a.rows();
m_nCols = a.cols();
AccelSparseMatrix A{};
std::vector<long> columnStarts;
buildAccelSparseMatrix(a, A, columnStarts);
doAnalysis(A);
m_isInitialized = true;
}
/** Performs a numeric decomposition of matrix \a a.
*
* The given matrix must have the same sparsity pattern as the matrix on which the symbolic decomposition has been performed.
*
* \sa analyzePattern()
*/
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::factorize(const MatrixType& a) {
eigen_assert(m_symbolicFactorization && "You must first call analyzePattern()");
eigen_assert(m_nRows == a.rows() && m_nCols == a.cols());
if (EnforceSquare_) eigen_assert(a.rows() == a.cols());
AccelSparseMatrix A{};
std::vector<long> columnStarts;
buildAccelSparseMatrix(a, A, columnStarts);
doFactorization(A);
}
template <typename MatrixType_, int UpLo_, SparseFactorization_t Solver_, bool EnforceSquare_>
template <typename Rhs, typename Dest>
void AccelerateImpl<MatrixType_, UpLo_, Solver_, EnforceSquare_>::_solve_impl(const MatrixBase<Rhs>& b,
MatrixBase<Dest>& x) const {
if (!m_numericFactorization) {
m_info = InvalidInput;
return;
}
eigen_assert(m_nRows == b.rows());
eigen_assert(((b.cols() == 1) || b.outerStride() == b.rows()));
SparseStatus_t status = SparseStatusOK;
Scalar* b_ptr = const_cast<Scalar*>(b.derived().data());
Scalar* x_ptr = const_cast<Scalar*>(x.derived().data());
AccelDenseMatrix xmat{};
xmat.attributes = SparseAttributes_t();
xmat.columnCount = static_cast<int>(x.cols());
xmat.rowCount = static_cast<int>(x.rows());
xmat.columnStride = xmat.rowCount;
xmat.data = x_ptr;
AccelDenseMatrix bmat{};
bmat.attributes = SparseAttributes_t();
bmat.columnCount = static_cast<int>(b.cols());
bmat.rowCount = static_cast<int>(b.rows());
bmat.columnStride = bmat.rowCount;
bmat.data = b_ptr;
SparseSolve(*m_numericFactorization, bmat, xmat);
updateInfoStatus(status);
}
} // end namespace Eigen
#endif // EIGEN_ACCELERATESUPPORT_H
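
Because the class exposes the usual analyzePattern()/factorize() split, the symbolic analysis can be reused across matrices that share a sparsity pattern, and setOrder() accepts one of Accelerate's SparseOrder_t values. A short sketch follows (illustrative names, not part of the commit; SparseOrderAMD is assumed to be the desired ordering).

```cpp
// Sketch (illustrative): reuse the symbolic factorization when only the
// numeric values change between solves (same sparsity pattern).
#include <Eigen/Sparse>
#include <Eigen/AccelerateSupport>

using SpMat = Eigen::SparseMatrix<double>;

void solveSequence(const SpMat& A0, const SpMat& A1,  // A1 has the same pattern as A0
                   const Eigen::VectorXd& b,
                   Eigen::VectorXd& x0, Eigen::VectorXd& x1) {
  Eigen::AccelerateLDLT<SpMat, Eigen::Lower | Eigen::Symmetric> ldlt;
  ldlt.setOrder(SparseOrderAMD);   // optional: choose Accelerate's ordering algorithm
  ldlt.analyzePattern(A0);         // symbolic step, done once
  ldlt.factorize(A0);              // numeric factorization of A0
  if (ldlt.info() == Eigen::Success) x0 = ldlt.solve(b);
  ldlt.factorize(A1);              // new values, same sparsity pattern
  if (ldlt.info() == Eigen::Success) x1 = ldlt.solve(b);
}
```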

@@ -0,0 +1,3 @@
#ifndef EIGEN_ACCELERATESUPPORT_MODULE_H
#error "Please include Eigen/AccelerateSupport instead of including headers inside the src directory directly."
#endif

@@ -0,0 +1,28 @@
if (Accelerate_INCLUDES AND Accelerate_LIBRARIES)
set(Accelerate_FIND_QUIETLY TRUE)
endif ()
find_path(Accelerate_INCLUDES
NAMES
Accelerate.h
PATHS $ENV{ACCELERATEDIR}
)
find_library(Accelerate_LIBRARIES Accelerate PATHS $ENV{ACCELERATEDIR})
include(FindPackageHandleStandardArgs)
find_package_handle_standard_args(Accelerate DEFAULT_MSG
Accelerate_INCLUDES Accelerate_LIBRARIES)
if (Accelerate_FOUND)
get_filename_component(Accelerate_PARENTDIR ${Accelerate_INCLUDES} DIRECTORY)
file(GLOB_RECURSE SparseHeader ${Accelerate_PARENTDIR}/Sparse.h)
if ("${SparseHeader}" STREQUAL "")
message(STATUS "Accelerate sparse matrix support was not found. Accelerate has been disabled.")
set(Accelerate_FOUND FALSE)
endif ()
endif ()
mark_as_advanced(Accelerate_INCLUDES Accelerate_LIBRARIES)

@@ -75,6 +75,9 @@ They are summarized in the following tables:
<tr><td>PardisoLLT \n PardisoLDLT \n PardisoLU</td><td>\link PardisoSupport_Module PardisoSupport \endlink</td><td>Direct LLt, LDLt, LU factorizations</td><td>SPD \n SPD \n Square</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
<td>Requires the <a href="http://eigen.tuxfamily.org/Counter/redirect_to_mkl.php">Intel MKL</a> package, \b Proprietary </td>
<td>optimized for tough problems patterns, see also \link TopicUsingIntelMKL using MKL with Eigen \endlink</td></tr>
<tr><td>AccelerateLLT \n AccelerateLDLT \n AccelerateQR</td><td>\link AccelerateSupport_Module AccelerateSupport \endlink</td><td>Direct LLt, LDLt, QR factorizations</td><td>SPD \n SPD \n Rectangular</td><td>Fill-in reducing, Leverage fast dense algebra, Multithreading</td>
<td>Requires the <a href="https://developer.apple.com/documentation/accelerate">Apple Accelerate</a> package, \b Proprietary </td>
<td></td></tr>
</table>
Here \c SPD means symmetric positive definite.
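
The rectangular/least-squares case handled by AccelerateQR can be sketched as follows (illustrative, not from the commit); it mirrors the QR path of the new unit test, which checks the result against a dense colPivHouseholderQr() solve.

```cpp
// Sketch (illustrative): least-squares solve of an overdetermined sparse
// system with AccelerateQR, i.e. x minimizing ||A x - b||_2.
#include <Eigen/Sparse>
#include <Eigen/AccelerateSupport>
#include <stdexcept>

Eigen::VectorXd leastSquares(const Eigen::SparseMatrix<double>& A,  // rows >= cols
                             const Eigen::VectorXd& b) {
  Eigen::AccelerateQR<Eigen::SparseMatrix<double>> qr(A);
  if (qr.info() != Eigen::Success) {
    throw std::runtime_error("Accelerate QR factorization failed");
  }
  return qr.solve(b);
}
```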

@@ -134,6 +134,17 @@ else()
ei_add_property(EIGEN_MISSING_BACKENDS "SPQR, ")
endif()
find_package(Accelerate)
if(Accelerate_FOUND)
add_definitions("-DEIGEN_ACCELERATE_SUPPORT")
include_directories(${Accelerate_INCLUDES})
set(SPARSE_LIBS ${SPARSE_LIBS} ${Accelerate_LIBRARIES})
set(Accelerate_ALL_LIBS ${Accelerate_LIBRARIES})
ei_add_property(EIGEN_TESTED_BACKENDS "Accelerate, ")
else()
ei_add_property(EIGEN_MISSING_BACKENDS "Accelerate, ")
endif()
option(EIGEN_TEST_NOQT "Disable Qt support in unit tests" OFF)
if(NOT EIGEN_TEST_NOQT)
find_package(Qt4)
@@ -344,6 +355,10 @@ if(METIS_FOUND)
ei_add_test(metis_support "" "${METIS_LIBRARIES}")
endif()
if(Accelerate_FOUND)
ei_add_test(accelerate_support "" "${Accelerate_ALL_LIBS}")
endif()
string(TOLOWER "${CMAKE_CXX_COMPILER}" cmake_cxx_compiler_tolower)
if(cmake_cxx_compiler_tolower MATCHES "qcc")
set(CXX_IS_QCC "ON")
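
Downstream code can key off the EIGEN_ACCELERATE_SUPPORT define that this CMake hunk adds for the test build. A hedged sketch of such a guard (the alias name is illustrative, and the define must be supplied by your own build system):

```cpp
// Sketch (illustrative): fall back to Eigen's built-in SimplicialLLT when the
// build was not configured with Accelerate support. EIGEN_ACCELERATE_SUPPORT
// mirrors the define added by the CMake snippet above.
#include <Eigen/Sparse>
#if defined(EIGEN_ACCELERATE_SUPPORT)
#include <Eigen/AccelerateSupport>
using PreferredLLT = Eigen::AccelerateLLT<Eigen::SparseMatrix<double>>;
#else
#include <Eigen/SparseCholesky>
using PreferredLLT = Eigen::SimplicialLLT<Eigen::SparseMatrix<double>>;
#endif
```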

test/accelerate_support.cpp (new file)

@@ -0,0 +1,176 @@
#define EIGEN_NO_DEBUG_SMALL_PRODUCT_BLOCKS
#include "sparse_solver.h"
#if defined(DEBUG)
#undef DEBUG
#endif
#include <Eigen/AccelerateSupport>
template<typename MatrixType,typename DenseMat>
int generate_sparse_rectangular_problem(MatrixType& A, DenseMat& dA, int maxRows = 300, int maxCols = 300)
{
typedef typename MatrixType::Scalar Scalar;
int rows = internal::random<int>(1, maxRows);
int cols = internal::random<int>(1, maxCols);
double density = (std::max)(8.0 / (rows * cols), 0.01);
A.resize(rows,cols);
dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A, ForceNonZeroDiag);
A.makeCompressed();
return rows;
}
template<typename MatrixType,typename DenseMat>
int generate_sparse_square_symmetric_problem(MatrixType& A, DenseMat& dA, int maxSize = 300)
{
typedef typename MatrixType::Scalar Scalar;
int rows = internal::random<int>(1, maxSize);
int cols = rows;
double density = (std::max)(8.0 / (rows * cols), 0.01);
A.resize(rows,cols);
dA.resize(rows,cols);
initSparse<Scalar>(density, dA, A, ForceNonZeroDiag);
dA = dA * dA.transpose();
A = A * A.transpose();
A.makeCompressed();
return rows;
}
template<typename Scalar, typename Solver> void test_accelerate_ldlt()
{
typedef SparseMatrix<Scalar> MatrixType;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
MatrixType A;
Matrix<Scalar,Dynamic,Dynamic> dA;
generate_sparse_square_symmetric_problem(A, dA);
DenseVector b = DenseVector::Random(A.rows());
Solver solver;
solver.compute(A);
if (solver.info() != Success)
{
std::cerr << "sparse LDLT factorization failed\n";
exit(0);
return;
}
DenseVector x = solver.solve(b);
if (solver.info() != Success)
{
std::cerr << "sparse LDLT factorization failed\n";
exit(0);
return;
}
//Compare with a dense solver
DenseVector refX = dA.ldlt().solve(b);
VERIFY((A * x).isApprox(A * refX, test_precision<Scalar>()));
}
template<typename Scalar, typename Solver> void test_accelerate_llt()
{
typedef SparseMatrix<Scalar> MatrixType;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
MatrixType A;
Matrix<Scalar,Dynamic,Dynamic> dA;
generate_sparse_square_symmetric_problem(A, dA);
DenseVector b = DenseVector::Random(A.rows());
Solver solver;
solver.compute(A);
if (solver.info() != Success)
{
std::cerr << "sparse LLT factorization failed\n";
exit(0);
return;
}
DenseVector x = solver.solve(b);
if (solver.info() != Success)
{
std::cerr << "sparse LLT factorization failed\n";
exit(0);
return;
}
//Compare with a dense solver
DenseVector refX = dA.llt().solve(b);
VERIFY((A * x).isApprox(A * refX, test_precision<Scalar>()));
}
template<typename Scalar, typename Solver> void test_accelerate_qr()
{
typedef SparseMatrix<Scalar> MatrixType;
typedef Matrix<Scalar,Dynamic,1> DenseVector;
MatrixType A;
Matrix<Scalar,Dynamic,Dynamic> dA;
generate_sparse_rectangular_problem(A, dA);
DenseVector b = DenseVector::Random(A.rows());
Solver solver;
solver.compute(A);
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
DenseVector x = solver.solve(b);
if (solver.info() != Success)
{
std::cerr << "sparse QR factorization failed\n";
exit(0);
return;
}
//Compare with a dense solver
DenseVector refX = dA.colPivHouseholderQr().solve(b);
VERIFY((A * x).isApprox(A * refX, test_precision<Scalar>()));
}
template<typename Scalar>
void run_tests()
{
typedef SparseMatrix<Scalar> MatrixType;
test_accelerate_ldlt<Scalar, AccelerateLDLT<MatrixType, Lower | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTUnpivoted<MatrixType, Lower | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTSBK<MatrixType, Lower | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTTPP<MatrixType, Lower | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLT<MatrixType, Upper | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTUnpivoted<MatrixType, Upper | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTSBK<MatrixType, Upper | Symmetric> >();
test_accelerate_ldlt<Scalar, AccelerateLDLTTPP<MatrixType, Upper | Symmetric> >();
test_accelerate_llt<Scalar, AccelerateLLT<MatrixType, Lower | Symmetric> >();
test_accelerate_llt<Scalar, AccelerateLLT<MatrixType, Upper | Symmetric> >();
test_accelerate_qr<Scalar, AccelerateQR<MatrixType, 0> >();
}
EIGEN_DECLARE_TEST(accelerate_support)
{
CALL_SUBTEST_1(run_tests<float>());
CALL_SUBTEST_2(run_tests<double>());
}