Mirror of https://gitlab.com/libeigen/eigen.git (synced 2025-04-24 02:29:33 +08:00)

Commit 2a98bd9c8e: Merged eigen/eigen into default
@@ -84,7 +84,7 @@ cholmod_sparse viewAsCholmod(Ref<SparseMatrix<_Scalar,_Options,_StorageIndex> >
   {
     res.itype = CHOLMOD_INT;
   }
-  else if (internal::is_same<_StorageIndex,long>::value)
+  else if (internal::is_same<_StorageIndex,SuiteSparse_long>::value)
   {
     res.itype = CHOLMOD_LONG;
   }
@@ -168,11 +168,11 @@ namespace internal {
 
 #define EIGEN_CHOLMOD_SPECIALIZE0(ret, name) \
     template<typename _StorageIndex> inline ret cm_ ## name (cholmod_common &Common) { return cholmod_ ## name (&Common); } \
-    template<> inline ret cm_ ## name<long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
+    template<> inline ret cm_ ## name<SuiteSparse_long> (cholmod_common &Common) { return cholmod_l_ ## name (&Common); }
 
 #define EIGEN_CHOLMOD_SPECIALIZE1(ret, name, t1, a1) \
     template<typename _StorageIndex> inline ret cm_ ## name (t1& a1, cholmod_common &Common) { return cholmod_ ## name (&a1, &Common); } \
-    template<> inline ret cm_ ## name<long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
+    template<> inline ret cm_ ## name<SuiteSparse_long> (t1& a1, cholmod_common &Common) { return cholmod_l_ ## name (&a1, &Common); }
 
 EIGEN_CHOLMOD_SPECIALIZE0(int, start)
 EIGEN_CHOLMOD_SPECIALIZE0(int, finish)
@@ -184,15 +184,15 @@ EIGEN_CHOLMOD_SPECIALIZE1(int, free_sparse, cholmod_sparse*, A)
 EIGEN_CHOLMOD_SPECIALIZE1(cholmod_factor*, analyze, cholmod_sparse, A)
 
 template<typename _StorageIndex> inline cholmod_dense* cm_solve (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_solve (sys, &L, &B, &Common); }
-template<> inline cholmod_dense* cm_solve<long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
+template<> inline cholmod_dense* cm_solve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_dense& B, cholmod_common &Common) { return cholmod_l_solve (sys, &L, &B, &Common); }
 
 template<typename _StorageIndex> inline cholmod_sparse* cm_spsolve (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_spsolve (sys, &L, &B, &Common); }
-template<> inline cholmod_sparse* cm_spsolve<long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
+template<> inline cholmod_sparse* cm_spsolve<SuiteSparse_long> (int sys, cholmod_factor& L, cholmod_sparse& B, cholmod_common &Common) { return cholmod_l_spsolve (sys, &L, &B, &Common); }
 
 template<typename _StorageIndex>
 inline int cm_factorize_p (cholmod_sparse* A, double beta[2], _StorageIndex* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_factorize_p (A, beta, fset, fsize, L, &Common); }
 template<>
-inline int cm_factorize_p<long> (cholmod_sparse* A, double beta[2], long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
+inline int cm_factorize_p<SuiteSparse_long> (cholmod_sparse* A, double beta[2], SuiteSparse_long* fset, std::size_t fsize, cholmod_factor* L, cholmod_common &Common) { return cholmod_l_factorize_p (A, beta, fset, fsize, L, &Common); }
 
 #undef EIGEN_CHOLMOD_SPECIALIZE0
 #undef EIGEN_CHOLMOD_SPECIALIZE1
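Note: the switch from plain long to SuiteSparse_long in the specializations above matters on LLP64 platforms (64-bit Windows), where long is only 32 bits wide while the cholmod_l_* entry points work with 64-bit indices; SuiteSparse_long is SuiteSparse's own portable 64-bit index type. Below is a minimal, self-contained sketch of the dispatch pattern the EIGEN_CHOLMOD_SPECIALIZE* macros generate, with hypothetical names standing in for the real CHOLMOD calls:

#include <cstdint>
#include <cstdio>

// Stand-in for SuiteSparse_long, which SuiteSparse defines as a true
// 64-bit type on every platform (__int64 on Windows, long elsewhere).
typedef std::int64_t SuiteSparseLongStandIn;

// Primary template: would forward to the 32-bit cholmod_* entry points.
template <typename StorageIndex>
inline const char* cm_backend() { return "cholmod_*   (32-bit indices)"; }

// Specialization keyed on the 64-bit index type: cholmod_l_* entry points.
template <>
inline const char* cm_backend<SuiteSparseLongStandIn>() {
  return "cholmod_l_* (64-bit indices)";
}

int main() {
  std::puts(cm_backend<int>());                    // cholmod_*
  std::puts(cm_backend<SuiteSparseLongStandIn>()); // cholmod_l_*
  // Keyed on plain `long`, the second call would fall through to the
  // generic overload on Windows, where int64_t is `long long`, not `long`.
  return 0;
}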
@@ -405,7 +405,7 @@ template<typename T> struct plain_matrix_type_row_major
   typedef Matrix<typename traits<T>::Scalar,
                 Rows,
                 Cols,
-                (MaxCols==1&&MaxRows!=1) ? RowMajor : ColMajor,
+                (MaxCols==1&&MaxRows!=1) ? ColMajor : RowMajor,
                 MaxRows,
                 MaxCols
           > type;
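Note: the flipped ternary corrects the storage-order flag chosen for vector-shaped types. Eigen rejects a fixed-size column vector declared RowMajor at compile time, so plain_matrix_type_row_major must fall back to ColMajor exactly when the type can only be a column vector (MaxCols==1 && MaxRows!=1). A short illustration of that constraint:

#include <Eigen/Dense>

int main() {
  // Row vectors must be RowMajor, column vectors must be ColMajor;
  // any other combination trips INVALID_MATRIX_TEMPLATE_PARAMETERS.
  Eigen::Matrix<float, 1, 3, Eigen::RowMajor> row_ok;
  Eigen::Matrix<float, 3, 1, Eigen::ColMajor> col_ok;
  // Eigen::Matrix<float, 3, 1, Eigen::RowMajor> bad;  // compile-time error
  (void)row_ok; (void)col_ok;
  return 0;
}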
@@ -73,7 +73,7 @@ struct TensorOpResourceRequirements {
   // expression tree (like reductions) to communicate resources
   // requirements based on local state (like the total number of reductions
   // to be computed).
-  TensorOpResourceRequirements(internal::TensorBlockShapeType shape,
+  TensorOpResourceRequirements(TensorBlockShapeType shape,
                                const Index size)
       : block_shape(shape), block_total_size(size) {}
 };
@@ -90,9 +90,9 @@ EIGEN_STRONG_INLINE void MergeResourceRequirements(
   *block_shape = resources[0].block_shape;
   *block_total_size = resources[0].block_total_size;
   for (std::vector<TensorOpResourceRequirements>::size_type i = 1; i < resources.size(); ++i) {
-    if (resources[i].block_shape == TensorBlockShapeType::kSkewedInnerDims &&
-        *block_shape != TensorBlockShapeType::kSkewedInnerDims) {
-      *block_shape = TensorBlockShapeType::kSkewedInnerDims;
+    if (resources[i].block_shape == kSkewedInnerDims &&
+        *block_shape != kSkewedInnerDims) {
+      *block_shape = kSkewedInnerDims;
     }
     *block_total_size =
         numext::maxi(*block_total_size, resources[i].block_total_size);
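Note: dropping the TensorBlockShapeType:: qualifiers is consistent with the shape type being a plain (unscoped) enum on the merged side: unscoped enumerators are injected into the enclosing scope, so the bare names resolve. A sketch of the lookup difference, using hypothetical names:

// Unscoped enum: enumerators are visible in the surrounding scope,
// so both the bare and the qualified spelling compile (C++11).
enum BlockShapeSketch { kUniformAllDims, kSkewedInnerDims };

// A C++11 `enum class` would force the qualified form instead:
//   enum class BlockShapeScoped { kUniformAllDims, kSkewedInnerDims };
//   BlockShapeScoped s = kUniformAllDims;                   // error
//   BlockShapeScoped s = BlockShapeScoped::kUniformAllDims; // ok

int main() {
  BlockShapeSketch a = kSkewedInnerDims;                   // bare: ok
  BlockShapeSketch b = BlockShapeSketch::kUniformAllDims;  // qualified: ok
  return a == b;
}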
@@ -178,9 +178,9 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout,
           bool BlockRead>
 class TensorBlockIO {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
-  typedef typename internal::TensorBlockCopyOp<Scalar, StorageIndex>
+  typedef typename TensorBlockCopyOp<Scalar, StorageIndex>
       TensorBlockCopyOp;
 
  protected:
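Note: this hunk and the similar ones below drop the internal:: prefix from typedefs that already live inside namespace internal, where the qualification is redundant; unqualified lookup finds the same class templates. A minimal sketch of the pattern with hypothetical names:

namespace internal {

// The class template being re-exported under a member typedef.
template <typename Scalar>
class TensorBlockSketch {};

template <typename Scalar>
class TensorBlockIOSketch {
 public:
  // Inside namespace internal, unqualified lookup already finds
  // internal::TensorBlockSketch; the internal:: prefix adds nothing.
  typedef TensorBlockSketch<Scalar> TensorBlock;
};

}  // namespace internal

int main() {
  internal::TensorBlockIOSketch<float>::TensorBlock block;
  (void)block;
  return 0;
}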
@@ -320,7 +320,7 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockReader : public TensorBlockIO<Scalar, StorageIndex, NumDims,
                                                Layout, /*BlockRead=*/true> {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/true>
       Base;
@@ -357,7 +357,7 @@ template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockWriter : public TensorBlockIO<Scalar, StorageIndex, NumDims,
                                                Layout, /*BlockRead=*/false> {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef TensorBlockIO<Scalar, StorageIndex, NumDims, Layout, /*BlockRead=*/false>
       Base;
@@ -434,7 +434,7 @@ struct TensorBlockCwiseBinaryOp {
 template <typename BinaryFunctor, typename StorageIndex, typename OutputScalar,
           int NumDims, int Layout>
 struct TensorBlockCwiseBinaryIO {
-  typedef typename internal::TensorBlock<OutputScalar, StorageIndex, NumDims,
+  typedef typename TensorBlock<OutputScalar, StorageIndex, NumDims,
                                Layout>::Dimensions Dimensions;
 
   struct BlockIteratorState {
@@ -627,7 +627,7 @@ struct TensorBlockView {
 template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorBlockMapper {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef DSizes<StorageIndex, NumDims> Dimensions;
 
@@ -742,7 +742,7 @@ class TensorBlockMapper {
         block_dim_sizes[i] = 1;
       }
     } else if (block_dim_sizes.TotalSize() > min_target_size) {
-      if (block_shape == TensorBlockShapeType::kUniformAllDims) {
+      if (block_shape == kUniformAllDims) {
         // Tensor will not fit within 'min_target_size' budget: calculate tensor
         // block dimension sizes based on "square" dimension size target.
         const size_t dim_size_target = static_cast<const size_t>(
@@ -773,7 +773,7 @@ class TensorBlockMapper {
           total_size = total_size_other_dims * block_dim_sizes[dim];
         }
       }
-    } else if (block_shape == TensorBlockShapeType::kSkewedInnerDims) {
+    } else if (block_shape == kSkewedInnerDims) {
       StorageIndex coeff_to_allocate = min_target_size;
       for (int i = 0; i < NumDims; ++i) {
         const int dim = cond<Layout>()(i, NumDims - i - 1);
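Note: the two branches above dispatch on the two block-shape strategies. A worked sketch of the arithmetic, illustrative only and not the Eigen implementation: with a 64-coefficient budget on a column-major 100x100 tensor, kUniformAllDims aims for roughly square 8x8 blocks, while kSkewedInnerDims hands the innermost dimension as much of the budget as possible first, giving 64x1 blocks that keep reads contiguous:

#include <cmath>
#include <cstdio>

int main() {
  const int target_size = 64;  // coefficient budget per block
  const int num_dims = 2;

  // kUniformAllDims: per-dimension size target is the num_dims-th root.
  const int uniform_side =
      static_cast<int>(std::round(std::pow(target_size, 1.0 / num_dims)));

  // kSkewedInnerDims: the inner (first, for column-major) dimension
  // absorbs the whole budget before any outer dimension grows.
  std::printf("uniform: %dx%d blocks, skewed: %dx1 blocks\n",
              uniform_side, uniform_side, target_size);
  return 0;
}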
@@ -818,7 +818,7 @@ class TensorBlockMapper {
 template <typename Scalar, typename StorageIndex, int NumDims, int Layout>
 class TensorSliceBlockMapper {
  public:
-  typedef typename internal::TensorBlock<Scalar, StorageIndex, NumDims, Layout>
+  typedef typename TensorBlock<Scalar, StorageIndex, NumDims, Layout>
       TensorBlock;
   typedef DSizes<StorageIndex, NumDims> Dimensions;
 
@@ -155,7 +155,7 @@ struct TensorContractionParams {
 // See expected implementation in NoOpOutputKernel.
 struct OutputKernel {
   template <typename Index, typename Scalar>
-  using OutputMapper = internal::blas_data_mapper<Scalar, Index, ColMajor>;
+  typedef internal::blas_data_mapper<Scalar, Index, ColMajor> OutputMapper;
 };
 
 // Output kernel that does absolutely nothing.
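Note: this hunk trades a C++11 alias template for a typedef. As a general sketch of the difference, with hypothetical names: an alias template keeps its template parameters open at the point of use, whereas a typedef must bind every parameter when it is declared:

#include <cstddef>

template <typename Scalar, typename Index>
struct MapperSketch {};

// C++11 alias template: Index and Scalar stay open until used.
template <typename Index, typename Scalar>
using OutputMapperAlias = MapperSketch<Scalar, Index>;

// typedef: every parameter must be fixed at declaration time.
typedef MapperSketch<float, std::ptrdiff_t> OutputMapperTypedef;

int main() {
  OutputMapperAlias<std::ptrdiff_t, float> a;  // same type as b
  OutputMapperTypedef b;
  (void)a; (void)b;
  return 0;
}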