Misc. source and comment typos

Found using `codespell` and `grep` from downstream FreeCAD
luz.paz 2018-03-11 10:01:44 -04:00
parent 624df50945
commit e3912f5e63
98 changed files with 122 additions and 122 deletions

View File

@@ -360,7 +360,7 @@ inline static const char *SimdInstructionSetsInUse(void) {
 namespace Eigen {
-// we use size_t frequently and we'll never remember to prepend it with std:: everytime just to
+// we use size_t frequently and we'll never remember to prepend it with std:: every time just to
 // ensure QNX/QCC support
 using std::size_t;
 // gcc 4.6.0 wants std:: for ptrdiff_t

View File

@@ -247,7 +247,7 @@ template<typename _MatrixType, int _UpLo> class LDLT
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the factorization failed because of a zero pivot.
 */
 ComputationInfo info() const

View File

@@ -180,7 +180,7 @@ template<typename _MatrixType, int _UpLo> class LLT
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears not to be positive definite.
 */
 ComputationInfo info() const

View File

@@ -756,7 +756,7 @@ EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_dense_assignment_loop(DstXprType
 // AssignmentKind must define a Kind typedef.
 template<typename DstShape, typename SrcShape> struct AssignmentKind;
-// Assignement kind defined in this file:
+// Assignment kind defined in this file:
 struct Dense2Dense {};
 struct EigenBase2EigenBase {};
@@ -899,7 +899,7 @@ struct Assignment<DstXprType, SrcXprType, Functor, EigenBase2EigenBase, Weak>
 src.evalTo(dst);
 }
-// NOTE The following two functions are templated to avoid their instanciation if not needed
+// NOTE The following two functions are templated to avoid their instantiation if not needed
 // This is needed because some expressions supports evalTo only and/or have 'void' as scalar type.
 template<typename SrcScalarType>
 EIGEN_DEVICE_FUNC

View File

@@ -395,7 +395,7 @@ template<typename Derived> class DenseBase
 * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
 * a const reference, in order to avoid a useless copy.
 *
-* \warning Be carefull with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
+* \warning Be careful with eval() and the auto C++ keyword, as detailed in this \link TopicPitfalls_auto_keyword page \endlink.
 */
 EIGEN_DEVICE_FUNC
 EIGEN_STRONG_INLINE EvalReturnType eval() const
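
As an aside, a minimal sketch of the eval()/auto pitfall this warning refers to (hypothetical matrices, assumes Eigen 3.3+; not part of this commit):

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(3, 3);
  Eigen::MatrixXd B = Eigen::MatrixXd::Random(3, 3);

  auto expr = A * B;                  // a lazy product expression, not a matrix;
                                      // it is re-evaluated each time it is used
  Eigen::MatrixXd C = (A * B).eval(); // eval() forces evaluation into a plain temporary
  Eigen::MatrixXd D = expr;           // assigning to a plain matrix also evaluates once

  return C.isApprox(D) ? 0 : 1;       // both paths give the same values here
}
```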

View File

@@ -61,7 +61,7 @@ struct plain_array
 #if defined(EIGEN_DISABLE_UNALIGNED_ARRAY_ASSERT)
 #define EIGEN_MAKE_UNALIGNED_ARRAY_ASSERT(sizemask)
 #elif EIGEN_GNUC_AT_LEAST(4,7)
-// GCC 4.7 is too aggressive in its optimizations and remove the alignement test based on the fact the array is declared to be aligned.
+// GCC 4.7 is too aggressive in its optimizations and remove the alignment test based on the fact the array is declared to be aligned.
 // See this bug report: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53900
 // Hiding the origin of the array pointer behind a function argument seems to do the trick even if the function is inlined:
 template<typename PtrType>

View File

@@ -749,7 +749,7 @@ inline EIGEN_MATHFUNC_RETVAL(random, Scalar) random()
 return EIGEN_MATHFUNC_IMPL(random, Scalar)::run();
 }
-// Implementatin of is* functions
+// Implementation of is* functions
 // std::is* do not work with fast-math and gcc, std::is* are available on MSVC 2013 and newer, as well as in clang.
 #if (EIGEN_HAS_CXX11_MATH && !(EIGEN_COMP_GNUC_STRICT && __FINITE_MATH_ONLY__)) || (EIGEN_COMP_MSVC>=1800) || (EIGEN_COMP_CLANG)

View File

@@ -75,10 +75,10 @@ class NoAlias
 *
 * More precisely, noalias() allows to bypass the EvalBeforeAssignBit flag.
 * Currently, even though several expressions may alias, only product
-* expressions have this flag. Therefore, noalias() is only usefull when
+* expressions have this flag. Therefore, noalias() is only useful when
 * the source expression contains a matrix product.
 *
-* Here are some examples where noalias is usefull:
+* Here are some examples where noalias is useful:
 * \code
 * D.noalias() = A * B;
 * D.noalias() += A.transpose() * B;

View File

@@ -780,7 +780,7 @@ class PlainObjectBase : public internal::dense_xpr_base<Derived>::type
 resize(size);
 }
-// We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitely converted)
+// We have a 1x1 matrix/array => the argument is interpreted as the value of the unique coefficient (case where scalar type can be implicitly converted)
 template<typename T>
 EIGEN_DEVICE_FUNC
 EIGEN_STRONG_INLINE void _init1(const Scalar& val0, typename internal::enable_if<Base::SizeAtCompileTime==1 && internal::is_convertible<T, Scalar>::value,T>::type* = 0)

View File

@@ -116,7 +116,7 @@ class dense_product_base
 : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type
 {};
-/** Convertion to scalar for inner-products */
+/** Conversion to scalar for inner-products */
 template<typename Lhs, typename Rhs, int Option>
 class dense_product_base<Lhs, Rhs, Option, InnerProduct>
 : public internal::dense_xpr_base<Product<Lhs,Rhs,Option> >::type

View File

@@ -84,7 +84,7 @@ class TranspositionsBase
 }
 // FIXME: do we want such methods ?
-// might be usefull when the target matrix expression is complex, e.g.:
+// might be useful when the target matrix expression is complex, e.g.:
 // object.matrix().block(..,..,..,..) = trans * object.matrix().block(..,..,..,..);
 /*
 template<typename MatrixType>

View File

@@ -470,7 +470,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
 * \a Side==OnTheLeft (the default), or the right-inverse-multiply \a other * inverse(\c *this) if
 * \a Side==OnTheRight.
 *
-* Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+* Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
 *
 * The matrix \c *this must be triangular and invertible (i.e., all the coefficients of the
 * diagonal must be non zero). It works as a forward (resp. backward) substitution if \c *this
@@ -496,7 +496,7 @@ template<typename _MatrixType, unsigned int _Mode> class TriangularViewImpl<_Mat
 * \warning The parameter is only marked 'const' to make the C++ compiler accept a temporary expression here.
 * This function will const_cast it, so constness isn't honored here.
 *
-* Note that the template parameter \c Side can be ommitted, in which case \c Side==OnTheLeft
+* Note that the template parameter \c Side can be omitted, in which case \c Side==OnTheLeft
 *
 * See TriangularView:solve() for the details.
 */
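
For reference, a small usage sketch of the Side parameter documented above (hypothetical matrices; not part of this commit). Omitting Side gives the default left solve:

```cpp
#include <Eigen/Dense>

int main() {
  Eigen::Matrix3d A = Eigen::Matrix3d::Random();
  A.diagonal().array() += 4.0;  // keep the triangular factor safely invertible
  Eigen::Matrix3d B = Eigen::Matrix3d::Random();

  auto L = A.triangularView<Eigen::Lower>();
  Eigen::Matrix3d X = L.solve(B);                    // L * X = B   (Side == OnTheLeft, default)
  Eigen::Matrix3d Y = L.solve<Eigen::OnTheRight>(B); // Y * L = B   (Side == OnTheRight)

  bool ok = (L.toDenseMatrix() * X).isApprox(B) && (Y * L.toDenseMatrix()).isApprox(B);
  return ok ? 0 : 1;
}
```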

View File

@@ -434,7 +434,7 @@ template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
 return (Packet4i) vec_perm(MSQ, LSQ, mask); // align the data
 }
 #else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
 template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
 {
 EIGEN_DEBUG_UNALIGNED_LOAD
@@ -500,7 +500,7 @@ template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& f
 vec_st( MSQ, 0, (unsigned char *)to ); // Store the MSQ part
 }
 #else
-// We also need ot redefine little endian loading of Packet4i/Packet4f using VSX
+// We also need to redefine little endian loading of Packet4i/Packet4f using VSX
 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet4i& from)
 {
 EIGEN_DEBUG_ALIGNED_STORE

View File

@@ -242,7 +242,7 @@ Packet2d pexp<Packet2d>(const Packet2d& _x)
 return pmax(pmul(x, Packet2d(_mm_castsi128_pd(emm0))), _x);
 }
-/* evaluation of 4 sines at onces, using SSE2 intrinsics.
+/* evaluation of 4 sines at once, using SSE2 intrinsics.
 The code is the exact rewriting of the cephes sinf function.
 Precision is excellent as long as x < 8192 (I did not bother to

View File

@@ -1523,7 +1523,7 @@ void gebp_kernel<LhsScalar,RhsScalar,Index,DataMapper,mr,nr,ConjugateLhs,Conjuga
 prefetch(&blA[0]);
 const RhsScalar* blB = &blockB[j2*strideB+offsetB*nr];
-// The following piece of code wont work for 512 bit registers
+// The following piece of code won't work for 512 bit registers
 // Moreover, if LhsProgress==8 it assumes that there is a half packet of the same size
 // as nr (which is currently 4) for the return type.
 typedef typename unpacket_traits<SResPacket>::half SResPacketHalf;
@@ -1924,7 +1924,7 @@ EIGEN_DONT_INLINE void gemm_pack_rhs<Scalar, Index, DataMapper, nr, ColMajor, Co
 // const Scalar* b6 = &rhs[(j2+6)*rhsStride];
 // const Scalar* b7 = &rhs[(j2+7)*rhsStride];
 // Index k=0;
-// if(PacketSize==8) // TODO enbale vectorized transposition for PacketSize==4
+// if(PacketSize==8) // TODO enable vectorized transposition for PacketSize==4
 // {
 // for(; k<peeled_k; k+=PacketSize) {
 // PacketBlock<Packet> kernel;

View File

@@ -201,7 +201,7 @@ EIGEN_DONT_INLINE void general_matrix_vector_product<Index,LhsScalar,LhsMapper,C
 }
 /* Optimized row-major matrix * vector product:
-* This algorithm processes 4 rows at onces that allows to both reduce
+* This algorithm processes 4 rows at once that allows to both reduce
 * the number of load/stores of the result by a factor 4 and to reduce
 * the instruction dependency. Moreover, we know that all bands have the
 * same alignment pattern.

View File

@@ -117,7 +117,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
 // compute the number of threads we are going to use
 Index threads = std::min<Index>(nbThreads(), pb_max_threads);
-// if multi-threading is explicitely disabled, not useful, or if we already are in a parallel session,
+// if multi-threading is explicitly disabled, not useful, or if we already are in a parallel session,
 // then abort multi-threading
 // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
 if((!Condition) || (threads==1) || (omp_get_num_threads()>1))

View File

@@ -15,7 +15,7 @@ namespace Eigen {
 namespace internal {
 /* Optimized selfadjoint matrix * vector product:
-* This algorithm processes 2 columns at onces that allows to both reduce
+* This algorithm processes 2 columns at once that allows to both reduce
 * the number of load/stores of the result by a factor 2 and to reduce
 * the instruction dependency.
 */

View File

@@ -719,7 +719,7 @@ namespace Eigen {
 #error EIGEN_MAX_STATIC_ALIGN_BYTES and EIGEN_DONT_ALIGN[_STATICALLY] are both defined with EIGEN_MAX_STATIC_ALIGN_BYTES!=0. Use EIGEN_MAX_STATIC_ALIGN_BYTES=0 as a synonym of EIGEN_DONT_ALIGN_STATICALLY.
 #endif
-// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprectated
+// EIGEN_DONT_ALIGN_STATICALLY and EIGEN_DONT_ALIGN are deprecated
 // They imply EIGEN_MAX_STATIC_ALIGN_BYTES=0
 #if defined(EIGEN_DONT_ALIGN_STATICALLY) || defined(EIGEN_DONT_ALIGN)
 #ifdef EIGEN_MAX_STATIC_ALIGN_BYTES
@@ -778,7 +778,7 @@ namespace Eigen {
 #endif
 // At this stage, EIGEN_MAX_STATIC_ALIGN_BYTES>0 is the true test whether we want to align arrays on the stack or not.
-// It takes into account both the user choice to explicitly enable/disable alignment (by settting EIGEN_MAX_STATIC_ALIGN_BYTES)
+// It takes into account both the user choice to explicitly enable/disable alignment (by setting EIGEN_MAX_STATIC_ALIGN_BYTES)
 // and the architecture config (EIGEN_ARCH_WANTS_STACK_ALIGNMENT).
 // Henceforth, only EIGEN_MAX_STATIC_ALIGN_BYTES should be used.

View File

@@ -703,7 +703,7 @@ template<typename T> void swap(scoped_array<T> &a,scoped_array<T> &b)
 * - 32 bytes alignment if AVX is enabled.
 * - 64 bytes alignment if AVX512 is enabled.
 *
-* This can be controled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
+* This can be controlled using the \c EIGEN_MAX_ALIGN_BYTES macro as documented
 * \link TopicPreprocessorDirectivesPerformance there \endlink.
 *
 * Example:

View File

@@ -272,7 +272,7 @@ template<> struct numeric_limits<unsigned long long>
 #endif
 /** \internal
-* A base class do disable default copy ctor and copy assignement operator.
+* A base class do disable default copy ctor and copy assignment operator.
 */
 class noncopyable
 {

View File

@@ -214,7 +214,7 @@ template<typename _MatrixType> class ComplexEigenSolver
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 ComputationInfo info() const
 {

View File

@@ -212,7 +212,7 @@ template<typename _MatrixType> class ComplexSchur
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 ComputationInfo info() const
 {

View File

@@ -277,7 +277,7 @@ template<typename _MatrixType> class EigenSolver
 template<typename InputType>
 EigenSolver& compute(const EigenBase<InputType>& matrix, bool computeEigenvectors = true);
-/** \returns NumericalIssue if the input contains INF or NaN values or overflow occured. Returns Success otherwise. */
+/** \returns NumericalIssue if the input contains INF or NaN values or overflow occurred. Returns Success otherwise. */
 ComputationInfo info() const
 {
 eigen_assert(m_isInitialized && "EigenSolver is not initialized.");

View File

@@ -121,7 +121,7 @@ class GeneralizedSelfAdjointEigenSolver : public SelfAdjointEigenSolver<_MatrixT
 *
 * \returns Reference to \c *this
 *
-* Accoring to \p options, this function computes eigenvalues and (if requested)
+* According to \p options, this function computes eigenvalues and (if requested)
 * the eigenvectors of one of the following three generalized eigenproblems:
 * - \c Ax_lBx: \f$ Ax = \lambda B x \f$
 * - \c ABx_lx: \f$ ABx = \lambda x \f$

View File

@@ -161,7 +161,7 @@ namespace Eigen {
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 ComputationInfo info() const
 {

View File

@@ -190,7 +190,7 @@ template<typename _MatrixType> class RealSchur
 RealSchur& computeFromHessenberg(const HessMatrixType& matrixH, const OrthMatrixType& matrixQ, bool computeU);
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 ComputationInfo info() const
 {

View File

@@ -337,7 +337,7 @@ template<typename _MatrixType> class SelfAdjointEigenSolver
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 EIGEN_DEVICE_FUNC
 ComputationInfo info() const

View File

@@ -128,7 +128,7 @@ public:
 /** Concatenates a linear transformation matrix and a uniform scaling
 * \relates UniformScaling
 */
-// NOTE this operator is defiend in MatrixBase and not as a friend function
+// NOTE this operator is defined in MatrixBase and not as a friend function
 // of UniformScaling to fix an internal crash of Intel's ICC
 template<typename Derived,typename Scalar>
 EIGEN_EXPR_BINARYOP_SCALAR_RETURN_TYPE(Derived,Scalar,product)

View File

@@ -136,7 +136,7 @@ class IncompleteLUT : public SparseSolverBase<IncompleteLUT<_Scalar, _StorageInd
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears to be negative.
 */
 ComputationInfo info() const
@@ -230,7 +230,7 @@ void IncompleteLUT<Scalar,StorageIndex>::analyzePattern(const _MatrixType& amat)
 SparseMatrix<Scalar,ColMajor, StorageIndex> mat1 = amat;
 SparseMatrix<Scalar,ColMajor, StorageIndex> mat2 = amat.transpose();
 // FIXME for a matrix with nearly symmetric pattern, mat2+mat1 is the appropriate choice.
-// on the other hand for a really non-symmetric pattern, mat2*mat1 should be prefered...
+// on the other hand for a really non-symmetric pattern, mat2*mat1 should be preferred...
 SparseMatrix<Scalar,ColMajor, StorageIndex> AtA = mat2 + mat1;
 AMDOrdering<StorageIndex> ordering;
 ordering(AtA,m_P);

View File

@@ -275,7 +275,7 @@ public:
 const Preconditioner& preconditioner() const { return m_preconditioner; }
 /** \returns the max number of iterations.
-* It is either the value setted by setMaxIterations or, by default,
+* It is either the value set by setMaxIterations or, by default,
 * twice the number of columns of the matrix.
 */
 Index maxIterations() const
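
A hedged sketch of the default described above, using ConjugateGradient on a hypothetical sparse identity system (not part of this commit):

```cpp
#include <Eigen/Sparse>
#include <iostream>

int main() {
  const int n = 100;
  Eigen::SparseMatrix<double> A(n, n);
  A.setIdentity();                                  // trivially SPD system, for illustration only
  Eigen::VectorXd b = Eigen::VectorXd::Ones(n);

  Eigen::ConjugateGradient<Eigen::SparseMatrix<double>> cg;
  cg.compute(A);
  std::cout << "default max iterations: " << cg.maxIterations() << "\n"; // 2 * A.cols()
  cg.setMaxIterations(10);                          // an explicit bound overrides the default
  Eigen::VectorXd x = cg.solve(b);
  std::cout << "iterations used: " << cg.iterations() << "\n";
  return cg.info() == Eigen::Success ? 0 : 1;
}
```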

View File

@@ -106,7 +106,7 @@ class KLU : public SparseSolverBase<KLU<_MatrixType> >
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears to be negative.
 */
 ComputationInfo info() const

View File

@@ -48,7 +48,7 @@ template<typename _MatrixType> struct traits<FullPivLU<_MatrixType> >
 * The data of the LU decomposition can be directly accessed through the methods matrixLU(),
 * permutationP(), permutationQ().
 *
-* As an exemple, here is how the original matrix can be retrieved:
+* As an example, here is how the original matrix can be retrieved:
 * \include class_FullPivLU.cpp
 * Output: \verbinclude class_FullPivLU.out
 *

View File

@@ -420,8 +420,8 @@ struct partial_lu_impl
 * \returns The index of the first pivot which is exactly zero if any, or a negative number otherwise.
 *
 * \note This very low level interface using pointers, etc. is to:
-* 1 - reduce the number of instanciations to the strict minimum
-* 2 - avoid infinite recursion of the instanciations with Block<Block<Block<...> > >
+* 1 - reduce the number of instantiations to the strict minimum
+* 2 - avoid infinite recursion of the instantiations with Block<Block<Block<...> > >
 */
 static Index blocked_lu(Index rows, Index cols, Scalar* lu_data, Index luStride, PivIndex* row_transpositions, PivIndex& nb_transpositions, Index maxBlockSize=256)
 {

View File

@@ -1493,7 +1493,7 @@ static inline void order_children
 c = Col [c].shared1.parent ;
 /* continue until we hit an ordered column. There are */
-/* guarranteed not to be anymore unordered columns */
+/* guaranteed not to be anymore unordered columns */
 /* above an ordered column */
 } while (Col [c].shared2.order == COLAMD_EMPTY) ;
@@ -1638,7 +1638,7 @@ static void detect_super_cols
 COLAMD_ASSERT (ROW_IS_ALIVE (*cp1)) ;
 COLAMD_ASSERT (ROW_IS_ALIVE (*cp2)) ;
 /* row indices will same order for both supercols, */
-/* no gather scatter nessasary */
+/* no gather scatter necessary */
 if (*cp1++ != *cp2++)
 {
 break ;
@@ -1688,7 +1688,7 @@ static void detect_super_cols
 /*
 Defragments and compacts columns and rows in the workspace A. Used when
-all avaliable memory has been used while performing row merging. Returns
+all available memory has been used while performing row merging. Returns
 the index of the first free position in A, after garbage collection. The
 time taken by this routine is linear is the size of the array A, which is
 itself linear in the number of nonzeros in the input matrix.

View File

@@ -203,7 +203,7 @@ class PastixBase : public SparseSolverBase<Derived>
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the PaStiX reports a problem
 * \c InvalidInput if the input matrix is invalid
 *

View File

@@ -140,7 +140,7 @@ class PardisoImpl : public SparseSolverBase<Derived>
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix appears to be negative.
 */
 ComputationInfo info() const

View File

@@ -402,7 +402,7 @@ template<typename _MatrixType> class ColPivHouseholderQR
 */
 RealScalar maxPivot() const { return m_maxpivot; }
-/** \brief Reports whether the QR factorization was succesful.
+/** \brief Reports whether the QR factorization was successful.
 *
 * \note This function always returns \c Success. It is provided for compatibility
 * with other factorization routines.

View File

@@ -353,7 +353,7 @@ class CompleteOrthogonalDecomposition {
 inline RealScalar maxPivot() const { return m_cpqr.maxPivot(); }
 /** \brief Reports whether the complete orthogonal decomposition was
-* succesful.
+* successful.
 *
 * \note This function always returns \c Success. It is provided for
 * compatibility

View File

@@ -220,7 +220,7 @@ class SPQR : public SparseSolverBase<SPQR<_MatrixType> >
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the sparse QR can not be computed
 */
 ComputationInfo info() const

View File

@@ -62,7 +62,7 @@ struct traits<BDCSVD<_MatrixType> >
 * recommended and can several order of magnitude faster.
 *
 * \warning this algorithm is unlikely to provide accurate result when compiled with unsafe math optimizations.
-* For instance, this concerns Intel's compiler (ICC), which perfroms such optimization by default unless
+* For instance, this concerns Intel's compiler (ICC), which performs such optimization by default unless
 * you compile with the \c -fp-model \c precise option. Likewise, the \c -ffast-math option of GCC or clang will
 * significantly degrade the accuracy.
 *

View File

@@ -202,7 +202,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
 {
 SubColumnType y_k( Y.col(k).tail(remainingCols) );
-// let's use the begining of column k of Y as a temporary vector
+// let's use the beginning of column k of Y as a temporary vector
 SubColumnType tmp( Y.col(k).head(k) );
 y_k.noalias() = A.block(k,k+1, remainingRows,remainingCols).adjoint() * v_k; // bottleneck
 tmp.noalias() = V_k1.adjoint() * v_k;
@@ -231,7 +231,7 @@ void upperbidiagonalization_blocked_helper(MatrixType& A,
 {
 SubColumnType x_k ( X.col(k).tail(remainingRows-1) );
-// let's use the begining of column k of X as a temporary vectors
+// let's use the beginning of column k of X as a temporary vectors
 // note that tmp0 and tmp1 overlaps
 SubColumnType tmp0 ( X.col(k).head(k) ),
 tmp1 ( X.col(k).head(k+1) );

View File

@@ -101,7 +101,7 @@ class SimplicialCholeskyBase : public SparseSolverBase<Derived>
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears to be negative.
 */
 ComputationInfo info() const

View File

@@ -21,7 +21,7 @@ namespace Eigen {
 * This class implements a more versatile variants of the common \em compressed row/column storage format.
 * Each colmun's (resp. row) non zeros are stored as a pair of value with associated row (resp. colmiun) index.
 * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
-* space inbetween the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
+* space in between the nonzeros of two successive colmuns (resp. rows) such that insertion of new non-zero
 * can be done with limited memory reallocation and copies.
 *
 * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
@@ -503,7 +503,7 @@ class SparseMatrix
 }
 }
-/** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerence \a epsilon */
+/** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
 void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
 {
 prune(default_prunning_func(reference,epsilon));
@@ -986,7 +986,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
 *
 * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
 * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
-* be explicitely stored into a std::vector for instance.
+* be explicitly stored into a std::vector for instance.
 */
 template<typename Scalar, int _Options, typename _StorageIndex>
 template<typename InputIterators>
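
A minimal sketch of the recommendation above, collecting the triplets in a std::vector before calling setFromTriplets() (hypothetical 3x3 matrix; not part of this commit):

```cpp
#include <Eigen/Sparse>
#include <vector>

int main() {
  std::vector<Eigen::Triplet<double>> triplets;   // explicit storage, cheap to iterate twice
  triplets.emplace_back(0, 0, 1.0);
  triplets.emplace_back(1, 2, 2.5);
  triplets.emplace_back(2, 1, -3.0);

  Eigen::SparseMatrix<double> S(3, 3);
  S.setFromTriplets(triplets.begin(), triplets.end()); // reads the list at least twice
  return S.nonZeros() == 3 ? 0 : 1;
}
```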

View File

@@ -17,7 +17,7 @@ namespace Eigen {
 * The automatic pruning of the small values can be achieved by calling the pruned() function
 * in which case a totally different product algorithm is employed:
 * \code
-* C = (A*B).pruned(); // supress numerical zeros (exact)
+* C = (A*B).pruned(); // suppress numerical zeros (exact)
 * C = (A*B).pruned(ref);
 * C = (A*B).pruned(ref,epsilon);
 * \endcode

View File

@@ -281,7 +281,7 @@ class SparseVector
 }
 /** Swaps the values of \c *this and \a other.
-* Overloaded for performance: this version performs a \em shallow swap by swaping pointers and attributes only.
+* Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only.
 * \sa SparseMatrixBase::swap()
 */
 inline void swap(SparseVector& other)

View File

@@ -193,7 +193,7 @@ class SparseLU : public SparseSolverBase<SparseLU<_MatrixType,_OrderingType> >,
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the LU factorization reports a problem, zero diagonal for instance
 * \c InvalidInput if the input matrix is invalid
 *

View File

@@ -51,7 +51,7 @@ inline Index LUTempSpace(Index&m, Index& w)
 /**
-* Expand the existing storage to accomodate more fill-ins
+* Expand the existing storage to accommodate more fill-ins
 * \param vec Valid pointer to the vector to allocate or expand
 * \param[in,out] length At input, contain the current length of the vector that is to be increased. At output, length of the newly allocated vector
 * \param[in] nbElts Current number of elements in the factors

View File

@@ -151,7 +151,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
 StorageIndex ito = glu.xlsub(fsupc+1);
 glu.xlsub(jcolm1) = ito;
 StorageIndex istop = ito + jptr - jm1ptr;
-xprune(jcolm1) = istop; // intialize xprune(jcol-1)
+xprune(jcolm1) = istop; // initialize xprune(jcol-1)
 glu.xlsub(jcol) = istop;
 for (StorageIndex ifrom = jm1ptr; ifrom < nextl; ++ifrom, ++ito)
@@ -166,7 +166,7 @@ Index SparseLUImpl<Scalar,StorageIndex>::column_dfs(const Index m, const Index j
 // Tidy up the pointers before exit
 glu.xsup(nsuper+1) = jcolp1;
 glu.supno(jcolp1) = nsuper;
-xprune(jcol) = StorageIndex(nextl); // Intialize upper bound for pruning
+xprune(jcol) = StorageIndex(nextl); // Initialize upper bound for pruning
 glu.xlsub(jcolp1) = StorageIndex(nextl);
 return 0;

View File

@@ -215,7 +215,7 @@ void sparselu_gemm(Index m, Index n, Index d, const Scalar* A, Index lda, const
 if(RK==4){ a3 = pload<Packet>(A3+i+(I+1)*PacketSize); }\
 pstore(C0+i+(I)*PacketSize, c0);
-// agressive vectorization and peeling
+// aggressive vectorization and peeling
 for(Index i=0; i<actual_b_end1; i+=PacketSize*8)
 {
 EIGEN_ASM_COMMENT("SPARSELU_GEMML_KERNEL2");

View File

@@ -38,7 +38,7 @@ namespace internal {
 * \brief Performs numeric block updates (sup-panel) in topological order.
 *
 * Before entering this routine, the original nonzeros in the panel
-* were already copied i nto the spa[m,w]
+* were already copied into the spa[m,w]
 *
 * \param m number of rows in the matrix
 * \param w Panel size

View File

@@ -352,7 +352,7 @@ class SuperLUBase : public SparseSolverBase<Derived>
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears to be negative.
 */
 ComputationInfo info() const

View File

@@ -201,7 +201,7 @@ class UmfPackLU : public SparseSolverBase<UmfPackLU<_MatrixType> >
 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful,
+* \returns \c Success if computation was successful,
 * \c NumericalIssue if the matrix.appears to be negative.
 */
 ComputationInfo info() const

View File

@@ -112,7 +112,7 @@ operator()(const RowIndices& rowIndices, const ColIndices& colIndices) EIGEN_IND
 #if EIGEN_HAS_STATIC_ARRAY_TEMPLATE
-// The folowing three overloads are needed to handle raw Index[N] arrays.
+// The following three overloads are needed to handle raw Index[N] arrays.
 template<typename RowIndicesT, std::size_t RowIndicesN, typename ColIndices>
 IndexedView<EIGEN_INDEXED_VIEW_METHOD_CONST Derived,const RowIndicesT (&)[RowIndicesN],typename IvcColType<ColIndices>::type>

View File

@@ -825,7 +825,7 @@ int main(int argc, char* argv[])
 }
 for (int i = 1; i < argc; i++) {
 bool arg_handled = false;
-// Step 1. Try to match action invokation names.
+// Step 1. Try to match action invocation names.
 for (auto it = available_actions.begin(); it != available_actions.end(); ++it) {
 if (!strcmp(argv[i], (*it)->invokation_name())) {
 if (!action) {

View File

@@ -36,7 +36,7 @@ For instance:
 You can also select a given set of actions defining the environment variable BTL_CONFIG this way:
 BTL_CONFIG="-a action1{:action2}*" ctest -V
-An exemple:
+An example:
 BTL_CONFIG="-a axpy:vector_matrix:trisolve:ata" ctest -V -R eigen2
 Finally, if bench results already exist (the bench*.dat files) then they merges by keeping the best for each matrix size. If you want to overwrite the previous ones you can simply add the "--overwrite" option:

View File

@@ -159,7 +159,7 @@ BTL_DONT_INLINE void bench( int size_min, int size_max, int nb_point ){
 // bench<Mixed_Perf_Analyzer,Action>(size_min,size_max,nb_point);
-// Only for small problem size. Otherwize it will be too long
+// Only for small problem size. Otherwise it will be too long
 // bench<X86_Perf_Analyzer,Action>(size_min,size_max,nb_point);
 // bench<STL_Perf_Analyzer,Action>(size_min,size_max,nb_point);

View File

@@ -23,7 +23,7 @@
 #include "math.h"
 // The Vector class must satisfy the following part of STL vector concept :
 // resize() method
-// [] operator for seting element
+// [] operator for setting element
 // the vector element are int compatible.
 template<class Vector>
 void size_log(const int nb_point, const int size_min, const int size_max, Vector & X)

View File

@@ -55,7 +55,7 @@ bool read_xy_file(const std::string & filename, std::vector<int> & tab_sizes,
 // The Vector class must satisfy the following part of STL vector concept :
 // resize() method
-// [] operator for seting element
+// [] operator for setting element
 // the vector element must have the << operator define
 using namespace std;

View File

@@ -100,7 +100,7 @@ public :
 Y+=coef*X;
 }
-// alias free assignements
+// alias free assignments
 static inline void matrix_vector_product(gene_matrix & A, gene_vector & B, gene_vector & X, int N){
 X.assign(prod(A,B));

View File

@@ -101,7 +101,7 @@ void eigen33(const Matrix& mat, Matrix& evecs, Vector& evals)
 computeRoots(scaledMat,evals);
 // compute the eigen vectors
-// **here we assume 3 differents eigenvalues**
+// **here we assume 3 different eigenvalues**
 // "optimized version" which appears to be slower with gcc!
 // Vector base;

View File

@@ -54,7 +54,7 @@ int main(int argc, char ** args)
 statbuf.close();
 }
 else
-std::cerr << "Unable to open the provided file for writting... \n";
+std::cerr << "Unable to open the provided file for writing... \n";
 }
 // Get the maximum number of iterations and the tolerance

View File

@@ -147,7 +147,7 @@
 /* ( 1 + ( n - 1 )*abs( INCX ) ). */
 /* Before entry, the incremented array X must contain the n */
 /* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
 /* INCX - INTEGER. */
 /* On entry, INCX specifies the increment for the elements of */

View File

@@ -143,7 +143,7 @@
 /* ( 1 + ( n - 1 )*abs( INCX ) ). */
 /* Before entry, the incremented array X must contain the n */
 /* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
 /* INCX - INTEGER. */
 /* On entry, INCX specifies the increment for the elements of */

View File

@@ -143,7 +143,7 @@
 /* ( 1 + ( n - 1 )*abs( INCX ) ). */
 /* Before entry, the incremented array X must contain the n */
 /* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
 /* INCX - INTEGER. */
 /* On entry, INCX specifies the increment for the elements of */

View File

@@ -147,7 +147,7 @@
 /* ( 1 + ( n - 1 )*abs( INCX ) ). */
 /* Before entry, the incremented array X must contain the n */
 /* element vector x. On exit, X is overwritten with the */
-/* tranformed vector x. */
+/* transformed vector x. */
 /* INCX - INTEGER. */
 /* On entry, INCX specifies the increment for the elements of */

View File

@@ -33,7 +33,7 @@ int EIGEN_BLAS_FUNC(copy)(int *n, RealScalar *px, int *incx, RealScalar *py, int
 Scalar* x = reinterpret_cast<Scalar*>(px);
 Scalar* y = reinterpret_cast<Scalar*>(py);
-// be carefull, *incx==0 is allowed !!
+// be careful, *incx==0 is allowed !!
 if(*incx==1 && *incy==1)
 make_vector(y,*n) = make_vector(x,*n);
 else

View File

@@ -619,7 +619,7 @@
 SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
 * ************************* STEST1 *****************************
 *
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
 * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
 * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
 *

View File

@@ -990,7 +990,7 @@
 SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
 * ************************* STEST1 *****************************
 *
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
 * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
 * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
 *

View File

@@ -946,7 +946,7 @@
 SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
 * ************************* STEST1 *****************************
 *
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
 * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
 * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
 *

View File

@@ -619,7 +619,7 @@
 SUBROUTINE STEST1(SCOMP1,STRUE1,SSIZE,SFAC)
 * ************************* STEST1 *****************************
 *
-* THIS IS AN INTERFACE SUBROUTINE TO ACCOMODATE THE FORTRAN
+* THIS IS AN INTERFACE SUBROUTINE TO ACCOMMODATE THE FORTRAN
 * REQUIREMENT THAT WHEN A DUMMY ARGUMENT IS AN ARRAY, THE
 * ACTUAL ARGUMENT MUST ALSO BE AN ARRAY OR AN ARRAY ELEMENT.
 *

View File

@@ -20,7 +20,7 @@ include(CTest)
 set(EIGEN_TEST_BUILD_FLAGS "" CACHE STRING "Options passed to the build command of unit tests")
 # Overwrite default DartConfiguration.tcl such that ctest can build our unit tests.
-# Recall that our unit tests are not in the "all" target, so we have to explicitely ask ctest to build our custom 'buildtests' target.
+# Recall that our unit tests are not in the "all" target, so we have to explicitly ask ctest to build our custom 'buildtests' target.
 # At this stage, we can also add custom flags to the build tool through the user defined EIGEN_TEST_BUILD_FLAGS variable.
 file(READ "${CMAKE_CURRENT_BINARY_DIR}/DartConfiguration.tcl" EIGEN_DART_CONFIG_FILE)
 # try to grab the default flags
@@ -39,7 +39,7 @@ ei_init_testing()
 # configure Eigen related testing options
 option(EIGEN_NO_ASSERTION_CHECKING "Disable checking of assertions using exceptions" OFF)
-option(EIGEN_DEBUG_ASSERTS "Enable advanced debuging of assertions" OFF)
+option(EIGEN_DEBUG_ASSERTS "Enable advanced debugging of assertions" OFF)
 if(CMAKE_COMPILER_IS_GNUCXX)
 option(EIGEN_COVERAGE_TESTING "Enable/disable gcov" OFF)

View File

@@ -247,7 +247,7 @@ endmacro(ei_add_test_internal_sycl)
 #
 # If EIGEN_SPLIT_LARGE_TESTS is ON, the test is split into multiple executables
 # test_<testname>_<N>
-# where N runs from 1 to the greatest occurence found in the source file. Each of these
+# where N runs from 1 to the greatest occurrence found in the source file. Each of these
 # executables is built passing -DEIGEN_TEST_PART_N. This allows to split large tests
 # into smaller executables.
 #
@@ -269,8 +269,8 @@ macro(ei_add_test testname)
 file(READ "${filename}" test_source)
 set(parts 0)
 string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
-occurences "${test_source}")
-string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+occurrences "${test_source}")
+string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
 list(REMOVE_DUPLICATES suffixes)
 if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
 add_custom_target(${testname})
@@ -303,8 +303,8 @@ macro(ei_add_test_sycl testname)
 file(READ "${filename}" test_source)
 set(parts 0)
 string(REGEX MATCHALL "CALL_SUBTEST_[0-9]+|EIGEN_TEST_PART_[0-9]+|EIGEN_SUFFIXES(;[0-9]+)+"
-occurences "${test_source}")
-string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurences}")
+occurrences "${test_source}")
+string(REGEX REPLACE "CALL_SUBTEST_|EIGEN_TEST_PART_|EIGEN_SUFFIXES" "" suffixes "${occurrences}")
 list(REMOVE_DUPLICATES suffixes)
 if(EIGEN_SPLIT_LARGE_TESTS AND suffixes)
 add_custom_target(${testname})

View File

@@ -243,7 +243,7 @@ endfunction()
 #######################
 #
 # Adds a SYCL compilation custom command associated with an existing
-# target and sets a dependancy on that new command.
+# target and sets a dependency on that new command.
 #
 # targetName : Name of the target to add a SYCL to.
 # binaryDir : Intermediate directory to output the integration header.

View File

@ -15,7 +15,7 @@
# Eigen3::Eigen - The header-only Eigen library # Eigen3::Eigen - The header-only Eigen library
# #
# This module reads hints about search locations from # This module reads hints about search locations from
# the following enviroment variables: # the following environment variables:
# #
# EIGEN3_ROOT # EIGEN3_ROOT
# EIGEN3_ROOT_DIR # EIGEN3_ROOT_DIR

View File

@ -14,7 +14,7 @@
; * - Eigen::Matrix<*,-1,+,*,*,*> ; * - Eigen::Matrix<*,-1,+,*,*,*>
; * - Eigen::Matrix<*,+,+,*,*,*> ; * - Eigen::Matrix<*,+,+,*,*,*>
; * ; *
; * Matrices are displayed properly independantly of the memory ; * Matrices are displayed properly independently of the memory
; * alignment (RowMajor vs. ColMajor). ; * alignment (RowMajor vs. ColMajor).
; * ; *
; * This file is distributed WITHOUT ANY WARRANTY. Please ensure ; * This file is distributed WITHOUT ANY WARRANTY. Please ensure

View File

@ -1764,7 +1764,7 @@ UML_LOOK = YES
# the class node. If there are many fields or methods and many nodes the # the class node. If there are many fields or methods and many nodes the
# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS # graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
# threshold limits the number of items for each type to make the size more # threshold limits the number of items for each type to make the size more
# managable. Set this to 0 for no limit. Note that the threshold may be # manageable. Set this to 0 for no limit. Note that the threshold may be
# exceeded by 50% before the limit is enforced. # exceeded by 50% before the limit is enforced.
UML_LIMIT_NUM_FIELDS = 10 UML_LIMIT_NUM_FIELDS = 10

View File

@ -133,7 +133,7 @@ In this special case, the example is fine and will be working because both param
\section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail? \section TopicPlainFunctionsFailing In which cases do functions taking a plain Matrix or Array argument fail?
Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const paramter which allows us to store the result. A first naive implementation might look as follows. Here, we consider a slightly modified version of the function given above. This time, we do not want to return the result but pass an additional non-const parameter which allows us to store the result. A first naive implementation might look as follows.
\code \code
// Note: This code is flawed! // Note: This code is flawed!
void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C) void cov(const MatrixXf& x, const MatrixXf& y, MatrixXf& C)
@ -176,7 +176,7 @@ The implementation above does now not only work with temporary expressions but i
\section TopicResizingInGenericImplementations How to resize matrices in generic implementations? \section TopicResizingInGenericImplementations How to resize matrices in generic implementations?
One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the follwing code to work One might think we are done now, right? This is not completely true because in order for our covariance function to be generically applicable, we want the following code to work
\code \code
MatrixXf x = MatrixXf::Random(100,3); MatrixXf x = MatrixXf::Random(100,3);
MatrixXf y = MatrixXf::Random(100,3); MatrixXf y = MatrixXf::Random(100,3);
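For reference, a minimal sketch of the pattern this page works towards is shown below; the body is illustrative (it assumes one observation per row and a common scalar type), the relevant pieces being the const MatrixBase<> output parameter, the const_cast before writing, and the resize through derived().
\code
#include <Eigen/Dense>
using namespace Eigen;

template <typename Derived, typename OtherDerived>
void cov(const MatrixBase<Derived>& x, const MatrixBase<Derived>& y,
         const MatrixBase<OtherDerived>& C_)
{
  typedef typename Derived::Scalar Scalar;
  typedef Matrix<Scalar, 1, Derived::ColsAtCompileTime> RowVectorType;

  const Scalar num = Scalar(x.rows());
  const RowVectorType x_mean = x.colwise().sum() / num;
  const RowVectorType y_mean = y.colwise().sum() / num;

  // cast the constness away so writable expressions (e.g. blocks) are accepted
  MatrixBase<OtherDerived>& C = const_cast<MatrixBase<OtherDerived>&>(C_);
  C.derived().resize(x.cols(), x.cols());   // gives plain matrices the right size
  C = (x.rowwise() - x_mean).transpose() * (y.rowwise() - y_mean) / num;
}
\endcode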

View File

@ -51,7 +51,7 @@ are doing.
\section TopicPreprocessorDirectivesCppVersion C++ standard features \section TopicPreprocessorDirectivesCppVersion C++ standard features
By default, %Eigen strive to automatically detect and enable langage features at compile-time based on By default, %Eigen strives to automatically detect and enable language features at compile-time based on
the information provided by the compiler. the information provided by the compiler.
- \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER. - \b EIGEN_MAX_CPP_VER - disables usage of C++ features requiring a version greater than EIGEN_MAX_CPP_VER.
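As a hedged illustration (03 is just one of the documented values), capping the detected standard looks like this:
\code
// must appear before the first Eigen header is included
#define EIGEN_MAX_CPP_VER 03   // do not use C++ features newer than C++03
#include <Eigen/Dense>

int main()
{
  Eigen::Matrix3f m = Eigen::Matrix3f::Identity();
  return m(0, 0) == 1.0f ? 0 : 1;
}
\endcode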

View File

@ -68,7 +68,7 @@ The output is as follows:
The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics. The second example starts by declaring a 3-by-3 matrix \c m which is initialized using the \link DenseBase::Random(Index,Index) Random() \endlink method with random values between -1 and 1. The next line applies a linear mapping such that the values are between 10 and 110. The function call \link DenseBase::Constant(Index,Index,const Scalar&) MatrixXd::Constant\endlink(3,3,1.2) returns a 3-by-3 matrix expression having all coefficients equal to 1.2. The rest is standard arithmetics.
The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left unitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows: The next line of the \c main function introduces a new type: \c VectorXd. This represents a (column) vector of arbitrary size. Here, the vector \c v is created to contain \c 3 coefficients which are left uninitialized. The one but last line uses the so-called comma-initializer, explained in \ref TutorialAdvancedInitialization, to set all coefficients of the vector \c v to be as follows:
\f[ \f[
v = v =

View File

@ -80,7 +80,7 @@ sm1.setZero();
\section SparseBasicInfos Matrix properties \section SparseBasicInfos Matrix properties
Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some informations from the matrix. Beyond the basic functions rows() and cols(), there are some useful functions that are available to easily get some information from the matrix.
<table class="manual"> <table class="manual">
<tr> <tr>
<td> \code <td> \code

View File

@ -76,7 +76,7 @@ point where the template is defined, without knowing the actual value of the tem
and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is and \c Derived2 in the example). That means that the compiler cannot know that <tt>dst.triangularView</tt> is
a member template and that the following &lt; symbol is part of the delimiter for the template a member template and that the following &lt; symbol is part of the delimiter for the template
parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt; parameter. Another possibility would be that <tt>dst.triangularView</tt> is a member variable with the &lt;
symbol refering to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second symbol referring to the <tt>operator&lt;()</tt> function. In fact, the compiler should choose the second
possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case), possibility, according to the standard. If <tt>dst.triangularView</tt> is a member template (as in our case),
the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template the programmer should specify this explicitly with the \c template keyword and write <tt>dst.template
triangularView</tt>. triangularView</tt>.
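Put together, the situation described above reduces to a sketch like the following (the wrapper function and its name are illustrative):
\code
#include <Eigen/Dense>
using namespace Eigen;

// Derived1/Derived2 are unknown when this is parsed, so the explicit
// 'template' keyword tells the compiler that '<' opens a template argument
// list rather than being a call to operator<
template <typename Derived1, typename Derived2>
void copyUpperPart(MatrixBase<Derived1>& dst, const MatrixBase<Derived2>& src)
{
  dst.template triangularView<Upper>() = src.template triangularView<Upper>();
}
\endcode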

View File

@ -58,7 +58,7 @@ the product <tt>matrix3 * matrix4</tt> gets evaluated immediately into a tempora
\code matrix1 = matrix2 * (matrix3 + matrix4); \endcode \code matrix1 = matrix2 * (matrix3 + matrix4); \endcode
Here, provided the matrices have at least 2 rows and 2 columns, each coefficienct of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum everytime, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product. Here, provided the matrices have at least 2 rows and 2 columns, each coefficient of the expression <tt>matrix3 + matrix4</tt> is going to be used several times in the matrix product. Instead of computing the sum every time, it is much better to compute it once and store it in a temporary variable. Eigen understands this and evaluates <tt>matrix3 + matrix4</tt> into a temporary variable before evaluating the product.
*/ */
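Spelled out by hand, the behaviour described above corresponds roughly to the following (the helper function and the declarations are added only to make the snippet self-contained):
\code
#include <Eigen/Dense>
using namespace Eigen;

void sumThenProduct(MatrixXf& matrix1, const MatrixXf& matrix2,
                    const MatrixXf& matrix3, const MatrixXf& matrix4)
{
  MatrixXf tmp = matrix3 + matrix4;  // the sum is evaluated once into a temporary
  matrix1 = matrix2 * tmp;           // the product then reuses each coefficient of tmp
}
\endcode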

View File

@ -248,7 +248,7 @@ To get an overview of the true relative speed of the different decomposition, ch
<dt><b>Blocking</b></dt> <dt><b>Blocking</b></dt>
<dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd> <dd>Means the algorithm can work per block, whence guaranteeing a good scaling of the performance for large matrices.</dd>
<dt><b>Implicit Multi Threading (MT)</b></dt> <dt><b>Implicit Multi Threading (MT)</b></dt>
<dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algortihm itself is not parallelized, but that it relies on parallelized matrix-matrix product rountines.</dd> <dd>Means the algorithm can take advantage of multicore processors via OpenMP. "Implicit" means the algorithm itself is not parallelized, but that it relies on parallelized matrix-matrix product routines.</dd>
<dt><b>Explicit Multi Threading (MT)</b></dt> <dt><b>Explicit Multi Threading (MT)</b></dt>
<dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd> <dd>Means the algorithm is explicitly parallelized to take advantage of multicore processors via OpenMP.</dd>
<dt><b>Meta-unroller</b></dt> <dt><b>Meta-unroller</b></dt>

View File

@ -47,7 +47,7 @@ int main(int argc, char** argv)
\warning note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to Eigen::initParallel(). This is because these functions are based on std::rand which is not re-entrant. For thread-safe random generator, we recommend the use of boost::random or c++11 random feature. \warning note that all functions generating random matrices are \b not re-entrant nor thread-safe. Those include DenseBase::Random(), and DenseBase::setRandom() despite a call to Eigen::initParallel(). This is because these functions are based on std::rand which is not re-entrant. For thread-safe random generator, we recommend the use of boost::random or c++11 random feature.
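A minimal sketch of the recommended alternative, assuming C++11 is available (the function name and the distribution bounds are illustrative):
\code
#include <Eigen/Dense>
#include <random>
using namespace Eigen;

// fill a matrix from a caller-owned generator instead of the global std::rand
// state, so each thread can safely use its own generator
MatrixXd randomMatrix(Index rows, Index cols, std::mt19937& gen)
{
  std::uniform_real_distribution<double> dist(-1.0, 1.0);
  return MatrixXd::NullaryExpr(rows, cols, [&]() { return dist(gen); });
}
\endcode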
In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallization as detailed in the previous section. In the case your application is parallelized with OpenMP, you might want to disable Eigen's own parallelization as detailed in the previous section.
*/ */

View File

@ -29,9 +29,9 @@ Map<const Vector4i> mi(pi);
\endcode \endcode
where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type. where \c pi is an \c int \c *. In this case the size does not have to be passed to the constructor, because it is already specified by the Matrix/Array type.
Note that Map does not have a default constructor; you \em must pass a pointer to intialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew). Note that Map does not have a default constructor; you \em must pass a pointer to initialize the object. However, you can work around this requirement (see \ref TutorialMapPlacementNew).
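For reference, the placement-new workaround mentioned above boils down to a sketch like this (the buffer contents are arbitrary):
\code
#include <Eigen/Dense>
#include <new>
using namespace Eigen;

void remapExample()
{
  int data[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  Map<Vector4i> m(data);             // views data[0..3]
  new (&m) Map<Vector4i>(data + 4);  // re-seat onto data[4..7]; no allocation happens
}
\endcode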
Map is flexible enough to accomodate a variety of different data representations. There are two other (optional) template parameters: Map is flexible enough to accommodate a variety of different data representations. There are two other (optional) template parameters:
\code \code
Map<typename MatrixType, Map<typename MatrixType,
int MapOptions, int MapOptions,

View File

@ -57,7 +57,7 @@ The \c "_" indicates available free space to quickly insert new elements.
Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector. Assuming no reallocation is needed, the insertion of a random element is therefore in O(nnz_j) where nnz_j is the number of nonzeros of the respective inner vector.
On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation. On the other hand, inserting elements with increasing inner indices in a given inner vector is much more efficient since this only requires to increase the respective \c InnerNNZs entry that is a O(1) operation.
The case where no empty space is available is a special case, and is refered as the \em compressed mode. The case where no empty space is available is a special case, and is referred as the \em compressed mode.
It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS). It corresponds to the widely used Compressed Column (or Row) Storage schemes (CCS or CRS).
Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function. Any SparseMatrix can be turned to this form by calling the SparseMatrix::makeCompressed() function.
In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j]. In this case, one can remark that the \c InnerNNZs array is redundant with \c OuterStarts because we the equality: \c InnerNNZs[j] = \c OuterStarts[j+1]-\c OuterStarts[j].
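A small illustration of switching between the two modes described above (the size and the inserted entry are arbitrary):
\code
#include <Eigen/Sparse>
using namespace Eigen;

void compressExample()
{
  SparseMatrix<double> mat(1000, 1000);
  mat.insert(3, 7) = 1.0;   // insertion generally leaves the matrix uncompressed
  mat.makeCompressed();     // squeeze out free space into plain CCS storage
                            // (the InnerNNZs array becomes redundant)
  bool compressed = mat.isCompressed();  // true at this point
  (void)compressed;
}
\endcode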
@ -212,7 +212,7 @@ See the SparseMatrix::setFromTriplets() function and class Triplet for more deta
In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix. In some cases, however, slightly higher performance, and lower memory consumption can be reached by directly inserting the non-zeros into the destination matrix.
A typical scenario of this approach is illustrated bellow: A typical scenario of this approach is illustrated below:
\code \code
1: SparseMatrix<double> mat(rows,cols); // default is column major 1: SparseMatrix<double> mat(rows,cols); // default is column major
2: mat.reserve(VectorXi::Constant(cols,6)); 2: mat.reserve(VectorXi::Constant(cols,6));

View File

@ -117,8 +117,8 @@ It doesn't disable 16-byte alignment, because that would mean that vectorized an
\section checkmycode How can I check my code is safe regarding alignment issues? \section checkmycode How can I check my code is safe regarding alignment issues?
Unfortunately, there is no possibility in C++ to detect any of the aformentioned shortcoming at compile time (though static analysers are becoming more and more powerful and could detect some of them). Unfortunately, there is no possibility in C++ to detect any of the aforementioned shortcoming at compile time (though static analysers are becoming more and more powerful and could detect some of them).
Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the begining of this page. Even at runtime, all we can do is to catch invalid unaligned allocation and trigger the explicit assertion mentioned at the beginning of this page.
Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffer are aligned on 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that required 32 bytes alignment by default. Therefore, if your program runs fine on a given system with some given compilation flags, then this does not guarantee that your code is safe. For instance, on most 64 bits systems buffer are aligned on 16 bytes boundary and so, if you do not enable AVX instruction set, then your code will run fine. On the other hand, the same code may assert if moving to a more exotic platform, or enabling AVX instructions that required 32 bytes alignment by default.
The situation is not hopeless though. Assuming your code is well covered by unit test, then you can check its alignment safety by linking it to a custom malloc library returning 8 bytes aligned buffers only. This way all alignment shortcomings should pop-up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink. The situation is not hopeless though. Assuming your code is well covered by unit test, then you can check its alignment safety by linking it to a custom malloc library returning 8 bytes aligned buffers only. This way all alignment shortcomings should pop-up. To this end, you must also compile your program with \link TopicPreprocessorDirectivesPerformance EIGEN_MALLOC_ALREADY_ALIGNED=0 \endlink.

View File

@ -5,7 +5,7 @@ namespace Eigen {
Starting from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's matrices, vectors, and arrays for fixed size within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc most Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords making them callable from both host and device code. Starting from CUDA 5.5 and Eigen 3.3, it is possible to use Eigen's matrices, vectors, and arrays for fixed size within CUDA kernels. This is especially useful when working on numerous but small problems. By default, when Eigen's headers are included within a .cu file compiled by nvcc most Eigen's functions and methods are prefixed by the \c __device__ \c __host__ keywords making them callable from both host and device code.
This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen's header. This support can be disabled by defining \c EIGEN_NO_CUDA before including any Eigen's header.
This might be usefull to disable some warnings when a .cu file makes use of Eigen on the host side only. This might be useful to disable some warnings when a .cu file makes use of Eigen on the host side only.
However, in both cases, host's SIMD vectorization has to be disabled in .cu files. However, in both cases, host's SIMD vectorization has to be disabled in .cu files.
It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files. It is thus \b strongly \b recommended to properly move all costly host computation from your .cu files to regular .cpp files.
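A hedged sketch of the host-only pattern mentioned above, for a .cu file that needs Eigen on the host side only (the file name is illustrative):
\code
// my_kernels.cu
#define EIGEN_NO_CUDA     // strip the __device__ __host__ annotations: host-only use
#include <Eigen/Dense>

// CUDA kernels below would operate on raw device buffers; costly Eigen host
// computation should still live in regular .cpp files, as recommended above.
\endcode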

View File

@ -93,7 +93,7 @@ table th.inter {
border-color: #cccccc; border-color: #cccccc;
} }
/** class for exemple / output tables **/ /** class for example / output tables **/
table.example { table.example {
} }

View File

@ -12,7 +12,7 @@ int main(int argc, char** argv)
assert(argc==2); assert(argc==2);
int n = 300; // size of the image int n = 300; // size of the image
int m = n*n; // number of unknows (=number of pixels) int m = n*n; // number of unknowns (=number of pixels)
// Assembly: // Assembly:
std::vector<T> coefficients; // list of non-zeros coefficients std::vector<T> coefficients; // list of non-zeros coefficients
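Assuming, as is typical for this kind of example, that \c T is an \c Eigen::Triplet<double>, the assembled list is then turned into the sparse system matrix roughly as follows (a sketch, not part of the excerpt above):
\code
#include <Eigen/Sparse>
#include <vector>

void assemble(int m, const std::vector<Eigen::Triplet<double> >& coefficients,
              Eigen::SparseMatrix<double>& A)
{
  A.resize(m, m);                                              // m = number of unknowns
  A.setFromTriplets(coefficients.begin(), coefficients.end()); // build in one pass
}
\endcode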

View File

@ -35,7 +35,7 @@ set(EigenLapack_SRCS ${EigenLapack_SRCS}
second_NONE.f dsecnd_NONE.f second_NONE.f dsecnd_NONE.f
) )
option(EIGEN_ENABLE_LAPACK_TESTS OFF "Enbale the Lapack unit tests") option(EIGEN_ENABLE_LAPACK_TESTS OFF "Enable the Lapack unit tests")
if(EIGEN_ENABLE_LAPACK_TESTS) if(EIGEN_ENABLE_LAPACK_TESTS)
@ -59,7 +59,7 @@ if(EIGEN_ENABLE_LAPACK_TESTS)
message(STATUS "Setup lapack reference and lapack unit tests") message(STATUS "Setup lapack reference and lapack unit tests")
execute_process(COMMAND tar xzf "lapack_addons_3.4.1.tgz" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}) execute_process(COMMAND tar xzf "lapack_addons_3.4.1.tgz" WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
else() else()
message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests wont be enabled") message(STATUS "Download of lapack_addons_3.4.1.tgz failed, LAPACK unit tests won't be enabled")
set(EIGEN_ENABLE_LAPACK_TESTS false) set(EIGEN_ENABLE_LAPACK_TESTS false)
endif() endif()

View File

@ -1,5 +1,5 @@
# generate split test header file only if it does not yet exist # generate split test header file only if it does not yet exist
# in order to prevent a rebuild everytime cmake is configured # in order to prevent a rebuild every time cmake is configured
if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h) if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "") file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
foreach(i RANGE 1 999) foreach(i RANGE 1 999)

View File

@ -104,7 +104,7 @@ void test_bdcsvd()
CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) ); CALL_SUBTEST_7( BDCSVD<MatrixXf>(10,10) );
// Check that preallocation avoids subsequent mallocs // Check that preallocation avoids subsequent mallocs
// Disbaled because not supported by BDCSVD // Disabled because not supported by BDCSVD
// CALL_SUBTEST_9( svd_preallocate<void>() ); // CALL_SUBTEST_9( svd_preallocate<void>() );
CALL_SUBTEST_2( svd_underoverflow<void>() ); CALL_SUBTEST_2( svd_underoverflow<void>() );

View File

@ -47,7 +47,7 @@ template<typename MatrixType> bool find_pivot(typename MatrixType::Scalar tol, M
return false; return false;
} }
/* Check that two column vectors are approximately equal upto permutations. /* Check that two column vectors are approximately equal up to permutations.
* Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(), * Initially, this method checked that the k-th power sums are equal for all k = 1, ..., vec1.rows(),
* however this strategy is numerically inaccurate because of numerical cancellation issues. * however this strategy is numerically inaccurate because of numerical cancellation issues.
*/ */

View File

@ -241,7 +241,7 @@ template<typename Scalar> void mapQuaternion(void){
const MQuaternionUA& cmq3(mq3); const MQuaternionUA& cmq3(mq3);
VERIFY( &cmq3.x() == &mq3.x() ); VERIFY( &cmq3.x() == &mq3.x() );
// FIXME the following should be ok. The problem is that currently the LValueBit flag // FIXME the following should be ok. The problem is that currently the LValueBit flag
// is used to determine wether we can return a coeff by reference or not, which is not enough for Map<const ...>. // is used to determine whether we can return a coeff by reference or not, which is not enough for Map<const ...>.
//const MCQuaternionUA& cmcq3(mcq3); //const MCQuaternionUA& cmcq3(mcq3);
//VERIFY( &cmcq3.x() == &mcq3.x() ); //VERIFY( &cmcq3.x() == &mcq3.x() );
} }

View File

@ -183,7 +183,7 @@ namespace Eigen
}; };
} }
// If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while // If EIGEN_DEBUG_ASSERTS is defined and if no assertion is triggered while
// one should have been, then the list of excecuted assertions is printed out. // one should have been, then the list of executed assertions is printed out.
// //
// EIGEN_DEBUG_ASSERTS is not enabled by default as it // EIGEN_DEBUG_ASSERTS is not enabled by default as it
// significantly increases the compilation time // significantly increases the compilation time

View File

@ -28,7 +28,7 @@ template<typename T> T negate(const T& x) { return -x; }
} }
} }
// NOTE: we disbale inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU. // NOTE: we disable inlining for this function to workaround a GCC issue when using -O3 and the i387 FPU.
template<typename Scalar> EIGEN_DONT_INLINE template<typename Scalar> EIGEN_DONT_INLINE
bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue) bool isApproxAbs(const Scalar& a, const Scalar& b, const typename NumTraits<Scalar>::Real& refvalue)
{ {