Fix typos found using codespell
This commit is contained in:
parent 405859f18d
commit b3fd93207b
@@ -181,7 +181,7 @@ struct Assignment<DstXprType, Solve<CwiseUnaryOp<internal::scalar_conjugate_op<t
 }
 };

-} // end namepsace internal
+} // end namespace internal

 } // end namespace Eigen
@@ -91,7 +91,7 @@ void parallelize_gemm(const Functor& func, Index rows, Index cols, Index depth,
 // FIXME the transpose variable is only needed to properly split
 // the matrix product when multithreading is enabled. This is a temporary
 // fix to support row-major destination matrices. This whole
-// parallelizer mechanism has to be redisigned anyway.
+// parallelizer mechanism has to be redesigned anyway.
 EIGEN_UNUSED_VARIABLE(depth);
 EIGEN_UNUSED_VARIABLE(transpose);
 func(0,rows, 0,cols);
@@ -108,7 +108,7 @@ struct Assignment<DstXprType, SolveWithGuess<DecType,RhsType,GuessType>, interna
 }
 };

-} // end namepsace internal
+} // end namespace internal

 } // end namespace Eigen
@@ -5,7 +5,7 @@

 /*

-NOTE: thes functions vave been adapted from the LDL library:
+NOTE: these functions have been adapted from the LDL library:

 LDL Copyright (c) 2005 by Timothy A. Davis. All Rights Reserved.
@@ -140,7 +140,7 @@ void check_indexed_view()
 "500 501 502 503 504 505 506 507 508 509")
 );

-// takes the row numer 3, and repeat it 5 times
+// take row number 3, and repeat it 5 times
 VERIFY( MATCH( A(seqN(3,5,0), all),
 "300 301 302 303 304 305 306 307 308 309\n"
 "300 301 302 303 304 305 306 307 308 309\n"
@@ -581,7 +581,7 @@ is not initialized.

 Creates a tensor mapping an existing array of data. The data must not be freed
 until the TensorMap is discarded, and the size of the data must be large enough
-to accomodate of the coefficients of the tensor.
+to accommodate the coefficients of the tensor.

 float data[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11};
 Eigen::TensorMap<Tensor<float, 2>> a(data, 3, 4);
@@ -48,7 +48,7 @@ namespace Eigen {
 *
 * <dl>
 * <dt><b>Relation to other parts of Eigen:</b></dt>
-* <dd>The midterm developement goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
+* <dd>The midterm development goal for this class is to have a similar hierarchy as Eigen uses for matrices, so that
 * taking blocks or using tensors in expressions is easily possible, including an interface with the vector/matrix code
 * by providing .asMatrix() and .asVector() (or similar) methods for rank 2 and 1 tensors. However, currently, the %Tensor
 * class does not provide any of these features and is only available as a stand-alone class that just allows for
@@ -20,7 +20,7 @@ namespace Eigen {
 * \brief The tensor base class.
 *
 * This class is the common parent of the Tensor and TensorMap class, thus
-* making it possible to use either class interchangably in expressions.
+* making it possible to use either class interchangeably in expressions.
 */

 template<typename Derived>
@@ -75,7 +75,7 @@ class TensorXsmmContractionBlocking {
 outer_n_ = outer_n_ != 0 ? outer_n_ : n;
 }
 #else
-// Defaults, possibly overriden per-platform.
+// Defaults, possibly overridden per-platform.
 copyA_ = true;
 copyB_ = false;
@@ -350,7 +350,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT
 // Normal number of notifications for k slice switch is
 // nm_ + nn_ + nm_ * nn_. However, first P - 1 slices will receive only
 // nm_ + nn_ notifications, because they will not receive notifications
-// from preceeding kernels.
+// from preceding kernels.
 state_switch_[x] =
 x == 0
 ? 1
@@ -530,7 +530,7 @@ struct TensorEvaluator<const TensorContractionOp<Indices, LeftArgType, RightArgT

 void kernel(Index m, Index n, Index k) {
 // Note: order of iteration matters here. Iteration over m is innermost
-// because we want to reuse the same packed rhs in consequetive tasks
+// because we want to reuse the same packed rhs in consecutive tasks
 // (rhs fits into L2$ while lhs only into L3$).
 const Index nend = n * gn_ + gn(n);
 const Index mend = m * gm_ + gm(m);
@@ -195,7 +195,7 @@ class TensorCostModel {
 // 11 is L2 cache latency on Haswell.
 // We don't know whether data is in L1, L2 or L3. But we are most interested
 // in single-threaded computational time around 100us-10ms (smaller time
-// is too small for parallelization, larger time is not intersting
+// is too small for parallelization, larger time is not interesting
 // either because we are probably using all available threads already).
 // And for the target time range, L2 seems to be what matters. Data set
 // fitting into L1 is too small to take noticeable time. Data set fitting
@@ -286,7 +286,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
 tileSize =static_cast<Index>(m_queue.get_device(). template get_info<cl::sycl::info::device::max_work_group_size>());
 auto s= m_queue.get_device().template get_info<cl::sycl::info::device::vendor>();
 std::transform(s.begin(), s.end(), s.begin(), ::tolower);
-if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
 tileSize=std::min(static_cast<Index>(256), static_cast<Index>(tileSize));
 }
 rng = n;
@@ -303,7 +303,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
 template<typename Index>
 EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1, Index &tileSize0, Index &tileSize1, Index &rng0, Index &rng1, Index &GRange0, Index &GRange1) const {
 Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
-if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
 max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
 }
 Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -331,7 +331,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
 template<typename Index>
 EIGEN_STRONG_INLINE void parallel_for_setup(Index dim0, Index dim1,Index dim2, Index &tileSize0, Index &tileSize1, Index &tileSize2, Index &rng0, Index &rng1, Index &rng2, Index &GRange0, Index &GRange1, Index &GRange2) const {
 Index max_workgroup_Size = static_cast<Index>(maxSyclThreadsPerBlock());
-if(m_queue.get_device().is_cpu()){ // intel doesnot allow to use max workgroup size
+if(m_queue.get_device().is_cpu()){ // intel doesn't allow to use max workgroup size
 max_workgroup_Size=std::min(static_cast<Index>(256), static_cast<Index>(max_workgroup_Size));
 }
 Index pow_of_2 = static_cast<Index>(std::log2(max_workgroup_Size));
@@ -377,7 +377,7 @@ m_queue(cl::sycl::queue(s, [&](cl::sycl::exception_list l) {
 EIGEN_STRONG_INLINE int majorDeviceVersion() const { return 1; }

 EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
-// OpenCL doesnot have such concept
+// OpenCL doesn't have such concept
 return 2;
 }
@@ -519,7 +519,7 @@ struct SyclDevice {
 return m_queue_stream->maxSyclThreadsPerBlock();
 }
 EIGEN_STRONG_INLINE unsigned long maxSyclThreadsPerMultiProcessor() const {
-// OpenCL doesnot have such concept
+// OpenCL doesn't have such concept
 return m_queue_stream->maxSyclThreadsPerMultiProcessor();
 // return stream_->deviceProperties().maxThreadsPerMultiProcessor;
 }
@@ -544,7 +544,7 @@ struct SyclDevice {
 };
 // This is used as a distingushable device inside the kernel as the sycl device class is not Standard layout.
 // This is internal and must not be used by user. This dummy device allow us to specialise the tensor evaluator
-// inside the kenrel. So we can have two types of eval for host and device. This is required for TensorArgMax operation
+// inside the kernel. So we can have two types of eval for host and device. This is required for TensorArgMax operation
 struct SyclKernelDevice:DefaultDevice{};

 } // end namespace Eigen
@@ -274,7 +274,7 @@ struct TensorEvaluator<const TensorFFTOp<FFT, ArgType, FFTResultType, FFTDir>, D
 }
 }

-// processs the line
+// process the line
 if (is_power_of_two) {
 processDataLineCooleyTukey(line_buf, line_len, log_len);
 }
@@ -12,7 +12,7 @@

 namespace Eigen {

-// MakePointer class is used as a container of the adress space of the pointer
+// MakePointer class is used as a container of the address space of the pointer
 // on the host and on the device. From the host side it generates the T* pointer
 // and when EIGEN_USE_SYCL is used it construct a buffer with a map_allocator to
 // T* m_data on the host. It is always called on the device.
@@ -272,8 +272,8 @@ struct TensorEvaluator<const TensorImagePatchOp<Rows, Cols, ArgType>, Device>
 break;
 default:
 eigen_assert(false && "unexpected padding");
-m_outputCols=0; // silence the uninitialised warnig;
-m_outputRows=0; //// silence the uninitialised warnig;
+m_outputCols=0; // silence the uninitialised warning;
+m_outputRows=0; //// silence the uninitialised warning;
 }
 }
 eigen_assert(m_outputRows > 0);
@@ -167,7 +167,7 @@ struct TensorIntDivisor {
 shift2 = log_div > 1 ? log_div-1 : 0;
 }

-// Must have 0 <= numerator. On platforms that dont support the __uint128_t
+// Must have 0 <= numerator. On platforms that don't support the __uint128_t
 // type numerator should also be less than 2^32-1.
 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
 eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
@@ -106,7 +106,7 @@ struct FullReducer<Self, Op, const Eigen::SyclDevice, Vectorizable> {
 /// if the shared memory is less than the GRange, we set shared_mem size to the TotalSize and in this case one kernel would be created for recursion to reduce all to one.
 if (GRange < outTileSize) outTileSize=GRange;
 /// creating the shared memory for calculating reduction.
-/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+/// This one is used to collect all the reduced value of shared memory as we don't have global barrier on GPU. Once it is saved we can
 /// recursively apply reduction on it in order to reduce the whole.
 auto temp_global_buffer =cl::sycl::buffer<CoeffReturnType, 1>(cl::sycl::range<1>(GRange));
 typedef typename Eigen::internal::remove_all<decltype(self.xprDims())>::type Dims;
@@ -150,7 +150,7 @@ struct InnerReducer<Self, Op, const Eigen::SyclDevice> {

 // getting final out buffer at the moment the created buffer is true because there is no need for assign
 /// creating the shared memory for calculating reduction.
-/// This one is used to collect all the reduced value of shared memory as we dont have global barrier on GPU. Once it is saved we can
+/// This one is used to collect all the reduced value of shared memory as we don't have global barrier on GPU. Once it is saved we can
 /// recursively apply reduction on it in order to reduce the whole.
 dev.parallel_for_setup(num_coeffs_to_preserve, tileSize, range, GRange);
 dev.sycl_queue().submit([&](cl::sycl::handler &cgh) {
@@ -31,7 +31,7 @@ class TensorLazyBaseEvaluator {
 int refCount() const { return m_refcount; }

 private:
-// No copy, no assigment;
+// No copy, no assignment;
 TensorLazyBaseEvaluator(const TensorLazyBaseEvaluator& other);
 TensorLazyBaseEvaluator& operator = (const TensorLazyBaseEvaluator& other);
@@ -117,7 +117,7 @@ SYCLEXTRFUNCTERNARY()



-//TensorCustomOp must be specialised otherewise it will be captured by UnaryCategory while its action is different
+//TensorCustomOp must be specialised otherwise it will be captured by UnaryCategory while its action is different
 //from the UnaryCategory and it is similar to the general FunctorExtractor.
 /// specialisation of TensorCustomOp
 #define SYCLEXTRFUNCCUSTOMUNARYOP(CVQual)\
@@ -80,7 +80,7 @@ template < typename HostExpr, typename FunctorExpr, typename Tuple_of_Acc, typen
 typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
 auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
 /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
-/// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+/// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
 /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
 const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
 /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -121,7 +121,7 @@ class ReductionFunctor<HostExpr, FunctorExpr, Tuple_of_Acc, Dims, Eigen::interna
 typedef typename ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
 auto device_expr = createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
 /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
-/// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+/// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
 /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
 const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, functor);
 /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -168,7 +168,7 @@ public:
 typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
 auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
 /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
-/// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+/// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
 /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
 const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
 /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -215,7 +215,7 @@ public:
 typedef typename TensorSycl::internal::ConvertToDeviceExpression<const HostExpr>::Type DevExpr;
 auto device_expr = TensorSycl::internal::createDeviceExpression<DevExpr, PlaceHolderExpr>(functors, tuple_of_accessors);
 /// reduction cannot be captured automatically through our device conversion recursion. The reason is that reduction has two behaviour
-/// the first behaviour is when it is used as a root to lauch the sub-kernel. The second one is when it is treated as a leafnode to pass the
+/// the first behaviour is when it is used as a root to launch the sub-kernel. The second one is when it is treated as a leafnode to pass the
 /// calculated result to its parent kernel. While the latter is automatically detected through our device expression generator. The former is created here.
 const auto device_self_expr= Eigen::TensorReductionOp<Op, Dims, decltype(device_expr.expr) ,MakeGlobalPointer>(device_expr.expr, dims, op);
 /// This is the evaluator for device_self_expr. This is exactly similar to the self which has been passed to run function. The difference is
@@ -143,7 +143,7 @@ struct IndexList {};
 /// \brief Collects internal details for generating index ranges [MIN, MAX)
 /// Declare primary template for index range builder
 /// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
 /// \tparam Is... are the list of generated index so far
 template <size_t MIN, size_t N, size_t... Is>
 struct RangeBuilder;
@@ -161,7 +161,7 @@ struct RangeBuilder<MIN, MIN, Is...> {
 /// in this case we are recursively subtracting N by one and adding one
 /// index to Is... list until MIN==N
 /// \tparam MIN is the starting index in the tuple
-/// \tparam N represents sizeof..(elemens)- sizeof...(Is)
+/// \tparam N represents sizeof..(elements)- sizeof...(Is)
 /// \tparam Is... are the list of generated index so far
 template <size_t MIN, size_t N, size_t... Is>
 struct RangeBuilder : public RangeBuilder<MIN, N - 1, N - 1, Is...> {};
@@ -568,7 +568,7 @@ struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, D

 Dimensions m_dimensions;

-// Parameters passed to the costructor.
+// Parameters passed to the constructor.
 Index m_plane_strides;
 Index m_row_strides;
 Index m_col_strides;
@@ -241,7 +241,7 @@ struct dimino_first_step_elements
 * multiplying all elements in the given subgroup with the new
 * coset representative. Note that the first element of the
 * subgroup is always the identity element, so the first element of
-* ther result of this template is going to be the coset
+* the result of this template is going to be the coset
 * representative itself.
 *
 * Note that this template accepts an additional boolean parameter
@@ -33,10 +33,10 @@ namespace Eigen {
 // ec.Notify(true);
 //
 // Notify is cheap if there are no waiting threads. Prewait/CommitWait are not
-// cheap, but they are executed only if the preceeding predicate check has
+// cheap, but they are executed only if the preceding predicate check has
 // failed.
 //
-// Algorihtm outline:
+// Algorithm outline:
 // There are two main variables: predicate (managed by user) and state_.
 // Operation closely resembles Dekker mutual algorithm:
 // https://en.wikipedia.org/wiki/Dekker%27s_algorithm
@@ -79,7 +79,7 @@ class EventCount {
 uint64_t state = state_.load(std::memory_order_seq_cst);
 for (;;) {
 if (int64_t((state & kEpochMask) - epoch) < 0) {
-// The preceeding waiter has not decided on its fate. Wait until it
+// The preceding waiter has not decided on its fate. Wait until it
 // calls either CancelWait or CommitWait, or is notified.
 EIGEN_THREAD_YIELD();
 state = state_.load(std::memory_order_seq_cst);
@@ -110,7 +110,7 @@ class EventCount {
 uint64_t state = state_.load(std::memory_order_relaxed);
 for (;;) {
 if (int64_t((state & kEpochMask) - epoch) < 0) {
-// The preceeding waiter has not decided on its fate. Wait until it
+// The preceding waiter has not decided on its fate. Wait until it
 // calls either CancelWait or CommitWait, or is notified.
 EIGEN_THREAD_YIELD();
 state = state_.load(std::memory_order_relaxed);
@@ -198,7 +198,7 @@ class RunQueue {
 };
 std::mutex mutex_;
 // Low log(kSize) + 1 bits in front_ and back_ contain rolling index of
-// front/back, repsectively. The remaining bits contain modification counters
+// front/back, respectively. The remaining bits contain modification counters
 // that are incremented on Push operations. This allows us to (1) distinguish
 // between empty and full conditions (if we would use log(kSize) bits for
 // position, these conditions would be indistinguishable); (2) obtain
@@ -219,7 +219,7 @@ template<class T, std::size_t N> struct array_size<const array<T,N>& > {

 #else

-// The compiler supports c++11, and we're not targetting cuda: use std::array as Eigen::array
+// The compiler supports c++11, and we're not targeting cuda: use std::array as Eigen::array
 #include <array>
 namespace Eigen {
@@ -35,7 +35,7 @@
 * a zero for the system (Powell hybrid "dogleg" method).
 *
 * This code is a port of minpack (http://en.wikipedia.org/wiki/MINPACK).
-* Minpack is a very famous, old, robust and well-reknown package, written in
+* Minpack is a very famous, old, robust and well renowned package, written in
 * fortran. Those implementations have been carefully tuned, tested, and used
 * for several decades.
 *
@@ -63,7 +63,7 @@
 * Other tests were added by myself at the very beginning of the
 * process and check the results for levenberg-marquardt using the reference data
 * on http://www.itl.nist.gov/div898/strd/nls/nls_main.shtml. Since then i've
-* carefully checked that the same results were obtained when modifiying the
+* carefully checked that the same results were obtained when modifying the
 * code. Please note that we do not always get the exact same decimals as they do,
 * but this is ok : they use 128bits float, and we do the tests using the C type 'double',
 * which is 64 bits on most platforms (x86 and amd64, at least).
@@ -25,7 +25,7 @@ namespace Eigen {
 *
 * This module provides wrapper functions for a couple of OpenGL functions
 * which simplify the way to pass Eigen's object to openGL.
-* Here is an exmaple:
+* Here is an example:
 *
 * \code
 * // You need to add path_to_eigen/unsupported to your include path.
@@ -170,7 +170,7 @@ private:
 typedef internal::vector_int_pair<Scalar, Dim> VIPair;
 typedef std::vector<VIPair, aligned_allocator<VIPair> > VIPairList;
 typedef Matrix<Scalar, Dim, 1> VectorType;
-struct VectorComparator //compares vectors, or, more specificall, VIPairs along a particular dimension
+struct VectorComparator //compares vectors, or more specifically, VIPairs along a particular dimension
 {
 VectorComparator(int inDim) : dim(inDim) {}
 inline bool operator()(const VIPair &v1, const VIPair &v2) const { return v1.first[dim] < v2.first[dim]; }
@@ -300,7 +300,7 @@ public:

 /** \brief Reports whether previous computation was successful.
 *
-* \returns \c Success if computation was succesful, \c NoConvergence otherwise.
+* \returns \c Success if computation was successful, \c NoConvergence otherwise.
 */
 ComputationInfo info() const
 {
@@ -12,7 +12,7 @@

 namespace Eigen
 {
-// Forward declerations
+// Forward declarations
 template <typename _Scalar, class _System>
 class EulerAngles;
@@ -99,7 +99,7 @@ void pseudo_inverse(const CMatrix &C, CINVMatrix &CINV)
 /** \ingroup IterativeSolvers_Module
 * Constrained conjugate gradient
 *
-* Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the contraint \f$ Cx \le f \f$
+* Computes the minimum of \f$ 1/2((Ax).x) - bx \f$ under the constraint \f$ Cx \le f \f$
 */
 template<typename TMatrix, typename CMatrix,
 typename VectorX, typename VectorB, typename VectorF>
@@ -214,7 +214,7 @@ class DGMRES : public IterativeSolverBase<DGMRES<_MatrixType,_Preconditioner> >
 void dgmresInitDeflation(Index& rows) const;
 mutable DenseMatrix m_V; // Krylov basis vectors
 mutable DenseMatrix m_H; // Hessenberg matrix
-mutable DenseMatrix m_Hes; // Initial hessenberg matrix wihout Givens rotations applied
+mutable DenseMatrix m_Hes; // Initial hessenberg matrix without Givens rotations applied
 mutable Index m_restart; // Maximum size of the Krylov subspace
 mutable DenseMatrix m_U; // Vectors that form the basis of the invariant subspace
 mutable DenseMatrix m_MU; // matrix operator applied to m_U (for next cycles)
|
||||
m_H.resize(m_restart+1, m_restart);
|
||||
m_Hes.resize(m_restart, m_restart);
|
||||
m_V.resize(n,m_restart+1);
|
||||
//Initial residual vector and intial norm
|
||||
//Initial residual vector and initial norm
|
||||
x = precond.solve(x);
|
||||
r0 = rhs - mat * x;
|
||||
RealScalar beta = r0.norm();
|
||||
|
@@ -73,7 +73,7 @@ void lmqrsolv(
 qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
 wa[k] = temp;

-/* accumulate the tranformation in the row of s. */
+/* accumulate the transformation in the row of s. */
 for (i = k+1; i<n; ++i) {
 temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
 sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
@@ -233,9 +233,9 @@ class LevenbergMarquardt : internal::no_assignment_operator

 /**
 * \brief Reports whether the minimization was successful
-* \returns \c Success if the minimization was succesful,
+* \returns \c Success if the minimization was successful,
 * \c NumericalIssue if a numerical problem arises during the
-* minimization process, for exemple during the QR factorization
+* minimization process, for example during the QR factorization
 * \c NoConvergence if the minimization did not converge after
 * the maximum number of function evaluation allowed
 * \c InvalidInput if the input matrix is invalid
@@ -313,7 +313,7 @@ struct matrix_exp_computeUV<MatrixType, long double>
 matrix_exp_pade17(A, U, V);
 }

-#elif LDBL_MANT_DIG <= 112 // quadruple precison
+#elif LDBL_MANT_DIG <= 112 // quadruple precision

 if (l1norm < 1.639394610288918690547467954466970e-005L) {
 matrix_exp_pade3(arg, U, V);
@@ -81,7 +81,7 @@ class MatrixPowerParenthesesReturnValue : public ReturnByValue< MatrixPowerParen
 *
 * \note Currently this class is only used by MatrixPower. One may
 * insist that this be nested into MatrixPower. This class is here to
-* faciliate future development of triangular matrix functions.
+* facilitate future development of triangular matrix functions.
 */
 template<typename MatrixType>
 class MatrixPowerAtomic : internal::noncopyable
@@ -61,7 +61,7 @@ void qrsolv(
 qtbpj = -givens.s() * wa[k] + givens.c() * qtbpj;
 wa[k] = temp;

-/* accumulate the tranformation in the row of s. */
+/* accumulate the transformation in the row of s. */
 for (i = k+1; i<n; ++i) {
 temp = givens.c() * s(i,k) + givens.s() * sdiag[i];
 sdiag[i] = -givens.s() * s(i,k) + givens.c() * sdiag[i];
@@ -22,7 +22,7 @@ void r1updt(
 Scalar temp;
 JacobiRotation<Scalar> givens;

-// r1updt had a broader usecase, but we dont use it here. And, more
+// r1updt had a broader usecase, but we don't use it here. And, more
 // importantly, we can not test it.
 eigen_assert(m==n);
 eigen_assert(u.size()==m);
@@ -104,7 +104,7 @@ class companion
 /** Helper function for the balancing algorithm.
 * \returns true if the row and the column, having colNorm and rowNorm
 * as norms, are balanced, false otherwise.
-* colB and rowB are repectively the multipliers for
+* colB and rowB are respectively the multipliers for
 * the column and the row in order to balance them.
 * */
 bool balanced( RealScalar colNorm, RealScalar rowNorm,
@@ -113,7 +113,7 @@ class companion
 /** Helper function for the balancing algorithm.
 * \returns true if the row and the column, having colNorm and rowNorm
 * as norms, are balanced, false otherwise.
-* colB and rowB are repectively the multipliers for
+* colB and rowB are respectively the multipliers for
 * the column and the row in order to balance them.
 * */
 bool balancedR( RealScalar colNorm, RealScalar rowNorm,
@@ -41,7 +41,7 @@ public:

 /** Sets the relative threshold value used to prune zero coefficients during the decomposition.
 *
-* Setting a value greater than zero speeds up computation, and yields to an imcomplete
+* Setting a value greater than zero speeds up computation, and yields to an incomplete
 * factorization with fewer non zero coefficients. Such approximate factors are especially
 * useful to initialize an iterative solver.
 *
@@ -206,26 +206,26 @@ public:
 if (col > row) //upper matrix
 {
 const Index minOuterIndex = inner - m_data.upperProfile(inner);
-eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
 }
 if (col < row) //lower matrix
 {
 const Index minInnerIndex = outer - m_data.lowerProfile(outer);
-eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
 }
 } else {
 if (outer > inner) //upper matrix
 {
 const Index maxOuterIndex = inner + m_data.upperProfile(inner);
-eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
 }
 if (outer < inner) //lower matrix
 {
 const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
-eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
 }
 }
@@ -300,11 +300,11 @@ public:

 if (IsRowMajor) {
 const Index minInnerIndex = outer - m_data.lowerProfile(outer);
-eigen_assert(inner >= minInnerIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(inner >= minInnerIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.lower(m_rowStartIndex[outer] + inner - (outer - m_data.lowerProfile(outer)));
 } else {
 const Index maxInnerIndex = outer + m_data.lowerProfile(outer);
-eigen_assert(inner <= maxInnerIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(inner <= maxInnerIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.lower(m_rowStartIndex[outer] + (inner - outer));
 }
 }
@@ -336,11 +336,11 @@ public:

 if (IsRowMajor) {
 const Index minOuterIndex = inner - m_data.upperProfile(inner);
-eigen_assert(outer >= minOuterIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(outer >= minOuterIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.upper(m_colStartIndex[inner] + outer - (inner - m_data.upperProfile(inner)));
 } else {
 const Index maxOuterIndex = inner + m_data.upperProfile(inner);
-eigen_assert(outer <= maxOuterIndex && "you try to acces a coeff that do not exist in the storage");
+eigen_assert(outer <= maxOuterIndex && "You tried to access a coeff that does not exist in the storage");
 return this->m_data.upper(m_colStartIndex[inner] + (outer - inner));
 }
 }
@@ -187,7 +187,7 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
 /** Does nothing: provided for compatibility with SparseMatrix */
 inline void finalize() {}

-/** Suppress all nonzeros which are smaller than \a reference under the tolerence \a epsilon */
+/** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
 void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
 {
 for (Index j=0; j<outerSize(); ++j)
@@ -224,21 +224,21 @@ template<typename _Scalar, int _Options, typename _StorageIndex>
 }
 }

-/** The class DynamicSparseMatrix is deprectaed */
+/** The class DynamicSparseMatrix is deprecated */
 EIGEN_DEPRECATED inline DynamicSparseMatrix()
 : m_innerSize(0), m_data(0)
 {
 eigen_assert(innerSize()==0 && outerSize()==0);
 }

-/** The class DynamicSparseMatrix is deprectaed */
+/** The class DynamicSparseMatrix is deprecated */
 EIGEN_DEPRECATED inline DynamicSparseMatrix(Index rows, Index cols)
 : m_innerSize(0)
 {
 resize(rows, cols);
 }

-/** The class DynamicSparseMatrix is deprectaed */
+/** The class DynamicSparseMatrix is deprecated */
 template<typename OtherDerived>
 EIGEN_DEPRECATED explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
 : m_innerSize(0)
@@ -104,7 +104,7 @@ namespace internal
 out << value.real << " " << value.imag()<< "\n";
 }

-} // end namepsace internal
+} // end namespace internal

 inline bool getMarketHeader(const std::string& filename, int& sym, bool& iscomplex, bool& isvector)
 {
@@ -181,7 +181,7 @@ namespace Eigen
 * \ingroup Splines_Module
 *
 * \param[in] pts The data points to which a spline should be fit.
-* \param[out] chord_lengths The resulting chord lenggth vector.
+* \param[out] chord_lengths The resulting chord length vector.
 *
 * \sa Les Piegl and Wayne Tiller, The NURBS book (2nd ed.), 1997, 9.2.1 Global Curve Interpolation to Point Data
 **/
@@ -20,7 +20,7 @@ However, it:
 - must rely on Eigen,
 - must be highly related to math,
 - should have some general purpose in the sense that it could
-potentially become an offical Eigen module (or be merged into another one).
+potentially become an official Eigen module (or be merged into another one).

 In doubt feel free to contact us. For instance, if your addons is very too specific
 but it shows an interesting way of using Eigen, then it could be a nice demo.
@@ -70,7 +70,7 @@ void bench_svd(const MatrixType& a = MatrixType())
 std::cout<< std::endl;
 timerJacobi.reset();
 timerBDC.reset();
-cout << " Computes rotaion matrix" <<endl;
+cout << " Computes rotation matrix" <<endl;
 for (int k=1; k<=NUMBER_SAMPLE; ++k)
 {
 timerBDC.start();
@@ -1,5 +1,5 @@
 # generate split test header file only if it does not yet exist
-# in order to prevent a rebuild everytime cmake is configured
+# in order to prevent a rebuild every time cmake is configured
 if(NOT EXISTS ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h)
 file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/split_test_helper.h "")
 foreach(i RANGE 1 999)
@@ -81,7 +81,7 @@ void check_limits_specialization()
 typedef std::numeric_limits<AD> A;
 typedef std::numeric_limits<Scalar> B;

-// workaround "unsed typedef" warning:
+// workaround "unused typedef" warning:
 VERIFY(!bool(internal::is_same<B, A>::value));

 #if EIGEN_HAS_CXX11
@@ -22,10 +22,10 @@

 using Eigen::Tensor;

-// Inflation Defenition for each dimention the inflated val would be
+// Inflation Definition for each dimension the inflated val would be
 //((dim-1)*strid[dim] +1)

-// for 1 dimnention vector of size 3 with value (4,4,4) with the inflated stride value of 3 would be changed to
+// for 1 dimension vector of size 3 with value (4,4,4) with the inflated stride value of 3 would be changed to
 // tensor of size (2*3) +1 = 7 with the value of
 // (4, 0, 0, 4, 0, 0, 4).
@@ -247,7 +247,7 @@ void test_cuda_trancendental() {
 }
 for (int i = 0; i < num_elem; ++i) {
 std::cout << "Checking elemwise log " << i << " input = " << input2(i) << " full = " << full_prec2(i) << " half = " << half_prec2(i) << std::endl;
-if(std::abs(input2(i)-1.f)<0.05f) // log lacks accurary nearby 1
+if(std::abs(input2(i)-1.f)<0.05f) // log lacks accuracy nearby 1
 VERIFY_IS_APPROX(full_prec2(i)+Eigen::half(0.1f), half_prec2(i)+Eigen::half(0.1f));
 else
 VERIFY_IS_APPROX(full_prec2(i), half_prec2(i));
@@ -37,7 +37,7 @@ void test_cuda_random_uniform()
 assert(cudaMemcpyAsync(out.data(), d_out, out_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess);
 assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess);

-// For now we just check thes code doesn't crash.
+// For now we just check this code doesn't crash.
 // TODO: come up with a valid test of randomness
 }
@@ -132,7 +132,7 @@ void test_forward_adolc()
 }

 {
-// simple instanciation tests
+// simple instantiation tests
 Matrix<adtl::adouble,2,1> x;
 foo(x);
 Matrix<adtl::adouble,Dynamic,Dynamic> A(4,4);;
@@ -8,7 +8,7 @@
 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


-// import basic and product tests for deprectaed DynamicSparseMatrix
+// import basic and product tests for deprecated DynamicSparseMatrix
 #define EIGEN_NO_DEPRECATED_WARNING
 #include "sparse_basic.cpp"
 #include "sparse_product.cpp"