From ecdd29e44ba3649f692a00a937893c5580fb5284 Mon Sep 17 00:00:00 2001 From: Will Brennan Date: Fri, 5 May 2017 18:47:59 +0100 Subject: [PATCH] fixed spelling mistakes in comments (#104) --- ...ediction_scheme_constrained_multi_parallelogram.h | 2 +- .../mesh_prediction_scheme_parallelogram.h | 2 +- .../mesh_prediction_scheme_tex_coords.h | 4 ++-- .../prediction_scheme_decoder_factory.h | 2 +- .../prediction_scheme_encoder_factory.h | 2 +- ...cheme_normal_octahedron_canonicalized_transform.h | 6 +++--- .../prediction_scheme_normal_octahedron_transform.h | 4 ++-- .../prediction_schemes/prediction_scheme_transform.h | 2 +- compression/config/encoder_options.h | 4 ++-- compression/encode.h | 4 ++-- compression/mesh/mesh_edgebreaker_decoder_impl.cc | 6 +++--- compression/mesh/mesh_edgebreaker_decoder_impl.h | 2 +- compression/mesh/mesh_edgebreaker_encoder_impl.cc | 2 +- compression/mesh/mesh_edgebreaker_shared.h | 10 +++++----- .../mesh_edgebreaker_traversal_predictive_encoder.h | 6 +++--- .../mesh_edgebreaker_traversal_valence_encoder.h | 2 +- compression/point_cloud/point_cloud_decoder.h | 4 ++-- compression/point_cloud/point_cloud_encoder.cc | 2 +- compression/point_cloud/point_cloud_encoder.h | 2 +- core/adaptive_rans_bit_coding_shared.h | 2 +- core/adaptive_rans_bit_decoder.h | 2 +- core/adaptive_rans_bit_encoder.cc | 2 +- core/bit_utils.h | 2 +- core/decoder_buffer.h | 6 +++--- core/direct_bit_decoder.h | 2 +- core/draco_index_type.h | 4 ++-- core/encoder_buffer.h | 2 +- core/rans_bit_decoder.h | 2 +- core/symbol_bit_decoder.h | 2 +- core/vector_d.h | 2 +- io/obj_decoder.cc | 2 +- io/obj_decoder.h | 2 +- io/obj_encoder.cc | 2 +- io/ply_encoder.cc | 2 +- mesh/corner_table.h | 12 ++++++------ mesh/corner_table_traversal_processor.h | 2 +- mesh/edgebreaker_traverser.h | 2 +- mesh/mesh.h | 2 +- mesh/mesh_are_equivalent.cc | 2 +- mesh/mesh_attribute_corner_table.h | 4 ++-- mesh/mesh_cleanup.cc | 2 +- point_cloud/geometry_attribute.h | 10 +++++----- point_cloud/point_attribute.cc | 2 +- point_cloud/point_cloud.cc | 2 +- point_cloud/point_cloud_builder_test.cc | 2 +- 45 files changed, 73 insertions(+), 73 deletions(-) diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h index 6acb3a2..1b1dd64 100644 --- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h +++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h @@ -123,7 +123,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogram< std::vector predicted_value; }; - // Bit-field used for computing permutations of exlcluded edges + // Bit-field used for computing permutations of excluded edges // (parallelograms). bool exluded_parallelograms[kMaxNumParallelograms]; diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h index b3a91e9..0437d07 100644 --- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h +++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h @@ -23,7 +23,7 @@ namespace draco { // Parallelogram prediction predicts an attribute value V from three vertices // on the opposite face to the predicted vertex. 
The values on the three // vertices are used to construct a parallelogram V' = O - A - B, where O is the -// value on the oppoiste vertex, and A, B are values on the shared vertices: +// value on the opposite vertex, and A, B are values on the shared vertices: // V // / \ // / \ diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h index d2eca2c..5567ca8 100644 --- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h +++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h @@ -29,7 +29,7 @@ typedef RAnsBitDecoder BinaryDecoder; typedef RAnsBitEncoder BinaryEncoder; // Prediction scheme designed for predicting texture coordinates from known -// spatial position of vertices. For good parameterizations, the ratios between +// spatial position of vertices. For good parametrization, the ratios between // triangle edge lengths should be about the same in both the spatial and UV // coordinate spaces, which makes the positions a good predictor for the UV // coordinates. @@ -243,7 +243,7 @@ void MeshPredictionSchemeTexCoords:: const Vector3f prev_pos = GetPositionForEntryId(prev_data_id); // Use the positions of the above triangle to predict the texture coordinate // on the tip corner C. - // Convert the triangle into a new coordinate system defined by orthoganal + // Convert the triangle into a new coordinate system defined by orthogonal // bases vectors S, T, where S is vector prev_pos - next_pos and T is an // perpendicular vector to S in the same plane as vector the // tip_pos - next_pos. diff --git a/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h b/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h index e152e4a..7b24b0e 100644 --- a/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h +++ b/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h @@ -37,7 +37,7 @@ CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id, // Cast the decoder to mesh decoder. This is not necessarily safe if there // is some other decoder decides to use TRIANGULAR_MESH as the return type, // but unfortunately there is not nice work around for this without using - // RTTI (double dispatch and similar conecepts will not work because of the + // RTTI (double dispatch and similar concepts will not work because of the // template nature of the prediction schemes). const MeshDecoder *const mesh_decoder = static_cast(decoder); diff --git a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h index af94f4e..1b699b9 100644 --- a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h +++ b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h @@ -47,7 +47,7 @@ CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id, // Cast the encoder to mesh encoder. This is not necessarily safe if there // is some other encoder decides to use TRIANGULAR_MESH as the return type, // but unfortunately there is not nice work around for this without using - // RTTI (double dispatch and similar conecepts will not work because of the + // RTTI (double dispatch and similar concepts will not work because of the // template nature of the prediction schemes). 
const MeshEncoder *const mesh_encoder = static_cast(encoder); diff --git a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h index 8cee145..71ae7cc 100644 --- a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h +++ b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h @@ -26,9 +26,9 @@ namespace draco { // The transform works on octahedral coordinates for normals. The square is // subdivided into four inner triangles (diamond) and four outer triangles. The -// inner trianlges are associated with the upper part of the octahedron and the +// inner triangles are associated with the upper part of the octahedron and the // outer triangles are associated with the lower part. -// Given a preditiction value P and the actual value Q that should be encoded, +// Given a prediction value P and the actual value Q that should be encoded, // this transform first checks if P is outside the diamond. If so, the outer // triangles are flipped towards the inside and vice versa. Then it checks if p // is in the bottom left quadrant. If it is not, it rotates p and q accordingly. @@ -36,7 +36,7 @@ namespace draco { // values. The inversion tends to result in shorter correction vectors and the // rotation makes it so that all long correction values are positive, reducing // the possible value range of the correction values and increasing the -// occurence of positive large correction values, which helps the entropy +// occurrences of positive large correction values, which helps the entropy // encoder. This is possible since P is also known by the decoder, see also // ComputeCorrection and ComputeOriginalValue functions. // Note that the tile is not periodic, which implies that the outer edges can diff --git a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h index 5cfb1c4..338c44e 100644 --- a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h +++ b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h @@ -26,9 +26,9 @@ namespace draco { // The transform works on octahedral coordinates for normals. The square is // subdivided into four inner triangles (diamond) and four outer triangles. The -// inner trianlges are associated with the upper part of the octahedron and the +// inner triangles are associated with the upper part of the octahedron and the // outer triangles are associated with the lower part. -// Given a preditiction value P and the actual value Q that should be encoded, +// Given a prediction value P and the actual value Q that should be encoded, // this transform first checks if P is outside the diamond. If so, the outer // triangles are flipped towards the inside and vice versa. The actual // correction value is then based on the mapped P and Q values. 
This tends to diff --git a/compression/attributes/prediction_schemes/prediction_scheme_transform.h b/compression/attributes/prediction_schemes/prediction_scheme_transform.h index e0a2593..b992087 100644 --- a/compression/attributes/prediction_schemes/prediction_scheme_transform.h +++ b/compression/attributes/prediction_schemes/prediction_scheme_transform.h @@ -37,7 +37,7 @@ class PredictionSchemeTransform { return PREDICTION_TRANSFORM_DELTA; } - // Performs any custom initialization of the trasnform for the encoder. + // Performs any custom initialization of the transform for the encoder. // |size| = total number of values in |orig_data| (i.e., number of entries * // number of components). void InitializeEncoding(const DataTypeT * /* orig_data */, int /* size */, diff --git a/compression/config/encoder_options.h b/compression/config/encoder_options.h index 10e905a..75dd413 100644 --- a/compression/config/encoder_options.h +++ b/compression/config/encoder_options.h @@ -21,7 +21,7 @@ namespace draco { -// Class encapsuling options used by PointCloudEncoder and its derived classes. +// Class encapsulating options used by PointCloudEncoder and its derived classes. // The encoder can be controller through three different options: // 1. Global options // 2. Per attribute options - i.e., options specific to a given attribute. @@ -34,7 +34,7 @@ class EncoderOptions { public: static EncoderOptions CreateDefaultOptions(); - // Sets the global options that serve to control the overal behavior of an + // Sets the global options that serve to control the overall behavior of an // encoder as well as a fallback for attribute options if they are not set. void SetGlobalOptions(const Options &o); Options *GetGlobalOptions() { return &global_options_; } diff --git a/compression/encode.h b/compression/encode.h index 12a2c6e..8c5a4d5 100644 --- a/compression/encode.h +++ b/compression/encode.h @@ -34,7 +34,7 @@ bool EncodeMeshToBuffer(const Mesh &m, const EncoderOptions &options, EncoderBuffer *out_buffer); // Creates default encoding options that contain a valid set of features that -// the encoder can use. Otherwise all options are left unitialized which results +// the encoder can use. Otherwise all options are left uninitialized which results // in a lossless compression. EncoderOptions CreateDefaultEncoderOptions(); @@ -93,7 +93,7 @@ void SetEncodingMethod(EncoderOptions *options, int encoding_method); // Sets the desired prediction method for a given attribute. By default, // prediction scheme is selected automatically by the encoder using other // provided options (such as speed) and input geometry type (mesh, point cloud). -// This function should be called only when a specific prediction is prefered +// This function should be called only when a specific prediction is preferred // (e.g., when it is known that the encoder would select a less optimal // prediction for the given input data). // diff --git a/compression/mesh/mesh_edgebreaker_decoder_impl.cc b/compression/mesh/mesh_edgebreaker_decoder_impl.cc index 350bcca..48b544f 100644 --- a/compression/mesh/mesh_edgebreaker_decoder_impl.cc +++ b/compression/mesh/mesh_edgebreaker_decoder_impl.cc @@ -619,7 +619,7 @@ int MeshEdgeBreakerDecoderImpl::DecodeConnectivity( // \ / \b/ \ / // *-------*-------* // - // TODO(ostava): The ciruclation below should be replaced by functions + // TODO(ostava): The circulation below should be replaced by functions // that can be reused elsewhere. 
CornerIndex corner_b = corner_table_->Previous(corner); while (corner_table_->Opposite(corner_b) >= 0) { @@ -660,7 +660,7 @@ int MeshEdgeBreakerDecoderImpl::DecodeConnectivity( } } if (num_faces != corner_table_->num_faces()) - return -1; // Unexcpected number of decoded faces. + return -1; // Unexpected number of decoded faces. vertex_id_map_.resize(num_vertices); return num_vertices; } @@ -847,7 +847,7 @@ bool MeshEdgeBreakerDecoderImpl::AssignPointsToCorners() { // Do a deduplication pass over the corners on the processed vertex. // At this point each corner corresponds to one point id and our goal is to // merge similar points into a single point id. - // We do one one pass in a clocwise direction over the corners and we add + // We do one one pass in a clockwise direction over the corners and we add // a new point id whenever one of the attributes change. c = deduplication_first_corner; // Create a new point. diff --git a/compression/mesh/mesh_edgebreaker_decoder_impl.h b/compression/mesh/mesh_edgebreaker_decoder_impl.h index 8c4f272..08faeef 100644 --- a/compression/mesh/mesh_edgebreaker_decoder_impl.h +++ b/compression/mesh/mesh_edgebreaker_decoder_impl.h @@ -76,7 +76,7 @@ class MeshEdgeBreakerDecoderImpl : public MeshEdgeBreakerDecoderImplInterface { // Returns the number of vertices created by the decoder or -1 on error. int DecodeConnectivity(int num_symbols); - // Returns true if the current symbol was part of a topolgy split event. This + // Returns true if the current symbol was part of a topology split event. This // means that the current face was connected to the left edge of a face // encoded with the TOPOLOGY_S symbol. |out_symbol_edge| can be used to // identify which edge of the source symbol was connected to the TOPOLOGY_S diff --git a/compression/mesh/mesh_edgebreaker_encoder_impl.cc b/compression/mesh/mesh_edgebreaker_encoder_impl.cc index 031b94f..e593b72 100644 --- a/compression/mesh/mesh_edgebreaker_encoder_impl.cc +++ b/compression/mesh/mesh_edgebreaker_encoder_impl.cc @@ -188,7 +188,7 @@ bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder( new SequentialAttributeEncodersController(std::move(sequencer), att_id)); // Update the mapping between the encoder id and the attribute data id. - // This will be used by the decoder to select the approperiate attribute + // This will be used by the decoder to select the appropriate attribute // decoder and the correct connectivity. attribute_encoder_to_data_id_map_.push_back(att_data_id); GetEncoder()->AddAttributesEncoder(std::move(att_controller)); diff --git a/compression/mesh/mesh_edgebreaker_shared.h b/compression/mesh/mesh_edgebreaker_shared.h index 4880974..71e4f4e 100644 --- a/compression/mesh/mesh_edgebreaker_shared.h +++ b/compression/mesh/mesh_edgebreaker_shared.h @@ -97,13 +97,13 @@ enum EdgeFaceName : uint8_t { LEFT_FACE_EDGE = 0, RIGHT_FACE_EDGE = 1 }; // Struct used for storing data about a source face that connects to an // already traversed face that was either the initial face or a face encoded -// with either toplogy S (split) symbol. Such connection can be only caused by +// with either topology S (split) symbol. Such connection can be only caused by // topology changes on the traversed surface (if its genus != 0, i.e. when the // surface has topological handles or holes). -// For each occurence of such event we always encode the split symbol id, source -// symbol id and source edge id (left, or right). 
There will be always exectly -// two occurences of this event for every topological handle on the traversed -// mesh and one occurence for a hole. +// For each occurrence of such event we always encode the split symbol id, source +// symbol id and source edge id (left, or right). There will be always exactly +// two occurrences of this event for every topological handle on the traversed +// mesh and one occurrence for a hole. struct TopologySplitEventData { int32_t split_symbol_id; int32_t source_symbol_id; diff --git a/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h b/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h index 508ca9b..3fbb889 100644 --- a/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h +++ b/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h @@ -22,7 +22,7 @@ namespace draco { // Encoder that tries to predict the edgebreaker traversal symbols based on the // vertex valences of the unencoded portion of the mesh. The current prediction // scheme assumes that each vertex has valence 6 which can be used to predict -// the symbol preceeding the one that is currently encoded. Predictions are +// the symbol preceding the one that is currently encoded. Predictions are // encoded using an arithmetic coding which can lead to less than 1 bit per // triangle encoding for highly regular meshes. class MeshEdgeBreakerTraversalPredictiveEncoder @@ -84,10 +84,10 @@ class MeshEdgeBreakerTraversalPredictiveEncoder // Whenever we reach a split symbol, mark its tip vertex as invalid by // setting the valence to a negative value. Any prediction that will // use this vertex will then cause a misprediction. This is currently - // necessary because the decodding works in the reverse direction and + // necessary because the decoding works in the reverse direction and // the decoder doesn't know about these vertices until the split // symbol is decoded at which point two vertices are merged into one. - // This can be most likely solved on the encoder side by spliting the + // This can be most likely solved on the encoder side by splitting the // tip vertex into two, but since split symbols are relatively rare, // it's probably not worth doing it. vertex_valences_[corner_table_->Vertex(last_corner_).value()] = -1; diff --git a/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h b/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h index f4b0278..00b6e84 100644 --- a/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h +++ b/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h @@ -63,7 +63,7 @@ class MeshEdgeBreakerTraversalValenceEncoder // Replicate the corner to vertex map from the corner table. We need to do // this because the map may get updated during encoding because we add new - // vertices when we encouter split symbols. + // vertices when we encounter split symbols. corner_to_vertex_map_.resize(corner_table_->num_corners()); for (CornerIndex i(0); i < corner_table_->num_corners(); ++i) { corner_to_vertex_map_[i] = corner_table_->Vertex(i); diff --git a/compression/point_cloud/point_cloud_decoder.h b/compression/point_cloud/point_cloud_decoder.h index 5fe50e4..24940c4 100644 --- a/compression/point_cloud/point_cloud_decoder.h +++ b/compression/point_cloud/point_cloud_decoder.h @@ -22,7 +22,7 @@ namespace draco { // Abstract base class for all point cloud and mesh decoders. It provides a -// basic funcionality that is shared between different decoders. 
+// basic functionality that is shared between different decoders. class PointCloudDecoder { public: PointCloudDecoder(); @@ -30,7 +30,7 @@ class PointCloudDecoder { virtual EncodedGeometryType GetGeometryType() const { return POINT_CLOUD; } - // Decodes a Draco header int othe provided |out_header|. + // Decodes a Draco header int other provided |out_header|. // Returns false on error. static bool DecodeHeader(DecoderBuffer *buffer, DracoHeader *out_header); diff --git a/compression/point_cloud/point_cloud_encoder.cc b/compression/point_cloud/point_cloud_encoder.cc index ed1c84c..5a5bcb0 100644 --- a/compression/point_cloud/point_cloud_encoder.cc +++ b/compression/point_cloud/point_cloud_encoder.cc @@ -153,7 +153,7 @@ bool PointCloudEncoder::RearrangeAttributesEncoders() { // Find the encoding order of the attribute encoders that is determined by // the parent dependencies between individual encoders. Instead of traversing // a graph we encode the attributes in multiple iterations where encoding of - // attributes that depend on other attributes may get posponed until the + // attributes that depend on other attributes may get postponed until the // parent attributes are processed. // This is simpler to implement than graph traversal and it automatically // detects any cycles in the dependency graph. diff --git a/compression/point_cloud/point_cloud_encoder.h b/compression/point_cloud/point_cloud_encoder.h index a36975f..8a79fd1 100644 --- a/compression/point_cloud/point_cloud_encoder.h +++ b/compression/point_cloud/point_cloud_encoder.h @@ -24,7 +24,7 @@ namespace draco { // Abstract base class for all point cloud and mesh encoders. It provides a -// basic funcionality that's shared between different encoders. +// basic functionality that's shared between different encoders. class PointCloudEncoder { public: PointCloudEncoder(); diff --git a/core/adaptive_rans_bit_coding_shared.h b/core/adaptive_rans_bit_coding_shared.h index 5a94b15..da9bdf4 100644 --- a/core/adaptive_rans_bit_coding_shared.h +++ b/core/adaptive_rans_bit_coding_shared.h @@ -30,7 +30,7 @@ inline uint8_t clamp_probability(double p) { return static_cast(p_int); } -// Update the probablity according to new incoming bit. +// Update the probability according to new incoming bit. inline double update_probability(double old_p, bool bit) { static constexpr double w = 128.0; static constexpr double w0 = (w - 1.0) / w; diff --git a/core/adaptive_rans_bit_decoder.h b/core/adaptive_rans_bit_decoder.h index 27fb42c..de40bb2 100644 --- a/core/adaptive_rans_bit_decoder.h +++ b/core/adaptive_rans_bit_decoder.h @@ -33,7 +33,7 @@ class AdaptiveRAnsBitDecoder { // Sets |source_buffer| as the buffer to decode bits from. bool StartDecoding(DecoderBuffer *source_buffer); - // Decode one bit. Returns true if the bit is a 1, otherwsie false. + // Decode one bit. Returns true if the bit is a 1, otherwise false. bool DecodeNextBit(); // Decode the next |nbits| and return the sequence in |value|. 
|nbits| must be diff --git a/core/adaptive_rans_bit_encoder.cc b/core/adaptive_rans_bit_encoder.cc index a6350d5..ac16d24 100644 --- a/core/adaptive_rans_bit_encoder.cc +++ b/core/adaptive_rans_bit_encoder.cc @@ -30,7 +30,7 @@ void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) { AnsCoder ans_coder; ans_write_init(&ans_coder, buffer.data()); - // Unfortunaetly we have to encode the bits in reversed order, while the + // Unfortunately we have to encode the bits in reversed order, while the // probabilities that should be given are those of the forward sequence. double p0_f = 0.5; std::vector p0s; diff --git a/core/bit_utils.h b/core/bit_utils.h index 7a1d6f3..05c453a 100644 --- a/core/bit_utils.h +++ b/core/bit_utils.h @@ -50,7 +50,7 @@ inline void CopyBits32(uint32_t *dst, int dst_offset, uint32_t src, // Returns the most location of the most significant bit in the input integer // |n|. -// The funcionality is not defined for |n == 0|. +// The functionality is not defined for |n == 0|. inline int MostSignificantBit(uint32_t n) { #if defined(__GNUC__) return 31 ^ __builtin_clz(n); diff --git a/core/decoder_buffer.h b/core/decoder_buffer.h index e915291..8ed363c 100644 --- a/core/decoder_buffer.h +++ b/core/decoder_buffer.h @@ -33,7 +33,7 @@ class DecoderBuffer { DecoderBuffer &operator=(const DecoderBuffer &buf) = default; // Sets the buffer's internal data. Note that no copy of the input data is - // made so the data owner needs to keep the data valid and unchaged for + // made so the data owner needs to keep the data valid and unchanged for // runtime of the decoder. void Init(const char *data, size_t data_size); @@ -48,7 +48,7 @@ class DecoderBuffer { void EndBitDecoding(); // Decodes up to 32 bits into out_val. Can be called only in between - // StartBitDecoding and EndBitDeoding. Otherwise returns false. + // StartBitDecoding and EndBitDecoding. Otherwise returns false. bool DecodeLeastSignificantBits32(int nbits, uint32_t *out_value) { if (!bit_decoder_active()) return false; @@ -95,7 +95,7 @@ class DecoderBuffer { // Discards #bytes from the input buffer. void Advance(int64_t bytes) { pos_ += bytes; } - // Moves the parsing position to a specific offset from the beggining of the + // Moves the parsing position to a specific offset from the beginning of the // input data. void StartDecodingFrom(int64_t offset) { pos_ = offset; } diff --git a/core/direct_bit_decoder.h b/core/direct_bit_decoder.h index 44cc158..4653f22 100644 --- a/core/direct_bit_decoder.h +++ b/core/direct_bit_decoder.h @@ -30,7 +30,7 @@ class DirectBitDecoder { // Sets |source_buffer| as the buffer to decode bits from. bool StartDecoding(DecoderBuffer *source_buffer); - // Decode one bit. Returns true if the bit is a 1, otherwsie false. + // Decode one bit. Returns true if the bit is a 1, otherwise false. bool DecodeNextBit() { const uint32_t selector = 1 << (31 - num_used_bits_); const bool bit = *pos_ & selector; diff --git a/core/draco_index_type.h b/core/draco_index_type.h index c9b90bb..71f32c4 100644 --- a/core/draco_index_type.h +++ b/core/draco_index_type.h @@ -45,7 +45,7 @@ // // Strongly typed indices support most of the common binary and unary // operators and support for additional operators can be added if -// necesssary. +// necessary. #ifndef DRACO_CORE_DRACO_INDEX_TYPE_H_ #define DRACO_CORE_DRACO_INDEX_TYPE_H_ @@ -166,7 +166,7 @@ std::ostream &operator<<(std::ostream &os, IndexType index) { } // namespace draco -// Specialize std::hash for the stongly indexed types. 
+// Specialize std::hash for the strongly indexed types. namespace std { template diff --git a/core/encoder_buffer.h b/core/encoder_buffer.h index cda527d..d7d487a 100644 --- a/core/encoder_buffer.h +++ b/core/encoder_buffer.h @@ -23,7 +23,7 @@ namespace draco { // Class representing a buffer that can be used for either for byte-aligned -// encoding of arbitrary data structures or for encoding of varialble-length +// encoding of arbitrary data structures or for encoding of variable-length // bit data. class EncoderBuffer { public: diff --git a/core/rans_bit_decoder.h b/core/rans_bit_decoder.h index 474435b..497e179 100644 --- a/core/rans_bit_decoder.h +++ b/core/rans_bit_decoder.h @@ -33,7 +33,7 @@ class RAnsBitDecoder { // Returns false when the data is invalid. bool StartDecoding(DecoderBuffer *source_buffer); - // Decode one bit. Returns true if the bit is a 1, otherwsie false. + // Decode one bit. Returns true if the bit is a 1, otherwise false. bool DecodeNextBit(); // Decode the next |nbits| and return the sequence in |value|. |nbits| must be diff --git a/core/symbol_bit_decoder.h b/core/symbol_bit_decoder.h index 47460d4..a6b8b84 100644 --- a/core/symbol_bit_decoder.h +++ b/core/symbol_bit_decoder.h @@ -16,7 +16,7 @@ class SymbolBitDecoder { // Sets |source_buffer| as the buffer to decode bits from. bool StartDecoding(DecoderBuffer *source_buffer); - // Decode one bit. Returns true if the bit is a 1, otherwsie false. + // Decode one bit. Returns true if the bit is a 1, otherwise false. bool DecodeNextBit(); // Decode the next |nbits| and return the sequence in |value|. |nbits| must be diff --git a/core/vector_d.h b/core/vector_d.h index c8f8159..dfd9039 100644 --- a/core/vector_d.h +++ b/core/vector_d.h @@ -179,7 +179,7 @@ CoeffT SquaredDistance(const VectorD v1, const VectorD v2) { CoeffT difference; CoeffT squared_distance = 0; - // Check each index seperately so difference is never negative and underflow + // Check each index separately so difference is never negative and underflow // is avoided for unsigned types. for (int i = 0; i < dimension_t; ++i) { if (v1[i] >= v2[i]) { diff --git a/io/obj_decoder.cc b/io/obj_decoder.cc index af4e098..295a2ea 100644 --- a/io/obj_decoder.cc +++ b/io/obj_decoder.cc @@ -356,7 +356,7 @@ bool ObjDecoder::ParseFace(bool *error) { ++num_obj_faces_; } } else { - // We are in the couting mode. + // We are in the counting mode. // We need to determine how many triangles are in the obj face. // Go over the line and check how many gaps there are between non-empty // sub-strings. diff --git a/io/obj_decoder.h b/io/obj_decoder.h index 15790d7..d3eee16 100644 --- a/io/obj_decoder.h +++ b/io/obj_decoder.h @@ -41,7 +41,7 @@ class ObjDecoder { bool DecodeFromBuffer(DecoderBuffer *buffer, PointCloud *out_point_cloud); // Flag that can be used to turn on/off deduplication of input values. - // This should be disabled only when we are sure that the input data doesn not + // This should be disabled only when we are sure that the input data does not // contain any duplicate entries. // Default: true void set_deduplicate_input_values(bool v) { deduplicate_input_values_ = v; } diff --git a/io/obj_encoder.cc b/io/obj_encoder.cc index ad4920f..653265e 100644 --- a/io/obj_encoder.cc +++ b/io/obj_encoder.cc @@ -30,7 +30,7 @@ bool ObjEncoder::EncodeToFile(const PointCloud &pc, const std::string &file_name) { std::ofstream file(file_name); if (!file) - return false; // File coulnd't be opened. + return false; // File could not be opened. // Encode the mesh into a buffer. 
EncoderBuffer buffer; if (!EncodeToBuffer(pc, &buffer)) diff --git a/io/ply_encoder.cc b/io/ply_encoder.cc index 631bf34..9b8aaba 100644 --- a/io/ply_encoder.cc +++ b/io/ply_encoder.cc @@ -26,7 +26,7 @@ bool PlyEncoder::EncodeToFile(const PointCloud &pc, const std::string &file_name) { std::ofstream file(file_name, std::ios::binary); if (!file) - return false; // File coulnd't be opened. + return false; // File couldn't be opened. // Encode the mesh into a buffer. EncoderBuffer buffer; if (!EncodeToBuffer(pc, &buffer)) diff --git a/mesh/corner_table.h b/mesh/corner_table.h index 0a98f96..1f58ac1 100644 --- a/mesh/corner_table.h +++ b/mesh/corner_table.h @@ -53,7 +53,7 @@ class CornerTable { const IndexTypeVector &faces); // Initializes the CornerTable from provides set of indexed faces. - // The input faces can represent a non-manifold topolgy, in which case the + // The input faces can represent a non-manifold topology, in which case the // non-manifold edges and vertices are going to be split. bool Initialize(const IndexTypeVector &faces); @@ -157,7 +157,7 @@ class CornerTable { return Next(Opposite(Next(corner))); } - // Get opposite corners on the left and right faces respecitively (see image + // Get opposite corners on the left and right faces respectively (see image // below, where L and R are the left and right corners of a corner X. // // *-------*-------* @@ -177,7 +177,7 @@ class CornerTable { } // Returns the number of new vertices that were created as a result of - // spliting of non-manifold vertices of the input geometry. + // splitting of non-manifold vertices of the input geometry. int NumNewVertices() const { return num_vertices() - num_original_vertices_; } int NumOriginalVertices() const { return num_original_vertices_; } @@ -206,7 +206,7 @@ class CornerTable { SetOppositeCorner(corner_1, corner_0); } - // Updates mapping betweeh a corner and a vertex. + // Updates mapping between a corner and a vertex. inline void MapCornerToVertex(CornerIndex corner_id, VertexIndex vert_id) { const FaceIndex face = Face(corner_id); faces_[face][LocalIndex(corner_id)] = vert_id; @@ -284,7 +284,7 @@ class CornerTable { bool ComputeOppositeCorners(int *num_vertices); // Computes the lookup map for going from a vertex to a corner. This method - // can handle non-manifold vertices by spliting them into multiple manifold + // can handle non-manifold vertices by splitting them into multiple manifold // vertices. bool ComputeVertexCorners(int num_vertices); @@ -412,7 +412,7 @@ class FaceAdjacencyIterator // Returns true when all adjacent faces have been visited. bool End() const { return corner_ < 0; } - // Proceeds to the next adjacen face if possible. + // Proceeds to the next adjacent face if possible. void Next() { FindNextFaceNeighbor(); } // std::iterator interface. diff --git a/mesh/corner_table_traversal_processor.h b/mesh/corner_table_traversal_processor.h index b33798c..d9f33eb 100644 --- a/mesh/corner_table_traversal_processor.h +++ b/mesh/corner_table_traversal_processor.h @@ -19,7 +19,7 @@ namespace draco { -// Class providing the basic traversal funcionality needed by traversers (such +// Class providing the basic traversal functionality needed by traverses (such // as the EdgeBreakerTraverser, see edgebreaker_traverser.h). It is used to // return the corner table that is used for the traversal, plus it provides a // basic book-keeping of visited faces and vertices during the traversal. 
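
[Editor's illustrative aside, not part of the patch: the corner_table.h hunk above quotes the corner-navigation expression Next(Opposite(Next(corner))) alongside the comment about opposite corners on the left and right faces. The sketch below shows, under simplified assumptions, how such an expression swings around a corner's vertex on a minimal corner table. All names (MiniCornerTable, Swing, kInvalidCorner) and the table layout are stand-ins chosen for illustration, not Draco's actual CornerTable API.]

    // Minimal, self-contained sketch (illustrative only). Corners of face f
    // are 3*f, 3*f+1, 3*f+2; Next/Previous cycle within a face and Opposite
    // jumps across the shared edge, so Next(Opposite(Next(c))) reaches the
    // neighboring corner around the same vertex, as in the hunk quoted above.
    #include <cstdint>
    #include <vector>

    using CornerIndex = int32_t;
    constexpr CornerIndex kInvalidCorner = -1;

    struct MiniCornerTable {
      // opposite_[c] is the corner across the edge opposite to corner c,
      // or kInvalidCorner on a mesh boundary.
      std::vector<CornerIndex> opposite_;

      static CornerIndex Next(CornerIndex c) {
        return c < 0 ? c : (c % 3 == 2 ? c - 2 : c + 1);
      }
      static CornerIndex Previous(CornerIndex c) {
        return c < 0 ? c : (c % 3 == 0 ? c + 2 : c - 1);
      }
      CornerIndex Opposite(CornerIndex c) const {
        return c < 0 ? c : opposite_[c];
      }
      // Swing to the adjacent corner attached to the same vertex (one
      // direction); returns kInvalidCorner when the swing crosses a boundary.
      CornerIndex Swing(CornerIndex c) const {
        const CornerIndex opp = Opposite(Next(c));
        return opp == kInvalidCorner ? kInvalidCorner : Next(opp);
      }
    };

[Repeatedly calling Swing from any corner visits the corners around that corner's vertex until a boundary or the starting corner is reached, which is the book-keeping pattern the traversal-processor comment above relies on. End of aside.]
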
diff --git a/mesh/edgebreaker_traverser.h b/mesh/edgebreaker_traverser.h index d7467a7..fa656f4 100644 --- a/mesh/edgebreaker_traverser.h +++ b/mesh/edgebreaker_traverser.h @@ -27,7 +27,7 @@ namespace draco { // arguments TraversalProcessorT, TraversalObserverT and EdgeBreakerObserverT. // TraversalProcessorT is used to provide infrastructure for handling of visited // vertices and faces, TraversalObserverT can be used to implement custom -// callbacks for varous traversal events, and EdgeBreakerObserverT can be used +// callbacks for various traversal events, and EdgeBreakerObserverT can be used // to provide handling of edgebreaker symbols. // TraversalProcessorT needs to define the type of the corner table as: // diff --git a/mesh/mesh.h b/mesh/mesh.h index d2e19b2..48cc581 100644 --- a/mesh/mesh.h +++ b/mesh/mesh.h @@ -55,7 +55,7 @@ class Mesh : public PointCloud { } // Sets the total number of faces. Creates new empty faces or deletes - // existings ones if necessary. + // existing ones if necessary. void SetNumFaces(size_t num_faces) { faces_.resize(num_faces, Face()); } FaceIndex::ValueType num_faces() const { return faces_.size(); } diff --git a/mesh/mesh_are_equivalent.cc b/mesh/mesh_are_equivalent.cc index 8e31c0f..304cdf0 100644 --- a/mesh/mesh_are_equivalent.cc +++ b/mesh/mesh_are_equivalent.cc @@ -135,7 +135,7 @@ bool MeshAreEquivalent::operator()(const Mesh &mesh0, const Mesh &mesh1) { DCHECK(att0->IsValid()); DCHECK(att1->IsValid()); - // Prepare blocks of memomry to hold data of corners for this attribute. + // Prepare blocks of memory to hold data of corners for this attribute. std::unique_ptr data0(new uint8_t[att0->byte_stride()]); std::unique_ptr data1(new uint8_t[att0->byte_stride()]); diff --git a/mesh/mesh_attribute_corner_table.h b/mesh/mesh_attribute_corner_table.h index ed6ec44..bde4677 100644 --- a/mesh/mesh_attribute_corner_table.h +++ b/mesh/mesh_attribute_corner_table.h @@ -22,7 +22,7 @@ namespace draco { // Class for storing connectivity of mesh attributes. The connectivity is stored // as a difference from the base mesh's corner table, where the differences are -// represnted by attribute seam edges. This class provides a basic funcionality +// represented by attribute seam edges. This class provides a basic functionality // for detecting the seam edges for a given attribute and for traversing the // constrained corner table with the seam edges. class MeshAttributeCornerTable { @@ -122,7 +122,7 @@ class MeshAttributeCornerTable { std::vector corner_to_vertex_map_; // Map between vertices and their associated left most corners. A left most - // corner is a corner that is adjecent to a boundary or an attribute seam from + // corner is a corner that is adjacent to a boundary or an attribute seam from // right (i.e., SwingLeft from that corner will return an invalid corner). If // no such corner exists for a given vertex, then any corner attached to the // vertex can be used. diff --git a/mesh/mesh_cleanup.cc b/mesh/mesh_cleanup.cc index 3091875..5906a73 100644 --- a/mesh/mesh_cleanup.cc +++ b/mesh/mesh_cleanup.cc @@ -162,7 +162,7 @@ bool MeshCleanup::operator()(Mesh *mesh, const MeshCleanupOptions &options) { const PointIndex new_point_id = point_map[i]; if (new_point_id < 0) continue; - // Index of the currenlty processed attribut entry in the original + // Index of the currently processed attribute entry in the original // mesh. 
const AttributeValueIndex original_entry_index = att->mapped_index(i); diff --git a/point_cloud/geometry_attribute.h b/point_cloud/geometry_attribute.h index 51100c0..cb08a59 100644 --- a/point_cloud/geometry_attribute.h +++ b/point_cloud/geometry_attribute.h @@ -157,14 +157,14 @@ class GeometryAttribute { // Returns the type of the attribute indicating the nature of the attribute. Type attribute_type() const { return attribute_type_; } void set_attribute_type(Type type) { attribute_type_ = type; } - // Retruns the data type that is stored in the attrbute. + // Returns the data type that is stored in the attribute. DataType data_type() const { return data_type_; } // Returns the number of components that are stored for each entry. - // For position attrinute this is usually three (x,y,z), - // while texture coordinates have two compontents (u,v). + // For position attribute this is usually three (x,y,z), + // while texture coordinates have two components (u,v). int8_t components_count() const { return components_count_; } // Indicates whether the data type should be normalized before interpretation, - // that is, it should be devided by the max value of the data type. + // that is, it should be divided by the max value of the data type. bool normalized() const { return normalized_; } // The buffer storing the entire data of the attribute. const DataBuffer *buffer() const { return buffer_; } @@ -219,7 +219,7 @@ class GeometryAttribute { // The same as above but without a component specifier for input attribute. template bool ConvertTypedValue(AttributeValueIndex att_index, OutT *out_value) const { - // Selecte the right method to call based on the number of attribute + // Select the right method to call based on the number of attribute // components. switch (components_count_) { case 1: diff --git a/point_cloud/point_attribute.cc b/point_cloud/point_attribute.cc index bcffc18..31a1a2a 100644 --- a/point_cloud/point_attribute.cc +++ b/point_cloud/point_attribute.cc @@ -81,7 +81,7 @@ AttributeValueIndex::ValueType PointAttribute::DeduplicateValues( return -1; // Unsupported data type. } if (unique_vals == 0) - return -1; // Unexcpected error. + return -1; // Unexpected error. return unique_vals; } diff --git a/point_cloud/point_cloud.cc b/point_cloud/point_cloud.cc index 3b2e8ae..cadf932 100644 --- a/point_cloud/point_cloud.cc +++ b/point_cloud/point_cloud.cc @@ -169,7 +169,7 @@ void PointCloud::ApplyPointIdDeduplication( bool PointCloud::DeduplicateAttributeValues() { // Go over all attributes and create mapping between duplicate entries. if (num_points() == 0) - return false; // Unexcpected attribute size. + return false; // Unexpected attribute size. // Deduplicate all attributes. for (int32_t att_id = 0; att_id < num_attributes(); ++att_id) { if (!attribute(att_id)->DeduplicateValues(*attribute(att_id))) diff --git a/point_cloud/point_cloud_builder_test.cc b/point_cloud/point_cloud_builder_test.cc index 58c009e..b821a5d 100644 --- a/point_cloud/point_cloud_builder_test.cc +++ b/point_cloud/point_cloud_builder_test.cc @@ -140,7 +140,7 @@ TEST_F(PointCloudBuilderTest, MultiUse) { } { - // Use only a sub-set of data (offseted to avoid possible reuse of old + // Use only a sub-set of data (offsetted to avoid possible reuse of old // data). builder.Start(4); const int pos_att_id =