Update snapshot to 0.9.1

This commit is contained in:
Frank Galligan 2017-01-12 16:50:49 -08:00
parent f0d913a715
commit 79185b7058
115 changed files with 8246 additions and 386 deletions

20
BUILD
View File

@ -1,20 +0,0 @@
# Description:
# Geometry and API compression and decompression utils.

# Package defaults: strict header checking, public visibility, and
# layering_check disabled for every target in this package.
package(
default_hdrs_check = "strict",
default_visibility = ["//visibility:public"],
features = [
"-layering_check",
],
)

licenses(["notice"]) # Apache 2.0

exports_files(["LICENSE"])

# Header-only target exposing the repository root as an include path so
# dependents can use draco-style include paths.
cc_library(
name = "draco_inc",
includes = ["."],
visibility = ["//visibility:public"],
)

View File

@ -12,8 +12,10 @@ require_cxx_flag_nomsvc("-std=c++11")
option(ENABLE_STANDARD_EDGEBREAKER "" ON) option(ENABLE_STANDARD_EDGEBREAKER "" ON)
option(ENABLE_PREDICTIVE_EDGEBREAKER "" ON) option(ENABLE_PREDICTIVE_EDGEBREAKER "" ON)
option(ENABLE_EXTRA_WARNINGS "" OFF) option(ENABLE_EXTRA_WARNINGS "" OFF)
option(ENABLE_TESTS "Enables tests." OFF)
option(ENABLE_WERROR "" OFF) option(ENABLE_WERROR "" OFF)
option(ENABLE_WEXTRA "" OFF) option(ENABLE_WEXTRA "" OFF)
option(IGNORE_EMPTY_BUILD_TYPE "" OFF)
if (ENABLE_STANDARD_EDGEBREAKER) if (ENABLE_STANDARD_EDGEBREAKER)
add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED") add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
@ -62,15 +64,17 @@ if (GIT_FOUND)
if (NOT EXISTS "${draco_git_dir}") if (NOT EXISTS "${draco_git_dir}")
set(draco_git_dir "${draco_root}/../../../.git") set(draco_git_dir "${draco_root}/../../../.git")
endif () endif ()
execute_process(COMMAND ${GIT_EXECUTABLE} if (EXISTS "${draco_git_dir}")
--git-dir=${draco_git_dir} rev-parse HEAD execute_process(COMMAND ${GIT_EXECUTABLE}
OUTPUT_VARIABLE draco_git_hash) --git-dir=${draco_git_dir} rev-parse HEAD
execute_process( OUTPUT_VARIABLE draco_git_hash)
COMMAND ${GIT_EXECUTABLE} --git-dir=${draco_git_dir}/.git describe execute_process(
OUTPUT_VARIABLE draco_git_desc ERROR_QUIET) COMMAND ${GIT_EXECUTABLE} --git-dir=${draco_git_dir}/.git describe
# Consume newlines from Git output. OUTPUT_VARIABLE draco_git_desc ERROR_QUIET)
string(STRIP "${draco_git_hash}" draco_git_hash) # Consume newlines from Git output.
string(STRIP "${draco_git_desc}" draco_git_desc) string(STRIP "${draco_git_hash}" draco_git_hash)
string(STRIP "${draco_git_desc}" draco_git_desc)
endif ()
endif () endif ()
if (draco_git_hash STREQUAL "") if (draco_git_hash STREQUAL "")
set(draco_git_desc "unknown") set(draco_git_desc "unknown")
@ -83,6 +87,49 @@ configure_file("${draco_root}/cmake/draco_version.cc.cmake"
configure_file("${draco_root}/cmake/draco_version.h.cmake" configure_file("${draco_root}/cmake/draco_version.h.cmake"
"${draco_build_dir}/draco_version.h" COPYONLY) "${draco_build_dir}/draco_version.h" COPYONLY)
# Platform-specific configuration: Emscripten (JS) builds need Python to run
# the WebIDL binder; native builds get a default build type and optional
# googletest-based test wiring.
if (EMSCRIPTEN)
  include(FindPythonInterp)
  if (NOT PYTHONINTERP_FOUND)
    message(FATAL_ERROR
            "Python required for Emscripten builds, but cmake cannot find it.")
  endif ()
else ()
  # When not building the JS library, and when a build type is not specified,
  # default to producing a release mode Draco library to avoid benchmarking
  # shenanigans, but only when Draco is not being pulled in via another cmake
  # file (i.e. this file is the top of the CMake list-file stack).
  if (CMAKE_BUILD_TYPE STREQUAL "" AND NOT IGNORE_EMPTY_BUILD_TYPE)
    if (CMAKE_CURRENT_LIST_FILE STREQUAL CMAKE_PARENT_LIST_FILE)
      # Fix: INFO is not a message() mode; message(INFO "...") would print a
      # literal "INFO|..." string. STATUS is the informational mode.
      message(STATUS "Draco: ignoring empty build type, forcing release mode.")
      # FORCE is safe here: we only reach this point when the user supplied no
      # build type at all.
      set(CMAKE_BUILD_TYPE "Release" CACHE STRING "Draco overridden build type"
          FORCE)
    endif ()
  endif ()

  if (ENABLE_TESTS)
    # Googletest defaults: sources are expected beside the draco checkout and
    # are configured/built inside the draco build tree.
    set(GTEST_SOURCE_DIR
        "${draco_root}/../googletest" CACHE STRING
        "Path to googletest source directory")
    set(GTEST_BUILD_DIR
        "${draco_build_dir}/googletest" CACHE STRING
        "Path to directory where googletest will be configured and built.")

    # Confirm Googletest is where expected; degrade to a no-test build rather
    # than failing the whole configure step.
    if (NOT EXISTS "${GTEST_SOURCE_DIR}/CMakeLists.txt")
      set(ENABLE_TESTS OFF)
      message("Tests disabled: Google test CMakeLists.txt does not exist.")
    else ()
      # Generate draco_test_config.h so tests can locate the test data dir.
      set(DRACO_TEST_DATA_DIR "${draco_root}/testdata")
      configure_file("${draco_root}/cmake/draco_test_config.h.cmake"
                     "${draco_build_dir}/testing/draco_test_config.h")
      add_subdirectory("${GTEST_SOURCE_DIR}" "${GTEST_BUILD_DIR}")
    endif ()
    include_directories("${GTEST_SOURCE_DIR}")
  endif ()
endif ()
# Draco source file listing variables. # Draco source file listing variables.
set(draco_compression_attributes_decoder_sources set(draco_compression_attributes_decoder_sources
"${draco_root}/compression/attributes/attributes_decoder.cc" "${draco_root}/compression/attributes/attributes_decoder.cc"
@ -91,8 +138,6 @@ set(draco_compression_attributes_decoder_sources
"${draco_root}/compression/attributes/kd_tree_attributes_decoder.h" "${draco_root}/compression/attributes/kd_tree_attributes_decoder.h"
"${draco_root}/compression/attributes/kd_tree_attributes_shared.h" "${draco_root}/compression/attributes/kd_tree_attributes_shared.h"
"${draco_root}/compression/attributes/mesh_attribute_indices_encoding_data.h" "${draco_root}/compression/attributes/mesh_attribute_indices_encoding_data.h"
"${draco_root}/compression/attributes/mesh_normal_attribute_decoder.cc"
"${draco_root}/compression/attributes/mesh_normal_attribute_decoder.h"
"${draco_root}/compression/attributes/mesh_traversal_sequencer.h" "${draco_root}/compression/attributes/mesh_traversal_sequencer.h"
"${draco_root}/compression/attributes/normal_compression_utils.h" "${draco_root}/compression/attributes/normal_compression_utils.h"
"${draco_root}/compression/attributes/sequential_attribute_decoder.cc" "${draco_root}/compression/attributes/sequential_attribute_decoder.cc"
@ -101,6 +146,8 @@ set(draco_compression_attributes_decoder_sources
"${draco_root}/compression/attributes/sequential_attribute_decoders_controller.h" "${draco_root}/compression/attributes/sequential_attribute_decoders_controller.h"
"${draco_root}/compression/attributes/sequential_integer_attribute_decoder.cc" "${draco_root}/compression/attributes/sequential_integer_attribute_decoder.cc"
"${draco_root}/compression/attributes/sequential_integer_attribute_decoder.h" "${draco_root}/compression/attributes/sequential_integer_attribute_decoder.h"
"${draco_root}/compression/attributes/sequential_normal_attribute_decoder.cc"
"${draco_root}/compression/attributes/sequential_normal_attribute_decoder.h"
"${draco_root}/compression/attributes/sequential_quantization_attribute_decoder.cc" "${draco_root}/compression/attributes/sequential_quantization_attribute_decoder.cc"
"${draco_root}/compression/attributes/sequential_quantization_attribute_decoder.h") "${draco_root}/compression/attributes/sequential_quantization_attribute_decoder.h")
@ -111,8 +158,6 @@ set(draco_compression_attributes_encoder_sources
"${draco_root}/compression/attributes/kd_tree_attributes_encoder.h" "${draco_root}/compression/attributes/kd_tree_attributes_encoder.h"
"${draco_root}/compression/attributes/linear_sequencer.h" "${draco_root}/compression/attributes/linear_sequencer.h"
"${draco_root}/compression/attributes/mesh_attribute_indices_encoding_observer.h" "${draco_root}/compression/attributes/mesh_attribute_indices_encoding_observer.h"
"${draco_root}/compression/attributes/mesh_normal_attribute_encoder.cc"
"${draco_root}/compression/attributes/mesh_normal_attribute_encoder.h"
"${draco_root}/compression/attributes/points_sequencer.h" "${draco_root}/compression/attributes/points_sequencer.h"
"${draco_root}/compression/attributes/sequential_attribute_encoder.cc" "${draco_root}/compression/attributes/sequential_attribute_encoder.cc"
"${draco_root}/compression/attributes/sequential_attribute_encoder.h" "${draco_root}/compression/attributes/sequential_attribute_encoder.h"
@ -120,6 +165,8 @@ set(draco_compression_attributes_encoder_sources
"${draco_root}/compression/attributes/sequential_attribute_encoders_controller.h" "${draco_root}/compression/attributes/sequential_attribute_encoders_controller.h"
"${draco_root}/compression/attributes/sequential_integer_attribute_encoder.cc" "${draco_root}/compression/attributes/sequential_integer_attribute_encoder.cc"
"${draco_root}/compression/attributes/sequential_integer_attribute_encoder.h" "${draco_root}/compression/attributes/sequential_integer_attribute_encoder.h"
"${draco_root}/compression/attributes/sequential_normal_attribute_encoder.cc"
"${draco_root}/compression/attributes/sequential_normal_attribute_encoder.h"
"${draco_root}/compression/attributes/sequential_quantization_attribute_encoder.cc" "${draco_root}/compression/attributes/sequential_quantization_attribute_encoder.cc"
"${draco_root}/compression/attributes/sequential_quantization_attribute_encoder.h") "${draco_root}/compression/attributes/sequential_quantization_attribute_encoder.h")
@ -291,7 +338,9 @@ set(draco_point_cloud_sources
"${draco_root}/point_cloud/point_attribute.cc" "${draco_root}/point_cloud/point_attribute.cc"
"${draco_root}/point_cloud/point_attribute.h" "${draco_root}/point_cloud/point_attribute.h"
"${draco_root}/point_cloud/point_cloud.cc" "${draco_root}/point_cloud/point_cloud.cc"
"${draco_root}/point_cloud/point_cloud.h") "${draco_root}/point_cloud/point_cloud.h"
"${draco_root}/point_cloud/point_cloud_builder.cc"
"${draco_root}/point_cloud/point_cloud_builder.h")
set(draco_points_common_sources set(draco_points_common_sources
"${draco_root}/compression/point_cloud/algorithms/point_cloud_types.h" "${draco_root}/compression/point_cloud/algorithms/point_cloud_types.h"
@ -310,77 +359,170 @@ set(draco_points_encoder_sources
"${draco_root}/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc" "${draco_root}/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.cc"
"${draco_root}/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h") "${draco_root}/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h")
# Sources for the Emscripten (JavaScript) bindings.
set(draco_js_sources
"${draco_root}/javascript/emscripten/draco_glue_wrapper.cc"
"${draco_root}/javascript/emscripten/webidl_wrapper.cc")

# Unit test sources; core/draco_tests.cc supplies the test entry point and
# draco_test_utils/draco_test_base provide shared test helpers.
set(draco_test_sources
"${draco_root}/core/draco_tests.cc"
"${draco_root}/core/draco_test_base.h"
"${draco_root}/core/draco_test_utils.cc"
"${draco_root}/core/draco_test_utils.h"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc"
"${draco_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc"
"${draco_root}/compression/mesh/mesh_encoder_test.cc"
"${draco_root}/compression/point_cloud/point_cloud_kd_tree_encoding_test.cc"
"${draco_root}/compression/point_cloud/point_cloud_sequential_encoding_test.cc"
"${draco_root}/core/bit_coder_test.cc"
"${draco_root}/core/math_utils_test.cc"
"${draco_root}/core/quantization_utils_test.cc"
"${draco_root}/core/rans_coding_test.cc"
"${draco_root}/core/symbol_coding_test.cc"
"${draco_root}/core/vector_d_test.cc"
"${draco_root}/io/obj_decoder_test.cc"
"${draco_root}/io/ply_decoder_test.cc"
"${draco_root}/io/ply_reader_test.cc"
"${draco_root}/io/point_cloud_io_test.cc"
"${draco_root}/mesh/mesh_cleanup_test.cc"
"${draco_root}/mesh/mesh_test.cc"
"${draco_root}/point_cloud/point_cloud_builder_test.cc")

# Version stamp sources generated into the build tree by configure_file from
# cmake/draco_version.*.cmake.
set(draco_version_sources
"${draco_build_dir}/draco_version.cc"
"${draco_build_dir}/draco_version.h")
include_directories("${draco_root}") include_directories("${draco_root}")
# #
# Draco targets. # Draco targets.
# #
if (EMSCRIPTEN)
# Draco js decoder.
add_compiler_flag_if_supported("-s ALLOW_MEMORY_GROWTH=1")
add_compiler_flag_if_supported("--memory-init-file 0")
add_compiler_flag_if_supported("-fno-omit-frame-pointer")
# Object collections that mirror the Draco directory structure. if (CMAKE_BUILD_TYPE STREQUAL "")
add_library(draco_compression_attributes_decoder OBJECT # Force -O3 when no build type is specified.
${draco_compression_attributes_decoder_sources}) add_compiler_flag_if_supported("-O3")
add_library(draco_compression_attributes_encoder OBJECT endif ()
${draco_compression_attributes_encoder_sources})
add_library(draco_compression_attributes_pred_schemes OBJECT
${draco_compression_attributes_pred_schemes_sources})
add_library(draco_compression_config OBJECT ${draco_compression_config_sources})
add_library(draco_compression_decode OBJECT ${draco_compression_decode_sources})
add_library(draco_compression_encode OBJECT ${draco_compression_encode_sources})
add_library(draco_compression_mesh_decoder OBJECT
${draco_compression_mesh_decoder_sources})
add_library(draco_compression_mesh_encoder OBJECT
${draco_compression_mesh_encoder_sources})
add_library(draco_compression_point_cloud_decoder OBJECT
${draco_compression_point_cloud_decoder_sources})
add_library(draco_compression_point_cloud_encoder OBJECT
${draco_compression_point_cloud_encoder_sources})
add_library(draco_core OBJECT ${draco_core_sources})
add_library(draco_io OBJECT ${draco_io_sources})
add_library(draco_mesh OBJECT ${draco_mesh_sources})
add_library(draco_point_cloud OBJECT ${draco_point_cloud_sources})
add_library(draco_points_decoder OBJECT
${draco_points_common_sources}
${draco_points_decoder_sources})
add_library(draco_points_encoder OBJECT
${draco_points_common_sources}
${draco_points_encoder_sources})
# Library targets that consume the object collections. set(draco_js_idl "${draco_root}/javascript/emscripten/draco_web.idl")
add_library(dracodec
"${draco_build_dir}/draco_version.cc"
"${draco_build_dir}/draco_version.h"
$<TARGET_OBJECTS:draco_compression_attributes_decoder>
$<TARGET_OBJECTS:draco_compression_decode>
$<TARGET_OBJECTS:draco_compression_mesh_decoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_decoder>
$<TARGET_OBJECTS:draco_core>
$<TARGET_OBJECTS:draco_io>
$<TARGET_OBJECTS:draco_mesh>
$<TARGET_OBJECTS:draco_point_cloud>
$<TARGET_OBJECTS:draco_points_decoder>)
add_library(draco
"${draco_build_dir}/draco_version.cc"
"${draco_build_dir}/draco_version.h"
$<TARGET_OBJECTS:draco_compression_attributes_decoder>
$<TARGET_OBJECTS:draco_compression_attributes_encoder>
$<TARGET_OBJECTS:draco_compression_attributes_pred_schemes>
$<TARGET_OBJECTS:draco_compression_config>
$<TARGET_OBJECTS:draco_compression_decode>
$<TARGET_OBJECTS:draco_compression_encode>
$<TARGET_OBJECTS:draco_compression_mesh_decoder>
$<TARGET_OBJECTS:draco_compression_mesh_encoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_decoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_encoder>
$<TARGET_OBJECTS:draco_core>
$<TARGET_OBJECTS:draco_io>
$<TARGET_OBJECTS:draco_mesh>
$<TARGET_OBJECTS:draco_point_cloud>
$<TARGET_OBJECTS:draco_points_decoder>
$<TARGET_OBJECTS:draco_points_encoder>)
# Draco app targets. # Generate ${draco_build_dir}/glue.cpp at cmake generation time so it can be
add_executable(draco_decoder "${draco_root}/tools/draco_decoder.cc") # added to targets without cmake reporting errors.
target_link_libraries(draco_decoder PUBLIC dracodec) execute_process(COMMAND ${PYTHON_EXECUTABLE}
add_executable(draco_encoder $ENV{EMSCRIPTEN}/tools/webidl_binder.py ${draco_js_idl}
"${draco_root}/tools/draco_encoder.cc") ${draco_build_dir}/glue
target_link_libraries(draco_encoder PUBLIC draco) OUTPUT_FILE ${draco_build_dir}/glue.cpp)
# Add a custom rule depending on the IDL to regenerate
# ${draco_build_dir}/glue.cpp as needed.
add_custom_command(OUTPUT ${draco_build_dir}/glue.cpp
COMMAND ${PYTHON_EXECUTABLE}
$ENV{EMSCRIPTEN}/tools/webidl_binder.py ${draco_js_idl}
${draco_build_dir}/glue
DEPENDS ${draco_js_idl}
COMMENT "Generating ${draco_build_dir}/glue.cpp."
WORKING_DIRECTORY ${draco_build_dir}
VERBATIM)
# Add path to glue.cpp to draco include paths.
include_directories("${draco_build_dir}")
add_executable(draco_decoder
${draco_compression_attributes_decoder_sources}
${draco_compression_decode_sources}
${draco_compression_mesh_decoder_sources}
${draco_compression_point_cloud_decoder_sources}
${draco_core_sources}
${draco_io_sources}
${draco_mesh_sources}
${draco_point_cloud_sources}
${draco_points_decoder_sources}
${draco_js_sources}
${draco_version_sources})
# Make $draco_js_sources source files depend on glue.cpp.
set_property(SOURCE ${draco_js_sources} APPEND PROPERTY OBJECT_DEPENDS
${draco_build_dir}/glue.cpp)
em_link_post_js(draco_decoder "${draco_build_dir}/glue.js")
else ()
# Standard Draco libs, encoder and decoder.
# Object collections that mirror the Draco directory structure. Each OBJECT
# library compiles one source group once; the dracodec/draco library targets
# below assemble the final archives from these objects.
add_library(draco_compression_attributes_decoder OBJECT
${draco_compression_attributes_decoder_sources})
add_library(draco_compression_attributes_encoder OBJECT
${draco_compression_attributes_encoder_sources})
add_library(draco_compression_attributes_pred_schemes OBJECT
${draco_compression_attributes_pred_schemes_sources})
add_library(draco_compression_config OBJECT
${draco_compression_config_sources})
add_library(draco_compression_decode OBJECT
${draco_compression_decode_sources})
add_library(draco_compression_encode OBJECT
${draco_compression_encode_sources})
add_library(draco_compression_mesh_decoder OBJECT
${draco_compression_mesh_decoder_sources})
add_library(draco_compression_mesh_encoder OBJECT
${draco_compression_mesh_encoder_sources})
add_library(draco_compression_point_cloud_decoder OBJECT
${draco_compression_point_cloud_decoder_sources})
add_library(draco_compression_point_cloud_encoder OBJECT
${draco_compression_point_cloud_encoder_sources})
add_library(draco_core OBJECT ${draco_core_sources})
add_library(draco_io OBJECT ${draco_io_sources})
add_library(draco_mesh OBJECT ${draco_mesh_sources})
add_library(draco_point_cloud OBJECT ${draco_point_cloud_sources})
add_library(draco_points_decoder OBJECT
${draco_points_common_sources}
${draco_points_decoder_sources})
add_library(draco_points_encoder OBJECT
${draco_points_common_sources}
${draco_points_encoder_sources})
# Library targets that consume the object collections.
# dracodec: decode-only library (no encoder objects) for size-sensitive users.
add_library(dracodec
${draco_version_sources}
$<TARGET_OBJECTS:draco_compression_attributes_decoder>
$<TARGET_OBJECTS:draco_compression_decode>
$<TARGET_OBJECTS:draco_compression_mesh_decoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_decoder>
$<TARGET_OBJECTS:draco_core>
$<TARGET_OBJECTS:draco_io>
$<TARGET_OBJECTS:draco_mesh>
$<TARGET_OBJECTS:draco_point_cloud>
$<TARGET_OBJECTS:draco_points_decoder>)
# draco: full library containing both the encoder and decoder objects.
add_library(draco
${draco_version_sources}
$<TARGET_OBJECTS:draco_compression_attributes_decoder>
$<TARGET_OBJECTS:draco_compression_attributes_encoder>
$<TARGET_OBJECTS:draco_compression_attributes_pred_schemes>
$<TARGET_OBJECTS:draco_compression_config>
$<TARGET_OBJECTS:draco_compression_decode>
$<TARGET_OBJECTS:draco_compression_encode>
$<TARGET_OBJECTS:draco_compression_mesh_decoder>
$<TARGET_OBJECTS:draco_compression_mesh_encoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_decoder>
$<TARGET_OBJECTS:draco_compression_point_cloud_encoder>
$<TARGET_OBJECTS:draco_core>
$<TARGET_OBJECTS:draco_io>
$<TARGET_OBJECTS:draco_mesh>
$<TARGET_OBJECTS:draco_point_cloud>
$<TARGET_OBJECTS:draco_points_decoder>
$<TARGET_OBJECTS:draco_points_encoder>)
# Draco app targets.
# draco_decoder only needs the decode-only library; draco_encoder links the
# full library.
add_executable(draco_decoder "${draco_root}/tools/draco_decoder.cc")
target_link_libraries(draco_decoder PUBLIC dracodec)
add_executable(draco_encoder
"${draco_root}/tools/draco_encoder.cc")
target_link_libraries(draco_encoder PUBLIC draco)
# Optional unit-test binary; only reached when ENABLE_TESTS survived the
# googletest existence check above.
if (ENABLE_TESTS)
  add_executable(draco_tests ${draco_test_sources})
  # draco_build_dir holds the generated testing/draco_test_config.h; gtest's
  # public headers are needed because gtest was added via add_subdirectory.
  include_directories("${draco_build_dir}"
                      "${GTEST_SOURCE_DIR}/googletest/include")
  # Fix: use the keyword signature instead of the legacy keyword-less form.
  # PRIVATE is correct for an executable — nothing links against it.
  target_link_libraries(draco_tests PRIVATE draco gtest)
endif ()
endif ()

View File

@ -102,8 +102,8 @@ Unlike Visual Studio and Xcode projects, the build configuration for make
builds is controlled when you run `cmake`. The following examples demonstrate builds is controlled when you run `cmake`. The following examples demonstrate
various build configurations. various build configurations.
Omitting the build type produces makefiles that use build flags containing Omitting the build type produces makefiles that use release build flags
neither optimization nor debug flags: by default:
~~~~~ bash ~~~~~ bash
cmake . cmake .

View File

@ -0,0 +1,12 @@
#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_
#define DRACO_TESTING_DRACO_TEST_CONFIG_H_
// If this file is named draco_test_config.h.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_test_config.h:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}"
#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_

View File

@ -54,17 +54,19 @@ class AttributesEncoder {
// Returns the number of attributes that need to be encoded before the // Returns the number of attributes that need to be encoded before the
// specified attribute is encoded. // specified attribute is encoded.
// Note that the attribute is specified by its point attribute id. // Note that the attribute is specified by its point attribute id.
virtual int NumParentAttributes(int32_t point_attribute_id) const { virtual int NumParentAttributes(int32_t /* point_attribute_id */) const {
return 0; return 0;
} }
virtual int GetParentAttributeId(int32_t point_attribute_id, virtual int GetParentAttributeId(int32_t /* point_attribute_id */,
int32_t parent_i) const { int32_t /* parent_i */) const {
return -1; return -1;
} }
// Marks a given attribute as a parent of another attribute. // Marks a given attribute as a parent of another attribute.
virtual bool MarkParentAttribute(int32_t point_attribute_id) { return false; } virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) {
return false;
}
// Returns an attribute containing the encoded version of the attribute data. // Returns an attribute containing the encoded version of the attribute data.
// I.e., the data that is going to be used by the decoder after the attribute // I.e., the data that is going to be used by the decoder after the attribute
@ -74,13 +76,13 @@ class AttributesEncoder {
// dependent attributes that require to use the same data that will be // dependent attributes that require to use the same data that will be
// available during decoding. // available during decoding.
virtual const PointAttribute *GetLossyAttributeData( virtual const PointAttribute *GetLossyAttributeData(
int32_t point_attribute_id) { int32_t /* point_attribute_id */) {
return nullptr; return nullptr;
} }
void AddAttributeId(int32_t id) { void AddAttributeId(int32_t id) {
point_attribute_ids_.push_back(id); point_attribute_ids_.push_back(id);
if (id >= point_attribute_to_local_id_map_.size()) if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size()))
point_attribute_to_local_id_map_.resize(id + 1, -1); point_attribute_to_local_id_map_.resize(id + 1, -1);
point_attribute_to_local_id_map_[id] = point_attribute_ids_.size() - 1; point_attribute_to_local_id_map_[id] = point_attribute_ids_.size() - 1;
} }
@ -100,7 +102,8 @@ class AttributesEncoder {
protected: protected:
int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const { int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
if (point_attribute_id >= point_attribute_to_local_id_map_.size()) const int id_map_size = point_attribute_to_local_id_map_.size();
if (point_attribute_id >= id_map_size)
return -1; return -1;
return point_attribute_to_local_id_map_[point_attribute_id]; return point_attribute_to_local_id_map_[point_attribute_id];
} }

View File

@ -56,6 +56,9 @@ class PointAttributeVectorIterator {
ret.point_id_ += dif; ret.point_id_ += dif;
return ret; return ret;
} }
// Strict ordering of iterators by the point id they currently reference,
// making the iterator LessThanComparable for standard algorithms.
// NOTE(review): compares point_id_ only — presumably both iterators refer to
// the same attribute; confirm callers never compare across attributes.
bool operator<(const Self &other) const {
return point_id_ < other.point_id_;
}
private: private:
const PointAttribute *attribute_; const PointAttribute *attribute_;

View File

@ -44,7 +44,7 @@ class MeshAttributeIndicesEncodingObserver {
// Interface for TraversalObserverT // Interface for TraversalObserverT
void OnNewFaceVisited(FaceIndex face) {} void OnNewFaceVisited(FaceIndex /* face */) {}
void OnNewVertexVisited(VertexIndex vertex, CornerIndex corner) { void OnNewVertexVisited(VertexIndex vertex, CornerIndex corner) {
const PointIndex point_id = const PointIndex point_id =

View File

@ -66,7 +66,7 @@ class MeshTraversalSequencer : public PointsSequencer {
protected: protected:
bool GenerateSequenceInternal() override { bool GenerateSequenceInternal() override {
if (corner_order_) { if (corner_order_) {
for (int i = 0; i < corner_order_->size(); ++i) { for (uint32_t i = 0; i < corner_order_->size(); ++i) {
ProcessCorner(corner_order_->at(i)); ProcessCorner(corner_order_->at(i));
} }
} else { } else {

View File

@ -74,7 +74,7 @@ void UnitVectorToQuantizedOctahedralCoords(const T *vector,
int32_t s = static_cast<int32_t>(floor(ss * max_value + 0.5)); int32_t s = static_cast<int32_t>(floor(ss * max_value + 0.5));
int32_t t = static_cast<int32_t>(floor(tt * max_value + 0.5)); int32_t t = static_cast<int32_t>(floor(tt * max_value + 0.5));
const int32_t center_value = max_value / 2; const int32_t center_value = static_cast<int32_t>(max_value / 2);
// Convert all edge points in the top left and bottom right quadrants to // Convert all edge points in the top left and bottom right quadrants to
// their corresponding position in the bottom left and top right quadrants. // their corresponding position in the bottom left and top right quadrants.
@ -82,8 +82,8 @@ void UnitVectorToQuantizedOctahedralCoords(const T *vector,
// for the inversion to occur correctly. // for the inversion to occur correctly.
if ((s == 0 && t == 0) || (s == 0 && t == max_value) || if ((s == 0 && t == 0) || (s == 0 && t == max_value) ||
(s == max_value && t == 0)) { (s == max_value && t == 0)) {
s = max_value; s = static_cast<int32_t>(max_value);
t = max_value; t = static_cast<int32_t>(max_value);
} else if (s == 0 && t > center_value) { } else if (s == 0 && t > center_value) {
t = center_value - (t - center_value); t = center_value - (t - center_value);
} else if (s == max_value && t < center_value) { } else if (s == max_value && t < center_value) {
@ -157,6 +157,42 @@ void QuantizedOctaherdalCoordsToUnitVector(int32_t in_s, int32_t in_t,
in_t / max_quantized_value, out_vector); in_t / max_quantized_value, out_vector);
} }
// Returns true when the point (s, t) lies inside, or on the boundary of, the
// diamond defined by |s| + |t| <= max_value_. Operands are widened to double
// before the comparison so integral and floating T behave the same.
template <typename T>
bool IsInDiamond(const T &max_value_, const T &s, const T &t) {
  const double abs_s = std::abs(static_cast<double>(s));
  const double abs_t = std::abs(static_cast<double>(t));
  return abs_s + abs_t <= static_cast<double>(max_value_);
}
// In-place remapping of (s, t) about the corner of its quadrant: translate so
// the quadrant corner is the origin, negate-and-swap (or swap) the doubled
// coordinates, then translate back. NOTE(review): presumably this produces an
// equivalent "inverted" representation of the same octahedral-coded normal —
// confirm against the encoder/decoder call sites.
template <typename T>
void InvertRepresentation(const T &max_value_, T *s, T *t) {
// Select the sign pair identifying the quadrant. Axis points are folded
// into the (+,+) quadrant when both coords are >= 0 and into (-,-) when
// both are <= 0; otherwise each sign follows its own coordinate.
T sign_s = 0;
T sign_t = 0;
if (*s >= 0 && *t >= 0) {
sign_s = 1;
sign_t = 1;
} else if (*s <= 0 && *t <= 0) {
sign_s = -1;
sign_t = -1;
} else {
sign_s = (*s > 0) ? 1 : -1;
sign_t = (*t > 0) ? 1 : -1;
}

// Corner of the quadrant containing (s, t).
const T corner_point_s = sign_s * max_value_;
const T corner_point_t = sign_t * max_value_;

// Shift the corner to the origin; doubling keeps the arithmetic exact for
// integral T (undone by the /2 below).
*s = 2 * *s - corner_point_s;
*t = 2 * *t - corner_point_t;
// In the (+,+) and (-,-) quadrants apply a negate-and-swap of the
// coordinates; in the mixed-sign quadrants a plain swap.
if (sign_s * sign_t >= 0) {
T temp = *s;
*s = -*t;
*t = -temp;
} else {
std::swap(*s, *t);
}
// Shift back to the original frame and undo the doubling.
*s = (*s + corner_point_s) / 2;
*t = (*t + corner_point_t) / 2;
}
} // namespace draco } // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_ #endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_

View File

@ -43,7 +43,7 @@ class PointsSequencer {
// sufficient information to compute the inverse map, because not all point // sufficient information to compute the inverse map, because not all point
// ids are necessarily contained within the map. // ids are necessarily contained within the map.
// Must be implemented for sequencers that are used by attribute decoders. // Must be implemented for sequencers that are used by attribute decoders.
virtual bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) { virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) {
return false; return false;
} }

View File

@ -63,7 +63,7 @@ class MeshPredictionSchemeMultiParallelogram
template <typename DataTypeT, class TransformT, class MeshDataT> template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>:: bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
Encode(const DataTypeT *in_data, CorrType *out_corr, int size, Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components, const PointIndex *entry_to_point_id_map) { int num_components, const PointIndex * /* entry_to_point_id_map */) {
this->transform().InitializeEncoding(in_data, size, num_components); this->transform().InitializeEncoding(in_data, size, num_components);
const CornerTable *const table = this->mesh_data().corner_table(); const CornerTable *const table = this->mesh_data().corner_table();
const std::vector<int32_t> *const vertex_to_data_map = const std::vector<int32_t> *const vertex_to_data_map =
@ -140,8 +140,8 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
template <typename DataTypeT, class TransformT, class MeshDataT> template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>:: bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
Decode(const CorrType *in_corr, DataTypeT *out_data, int size, Decode(const CorrType *in_corr, DataTypeT *out_data, int /* size */,
int num_components, const PointIndex *entry_to_point_id_map) { int num_components, const PointIndex * /* entry_to_point_id_map */) {
this->transform().InitializeDecoding(num_components); this->transform().InitializeDecoding(num_components);
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]()); std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
@ -152,7 +152,8 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
const std::vector<int32_t> *const vertex_to_data_map = const std::vector<int32_t> *const vertex_to_data_map =
this->mesh_data().vertex_to_data_map(); this->mesh_data().vertex_to_data_map();
for (int p = 1; p < this->mesh_data().data_to_corner_map()->size(); ++p) { const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex start_corner_id = const CornerIndex start_corner_id =
this->mesh_data().data_to_corner_map()->at(p); this->mesh_data().data_to_corner_map()->at(p);

View File

@ -66,7 +66,7 @@ class MeshPredictionSchemeParallelogram
template <typename DataTypeT, class TransformT, class MeshDataT> template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>:: bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
Encode(const DataTypeT *in_data, CorrType *out_corr, int size, Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components, const PointIndex *entry_to_point_id_map) { int num_components, const PointIndex * /* entry_to_point_id_map */) {
this->transform().InitializeEncoding(in_data, size, num_components); this->transform().InitializeEncoding(in_data, size, num_components);
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]()); std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
@ -117,8 +117,8 @@ bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
template <typename DataTypeT, class TransformT, class MeshDataT> template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>:: bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
Decode(const CorrType *in_corr, DataTypeT *out_data, int size, Decode(const CorrType *in_corr, DataTypeT *out_data, int /* size */,
int num_components, const PointIndex *entry_to_point_id_map) { int num_components, const PointIndex * /* entry_to_point_id_map */) {
this->transform().InitializeDecoding(num_components); this->transform().InitializeDecoding(num_components);
const CornerTable *const table = this->mesh_data().corner_table(); const CornerTable *const table = this->mesh_data().corner_table();
@ -130,7 +130,8 @@ bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
// Restore the first value. // Restore the first value.
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data, 0); this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data, 0);
for (int p = 1; p < this->mesh_data().data_to_corner_map()->size(); ++p) { const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
int vert_opp = p, vert_next = p, vert_prev = p; int vert_opp = p, vert_next = p, vert_prev = p;
const CornerIndex opp_corner = table->Opposite(corner_id); const CornerIndex opp_corner = table->Opposite(corner_id);

View File

@ -73,6 +73,7 @@ class MeshPredictionSchemeTexCoords
GeometryAttribute::Type GetParentAttributeType(int i) const override { GeometryAttribute::Type GetParentAttributeType(int i) const override {
DCHECK_EQ(i, 0); DCHECK_EQ(i, 0);
(void)i;
return GeometryAttribute::POSITION; return GeometryAttribute::POSITION;
} }
@ -137,15 +138,16 @@ bool MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::Encode(
template <typename DataTypeT, class TransformT, class MeshDataT> template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::Decode( bool MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::Decode(
const CorrType *in_corr, DataTypeT *out_data, int size, int num_components, const CorrType *in_corr, DataTypeT *out_data, int /* size */,
const PointIndex *entry_to_point_id_map) { int num_components, const PointIndex *entry_to_point_id_map) {
num_components_ = num_components; num_components_ = num_components;
entry_to_point_id_map_ = entry_to_point_id_map; entry_to_point_id_map_ = entry_to_point_id_map;
predicted_value_ = predicted_value_ =
std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]); std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
this->transform().InitializeDecoding(num_components); this->transform().InitializeDecoding(num_components);
for (int p = 0; p < this->mesh_data().data_to_corner_map()->size(); ++p) { const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
for (int p = 0; p < corner_map_size; ++p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p); const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
ComputePredictedValue<false>(corner_id, out_data, p); ComputePredictedValue<false>(corner_id, out_data, p);
@ -226,8 +228,8 @@ void MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::
const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data); const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
if (p_uv == n_uv) { if (p_uv == n_uv) {
// We cannot do a reliable prediction on degenerated UV triangles. // We cannot do a reliable prediction on degenerated UV triangles.
predicted_value_[0] = p_uv[0]; predicted_value_[0] = static_cast<int>(p_uv[0]);
predicted_value_[1] = p_uv[1]; predicted_value_[1] = static_cast<int>(p_uv[1]);
return; return;
} }
@ -317,11 +319,11 @@ void MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::
} }
if (std::is_integral<DataTypeT>::value) { if (std::is_integral<DataTypeT>::value) {
// Round the predicted value for integer types. // Round the predicted value for integer types.
predicted_value_[0] = floor(predicted_uv[0] + 0.5); predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
predicted_value_[1] = floor(predicted_uv[1] + 0.5); predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
} else { } else {
predicted_value_[0] = predicted_uv[0]; predicted_value_[0] = static_cast<int>(predicted_uv[0]);
predicted_value_[1] = predicted_uv[1]; predicted_value_[1] = static_cast<int>(predicted_uv[1]);
} }
return; return;
} }

View File

@ -60,12 +60,14 @@ class PredictionScheme
int GetNumParentAttributes() const override { return 0; } int GetNumParentAttributes() const override { return 0; }
// Returns the type of each of the parent attribute. // Returns the type of each of the parent attribute.
GeometryAttribute::Type GetParentAttributeType(int i) const override { GeometryAttribute::Type GetParentAttributeType(int /* i */) const override {
return GeometryAttribute::INVALID; return GeometryAttribute::INVALID;
} }
// Sets the required parent attribute. // Sets the required parent attribute.
bool SetParentAttribute(const PointAttribute *att) override { return false; } bool SetParentAttribute(const PointAttribute * /* att */) override {
return false;
}
bool AreCorrectionsPositive() override { bool AreCorrectionsPositive() override {
return transform_.AreCorrectionsPositive(); return transform_.AreCorrectionsPositive();

View File

@ -17,6 +17,7 @@
#include <cmath> #include <cmath>
#include "compression/attributes/normal_compression_utils.h"
#include "compression/attributes/prediction_schemes/prediction_scheme.h" #include "compression/attributes/prediction_schemes/prediction_scheme.h"
#include "core/macros.h" #include "core/macros.h"
#include "core/vector_d.h" #include "core/vector_d.h"
@ -119,44 +120,15 @@ class PredictionSchemeNormalOctahedronTransform
out_orig_vals[1] = orig[1]; out_orig_vals[1] = orig[1];
} }
Point2 InvertRepresentation(Point2 p) const {
DataType sign_x = 0;
DataType sign_y = 0;
if (p[0] >= 0 && p[1] >= 0) {
sign_x = 1;
sign_y = 1;
} else if (p[0] <= 0 && p[1] <= 0) {
sign_x = -1;
sign_y = -1;
} else {
sign_x = (p[0] > 0) ? 1 : -1;
sign_y = (p[1] > 0) ? 1 : -1;
}
const Point2 t = Point2(sign_x * max_value_, sign_y * max_value_);
p = 2 * p - t;
if (sign_x * sign_y >= 0) {
p = Point2(-p[1], -p[0]);
} else {
p = Point2(p[1], p[0]);
}
p = (p + t) / 2;
return p;
}
bool IsInDiamond(const Point2 &p) const {
return std::abs(p[0]) + std::abs(p[1]) <= max_value_;
}
private: private:
Point2 ComputeCorrection(Point2 orig, Point2 pred) const { Point2 ComputeCorrection(Point2 orig, Point2 pred) const {
const Point2 t(max_value_, max_value_); const Point2 t(max_value_, max_value_);
orig = orig - t; orig = orig - t;
pred = pred - t; pred = pred - t;
if (!IsInDiamond(pred)) { if (!IsInDiamond( max_value_, pred[0], pred[1])) {
orig = InvertRepresentation(orig); InvertRepresentation(max_value_, &orig[0], &orig[1]);
pred = InvertRepresentation(pred); InvertRepresentation(max_value_, &pred[0], &pred[1]);
} }
Point2 corr = orig - pred; Point2 corr = orig - pred;
@ -169,15 +141,15 @@ class PredictionSchemeNormalOctahedronTransform
const Point2 t(max_value_, max_value_); const Point2 t(max_value_, max_value_);
pred = pred - t; pred = pred - t;
const bool pred_is_in_diamond = IsInDiamond(pred); const bool pred_is_in_diamond = IsInDiamond( max_value_, pred[0], pred[1]);
if (!pred_is_in_diamond) { if (!pred_is_in_diamond) {
pred = InvertRepresentation(pred); InvertRepresentation(max_value_, &pred[0], &pred[1]);
} }
Point2 orig = pred + corr; Point2 orig = pred + corr;
orig[0] = ModMax(orig[0]); orig[0] = ModMax(orig[0]);
orig[1] = ModMax(orig[1]); orig[1] = ModMax(orig[1]);
if (!pred_is_in_diamond) { if (!pred_is_in_diamond) {
orig = InvertRepresentation(orig); InvertRepresentation(max_value_, &orig[0], &orig[1]);
} }
orig = orig + t; orig = orig + t;
return orig; return orig;

View File

@ -0,0 +1,63 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
#include "core/draco_test_base.h"
namespace {
class PredictionSchemeNormalOctahedronTransformTest : public ::testing::Test {
protected:
typedef draco::PredictionSchemeNormalOctahedronTransform<int32_t> Transform;
typedef Transform::Point2 Point2;
void TestComputeCorrection(const Transform &transform, const int32_t &ox,
const int32_t &oy, const int32_t &px,
const int32_t &py, const int32_t &cx,
const int32_t &cy) {
const int32_t o[2] = {ox + 7, oy + 7};
const int32_t p[2] = {px + 7, py + 7};
int32_t corr[2] = {500, 500};
transform.ComputeCorrection(o, p, corr, 0);
ASSERT_EQ(corr[0], (cx + 15) % 15);
ASSERT_EQ(corr[1], (cy + 15) % 15);
}
};
TEST_F(PredictionSchemeNormalOctahedronTransformTest, Init) {
const Transform transform(15);
ASSERT_TRUE(transform.AreCorrectionsPositive());
}
TEST_F(PredictionSchemeNormalOctahedronTransformTest, ComputeCorrections) {
const Transform transform(15);
// checks inside diamond
TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0);
TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0);
TestComputeCorrection(transform, 3, 4, 1, 1, 2, 3);
TestComputeCorrection(transform, -1, -1, -1, -1, 0, 0);
TestComputeCorrection(transform, -3, -4, -1, -1, -2, -3);
// checks outside diamond
TestComputeCorrection(transform, 4, 4, 4, 4, 0, 0);
TestComputeCorrection(transform, 5, 6, 4, 4, -2, -1);
TestComputeCorrection(transform, 3, 2, 4, 4, 2, 1);
// checks on outer edges
TestComputeCorrection(transform, 7, 7, 4, 4, -3, -3);
TestComputeCorrection(transform, 6, 7, 4, 4, -3, -2);
TestComputeCorrection(transform, -6, 7, 4, 4, -3, -2);
TestComputeCorrection(transform, 7, 6, 4, 4, -2, -3);
TestComputeCorrection(transform, 7, -6, 4, 4, -2, -3);
}
} // namespace

View File

@ -40,7 +40,7 @@ class PredictionSchemeTransform {
// Performs any custom initialization of the trasnform for the encoder. // Performs any custom initialization of the trasnform for the encoder.
// |size| = total number of values in |orig_data| (i.e., number of entries * // |size| = total number of values in |orig_data| (i.e., number of entries *
// number of components). // number of components).
void InitializeEncoding(const DataTypeT *orig_data, int size, void InitializeEncoding(const DataTypeT * /* orig_data */, int /* size */,
int num_components) { int num_components) {
num_components_ = num_components; num_components_ = num_components;
} }
@ -79,10 +79,10 @@ class PredictionSchemeTransform {
} }
// Encode any transform specific data. // Encode any transform specific data.
bool EncodeTransformData(EncoderBuffer *buffer) { return true; } bool EncodeTransformData(EncoderBuffer * /* buffer */) { return true; }
// Decodes any transform specific data. Called before Initialize() method. // Decodes any transform specific data. Called before Initialize() method.
bool DecodeTransformData(DecoderBuffer *buffer) { return true; } bool DecodeTransformData(DecoderBuffer * /* buffer */) { return true; }
// Should return true if all corrected values are guaranteed to be positive. // Should return true if all corrected values are guaranteed to be positive.
bool AreCorrectionsPositive() const { return false; } bool AreCorrectionsPositive() const { return false; }

View File

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
// //
#include "compression/attributes/sequential_attribute_decoders_controller.h" #include "compression/attributes/sequential_attribute_decoders_controller.h"
#include "compression/attributes/mesh_normal_attribute_decoder.h" #include "compression/attributes/sequential_normal_attribute_decoder.h"
#include "compression/attributes/sequential_quantization_attribute_decoder.h" #include "compression/attributes/sequential_quantization_attribute_decoder.h"
#include "compression/config/compression_shared.h" #include "compression/config/compression_shared.h"
@ -75,8 +75,8 @@ SequentialAttributeDecodersController::CreateSequentialDecoder(
return std::unique_ptr<SequentialAttributeDecoder>( return std::unique_ptr<SequentialAttributeDecoder>(
new SequentialQuantizationAttributeDecoder()); new SequentialQuantizationAttributeDecoder());
case SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS: case SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS:
return std::unique_ptr<MeshNormalAttributeDecoder>( return std::unique_ptr<SequentialNormalAttributeDecoder>(
new MeshNormalAttributeDecoder()); new SequentialNormalAttributeDecoder());
default: default:
break; break;
} }

View File

@ -54,7 +54,7 @@ bool SequentialAttributeEncoder::EncodeValues(
const std::unique_ptr<uint8_t[]> value_data_ptr(new uint8_t[entry_size]); const std::unique_ptr<uint8_t[]> value_data_ptr(new uint8_t[entry_size]);
uint8_t *const value_data = value_data_ptr.get(); uint8_t *const value_data = value_data_ptr.get();
// Encode all attribute values in their native raw format. // Encode all attribute values in their native raw format.
for (int i = 0; i < point_ids.size(); ++i) { for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex entry_id = attribute_->mapped_index(point_ids[i]); const AttributeValueIndex entry_id = attribute_->mapped_index(point_ids[i]);
attribute_->GetValue(entry_id, value_data); attribute_->GetValue(entry_id, value_data);
out_buffer->Encode(value_data, entry_size); out_buffer->Encode(value_data, entry_size);

View File

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
// //
#include "compression/attributes/sequential_attribute_encoders_controller.h" #include "compression/attributes/sequential_attribute_encoders_controller.h"
#include "compression/attributes/mesh_normal_attribute_encoder.h" #include "compression/attributes/sequential_normal_attribute_encoder.h"
#include "compression/attributes/sequential_quantization_attribute_encoder.h" #include "compression/attributes/sequential_quantization_attribute_encoder.h"
#include "compression/point_cloud/point_cloud_encoder.h" #include "compression/point_cloud/point_cloud_encoder.h"
@ -47,7 +47,7 @@ bool SequentialAttributeEncodersController::EncodeAttributesEncoderData(
if (!AttributesEncoder::EncodeAttributesEncoderData(out_buffer)) if (!AttributesEncoder::EncodeAttributesEncoderData(out_buffer))
return false; return false;
// Encode a unique id of every sequential encoder. // Encode a unique id of every sequential encoder.
for (int i = 0; i < sequential_encoders_.size(); ++i) { for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
out_buffer->Encode(sequential_encoders_[i]->GetUniqueId()); out_buffer->Encode(sequential_encoders_[i]->GetUniqueId());
} }
return true; return true;
@ -57,7 +57,7 @@ bool SequentialAttributeEncodersController::EncodeAttributes(
EncoderBuffer *buffer) { EncoderBuffer *buffer) {
if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_)) if (!sequencer_ || !sequencer_->GenerateSequence(&point_ids_))
return false; return false;
for (int i = 0; i < sequential_encoders_.size(); ++i) { for (uint32_t i = 0; i < sequential_encoders_.size(); ++i) {
if (!sequential_encoders_[i]->Encode(point_ids_, buffer)) if (!sequential_encoders_[i]->Encode(point_ids_, buffer))
return false; return false;
} }
@ -95,7 +95,7 @@ SequentialAttributeEncodersController::CreateSequentialEncoder(int i) {
// We currently only support normals with float coordinates // We currently only support normals with float coordinates
// and must be quantized. // and must be quantized.
return std::unique_ptr<SequentialAttributeEncoder>( return std::unique_ptr<SequentialAttributeEncoder>(
new MeshNormalAttributeEncoder()); new SequentialNormalAttributeEncoder());
} else { } else {
return std::unique_ptr<SequentialAttributeEncoder>( return std::unique_ptr<SequentialAttributeEncoder>(
new SequentialQuantizationAttributeEncoder()); new SequentialQuantizationAttributeEncoder());

View File

@ -90,7 +90,7 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
if (!in_buffer->Decode(values_.data(), sizeof(int32_t) * values_.size())) if (!in_buffer->Decode(values_.data(), sizeof(int32_t) * values_.size()))
return false; return false;
} else { } else {
for (int i = 0; i < values_.size(); ++i) { for (uint32_t i = 0; i < values_.size(); ++i) {
in_buffer->Decode(&values_[i], num_bytes); in_buffer->Decode(&values_[i], num_bytes);
} }
} }
@ -151,7 +151,7 @@ void SequentialIntegerAttributeDecoder::StoreTypedValues(uint32_t num_values) {
new AttributeTypeT[num_components]); new AttributeTypeT[num_components]);
int val_id = 0; int val_id = 0;
int out_byte_pos = 0; int out_byte_pos = 0;
for (int i = 0; i < num_values; ++i) { for (uint32_t i = 0; i < num_values; ++i) {
for (int c = 0; c < num_components; ++c) { for (int c = 0; c < num_components; ++c) {
const AttributeTypeT value = const AttributeTypeT value =
static_cast<AttributeTypeT>(values_[val_id++]); static_cast<AttributeTypeT>(values_[val_id++]);

View File

@ -110,7 +110,7 @@ bool SequentialIntegerAttributeEncoder::EncodeValues(
// To compute the maximum bit-length, first OR all values. // To compute the maximum bit-length, first OR all values.
uint32_t masked_value = 0; uint32_t masked_value = 0;
for (int i = 0; i < values_.size(); ++i) { for (uint32_t i = 0; i < values_.size(); ++i) {
masked_value |= values_[i]; masked_value |= values_[i];
} }
// Compute the msb of the ORed value. // Compute the msb of the ORed value.
@ -126,7 +126,7 @@ bool SequentialIntegerAttributeEncoder::EncodeValues(
if (num_bytes == sizeof(decltype(values_)::value_type)) { if (num_bytes == sizeof(decltype(values_)::value_type)) {
out_buffer->Encode(values_.data(), sizeof(int32_t) * values_.size()); out_buffer->Encode(values_.data(), sizeof(int32_t) * values_.size());
} else { } else {
for (int i = 0; i < values_.size(); ++i) { for (uint32_t i = 0; i < values_.size(); ++i) {
out_buffer->Encode(&values_[i], num_bytes); out_buffer->Encode(&values_[i], num_bytes);
} }
} }

View File

@ -0,0 +1,61 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <numeric>
#include "compression/attributes/sequential_integer_attribute_decoder.h"
#include "compression/attributes/sequential_integer_attribute_encoder.h"
#include "core/draco_test_base.h"
namespace draco {
class SequentialIntegerAttributeEncodingTest : public ::testing::Test {
protected:
};
TEST_F(SequentialIntegerAttributeEncodingTest, DoesCompress) {
// This test verifies that IntegerEncoding encodes and decodes the given data.
const std::vector<int32_t> values{1, 8, 7, 5, 5, 5, 9,
155, -6, -9, 9, 125, 1, 0};
GeometryAttribute ga;
PointAttribute pa;
pa.Init(GeometryAttribute::GENERIC, nullptr, 1, DT_INT32, false, 4, 0);
pa.Reset(values.size());
pa.SetIdentityMapping();
for (uint32_t i = 0; i < values.size(); ++i) {
pa.SetAttributeValue(AttributeValueIndex(i), &values[i]);
}
// List of point ids from 0 to point_ids.size() - 1.
std::vector<PointIndex> point_ids(values.size());
std::iota(point_ids.begin(), point_ids.end(), 0);
EncoderBuffer out_buf;
SequentialIntegerAttributeEncoder ie;
ASSERT_TRUE(ie.InitializeStandalone(&pa));
ASSERT_TRUE(ie.Encode(point_ids, &out_buf));
DecoderBuffer in_buf;
in_buf.Init(out_buf.data(), out_buf.size());
SequentialIntegerAttributeDecoder id;
ASSERT_TRUE(id.InitializeStandalone(&pa));
ASSERT_TRUE(id.Decode(point_ids, &in_buf));
for (uint32_t i = 0; i < values.size(); ++i) {
int32_t entry_val;
pa.GetValue(AttributeValueIndex(i), &entry_val);
ASSERT_EQ(entry_val, values[i]);
}
}
} // namespace draco

View File

@ -12,16 +12,16 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// //
#include "compression/attributes/mesh_normal_attribute_decoder.h" #include "compression/attributes/sequential_normal_attribute_decoder.h"
#include "compression/attributes/normal_compression_utils.h" #include "compression/attributes/normal_compression_utils.h"
namespace draco { namespace draco {
MeshNormalAttributeDecoder::MeshNormalAttributeDecoder() SequentialNormalAttributeDecoder::SequentialNormalAttributeDecoder()
: quantization_bits_(-1) {} : quantization_bits_(-1) {}
bool MeshNormalAttributeDecoder::Initialize(PointCloudDecoder *decoder, bool SequentialNormalAttributeDecoder::Initialize(PointCloudDecoder *decoder,
int attribute_id) { int attribute_id) {
if (!SequentialIntegerAttributeDecoder::Initialize(decoder, attribute_id)) if (!SequentialIntegerAttributeDecoder::Initialize(decoder, attribute_id))
return false; return false;
// Currently, this encoder works only for 3-component normal vectors. // Currently, this encoder works only for 3-component normal vectors.
@ -30,7 +30,7 @@ bool MeshNormalAttributeDecoder::Initialize(PointCloudDecoder *decoder,
return true; return true;
} }
bool MeshNormalAttributeDecoder::DecodeIntegerValues( bool SequentialNormalAttributeDecoder::DecodeIntegerValues(
const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) { const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
uint8_t quantization_bits; uint8_t quantization_bits;
if (!in_buffer->Decode(&quantization_bits)) if (!in_buffer->Decode(&quantization_bits))
@ -40,7 +40,7 @@ bool MeshNormalAttributeDecoder::DecodeIntegerValues(
in_buffer); in_buffer);
} }
bool MeshNormalAttributeDecoder::StoreValues(uint32_t num_points) { bool SequentialNormalAttributeDecoder::StoreValues(uint32_t num_points) {
// Convert all quantized values back to floats. // Convert all quantized values back to floats.
const int32_t max_quantized_value = (1 << quantization_bits_) - 1; const int32_t max_quantized_value = (1 << quantization_bits_) - 1;
const float max_quantized_value_f = static_cast<float>(max_quantized_value); const float max_quantized_value_f = static_cast<float>(max_quantized_value);
@ -50,7 +50,7 @@ bool MeshNormalAttributeDecoder::StoreValues(uint32_t num_points) {
float att_val[3]; float att_val[3];
int quant_val_id = 0; int quant_val_id = 0;
int out_byte_pos = 0; int out_byte_pos = 0;
for (int i = 0; i < num_points; ++i) { for (uint32_t i = 0; i < num_points; ++i) {
const int32_t s = values()->at(quant_val_id++); const int32_t s = values()->at(quant_val_id++);
const int32_t t = values()->at(quant_val_id++); const int32_t t = values()->at(quant_val_id++);
QuantizedOctaherdalCoordsToUnitVector(s, t, max_quantized_value_f, att_val); QuantizedOctaherdalCoordsToUnitVector(s, t, max_quantized_value_f, att_val);

View File

@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// //
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_DECODER_H_ #ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_DECODER_H_ #define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
#include "compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h" #include "compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h" #include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
@ -21,11 +21,11 @@
namespace draco { namespace draco {
// Decoder for attributes encoded with MeshNormalAttributeEncoder. // Decoder for attributes encoded with SequentialNormalAttributeEncoder.
// TODO(hemmer): rename to SequentialNormalAttributeDecoder class SequentialNormalAttributeDecoder
class MeshNormalAttributeDecoder : public SequentialIntegerAttributeDecoder { : public SequentialIntegerAttributeDecoder {
public: public:
MeshNormalAttributeDecoder(); SequentialNormalAttributeDecoder();
bool Initialize(PointCloudDecoder *decoder, int attribute_id) override; bool Initialize(PointCloudDecoder *decoder, int attribute_id) override;
protected: protected:
@ -61,4 +61,4 @@ class MeshNormalAttributeDecoder : public SequentialIntegerAttributeDecoder {
} // namespace draco } // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_DECODER_H_ #endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_

View File

@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// //
#include "compression/attributes/mesh_normal_attribute_encoder.h" #include "compression/attributes/sequential_normal_attribute_encoder.h"
#include "compression/attributes/normal_compression_utils.h" #include "compression/attributes/normal_compression_utils.h"
namespace draco { namespace draco {
bool MeshNormalAttributeEncoder::Initialize(PointCloudEncoder *encoder, bool SequentialNormalAttributeEncoder::Initialize(PointCloudEncoder *encoder,
int attribute_id) { int attribute_id) {
if (!SequentialIntegerAttributeEncoder::Initialize(encoder, attribute_id)) if (!SequentialIntegerAttributeEncoder::Initialize(encoder, attribute_id))
return false; return false;
// Currently this encoder works only for 3-component normal vectors. // Currently this encoder works only for 3-component normal vectors.
@ -27,7 +27,7 @@ bool MeshNormalAttributeEncoder::Initialize(PointCloudEncoder *encoder,
return true; return true;
} }
bool MeshNormalAttributeEncoder::PrepareValues( bool SequentialNormalAttributeEncoder::PrepareValues(
const std::vector<PointIndex> &point_ids) { const std::vector<PointIndex> &point_ids) {
// Quantize all encoded values. // Quantize all encoded values.
const int quantization_bits = encoder()->options()->GetAttributeInt( const int quantization_bits = encoder()->options()->GetAttributeInt(
@ -39,7 +39,7 @@ bool MeshNormalAttributeEncoder::PrepareValues(
values()->clear(); values()->clear();
float att_val[3]; float att_val[3];
values()->reserve(point_ids.size() * 2); values()->reserve(point_ids.size() * 2);
for (int i = 0; i < point_ids.size(); ++i) { for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_id = attribute()->mapped_index(point_ids[i]); const AttributeValueIndex att_id = attribute()->mapped_index(point_ids[i]);
attribute()->GetValue(att_id, att_val); attribute()->GetValue(att_id, att_val);
// Encode the vector into a s and t octaherdal coordinates. // Encode the vector into a s and t octaherdal coordinates.

View File

@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
// //
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_ENCODER_H_ #ifndef DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_ENCODER_H_ #define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
#include "compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h" #include "compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h" #include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
@ -27,8 +27,8 @@ namespace draco {
// in a better compression rate under the same accuracy settings. Note that this // in a better compression rate under the same accuracy settings. Note that this
// encoder doesn't preserve the lengths of input vectors, therefore it will not // encoder doesn't preserve the lengths of input vectors, therefore it will not
// work correctly when the input values are not normalized. // work correctly when the input values are not normalized.
// TODO(hemmer): rename to SequentialNormalAttributeEncoder class SequentialNormalAttributeEncoder
class MeshNormalAttributeEncoder : public SequentialIntegerAttributeEncoder { : public SequentialIntegerAttributeEncoder {
public: public:
uint8_t GetUniqueId() const override { uint8_t GetUniqueId() const override {
return SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS; return SEQUENTIAL_ATTRIBUTE_ENCODER_NORMALS;
@ -40,7 +40,7 @@ class MeshNormalAttributeEncoder : public SequentialIntegerAttributeEncoder {
bool PrepareValues(const std::vector<PointIndex> &point_ids) override; bool PrepareValues(const std::vector<PointIndex> &point_ids) override;
std::unique_ptr<PredictionSchemeTypedInterface<int32_t>> std::unique_ptr<PredictionSchemeTypedInterface<int32_t>>
CreateIntPredictionScheme(PredictionSchemeMethod method) override { CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override {
typedef PredictionSchemeNormalOctahedronTransform<int32_t> Transform; typedef PredictionSchemeNormalOctahedronTransform<int32_t> Transform;
const int32_t quantization_bits = encoder()->options()->GetAttributeInt( const int32_t quantization_bits = encoder()->options()->GetAttributeInt(
attribute_id(), "quantization_bits", -1); attribute_id(), "quantization_bits", -1);
@ -53,4 +53,4 @@ class MeshNormalAttributeEncoder : public SequentialIntegerAttributeEncoder {
} // namespace draco } // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_NORMAL_ATTRIBUTE_ENCODER_H_ #endif // DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_

View File

@ -71,7 +71,7 @@ bool SequentialQuantizationAttributeDecoder::DequantizeValues(
int out_byte_pos = 0; int out_byte_pos = 0;
Dequantizer dequantizer; Dequantizer dequantizer;
dequantizer.Init(max_value_dif_, max_quantized_value); dequantizer.Init(max_value_dif_, max_quantized_value);
for (int i = 0; i < num_values; ++i) { for (uint32_t i = 0; i < num_values; ++i) {
for (int c = 0; c < num_components; ++c) { for (int c = 0; c < num_components; ++c) {
float value = dequantizer.DequantizeFloat(values()->at(quant_val_id++)); float value = dequantizer.DequantizeFloat(values()->at(quant_val_id++));
value = value + min_value_[c]; value = value + min_value_[c];

View File

@ -125,7 +125,7 @@ bool SequentialQuantizationAttributeEncoder::QuantizeValues(
const uint32_t max_quantized_value = (1 << (quantization_bits)) - 1; const uint32_t max_quantized_value = (1 << (quantization_bits)) - 1;
Quantizer quantizer; Quantizer quantizer;
quantizer.Init(max_value_dif_, max_quantized_value); quantizer.Init(max_value_dif_, max_quantized_value);
for (int i = 0; i < point_ids.size(); ++i) { for (uint32_t i = 0; i < point_ids.size(); ++i) {
const AttributeValueIndex att_id = attrib->mapped_index(point_ids[i]); const AttributeValueIndex att_id = attrib->mapped_index(point_ids[i]);
attribute()->GetValue(att_id, att_val.get()); attribute()->GetValue(att_id, att_val.get());
for (int c = 0; c < num_components; ++c) { for (int c = 0; c < num_components; ++c) {

View File

@ -32,14 +32,14 @@ EncoderOptions::EncoderOptions() {}
void EncoderOptions::SetGlobalOptions(const Options &o) { global_options_ = o; } void EncoderOptions::SetGlobalOptions(const Options &o) { global_options_ = o; }
void EncoderOptions::SetAttributeOptions(int32_t att_id, const Options &o) { void EncoderOptions::SetAttributeOptions(int32_t att_id, const Options &o) {
if (attribute_options_.size() <= att_id) { if (attribute_options_.size() <= static_cast<size_t>(att_id)) {
attribute_options_.resize(att_id + 1); attribute_options_.resize(att_id + 1);
} }
attribute_options_[att_id] = o; attribute_options_[att_id] = o;
} }
Options *EncoderOptions::GetAttributeOptions(int32_t att_id) { Options *EncoderOptions::GetAttributeOptions(int32_t att_id) {
if (attribute_options_.size() <= att_id) { if (attribute_options_.size() <= static_cast<size_t>(att_id)) {
attribute_options_.resize(att_id + 1); attribute_options_.resize(att_id + 1);
} }
return &attribute_options_[att_id]; return &attribute_options_[att_id];
@ -95,7 +95,7 @@ void EncoderOptions::SetGlobalString(const std::string &name,
void EncoderOptions::SetAttributeInt(int32_t att_id, const std::string &name, void EncoderOptions::SetAttributeInt(int32_t att_id, const std::string &name,
int val) { int val) {
if (att_id >= attribute_options_.size()) { if (att_id >= static_cast<int32_t>(attribute_options_.size())) {
attribute_options_.resize(att_id + 1); attribute_options_.resize(att_id + 1);
} }
attribute_options_[att_id].SetInt(name, val); attribute_options_[att_id].SetInt(name, val);
@ -103,7 +103,7 @@ void EncoderOptions::SetAttributeInt(int32_t att_id, const std::string &name,
void EncoderOptions::SetAttributeBool(int32_t att_id, const std::string &name, void EncoderOptions::SetAttributeBool(int32_t att_id, const std::string &name,
bool val) { bool val) {
if (att_id >= attribute_options_.size()) { if (att_id >= static_cast<int32_t>(attribute_options_.size())) {
attribute_options_.resize(att_id + 1); attribute_options_.resize(att_id + 1);
} }
attribute_options_[att_id].SetBool(name, val); attribute_options_[att_id].SetBool(name, val);
@ -111,7 +111,7 @@ void EncoderOptions::SetAttributeBool(int32_t att_id, const std::string &name,
void EncoderOptions::SetAttributeString(int32_t att_id, const std::string &name, void EncoderOptions::SetAttributeString(int32_t att_id, const std::string &name,
const std::string &val) { const std::string &val) {
if (att_id >= attribute_options_.size()) { if (att_id >= static_cast<int32_t>(attribute_options_.size())) {
attribute_options_.resize(att_id + 1); attribute_options_.resize(att_id + 1);
} }
attribute_options_[att_id].SetString(name, val); attribute_options_[att_id].SetString(name, val);
@ -119,7 +119,7 @@ void EncoderOptions::SetAttributeString(int32_t att_id, const std::string &name,
int EncoderOptions::GetAttributeInt(int32_t att_id, const std::string &name, int EncoderOptions::GetAttributeInt(int32_t att_id, const std::string &name,
int default_val) const { int default_val) const {
if (att_id < attribute_options_.size()) { if (att_id < static_cast<int32_t>(attribute_options_.size())) {
if (attribute_options_[att_id].IsOptionSet(name)) if (attribute_options_[att_id].IsOptionSet(name))
return attribute_options_[att_id].GetInt(name, default_val); return attribute_options_[att_id].GetInt(name, default_val);
} }
@ -128,7 +128,7 @@ int EncoderOptions::GetAttributeInt(int32_t att_id, const std::string &name,
bool EncoderOptions::GetAttributeBool(int32_t att_id, const std::string &name, bool EncoderOptions::GetAttributeBool(int32_t att_id, const std::string &name,
bool default_val) const { bool default_val) const {
if (att_id < attribute_options_.size()) { if (att_id < static_cast<int32_t>(attribute_options_.size())) {
if (attribute_options_[att_id].IsOptionSet(name)) if (attribute_options_[att_id].IsOptionSet(name))
return attribute_options_[att_id].GetBool(name, default_val); return attribute_options_[att_id].GetBool(name, default_val);
} }
@ -138,7 +138,7 @@ bool EncoderOptions::GetAttributeBool(int32_t att_id, const std::string &name,
std::string EncoderOptions::GetAttributeString( std::string EncoderOptions::GetAttributeString(
int32_t att_id, const std::string &name, int32_t att_id, const std::string &name,
const std::string &default_val) const { const std::string &default_val) const {
if (att_id < attribute_options_.size()) { if (att_id < static_cast<int32_t>(attribute_options_.size())) {
if (attribute_options_[att_id].IsOptionSet(name)) if (attribute_options_[att_id].IsOptionSet(name))
return attribute_options_[att_id].GetString(name, default_val); return attribute_options_[att_id].GetString(name, default_val);
} }

View File

@ -57,7 +57,9 @@ bool EncodePointCloudToBuffer(const PointCloud &pc,
const EncoderOptions &options, const EncoderOptions &options,
EncoderBuffer *out_buffer) { EncoderBuffer *out_buffer) {
std::unique_ptr<PointCloudEncoder> encoder; std::unique_ptr<PointCloudEncoder> encoder;
if (options.GetSpeed() < 10 && pc.num_attributes() == 1) { const int encoding_method = options.GetGlobalInt("encoding_method", -1);
if (encoding_method == POINT_CLOUD_KD_TREE_ENCODING ||
(options.GetSpeed() < 10 && pc.num_attributes() == 1)) {
const PointAttribute *const att = pc.attribute(0); const PointAttribute *const att = pc.attribute(0);
bool create_kd_tree_encoder = true; bool create_kd_tree_encoder = true;
// Kd-Tree encoder can be currently used only under following conditions: // Kd-Tree encoder can be currently used only under following conditions:
@ -76,9 +78,13 @@ bool EncodePointCloudToBuffer(const PointCloud &pc,
options.GetAttributeInt(0, "quantization_bits", -1) <= 0) options.GetAttributeInt(0, "quantization_bits", -1) <= 0)
create_kd_tree_encoder = false; // Quantization not enabled. create_kd_tree_encoder = false; // Quantization not enabled.
if (create_kd_tree_encoder) { if (create_kd_tree_encoder) {
// Create kD-tree encoder. // Create kD-tree encoder (all checks passed).
encoder = encoder =
std::unique_ptr<PointCloudEncoder>(new PointCloudKdTreeEncoder()); std::unique_ptr<PointCloudEncoder>(new PointCloudKdTreeEncoder());
} else if (encoding_method == POINT_CLOUD_KD_TREE_ENCODING) {
// Encoding method was explicitly specified but we cannot use it for the
// given input (some of the checks above failed).
return false;
} }
} }
if (!encoder) { if (!encoder) {
@ -143,6 +149,10 @@ void SetUseBuiltInAttributeCompression(EncoderOptions *options, bool enabled) {
enabled); enabled);
} }
void SetEncodingMethod(EncoderOptions *options, int encoding_method) {
options->GetGlobalOptions()->SetInt("encoding_method", encoding_method);
}
void SetNamedAttributePredictionScheme(EncoderOptions *options, void SetNamedAttributePredictionScheme(EncoderOptions *options,
const PointCloud &pc, const PointCloud &pc,
GeometryAttribute::Type type, GeometryAttribute::Type type,

View File

@ -73,6 +73,23 @@ void SetAttributeQuantization(Options *options, int quantization_bits);
// Default: [true]. // Default: [true].
void SetUseBuiltInAttributeCompression(EncoderOptions *options, bool enabled); void SetUseBuiltInAttributeCompression(EncoderOptions *options, bool enabled);
// Sets the desired encoding method for a given geometry. By default, encoding
// method is selected based on the properties of the input geometry and based on
// the other options selected in the used EncoderOptions (such as desired
// encoding and decoding speed). This function should be called only when a
// specific method is required.
//
// |encoding_method| can be one of the following as defined in
// compression/config/compression_shared.h :
// POINT_CLOUD_SEQUENTIAL_ENCODING
// POINT_CLOUD_KD_TREE_ENCODING
// MESH_SEQUENTIAL_ENCODING
// MESH_EDGEBREAKER_ENCODING
//
// If the selected method cannot be used for the given input, the subsequent
// call of EncodePointCloudToBuffer or EncodeMeshToBuffer is going to fail.
void SetEncodingMethod(EncoderOptions *options, int encoding_method);
// Sets the desired prediction method for a given attribute. By default, // Sets the desired prediction method for a given attribute. By default,
// prediction scheme is selected automatically by the encoder using other // prediction scheme is selected automatically by the encoder using other
// provided options (such as speed) and input geometry type (mesh, point cloud). // provided options (such as speed) and input geometry type (mesh, point cloud).

View File

@ -41,14 +41,14 @@ class MeshDecoder : public PointCloudDecoder {
// Returns the attribute connectivity data or nullptr if it does not exist. // Returns the attribute connectivity data or nullptr if it does not exist.
virtual const MeshAttributeCornerTable *GetAttributeCornerTable( virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
int att_id) const { int /* att_id */) const {
return nullptr; return nullptr;
} }
// Returns the decoding data for a given attribute or nullptr when the data // Returns the decoding data for a given attribute or nullptr when the data
// does not exist. // does not exist.
virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData( virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
int att_id) const { int /* att_id */) const {
return nullptr; return nullptr;
} }

View File

@ -39,17 +39,6 @@ namespace draco {
// For more description about how the edges are used, see comment inside // For more description about how the edges are used, see comment inside
// ZipConnectivity() method. // ZipConnectivity() method.
// The initial status of all edges.
static constexpr CornerIndex EDGEBREAKER_FREE_EDGE_DEFAULT(kInvalidCornerIndex);
// A mark assigned to free edge that was part of a face encoded as
// with TOPOLOGY_C (or the init face).
static constexpr CornerIndex EDGEBREAKER_FREE_EDGE_A(kInvalidCornerIndex + 1);
// A mark assigned to free edge that was created when processing faces
// with other topologies.
static constexpr CornerIndex EDGEBREAKER_FREE_EDGE_B(kInvalidCornerIndex + 2);
template <class TraversalDecoder> template <class TraversalDecoder>
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::MeshEdgeBreakerDecoderImpl() MeshEdgeBreakerDecoderImpl<TraversalDecoder>::MeshEdgeBreakerDecoderImpl()
: decoder_(nullptr), : decoder_(nullptr),
@ -71,7 +60,7 @@ template <class TraversalDecoder>
const MeshAttributeCornerTable * const MeshAttributeCornerTable *
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeCornerTable( MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeCornerTable(
int att_id) const { int att_id) const {
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
const AttributesDecoder *const dec = const AttributesDecoder *const dec =
decoder_->attributes_decoder(attribute_data_[i].decoder_id); decoder_->attributes_decoder(attribute_data_[i].decoder_id);
for (int j = 0; j < dec->num_attributes(); ++j) { for (int j = 0; j < dec->num_attributes(); ++j) {
@ -89,7 +78,7 @@ template <class TraversalDecoder>
const MeshAttributeIndicesEncodingData * const MeshAttributeIndicesEncodingData *
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeEncodingData( MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeEncodingData(
int att_id) const { int att_id) const {
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
const AttributesDecoder *const dec = const AttributesDecoder *const dec =
decoder_->attributes_decoder(attribute_data_[i].decoder_id); decoder_->attributes_decoder(attribute_data_[i].decoder_id);
for (int j = 0; j < dec->num_attributes(); ++j) { for (int j = 0; j < dec->num_attributes(); ++j) {
@ -295,7 +284,7 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeConnectivity() {
// Decode attribute connectivity. // Decode attribute connectivity.
// Prepare data structure for decoding non-position attribute connectivites. // Prepare data structure for decoding non-position attribute connectivites.
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
attribute_data_[i].connectivity_data.InitEmpty(corner_table_.get()); attribute_data_[i].connectivity_data.InitEmpty(corner_table_.get());
// Add all seams. // Add all seams.
for (int32_t c : attribute_data_[i].attribute_seam_corners) { for (int32_t c : attribute_data_[i].attribute_seam_corners) {
@ -307,7 +296,7 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeConnectivity() {
pos_encoding_data_.vertex_to_encoded_attribute_value_index_map.resize( pos_encoding_data_.vertex_to_encoded_attribute_value_index_map.resize(
corner_table_->num_vertices()); corner_table_->num_vertices());
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
// For non-position attributes, preallocate the vertex to value mapping // For non-position attributes, preallocate the vertex to value mapping
// using the maximum number of vertices from the base corner table and the // using the maximum number of vertices from the base corner table and the
// attribute corner table (since the attribute decoder may use either of // attribute corner table (since the attribute decoder may use either of
@ -621,7 +610,7 @@ MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeHoleAndTopologySplitEvents(
uint32_t num_topology_splits; uint32_t num_topology_splits;
if (!decoder_buffer->Decode(&num_topology_splits)) if (!decoder_buffer->Decode(&num_topology_splits))
return -1; return -1;
for (int i = 0; i < num_topology_splits; ++i) { for (uint32_t i = 0; i < num_topology_splits; ++i) {
TopologySplitEventData event_data; TopologySplitEventData event_data;
if (!decoder_buffer->Decode(&event_data.split_symbol_id)) if (!decoder_buffer->Decode(&event_data.split_symbol_id))
return -1; return -1;
@ -637,7 +626,7 @@ MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeHoleAndTopologySplitEvents(
uint32_t num_hole_events; uint32_t num_hole_events;
if (!decoder_buffer->Decode(&num_hole_events)) if (!decoder_buffer->Decode(&num_hole_events))
return -1; return -1;
for (int i = 0; i < num_hole_events; ++i) { for (uint32_t i = 0; i < num_hole_events; ++i) {
HoleEventData event_data; HoleEventData event_data;
if (!decoder_buffer->Decode(&event_data)) if (!decoder_buffer->Decode(&event_data))
return -1; return -1;
@ -658,13 +647,13 @@ bool MeshEdgeBreakerDecoderImpl<
if (opp_corner < 0) { if (opp_corner < 0) {
// Don't decode attribute seams on boundary edges (every boundary edge // Don't decode attribute seams on boundary edges (every boundary edge
// is automatically an attribute seam). // is automatically an attribute seam).
for (int32_t i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
attribute_data_[i].attribute_seam_corners.push_back(corners[c].value()); attribute_data_[i].attribute_seam_corners.push_back(corners[c].value());
} }
continue; continue;
} }
for (int32_t i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
const bool is_seam = traversal_decoder_.DecodeAttributeSeam(i); const bool is_seam = traversal_decoder_.DecodeAttributeSeam(i);
if (is_seam) if (is_seam)
attribute_data_[i].attribute_seam_corners.push_back(corners[c].value()); attribute_data_[i].attribute_seam_corners.push_back(corners[c].value());
@ -724,7 +713,7 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::AssignPointsToCorners() {
} else { } else {
// If we are not on the boundary we need to find the first seam (of any // If we are not on the boundary we need to find the first seam (of any
// attribute). // attribute).
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (!attribute_data_[i].connectivity_data.IsCornerOnSeam(c)) if (!attribute_data_[i].connectivity_data.IsCornerOnSeam(c))
continue; // No seam for this attribute, ignore it. continue; // No seam for this attribute, ignore it.
// Else there needs to be at least one seam edge. // Else there needs to be at least one seam edge.
@ -762,7 +751,7 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::AssignPointsToCorners() {
c = corner_table_->SwingRight(c); c = corner_table_->SwingRight(c);
while (c >= 0 && c != deduplication_first_corner) { while (c >= 0 && c != deduplication_first_corner) {
bool attribute_seam = false; bool attribute_seam = false;
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (attribute_data_[i].connectivity_data.Vertex(c) != if (attribute_data_[i].connectivity_data.Vertex(c) !=
attribute_data_[i].connectivity_data.Vertex(prev_c)) { attribute_data_[i].connectivity_data.Vertex(prev_c)) {
// Attribute index changed from the previous corner. We need to add a // Attribute index changed from the previous corner. We need to add a

View File

@ -52,7 +52,7 @@ template <class TraversalEncoder>
const MeshAttributeCornerTable * const MeshAttributeCornerTable *
MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetAttributeCornerTable( MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetAttributeCornerTable(
int att_id) const { int att_id) const {
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (attribute_data_[i].attribute_index == att_id) { if (attribute_data_[i].attribute_index == att_id) {
if (attribute_data_[i].is_connectivity_used) if (attribute_data_[i].is_connectivity_used)
return &attribute_data_[i].connectivity_data; return &attribute_data_[i].connectivity_data;
@ -66,7 +66,7 @@ template <class TraversalEncoder>
const MeshAttributeIndicesEncodingData * const MeshAttributeIndicesEncodingData *
MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetAttributeEncodingData( MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetAttributeEncodingData(
int att_id) const { int att_id) const {
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (attribute_data_[i].attribute_index == att_id) if (attribute_data_[i].attribute_index == att_id)
return &attribute_data_[i].encoding_data; return &attribute_data_[i].encoding_data;
} }
@ -84,7 +84,7 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
const PointAttribute *const att = const PointAttribute *const att =
GetEncoder()->point_cloud()->attribute(att_id); GetEncoder()->point_cloud()->attribute(att_id);
int32_t att_data_id = -1; int32_t att_data_id = -1;
for (int i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (attribute_data_[i].attribute_index == att_id) { if (attribute_data_[i].attribute_index == att_id) {
att_data_id = i; att_data_id = i;
break; break;
@ -348,7 +348,7 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::EncodeConnectivity() {
// Encode topology split events. // Encode topology split events.
uint32_t num_events = topology_split_event_data_.size(); uint32_t num_events = topology_split_event_data_.size();
encoder_->buffer()->Encode(num_events); encoder_->buffer()->Encode(num_events);
for (int i = 0; i < num_events; ++i) { for (uint32_t i = 0; i < num_events; ++i) {
// TODO(ostava): We can do a better encoding of the event data but it's not // TODO(ostava): We can do a better encoding of the event data but it's not
// really needed for now. // really needed for now.
const TopologySplitEventData &event_data = topology_split_event_data_[i]; const TopologySplitEventData &event_data = topology_split_event_data_[i];
@ -361,7 +361,7 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::EncodeConnectivity() {
// Encode hole events data. // Encode hole events data.
num_events = hole_event_data_.size(); num_events = hole_event_data_.size();
encoder_->buffer()->Encode(num_events); encoder_->buffer()->Encode(num_events);
for (int i = 0; i < num_events; ++i) { for (uint32_t i = 0; i < num_events; ++i) {
// TODO(ostava): We can do a better encoding of the event data but it's not // TODO(ostava): We can do a better encoding of the event data but it's not
// really needed for now. // really needed for now.
// This should be also made platform independent. // This should be also made platform independent.
@ -653,7 +653,7 @@ int MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetSplitSymbolIdOnFace(
template <class TraversalEncoder> template <class TraversalEncoder>
void MeshEdgeBreakerEncoderImpl< void MeshEdgeBreakerEncoderImpl<
TraversalEncoder>::CheckAndStoreTopologySplitEvent(int src_symbol_id, TraversalEncoder>::CheckAndStoreTopologySplitEvent(int src_symbol_id,
int src_face_id, int /* src_face_id */,
EdgeFaceName src_edge, EdgeFaceName src_edge,
int neighbor_face_id) { int neighbor_face_id) {
const int symbol_id = GetSplitSymbolIdOnFace(neighbor_face_id); const int symbol_id = GetSplitSymbolIdOnFace(neighbor_face_id);
@ -719,7 +719,7 @@ bool MeshEdgeBreakerEncoderImpl<
if (opp_corner < 0) if (opp_corner < 0)
continue; // Don't encode attribute seams on boundary edges. continue; // Don't encode attribute seams on boundary edges.
for (int32_t i = 0; i < attribute_data_.size(); ++i) { for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
if (attribute_data_[i].connectivity_data.IsCornerOppositeToSeamEdge( if (attribute_data_[i].connectivity_data.IsCornerOppositeToSeamEdge(
corners[c])) { corners[c])) {
traversal_encoder_.EncodeAttributeSeam(i, true); traversal_encoder_.EncodeAttributeSeam(i, true);

View File

@ -40,7 +40,7 @@ class MeshEdgeBreakerTraversalDecoder {
// Used to tell the decoder what is the number of expected decoded vertices. // Used to tell the decoder what is the number of expected decoded vertices.
// Ignored by default. // Ignored by default.
void SetNumEncodedVertices(int num_vertices) {} void SetNumEncodedVertices(int /* num_vertices */) {}
// Set the number of non-position attribute data for which we need to decode // Set the number of non-position attribute data for which we need to decode
// the connectivity. // the connectivity.
@ -76,30 +76,30 @@ class MeshEdgeBreakerTraversalDecoder {
// Returns the configuration of a new initial face. // Returns the configuration of a new initial face.
inline bool DecodeStartFaceConfiguration() { inline bool DecodeStartFaceConfiguration() {
uint32_t face_configuration; uint32_t face_configuration;
start_face_buffer_.DecodeBits32(1, &face_configuration); start_face_buffer_.DecodeLeastSignificantBits32(1, &face_configuration);
return face_configuration; return face_configuration;
} }
// Returns the next edgebreaker symbol that was reached during the traversal. // Returns the next edgebreaker symbol that was reached during the traversal.
inline uint32_t DecodeSymbol() { inline uint32_t DecodeSymbol() {
uint32_t symbol; uint32_t symbol;
buffer_.DecodeBits32(1, &symbol); buffer_.DecodeLeastSignificantBits32(1, &symbol);
if (symbol == TOPOLOGY_C) { if (symbol == TOPOLOGY_C) {
return symbol; return symbol;
} }
// Else decode two additional bits. // Else decode two additional bits.
uint32_t symbol_suffix; uint32_t symbol_suffix;
buffer_.DecodeBits32(2, &symbol_suffix); buffer_.DecodeLeastSignificantBits32(2, &symbol_suffix);
symbol |= (symbol_suffix << 1); symbol |= (symbol_suffix << 1);
return symbol; return symbol;
} }
// Called whenever a new active corner is set in the decoder. // Called whenever a new active corner is set in the decoder.
inline void NewActiveCornerReached(CornerIndex corner) {} inline void NewActiveCornerReached(CornerIndex /* corner */) {}
// Called whenever |source| vertex is about to be merged into the |dest| // Called whenever |source| vertex is about to be merged into the |dest|
// vertex. // vertex.
inline void MergeVertices(VertexIndex dest, VertexIndex source) {} inline void MergeVertices(VertexIndex /* dest */, VertexIndex /* source */) {}
// Returns true if there is an attribute seam for the next processed pair // Returns true if there is an attribute seam for the next processed pair
// of visited faces. // of visited faces.

View File

@ -55,12 +55,12 @@ class MeshEdgeBreakerTraversalEncoder {
// Called when a traversal starts from a new initial face. // Called when a traversal starts from a new initial face.
inline void EncodeStartFaceConfiguration(bool interior) { inline void EncodeStartFaceConfiguration(bool interior) {
start_face_buffer_.EncodeBits32(interior ? 1 : 0, 1); start_face_buffer_.EncodeLeastSignificantBits32(1, interior ? 1 : 0);
} }
// Called when a new corner is reached during the traversal. No-op for the // Called when a new corner is reached during the traversal. No-op for the
// default encoder. // default encoder.
inline void NewCornerReached(CornerIndex corner) {} inline void NewCornerReached(CornerIndex /* corner */) {}
// Called whenever a new symbol is reached during the edgebreaker traversal. // Called whenever a new symbol is reached during the edgebreaker traversal.
inline void EncodeSymbol(EdgeBreakerTopologyBitPattern symbol) { inline void EncodeSymbol(EdgeBreakerTopologyBitPattern symbol) {
@ -84,8 +84,8 @@ class MeshEdgeBreakerTraversalEncoder {
traversal_buffer_.StartBitEncoding( traversal_buffer_.StartBitEncoding(
encoder_impl_->GetEncoder()->mesh()->num_faces() * 3, true); encoder_impl_->GetEncoder()->mesh()->num_faces() * 3, true);
for (int i = symbols_.size() - 1; i >= 0; --i) { for (int i = symbols_.size() - 1; i >= 0; --i) {
traversal_buffer_.EncodeBits32( traversal_buffer_.EncodeLeastSignificantBits32(
symbols_[i], edge_breaker_topology_bit_pattern_length[symbols_[i]]); edge_breaker_topology_bit_pattern_length[symbols_[i]], symbols_[i]);
} }
traversal_buffer_.EndBitEncoding(); traversal_buffer_.EndBitEncoding();
traversal_buffer_.Encode(start_face_buffer_.data(), traversal_buffer_.Encode(start_face_buffer_.data(),

View File

@ -40,7 +40,7 @@ class MeshEdgeBreakerTraversalPredictiveEncoder
corner_table_ = encoder->GetCornerTable(); corner_table_ = encoder->GetCornerTable();
// Initialize valences of all vertices. // Initialize valences of all vertices.
vertex_valences_.resize(corner_table_->num_vertices()); vertex_valences_.resize(corner_table_->num_vertices());
for (int i = 0; i < vertex_valences_.size(); ++i) { for (uint32_t i = 0; i < vertex_valences_.size(); ++i) {
vertex_valences_[i] = corner_table_->Valence(VertexIndex(i)); vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
} }
} }

View File

@ -42,14 +42,14 @@ class MeshEncoder : public PointCloudEncoder {
// Returns the attribute connectivity data or nullptr if it does not exist. // Returns the attribute connectivity data or nullptr if it does not exist.
virtual const MeshAttributeCornerTable *GetAttributeCornerTable( virtual const MeshAttributeCornerTable *GetAttributeCornerTable(
int att_id) const { int /* att_id */) const {
return nullptr; return nullptr;
} }
// Returns the encoding data for a given attribute or nullptr when the data // Returns the encoding data for a given attribute or nullptr when the data
// does not exist. // does not exist.
virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData( virtual const MeshAttributeIndicesEncodingData *GetAttributeEncodingData(
int att_id) const { int /* att_id */) const {
return nullptr; return nullptr;
} }

View File

@ -0,0 +1,93 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "compression/mesh/mesh_encoder.h"
#include "compression/encode.h"
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/obj_decoder.h"
namespace draco {
class MeshEncoderTest : public ::testing::TestWithParam<const char *> {
protected:
MeshEncoderTest() {}
// Fills out_method with id of the encoding method used for the test.
// Returns false if the encoding method is not set properly.
bool GetMethod(MeshEncoderMethod *out_method) const {
if (strcmp(GetParam(), "sequential") == 0) {
*out_method = MESH_SEQUENTIAL_ENCODING;
return true;
}
if (strcmp(GetParam(), "edgebreaker") == 0) {
*out_method = MESH_EDGEBREAKER_ENCODING;
return true;
}
return false;
}
std::unique_ptr<Mesh> DecodeObj(const std::string &file_name) const {
const std::string path = GetTestFileFullPath(file_name);
std::unique_ptr<Mesh> mesh(new Mesh());
ObjDecoder decoder;
if (!decoder.DecodeFromFile(path, mesh.get()))
return nullptr;
return mesh;
}
};
TEST_P(MeshEncoderTest, EncodeGoldenMesh) {
// This test verifies that a given set of meshes are encoded to an expected
// output. This is useful for catching bugs in code changes that are not
// supposed to change the encoding.
// The test is expected to fail when the encoding is modified. In such case,
// the golden files need to be updated to reflect the changes.
MeshEncoderMethod method;
ASSERT_TRUE(GetMethod(&method))
<< "Test is run for an unknown encoding method";
const std::string file_name = "test_nm.obj";
std::string golden_file_name = file_name;
golden_file_name += '.';
golden_file_name += GetParam();
golden_file_name += ".out";
const std::unique_ptr<Mesh> mesh(DecodeObj(file_name));
ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
EncoderOptions options = CreateDefaultEncoderOptions();
EncoderBuffer buffer;
ASSERT_TRUE(EncodeMeshToBuffer(*mesh.get(), options, &buffer))
<< "Failed encoding test mesh " << file_name << " with method "
<< GetParam();
if (!FLAGS_update_golden_files) {
EXPECT_TRUE(
CompareGoldenFile(golden_file_name, buffer.data(), buffer.size()))
<< "Encoded data is different from the golden file. Please verify that "
"the"
" encoding works as expected and update the golden file if necessary"
" (run the test with --update_golden_files flag).";
} else {
// Save the files into the local folder.
EXPECT_TRUE(
GenerateGoldenFile(golden_file_name, buffer.data(), buffer.size()))
<< "Failed to generate new golden file for " << file_name;
}
}
INSTANTIATE_TEST_CASE_P(MeshEncoderTests, MeshEncoderTest,
::testing::Values("sequential", "edgebreaker"));
} // namespace draco

View File

@ -43,7 +43,7 @@ namespace draco {
// there are more leading zeros, which is then compressed better by the // there are more leading zeros, which is then compressed better by the
// arithmetic encoding. // arithmetic encoding.
// TODO(hemmer): Make name consistent with other point cloud encoders. // TODO(hemmer): Remove class because it duplicates quantization code.
class FloatPointsKdTreeEncoder { class FloatPointsKdTreeEncoder {
public: public:
FloatPointsKdTreeEncoder(); FloatPointsKdTreeEncoder();

View File

@ -16,8 +16,6 @@
#include "compression/point_cloud/algorithms/point_cloud_types.h" #include "compression/point_cloud/algorithms/point_cloud_types.h"
// TODO(hemmer): make independent from dimension 3
namespace draco { namespace draco {
template class IntegerPointsKdTreeDecoder<Point3ui, 0>; template class IntegerPointsKdTreeDecoder<Point3ui, 0>;
@ -32,4 +30,16 @@ template class IntegerPointsKdTreeDecoder<Point3ui, 8>;
template class IntegerPointsKdTreeDecoder<Point3ui, 9>; template class IntegerPointsKdTreeDecoder<Point3ui, 9>;
template class IntegerPointsKdTreeDecoder<Point3ui, 10>; template class IntegerPointsKdTreeDecoder<Point3ui, 10>;
template class IntegerPointsKdTreeDecoder<Point4ui, 0>;
template class IntegerPointsKdTreeDecoder<Point4ui, 1>;
template class IntegerPointsKdTreeDecoder<Point4ui, 2>;
template class IntegerPointsKdTreeDecoder<Point4ui, 3>;
template class IntegerPointsKdTreeDecoder<Point4ui, 4>;
template class IntegerPointsKdTreeDecoder<Point4ui, 5>;
template class IntegerPointsKdTreeDecoder<Point4ui, 6>;
template class IntegerPointsKdTreeDecoder<Point4ui, 7>;
template class IntegerPointsKdTreeDecoder<Point4ui, 8>;
template class IntegerPointsKdTreeDecoder<Point4ui, 9>;
template class IntegerPointsKdTreeDecoder<Point4ui, 10>;
} // namespace draco } // namespace draco

View File

@ -85,7 +85,6 @@ struct IntegerPointsKdTreeDecoderCompressionPolicy<10>
// Decodes a point cloud encoded by IntegerPointsKdTreeEncoder. // Decodes a point cloud encoded by IntegerPointsKdTreeEncoder.
// |PointDiT| is a type representing a point with uint32_t coordinates. // |PointDiT| is a type representing a point with uint32_t coordinates.
// must provide construction from three uint32_t and operator[]. // must provide construction from three uint32_t and operator[].
// TODO(hemmer): compression_level_t_t
template <class PointDiT, int compression_level_t> template <class PointDiT, int compression_level_t>
class IntegerPointsKdTreeDecoder { class IntegerPointsKdTreeDecoder {
typedef IntegerPointsKdTreeDecoderCompressionPolicy<compression_level_t> typedef IntegerPointsKdTreeDecoderCompressionPolicy<compression_level_t>
@ -104,11 +103,11 @@ class IntegerPointsKdTreeDecoder {
bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT oit); bool DecodePoints(DecoderBuffer *buffer, OutputIteratorT oit);
private: private:
// For the sack of readability of code, we decided to make this exception // For the sake of readability of code, we decided to make this exception
// from the naming scheme. // from the naming scheme.
static constexpr int D = PointTraits<PointDiT>::Dimension(); static constexpr int D = PointTraits<PointDiT>::Dimension();
uint32_t GetAxis(uint32_t num_remaining_points, PointDiT base, uint32_t GetAxis(uint32_t num_remaining_points, const PointDiT &base,
std::array<uint32_t, D> levels, uint32_t last_axis); std::array<uint32_t, D> levels, uint32_t last_axis);
template <class OutputIteratorT> template <class OutputIteratorT>
@ -117,12 +116,12 @@ class IntegerPointsKdTreeDecoder {
OutputIteratorT oit); OutputIteratorT oit);
void DecodeNumber(int nbits, uint32_t *value) { void DecodeNumber(int nbits, uint32_t *value) {
numbers_decoder_.DecodeBits32(nbits, value); numbers_decoder_.DecodeLeastSignificantBits32(nbits, value);
} }
struct DecodingStatus { struct DecodingStatus {
DecodingStatus( DecodingStatus(
uint32_t num_remaining_points_, PointDiT old_base_, uint32_t num_remaining_points_, const PointDiT &old_base_,
std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_, std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_,
uint32_t last_axis_) uint32_t last_axis_)
: num_remaining_points(num_remaining_points_), : num_remaining_points(num_remaining_points_),
@ -175,7 +174,7 @@ bool IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::DecodePoints(
template <class PointDiT, int compression_level_t> template <class PointDiT, int compression_level_t>
uint32_t IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::GetAxis( uint32_t IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::GetAxis(
uint32_t num_remaining_points, PointDiT base, uint32_t num_remaining_points, const PointDiT & /* base */,
std::array<uint32_t, D> levels, uint32_t last_axis) { std::array<uint32_t, D> levels, uint32_t last_axis) {
if (!Policy::select_axis) if (!Policy::select_axis)
return DRACO_INCREMENT_MOD(last_axis, D); return DRACO_INCREMENT_MOD(last_axis, D);
@ -188,7 +187,7 @@ uint32_t IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::GetAxis(
} }
} }
} else { } else {
axis_decoder_.DecodeBits32(4, &best_axis); axis_decoder_.DecodeLeastSignificantBits32(4, &best_axis);
} }
return best_axis; return best_axis;
@ -219,7 +218,7 @@ void IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::OctreeDecode(
// All axes have been fully subdivided, just output points. // All axes have been fully subdivided, just output points.
if ((bit_length_ - level) == 0) { if ((bit_length_ - level) == 0) {
for (int i = 0; i < num_remaining_points; i++) { for (int i = 0; i < static_cast<int>(num_remaining_points); i++) {
*oit++ = old_base; *oit++ = old_base;
} }
continue; continue;
@ -238,14 +237,14 @@ void IntegerPointsKdTreeDecoder<PointDiT, compression_level_t>::OctreeDecode(
num_remaining_bits[i] = bit_length_ - levels[axes[i]]; num_remaining_bits[i] = bit_length_ - levels[axes[i]];
} }
for (int i = 0; i < num_remaining_points; ++i) { for (uint32_t i = 0; i < num_remaining_points; ++i) {
// Get remaining bits, mind the carry if not starting at x. // Get remaining bits, mind the carry if not starting at x.
PointDiT p = PointTraits<PointDiT>::Origin(); PointDiT p = PointTraits<PointDiT>::Origin();
for (int i = 0; i < D; i++) { for (int j = 0; j < static_cast<int>(D); j++) {
if (num_remaining_bits[i]) if (num_remaining_bits[j])
remaining_bits_decoder_.DecodeBits32(num_remaining_bits[i], remaining_bits_decoder_.DecodeLeastSignificantBits32(
&p[axes[i]]); num_remaining_bits[j], &p[axes[j]]);
p[axes[i]] = old_base[axes[i]] | p[axes[i]]; p[axes[j]] = old_base[axes[j]] | p[axes[j]];
} }
*oit++ = p; *oit++ = p;
} }

View File

@ -16,8 +16,6 @@
#include "compression/point_cloud/algorithms/point_cloud_types.h" #include "compression/point_cloud/algorithms/point_cloud_types.h"
// TODO(hemmer): make independent from dimension 3
namespace draco { namespace draco {
template class IntegerPointsKdTreeEncoder<Point3ui, 0>; template class IntegerPointsKdTreeEncoder<Point3ui, 0>;
@ -32,4 +30,16 @@ template class IntegerPointsKdTreeEncoder<Point3ui, 8>;
template class IntegerPointsKdTreeEncoder<Point3ui, 9>; template class IntegerPointsKdTreeEncoder<Point3ui, 9>;
template class IntegerPointsKdTreeEncoder<Point3ui, 10>; template class IntegerPointsKdTreeEncoder<Point3ui, 10>;
template class IntegerPointsKdTreeEncoder<Point4ui, 0>;
template class IntegerPointsKdTreeEncoder<Point4ui, 1>;
template class IntegerPointsKdTreeEncoder<Point4ui, 2>;
template class IntegerPointsKdTreeEncoder<Point4ui, 3>;
template class IntegerPointsKdTreeEncoder<Point4ui, 4>;
template class IntegerPointsKdTreeEncoder<Point4ui, 5>;
template class IntegerPointsKdTreeEncoder<Point4ui, 6>;
template class IntegerPointsKdTreeEncoder<Point4ui, 7>;
template class IntegerPointsKdTreeEncoder<Point4ui, 8>;
template class IntegerPointsKdTreeEncoder<Point4ui, 9>;
template class IntegerPointsKdTreeEncoder<Point4ui, 10>;
} // namespace draco } // namespace draco

View File

@ -137,7 +137,7 @@ class IntegerPointsKdTreeEncoder {
static constexpr int D = PointTraits<PointDiT>::Dimension(); static constexpr int D = PointTraits<PointDiT>::Dimension();
template <class RandomAccessIteratorT> template <class RandomAccessIteratorT>
uint32_t GetAxis(RandomAccessIteratorT begin, RandomAccessIteratorT end, uint32_t GetAxis(RandomAccessIteratorT begin, RandomAccessIteratorT end,
PointDiT old_base, std::array<uint32_t, D> levels, const PointDiT &old_base, std::array<uint32_t, D> levels,
uint32_t last_axis); uint32_t last_axis);
template <class RandomAccessIteratorT> template <class RandomAccessIteratorT>
@ -156,14 +156,14 @@ class IntegerPointsKdTreeEncoder {
}; };
void EncodeNumber(int nbits, uint32_t value) { void EncodeNumber(int nbits, uint32_t value) {
numbers_encoder_.EncodeBits32(nbits, value); numbers_encoder_.EncodeLeastSignificantBits32(nbits, value);
} }
template <class RandomAccessIteratorT> template <class RandomAccessIteratorT>
struct EncodingStatus { struct EncodingStatus {
EncodingStatus( EncodingStatus(
RandomAccessIteratorT begin_, RandomAccessIteratorT end_, RandomAccessIteratorT begin_, RandomAccessIteratorT end_,
PointDiT old_base_, const PointDiT &old_base_,
std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_, std::array<uint32_t, PointTraits<PointDiT>::Dimension()> levels_,
uint32_t last_axis_) uint32_t last_axis_)
: begin(begin_), : begin(begin_),
@ -224,8 +224,9 @@ bool IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::EncodePoints(
template <class PointDiT, int compression_level_t> template <class PointDiT, int compression_level_t>
template <class RandomAccessIteratorT> template <class RandomAccessIteratorT>
uint32_t IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::GetAxis( uint32_t IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::GetAxis(
RandomAccessIteratorT begin, RandomAccessIteratorT end, PointDiT old_base, RandomAccessIteratorT begin, RandomAccessIteratorT end,
std::array<uint32_t, D> levels, uint32_t last_axis) { const PointDiT &old_base, std::array<uint32_t, D> levels,
uint32_t last_axis) {
if (!Policy::select_axis) if (!Policy::select_axis)
return DRACO_INCREMENT_MOD(last_axis, D); return DRACO_INCREMENT_MOD(last_axis, D);
@ -237,7 +238,6 @@ uint32_t IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::GetAxis(
DCHECK_EQ(true, end - begin != 0); DCHECK_EQ(true, end - begin != 0);
// TODO(hemmer): Try to find the optimal value for this cut off.
uint32_t best_axis = 0; uint32_t best_axis = 0;
if (end - begin < 64) { if (end - begin < 64) {
for (uint32_t axis = 1; axis < D; ++axis) { for (uint32_t axis = 1; axis < D; ++axis) {
@ -281,7 +281,7 @@ uint32_t IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::GetAxis(
} }
} }
} }
axis_encoder_.EncodeBits32(4, best_axis); axis_encoder_.EncodeLeastSignificantBits32(4, best_axis);
} }
return best_axis; return best_axis;
@ -332,12 +332,13 @@ void IntegerPointsKdTreeEncoder<PointDiT, compression_level_t>::OctreeEncode(
num_remaining_bits[i] = bit_length_ - levels[axes[i]]; num_remaining_bits[i] = bit_length_ - levels[axes[i]];
} }
for (int i = 0; i < num_remaining_points; ++i) { for (uint32_t i = 0; i < num_remaining_points; ++i) {
const PointDiT &p = *(begin + i); const PointDiT &p = *(begin + i);
for (int i = 0; i < D; i++) { for (int j = 0; j < D; j++) {
if (num_remaining_bits[i]) if (num_remaining_bits[j]) {
remaining_bits_encoder_.EncodeBits32(num_remaining_bits[i], remaining_bits_encoder_.EncodeLeastSignificantBits32(
p[axes[i]]); num_remaining_bits[j], p[axes[j]]);
}
} }
} }
continue; continue;

View File

@ -35,7 +35,7 @@ class PointCloudDecoder {
void SetAttributesDecoder(int att_decoder_id, void SetAttributesDecoder(int att_decoder_id,
std::unique_ptr<AttributesDecoder> decoder) { std::unique_ptr<AttributesDecoder> decoder) {
if (att_decoder_id >= attributes_decoders_.size()) if (att_decoder_id >= static_cast<int>(attributes_decoders_.size()))
attributes_decoders_.resize(att_decoder_id + 1); attributes_decoders_.resize(att_decoder_id + 1);
attributes_decoders_[att_decoder_id] = std::move(decoder); attributes_decoders_[att_decoder_id] = std::move(decoder);
} }

View File

@ -91,7 +91,7 @@ bool PointCloudEncoder::GenerateAttributesEncoders() {
return false; return false;
} }
attribute_to_encoder_map_.resize(point_cloud_->num_attributes()); attribute_to_encoder_map_.resize(point_cloud_->num_attributes());
for (int i = 0; i < attributes_encoders_.size(); ++i) { for (uint32_t i = 0; i < attributes_encoders_.size(); ++i) {
for (int j = 0; j < attributes_encoders_[i]->num_attributes(); ++j) { for (int j = 0; j < attributes_encoders_[i]->num_attributes(); ++j) {
attribute_to_encoder_map_[attributes_encoders_[i]->GetAttributeId(j)] = i; attribute_to_encoder_map_[attributes_encoders_[i]->GetAttributeId(j)] = i;
} }
@ -143,11 +143,11 @@ bool PointCloudEncoder::RearrangeAttributesEncoders() {
// but it will require changes in the current API. // but it will require changes in the current API.
attributes_encoder_ids_order_.resize(attributes_encoders_.size()); attributes_encoder_ids_order_.resize(attributes_encoders_.size());
std::vector<bool> is_encoder_processed(attributes_encoders_.size(), false); std::vector<bool> is_encoder_processed(attributes_encoders_.size(), false);
int num_processed_encoders = 0; uint32_t num_processed_encoders = 0;
while (num_processed_encoders < attributes_encoders_.size()) { while (num_processed_encoders < attributes_encoders_.size()) {
// Flagged when any of the encoder get processed. // Flagged when any of the encoder get processed.
bool encoder_processed = false; bool encoder_processed = false;
for (int i = 0; i < attributes_encoders_.size(); ++i) { for (uint32_t i = 0; i < attributes_encoders_.size(); ++i) {
if (is_encoder_processed[i]) if (is_encoder_processed[i])
continue; // Encoder already processed. continue; // Encoder already processed.
// Check if all parent encoders are already processed. // Check if all parent encoders are already processed.
@ -156,7 +156,7 @@ bool PointCloudEncoder::RearrangeAttributesEncoders() {
const int32_t att_id = attributes_encoders_[i]->GetAttributeId(p); const int32_t att_id = attributes_encoders_[i]->GetAttributeId(p);
for (int ap = 0; for (int ap = 0;
ap < attributes_encoders_[i]->NumParentAttributes(att_id); ++ap) { ap < attributes_encoders_[i]->NumParentAttributes(att_id); ++ap) {
const int32_t parent_att_id = const uint32_t parent_att_id =
attributes_encoders_[i]->GetParentAttributeId(att_id, ap); attributes_encoders_[i]->GetParentAttributeId(att_id, ap);
const int32_t parent_encoder_id = const int32_t parent_encoder_id =
attribute_to_encoder_map_[parent_att_id]; attribute_to_encoder_map_[parent_att_id];
@ -188,7 +188,8 @@ bool PointCloudEncoder::RearrangeAttributesEncoders() {
std::vector<bool> is_attribute_processed(point_cloud_->num_attributes(), std::vector<bool> is_attribute_processed(point_cloud_->num_attributes(),
false); false);
int num_processed_attributes; int num_processed_attributes;
for (int ae_order = 0; ae_order < attributes_encoders_.size(); ++ae_order) { for (uint32_t ae_order = 0; ae_order < attributes_encoders_.size();
++ae_order) {
const int ae = attributes_encoder_ids_order_[ae_order]; const int ae = attributes_encoder_ids_order_[ae_order];
const int32_t num_encoder_attributes = const int32_t num_encoder_attributes =
attributes_encoders_[ae]->num_attributes(); attributes_encoders_[ae]->num_attributes();

View File

@ -101,7 +101,7 @@ class PointCloudEncoder {
// Encodes any data that is necessary to recreate a given attribute encoder. // Encodes any data that is necessary to recreate a given attribute encoder.
// Note: this is called in order in which the attribute encoders are going to // Note: this is called in order in which the attribute encoders are going to
// be encoded. // be encoded.
virtual bool EncodeAttributesEncoderIdentifier(int32_t att_encoder_id) { virtual bool EncodeAttributesEncoderIdentifier(int32_t /* att_encoder_id */) {
return true; return true;
} }

View File

@ -0,0 +1,119 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "compression/point_cloud/point_cloud_kd_tree_decoder.h"
#include "compression/point_cloud/point_cloud_kd_tree_encoder.h"
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "core/vector_d.h"
#include "io/obj_decoder.h"
#include "point_cloud/point_cloud_builder.h"
namespace draco {
class PointCloudKdTreeEncodingTest : public ::testing::Test {
protected:
std::unique_ptr<PointCloud> DecodeObj(const std::string &file_name) const {
const std::string path = GetTestFileFullPath(file_name);
ObjDecoder decoder;
std::unique_ptr<PointCloud> pc(new PointCloud());
if (!decoder.DecodeFromFile(path, pc.get()))
return nullptr;
return pc;
}
void ComparePointClouds(const PointCloud &p0, const PointCloud &p1) const {
ASSERT_EQ(p0.num_points(), p1.num_points());
ASSERT_EQ(p0.num_attributes(), p1.num_attributes());
// Currently works only with one attribute.
ASSERT_EQ(p0.num_attributes(), 1);
ASSERT_EQ(p0.attribute(0)->components_count(), 3);
std::vector<VectorD<double, 3>> points0, points1;
for (PointIndex i(0); i < p0.num_points(); ++i) {
VectorD<double, 3> pos0, pos1;
p0.attribute(0)->ConvertValue(p0.attribute(0)->mapped_index(i), &pos0[0]);
p1.attribute(0)->ConvertValue(p1.attribute(0)->mapped_index(i), &pos1[0]);
points0.push_back(pos0);
points1.push_back(pos1);
}
// To compare the point clouds we sort points from both inputs separately,
// and then we compare all matching points one by one.
// TODO(ostava): Note that this is not guaranteed to work for quantized
// point clouds because the order of points may actually change because
// of the quantization. The test should be make more robust to handle such
// case.
std::sort(points0.begin(), points0.end());
std::sort(points1.begin(), points1.end());
for (uint32_t i = 0; i < points0.size(); ++i) {
ASSERT_LE((points0[i] - points1[i]).SquaredNorm(), 1e-2);
}
}
void TestKdTreeEncoding(const PointCloud &pc) {
EncoderBuffer buffer;
PointCloudKdTreeEncoder encoder;
EncoderOptions options = EncoderOptions::CreateDefaultOptions();
options.SetGlobalInt("quantization_bits", 12);
encoder.SetPointCloud(pc);
ASSERT_TRUE(encoder.Encode(options, &buffer));
DecoderBuffer dec_buffer;
dec_buffer.Init(buffer.data(), buffer.size());
PointCloudKdTreeDecoder decoder;
std::unique_ptr<PointCloud> out_pc(new PointCloud());
ASSERT_TRUE(decoder.Decode(&dec_buffer, out_pc.get()));
ComparePointClouds(pc, *out_pc.get());
}
void TestFloatEncoding(const std::string &file_name) {
std::unique_ptr<PointCloud> pc = DecodeObj(file_name);
ASSERT_NE(pc, nullptr);
TestKdTreeEncoding(*pc.get());
}
};
// Round-trips a floating point position attribute loaded from an OBJ file
// through the kd-tree codec.
TEST_F(PointCloudKdTreeEncodingTest, TestFloatKdTreeEncoding) {
  TestFloatEncoding("cube_subd.obj");
}
// Round-trips a synthetic integer position attribute built directly with
// PointCloudBuilder through the kd-tree codec.
TEST_F(PointCloudKdTreeEncodingTest, TestIntKdTreeEncoding) {
  constexpr int num_points = 120;
  // Generate some pseudo-random points, written straight into the vector.
  std::vector<std::array<uint32_t, 3>> points(num_points);
  for (int i = 0; i < num_points; ++i) {
    points[i][0] = 8 * ((i * 7) % 127);
    points[i][1] = 13 * ((i * 3) % 321);
    points[i][2] = 29 * ((i * 19) % 450);
  }

  PointCloudBuilder builder;
  builder.Start(num_points);
  const int att_id =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_UINT32);
  for (PointIndex i(0); i < num_points; ++i) {
    builder.SetAttributeValueForPoint(att_id, PointIndex(i),
                                      &(points[i.value()])[0]);
  }
  std::unique_ptr<PointCloud> pc = builder.Finalize(false);
  ASSERT_NE(pc, nullptr);
  TestKdTreeEncoding(*pc.get());
}
} // namespace draco

View File

@ -0,0 +1,62 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "compression/point_cloud/point_cloud_sequential_decoder.h"
#include "compression/point_cloud/point_cloud_sequential_encoder.h"
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/obj_decoder.h"
namespace draco {
// Fixture for round-trip tests of the sequential point cloud codec.
class PointCloudSequentialEncodingTest : public ::testing::Test {
 protected:
  // Loads a point cloud from a test OBJ file; returns nullptr on failure.
  std::unique_ptr<PointCloud> DecodeObj(const std::string &file_name) const {
    std::unique_ptr<PointCloud> cloud(new PointCloud());
    ObjDecoder obj_decoder;
    const std::string full_path = GetTestFileFullPath(file_name);
    if (!obj_decoder.DecodeFromFile(full_path, cloud.get()))
      return nullptr;
    return cloud;
  }

  // Encodes the point cloud loaded from |file_name| with the sequential
  // encoder, decodes the result, and checks the point count is preserved.
  void TestEncoding(const std::string &file_name) {
    std::unique_ptr<PointCloud> pc = DecodeObj(file_name);
    ASSERT_NE(pc, nullptr);

    PointCloudSequentialEncoder encoder;
    encoder.SetPointCloud(*pc.get());
    EncoderOptions options = EncoderOptions::CreateDefaultOptions();
    EncoderBuffer buffer;
    ASSERT_TRUE(encoder.Encode(options, &buffer));

    DecoderBuffer dec_buffer;
    dec_buffer.Init(buffer.data(), buffer.size());
    std::unique_ptr<PointCloud> out_pc(new PointCloud());
    PointCloudSequentialDecoder decoder;
    ASSERT_TRUE(decoder.Decode(&dec_buffer, out_pc.get()));
    ASSERT_EQ(out_pc->num_points(), pc->num_points());
  }
};
// Smoke test: the sequential codec must round-trip an OBJ-loaded point cloud
// without losing points.
TEST_F(PointCloudSequentialEncodingTest, DoesEncodeAndDecode) {
  TestEncoding("test_nm.obj");
}
// TODO(ostava): Test the reusability of a single instance of the encoder and
// decoder class.
} // namespace draco

View File

@ -88,7 +88,6 @@ void AdaptiveRAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
source_buffer->Advance(size_in_bytes); source_buffer->Advance(size_in_bytes);
} }
// TODO(hemmer): Consider moving these to the .h file.
bool AdaptiveRAnsBitDecoder::DecodeNextBit() { bool AdaptiveRAnsBitDecoder::DecodeNextBit() {
const uint8_t p0 = clamp_probability(p0_f_); const uint8_t p0 = clamp_probability(p0_f_);
const bool bit = static_cast<bool>(rabs_read(&ans_decoder_, p0)); const bool bit = static_cast<bool>(rabs_read(&ans_decoder_, p0));
@ -96,7 +95,8 @@ bool AdaptiveRAnsBitDecoder::DecodeNextBit() {
return bit; return bit;
} }
void AdaptiveRAnsBitDecoder::DecodeBits32(int nbits, uint32_t *value) { void AdaptiveRAnsBitDecoder::DecodeLeastSignificantBits32(int nbits,
uint32_t *value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);

View File

@ -38,7 +38,7 @@ class AdaptiveRAnsBitEncoder {
// Encode |nibts| of |value|, starting from the least significant bit. // Encode |nibts| of |value|, starting from the least significant bit.
// |nbits| must be > 0 and <= 32. // |nbits| must be > 0 and <= 32.
void EncodeBits32(int nbits, uint32_t value) { void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);
uint32_t selector = (1 << (nbits - 1)); uint32_t selector = (1 << (nbits - 1));
@ -72,7 +72,7 @@ class AdaptiveRAnsBitDecoder {
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
// > 0 and <= 32. // > 0 and <= 32.
void DecodeBits32(int nbits, uint32_t *value); void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
void EndDecoding() {} void EndDecoding() {}

View File

@ -86,7 +86,7 @@ static uint32_t mem_get_le24(const void *vmem) {
return val; return val;
} }
static uint32_t mem_get_le32(const void *vmem) { static inline uint32_t mem_get_le32(const void *vmem) {
uint32_t val; uint32_t val;
const uint8_t *mem = (const uint8_t *)vmem; const uint8_t *mem = (const uint8_t *)vmem;
@ -472,11 +472,11 @@ class RAnsDecoder {
probability_table_.resize(num_symbols); probability_table_.resize(num_symbols);
uint32_t cum_prob = 0; uint32_t cum_prob = 0;
uint32_t act_prob = 0; uint32_t act_prob = 0;
for (int i = 0; i < num_symbols; ++i) { for (uint32_t i = 0; i < num_symbols; ++i) {
probability_table_[i].prob = token_probs[i]; probability_table_[i].prob = token_probs[i];
probability_table_[i].cum_prob = cum_prob; probability_table_[i].cum_prob = cum_prob;
cum_prob += token_probs[i]; cum_prob += token_probs[i];
for (int j = act_prob; j < cum_prob; ++j) { for (uint32_t j = act_prob; j < cum_prob; ++j) {
lut_table_[j] = i; lut_table_[j] = i;
} }
act_prob = cum_prob; act_prob = cum_prob;

View File

@ -17,8 +17,7 @@
// TODO(fgalligan): Remove this file. // TODO(fgalligan): Remove this file.
namespace draco { namespace draco {
BitEncoder::BitEncoder(char *data, size_t length) BitEncoder::BitEncoder(char *data) : bit_buffer_(data), bit_offset_(0) {}
: bit_buffer_(data), bit_buffer_length_(length), bit_offset_(0) {}
BitDecoder::BitDecoder() BitDecoder::BitDecoder()
: bit_buffer_(nullptr), bit_buffer_end_(nullptr), bit_offset_(0) {} : bit_buffer_(nullptr), bit_buffer_end_(nullptr), bit_offset_(0) {}

View File

@ -27,9 +27,8 @@ namespace draco {
// Class to encode bits to a bit buffer. // Class to encode bits to a bit buffer.
class BitEncoder { class BitEncoder {
public: public:
// |data| is the buffer to write the bits into. |length| is the size of the // |data| is the buffer to write the bits into.
// buffer. explicit BitEncoder(char *data);
BitEncoder(char *data, size_t length);
// Write |nbits| of |data| into the bit buffer. // Write |nbits| of |data| into the bit buffer.
void PutBits(uint32_t data, int32_t nbits) { void PutBits(uint32_t data, int32_t nbits) {
@ -45,7 +44,7 @@ class BitEncoder {
// TODO(fgalligan): Remove this function once we know we do not need the // TODO(fgalligan): Remove this function once we know we do not need the
// old API anymore. // old API anymore.
// This is a function of an old API, that currently does nothing. // This is a function of an old API, that currently does nothing.
void Flush(int left_over_bit_value) {} void Flush(int /* left_over_bit_value */) {}
// Return the number of bits required to store the given number // Return the number of bits required to store the given number
static uint32_t BitsRequired(uint32_t x) { static uint32_t BitsRequired(uint32_t x) {
@ -69,7 +68,6 @@ class BitEncoder {
} }
char *bit_buffer_; char *bit_buffer_;
size_t bit_buffer_length_;
size_t bit_offset_; size_t bit_offset_;
}; };
@ -98,7 +96,7 @@ class BitDecoder {
inline uint32_t EnsureBits(int k) { inline uint32_t EnsureBits(int k) {
DCHECK_LE(k, 24); DCHECK_LE(k, 24);
DCHECK_LE(k, AvailBits()); DCHECK_LE(static_cast<uint64_t>(k), AvailBits());
uint32_t buf = 0; uint32_t buf = 0;
for (int i = 0; i < k; ++i) { for (int i = 0; i < k; ++i) {

113
core/bit_coder_test.cc Normal file
View File

@ -0,0 +1,113 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "core/bit_coder.h"
#include "core/draco_test_base.h"
namespace draco {
// Empty fixtures grouping the tests for the bit-level coder classes declared
// in core/bit_coder.h.
class BitDecoderTest : public ::testing::Test {};
class BitEncoderTest : public ::testing::Test {};
// Round-trips whole bytes through BitEncoder/BitDecoder and verifies both
// the decoded values and the running bit counters.
TEST_F(BitDecoderTest, TestBitCodersByteAligned) {
  constexpr int buffer_size = 32;
  char buffer[buffer_size];
  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};
  const int bytes_to_encode = sizeof(data);

  BitEncoder encoder(buffer);
  for (int i = 0; i < bytes_to_encode; ++i) {
    encoder.PutBits(data[i], sizeof(data[i]) * 8);
    // The encoder must report exactly how many bits were written so far.
    ASSERT_EQ((i + 1) * sizeof(data[i]) * 8, encoder.Bits());
  }

  BitDecoder decoder;
  decoder.reset(static_cast<const void *>(buffer), bytes_to_encode);
  for (int i = 0; i < bytes_to_encode; ++i) {
    uint32_t decoded = 0;
    ASSERT_TRUE(decoder.GetBits(8, &decoded));
    ASSERT_EQ(decoded, data[i]);
  }
  ASSERT_EQ(bytes_to_encode * 8u, decoder.BitsDecoded());
}
// Encodes a bit stream that does not end on a byte boundary (51 bits) and
// verifies that the partial final byte is decoded correctly.
TEST_F(BitDecoderTest, TestBitCodersNonByte) {
  constexpr int buffer_size = 32;
  char buffer[buffer_size];
  BitEncoder encoder(buffer);
  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};
  const uint32_t bits_to_encode = 51;
  const int bytes_to_encode = (bits_to_encode / 8) + 1;
  for (int i = 0; i < bytes_to_encode; ++i) {
    // Write full bytes until fewer than 8 bits remain; then write the rest.
    const int num_bits = (encoder.Bits() + 8 <= bits_to_encode)
                             ? 8
                             : bits_to_encode - encoder.Bits();
    encoder.PutBits(data[i], num_bits);
  }
  BitDecoder decoder;
  decoder.reset(static_cast<const void *>(buffer), bytes_to_encode);
  int64_t bits_to_decode = encoder.Bits();
  for (int i = 0; i < bytes_to_encode; ++i) {
    uint32_t x = 0;
    // The last read may cover fewer than 8 bits.
    const int num_bits = (bits_to_decode > 8) ? 8 : bits_to_decode;
    ASSERT_TRUE(decoder.GetBits(num_bits, &x));
    // Mask the source byte down to the bits that were actually encoded so it
    // can be compared against the decoded value.
    const int bits_to_shift = 8 - num_bits;
    const uint8_t test_byte =
        ((data[i] << bits_to_shift) & 0xff) >> bits_to_shift;
    ASSERT_EQ(x, test_byte);
    bits_to_decode -= 8;
  }
  ASSERT_EQ(bits_to_encode, decoder.BitsDecoded());
}
// Decodes the alternating bit pattern 0xaaaa one bit at a time and checks
// each bit plus the final decoded-bit count.
TEST_F(BitDecoderTest, TestSingleBits) {
  const int data = 0xaaaa;

  BitDecoder decoder;
  decoder.reset(static_cast<const void *>(&data), sizeof(data));
  for (uint32_t i = 0; i < 16; ++i) {
    uint32_t bit = 0;
    ASSERT_TRUE(decoder.GetBits(1, &bit));
    // Bits of 0xaaaa alternate 0,1,0,1,... from the least significant end.
    ASSERT_EQ(bit, (i % 2));
  }
  ASSERT_EQ(16u, decoder.BitsDecoded());
}
// Reads the buffer back in 16-bit chunks, checking both the decoded values
// and the running count of decoded bits after every read.
TEST_F(BitDecoderTest, TestMultipleBits) {
  const uint8_t data[] = {0x76, 0x54, 0x32, 0x10, 0x76, 0x54, 0x32, 0x10};

  BitDecoder decoder;
  decoder.reset(static_cast<const void *>(data), sizeof(data));
  uint32_t value = 0;
  for (uint32_t i = 0; i < 2; ++i) {
    ASSERT_TRUE(decoder.GetBits(16, &value));
    ASSERT_EQ(value, 0x5476u);
    ASSERT_EQ(16 + (i * 32), decoder.BitsDecoded());

    ASSERT_TRUE(decoder.GetBits(16, &value));
    ASSERT_EQ(value, 0x1032u);
    ASSERT_EQ(32 + (i * 32), decoder.BitsDecoded());
  }
}
} // namespace draco

View File

@ -34,7 +34,7 @@ void DataBuffer::Update(const void *data, int64_t size, int64_t offset) {
// If no data is provided, just resize the buffer. // If no data is provided, just resize the buffer.
data_.resize(size + offset); data_.resize(size + offset);
} else { } else {
if (size + offset > data_.size()) if (size + offset > static_cast<int64_t>(data_.size()))
data_.resize(size + offset); data_.resize(size + offset);
const uint8_t *const byte_data = static_cast<const uint8_t *>(data); const uint8_t *const byte_data = static_cast<const uint8_t *>(data);
std::copy(byte_data, byte_data + size, data_.data() + offset); std::copy(byte_data, byte_data + size, data_.data() + offset);

View File

@ -49,7 +49,7 @@ class DecoderBuffer {
// Decodes up to 32 bits into out_val. Can be called only in between // Decodes up to 32 bits into out_val. Can be called only in between
// StartBitDecoding and EndBitDeoding. Otherwise returns false. // StartBitDecoding and EndBitDeoding. Otherwise returns false.
bool DecodeBits32(int nbits, uint32_t *out_value) { bool DecodeLeastSignificantBits32(int nbits, uint32_t *out_value) {
if (!bit_decoder_active()) if (!bit_decoder_active())
return false; return false;
bit_decoder_.GetBits(nbits, out_value); bit_decoder_.GetBits(nbits, out_value);
@ -68,7 +68,7 @@ class DecoderBuffer {
} }
bool Decode(void *out_data, size_t size_to_decode) { bool Decode(void *out_data, size_t size_to_decode) {
if (data_size_ < pos_ + size_to_decode) if (data_size_ < static_cast<int64_t>(pos_ + size_to_decode))
return false; // Buffer overflow. return false; // Buffer overflow.
memcpy(out_data, (data_ + pos_), size_to_decode); memcpy(out_data, (data_ + pos_), size_to_decode);
pos_ += size_to_decode; pos_ += size_to_decode;
@ -79,14 +79,14 @@ class DecoderBuffer {
template <typename T> template <typename T>
bool Peek(T *out_val) { bool Peek(T *out_val) {
const size_t size_to_decode = sizeof(T); const size_t size_to_decode = sizeof(T);
if (data_size_ < pos_ + size_to_decode) if (data_size_ < static_cast<int64_t>(pos_ + size_to_decode))
return false; // Buffer overflow. return false; // Buffer overflow.
memcpy(out_val, (data_ + pos_), size_to_decode); memcpy(out_val, (data_ + pos_), size_to_decode);
return true; return true;
} }
bool Peek(void *out_data, size_t size_to_peek) { bool Peek(void *out_data, size_t size_to_peek) {
if (data_size_ < pos_ + size_to_peek) if (data_size_ < static_cast<int64_t>(pos_ + size_to_peek))
return false; // Buffer overflow. return false; // Buffer overflow.
memcpy(out_data, (data_ + pos_), size_to_peek); memcpy(out_data, (data_ + pos_), size_to_peek);
return true; return true;

View File

@ -46,7 +46,7 @@ class DirectBitEncoder {
// Encode |nibts| of |value|, starting from the least significant bit. // Encode |nibts| of |value|, starting from the least significant bit.
// |nbits| must be > 0 and <= 32. // |nbits| must be > 0 and <= 32.
void EncodeBits32(int nbits, uint32_t value) { void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);
@ -107,10 +107,10 @@ class DirectBitDecoder {
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
// > 0 and <= 32. // > 0 and <= 32.
void DecodeBits32(int nbits, uint32_t *value) { void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);
const uint32_t remaining = 32 - num_used_bits_; const int remaining = 32 - num_used_bits_;
if (nbits <= remaining) { if (nbits <= remaining) {
*value = (*pos_ << num_used_bits_) >> (32 - nbits); *value = (*pos_ << num_used_bits_) >> (32 - nbits);
num_used_bits_ += nbits; num_used_bits_ += nbits;

11
core/draco_test_base.h Normal file
View File

@ -0,0 +1,11 @@
// Wrapper for including googletest indirectly. Useful when the location of the
// googletest sources must change depending on build environment and repository
// source location.
#ifndef DRACO_CORE_DRACO_TEST_BASE_H_
#define DRACO_CORE_DRACO_TEST_BASE_H_
// NOTE(review): presumably a stand-in for a gflags-style flag so golden-file
// tests compile without gflags. Being `static` at namespace scope in a header,
// every translation unit that includes this gets its own copy — confirm that
// is intended.
static bool FLAGS_update_golden_files;
#include "gtest/gtest.h"
#include "testing/draco_test_config.h"
#endif  // DRACO_CORE_DRACO_TEST_BASE_H_

81
core/draco_test_utils.cc Normal file
View File

@ -0,0 +1,81 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "core/draco_test_utils.h"
#include <fstream>
#include "core/macros.h"
#include "draco_test_base.h"
namespace draco {
namespace {
static constexpr char kTestDataDir[] = DRACO_TEST_DATA_DIR;
} // namespace
// Builds the absolute path of |file_name| inside the compile-time test data
// directory (kTestDataDir).
std::string GetTestFileFullPath(const std::string &file_name) {
  const std::string data_dir(kTestDataDir);
  return data_dir + "/" + file_name;
}
// Writes |data_size| bytes of |data| into a freshly created golden file in the
// test data folder. Returns false when the file cannot be opened for writing.
// TODO(ostava): This will work only when the test is executed locally
// from blaze-bin/ folder. We should look for ways how to
// make it work when it's run using the "blaze test" command.
bool GenerateGoldenFile(const std::string &golden_file_name, const void *data,
                        int data_size) {
  const std::string full_path = GetTestFileFullPath(golden_file_name);
  std::ofstream out_file(full_path, std::ios::binary);
  if (!out_file)
    return false;  // Destination could not be opened.
  const char *const bytes = static_cast<const char *>(data);
  out_file.write(bytes, data_size);
  out_file.close();
  return true;
}
// Compares the content of the golden file |golden_file_name| against |data| of
// |data_size| bytes. Logs the first differing byte position on a content
// mismatch and logs a size message on a length mismatch. Returns true only on
// an exact byte-for-byte match.
bool CompareGoldenFile(const std::string &golden_file_name, const void *data,
                       int data_size) {
  const std::string golden_path = GetTestFileFullPath(golden_file_name);
  // Open in binary mode to mirror GenerateGoldenFile(); without it, newline
  // translation on Windows would corrupt the byte comparison.
  std::ifstream in_file(golden_path, std::ios::binary);
  if (!in_file || data_size < 0)
    return false;
  const char *const data_c8 = static_cast<const char *>(data);
  constexpr int buffer_size = 1024;
  char buffer[buffer_size];
  size_t extracted_size = 0;
  size_t remaining_data_size = data_size;
  int offset = 0;
  while ((extracted_size = in_file.read(buffer, buffer_size).gcount()) > 0) {
    // remaining_data_size is unsigned, so test for exactly zero (the old
    // `<= 0` could only ever mean `== 0` anyway).
    if (remaining_data_size == 0)
      break;  // Golden file is longer than the input data.
    size_t size_to_check = extracted_size;
    if (remaining_data_size < size_to_check)
      size_to_check = remaining_data_size;
    for (size_t i = 0; i < size_to_check; ++i) {
      if (buffer[i] != data_c8[offset++]) {
        LOG(INFO) << "Test output differed from golden file at byte "
                  << offset - 1;
        return false;
      }
    }
    // Guard the unsigned subtraction: if this golden chunk extends past the
    // end of |data|, the sizes differ. (The original code underflowed here
    // and could then index data_c8 out of bounds on the next iteration.)
    if (remaining_data_size < extracted_size) {
      LOG(INFO) << "Test output size differed from golden file size";
      return false;
    }
    remaining_data_size -= extracted_size;
  }
  if (remaining_data_size != 0 || extracted_size != 0) {
    // Both counters must be fully consumed at the end; any leftover on either
    // side means the input and golden sizes differ.
    LOG(INFO) << "Test output size differed from golden file size";
    return false;
  }
  return true;
}
} // namespace draco

39
core/draco_test_utils.h Normal file
View File

@ -0,0 +1,39 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_CORE_DRACO_TEST_UTILS_H_
#define DRACO_CORE_DRACO_TEST_UTILS_H_
#include "core/draco_test_base.h"
namespace draco {
// Returns the full path to a given test file inside the test data folder.
std::string GetTestFileFullPath(const std::string &file_name);
// Generates a new golden file and saves it into the correct folder.
// Returns false if the file couldn't be created.
bool GenerateGoldenFile(const std::string &golden_file_name, const void *data,
                        int data_size);
// Compares the content of a golden file with the input data.
// The function will log the first byte position where the data differ.
// Returns false if there are any differences.
bool CompareGoldenFile(const std::string &golden_file_name, const void *data,
                       int data_size);
} // namespace draco
#endif // DRACO_CORE_DRACO_TEST_UTILS_H_

6
core/draco_tests.cc Normal file
View File

@ -0,0 +1,6 @@
#include "core/draco_test_base.h"
// Test driver: initializes googletest (consuming its command-line flags) and
// runs every registered test; returns non-zero when any test fails.
int main(int argc, char* argv[]) {
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}

View File

@ -18,7 +18,7 @@
namespace draco { namespace draco {
// Draco version is comprised of <major>.<minor>.<revision>. // Draco version is comprised of <major>.<minor>.<revision>.
static const char kDracoVersion[] = "0.9.0"; static const char kDracoVersion[] = "0.9.1";
const char *Version() { return kDracoVersion; } const char *Version() { return kDracoVersion; }

View File

@ -46,8 +46,8 @@ bool EncoderBuffer::StartBitEncoding(int64_t required_bits, bool encode_size) {
buffer_.resize(buffer_start_size + required_bytes); buffer_.resize(buffer_start_size + required_bytes);
// Get the buffer data pointer for the bit encoder. // Get the buffer data pointer for the bit encoder.
const char *const data = buffer_.data() + buffer_start_size; const char *const data = buffer_.data() + buffer_start_size;
bit_encoder_ = std::unique_ptr<BitEncoder>( bit_encoder_ =
new BitEncoder(const_cast<char *>(data), required_bytes)); std::unique_ptr<BitEncoder>(new BitEncoder(const_cast<char *>(data)));
return true; return true;
} }

View File

@ -45,14 +45,14 @@ class EncoderBuffer {
// Encode up to 32 bits into the buffer. Can be called only in between // Encode up to 32 bits into the buffer. Can be called only in between
// StartBitEncoding and EndBitEncoding. Otherwise returns false. // StartBitEncoding and EndBitEncoding. Otherwise returns false.
// TODO(hemmer): Swap arguments to make it consistent with DecoderBuffer. bool EncodeLeastSignificantBits32(int nbits, uint32_t value) {
bool EncodeBits32(uint32_t value, int nbits) {
if (!bit_encoder_active()) if (!bit_encoder_active())
return false; return false;
bit_encoder_->PutBits(value, nbits); bit_encoder_->PutBits(value, nbits);
return true; return true;
} }
public:
// Encode an arbitrary data type. // Encode an arbitrary data type.
// Can be used only when we are not encoding a bit-sequence. // Can be used only when we are not encoding a bit-sequence.
// Returns false when the value couldn't be encoded. // Returns false when the value couldn't be encoded.

View File

@ -48,7 +48,7 @@ class FoldedBit32Encoder {
// Encode |nbits| of |value|, starting from the least significant bit. // Encode |nbits| of |value|, starting from the least significant bit.
// |nbits| must be > 0 and <= 32. // |nbits| must be > 0 and <= 32.
void EncodeBits32(int nbits, uint32_t value) { void EncodeLeastSignificantBits32(int nbits, uint32_t value) {
uint32_t selector = 1 << (nbits - 1); uint32_t selector = 1 << (nbits - 1);
for (int i = 0; i < nbits; i++) { for (int i = 0; i < nbits; i++) {
const bool bit = (value & selector); const bool bit = (value & selector);
@ -96,7 +96,7 @@ class FoldedBit32Decoder {
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
// > 0 and <= 32. // > 0 and <= 32.
void DecodeBits32(int nbits, uint32_t *value) { void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
uint32_t result = 0; uint32_t result = 0;
for (int i = 0; i < nbits; ++i) { for (int i = 0; i < nbits; ++i) {
const bool bit = folded_number_decoders_[i].DecodeNextBit(); const bool bit = folded_number_decoders_[i].DecodeNextBit();

View File

@ -32,14 +32,14 @@ uint64_t FingerprintString(const char *s, size_t len) {
uint64_t new_hash = seed; uint64_t new_hash = seed;
if (num_chars_left > 7) { if (num_chars_left > 7) {
const int off = i * 8; const int off2 = i * 8;
new_hash = static_cast<uint64_t>(s[off]) << 56 | new_hash = static_cast<uint64_t>(s[off2]) << 56 |
static_cast<uint64_t>(s[off + 1]) << 48 | static_cast<uint64_t>(s[off2 + 1]) << 48 |
static_cast<uint64_t>(s[off + 2]) << 40 | static_cast<uint64_t>(s[off2 + 2]) << 40 |
static_cast<uint64_t>(s[off + 3]) << 32 | static_cast<uint64_t>(s[off2 + 3]) << 32 |
static_cast<uint64_t>(s[off + 4]) << 24 | static_cast<uint64_t>(s[off2 + 4]) << 24 |
static_cast<uint64_t>(s[off + 5]) << 16 | static_cast<uint64_t>(s[off2 + 5]) << 16 |
static_cast<uint64_t>(s[off + 6]) << 8 | s[off + 7]; static_cast<uint64_t>(s[off2 + 6]) << 8 | s[off2 + 7];
} else { } else {
for (int j = 0; j < num_chars_left; ++j) { for (int j = 0; j < num_chars_left; ++j) {
new_hash |= static_cast<uint64_t>(s[off + j]) new_hash |= static_cast<uint64_t>(s[off + j])

View File

@ -47,7 +47,7 @@ template <typename T>
struct HashArray { struct HashArray {
size_t operator()(const T &a) const { size_t operator()(const T &a) const {
size_t hash = 79; // Magic number. size_t hash = 79; // Magic number.
for (int i = 0; i < std::tuple_size<T>::value; ++i) { for (unsigned int i = 0; i < std::tuple_size<T>::value; ++i) {
hash = HashCombine(hash, ValueHash(a[i])); hash = HashCombine(hash, ValueHash(a[i]));
} }
return hash; return hash;

View File

@ -52,6 +52,10 @@ namespace draco {
#define FALLTHROUGH_INTENDED void(0); #define FALLTHROUGH_INTENDED void(0);
#endif #endif
#ifndef LOG
#define LOG(...) std::cout
#endif
#ifndef VLOG #ifndef VLOG
#define VLOG(...) std::cout #define VLOG(...) std::cout
#endif #endif

4
core/math_utils_test.cc Normal file
View File

@ -0,0 +1,4 @@
#include "core/math_utils.h"
#include "core/draco_test_base.h"
// Incrementing 1 modulo 2 should wrap around to 0.
TEST(MathUtils, Mod) { EXPECT_EQ(DRACO_INCREMENT_MOD(1, 1 << 1), 0); }

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "core/quantization_utils.h"
#include "core/draco_test_base.h"
namespace draco {
// Fixture for quantization tests; no shared state is needed.
class QuantizationUtilsTest : public ::testing::Test {};
TEST_F(QuantizationUtilsTest, TestQuantizer) {
  // Quantizer is configured to map the float range [-10, 10] onto integer
  // steps in [-255, 255] (10.f -> 255).
  Quantizer quantizer;
  quantizer.Init(10.f, 255);
  EXPECT_EQ(quantizer.QuantizeFloat(0.f), 0);
  EXPECT_EQ(quantizer.QuantizeFloat(10.f), 255);
  EXPECT_EQ(quantizer.QuantizeFloat(-10.f), -255);
  // Values around the midpoint (5.0) round to adjacent integer steps.
  EXPECT_EQ(quantizer.QuantizeFloat(4.999f), 127);
  EXPECT_EQ(quantizer.QuantizeFloat(5.f), 128);
  EXPECT_EQ(quantizer.QuantizeFloat(-4.9999f), -127);
  EXPECT_EQ(quantizer.QuantizeFloat(-5.f), -128);
  EXPECT_EQ(quantizer.QuantizeFloat(-5.0001f), -128);
  // Out of range quantization.
  // The behavior is technically undefined, but both quantizer and dequantizer
  // should still work correctly unless the quantized values overflow.
  EXPECT_LT(quantizer.QuantizeFloat(-15.f), -255);
  EXPECT_GT(quantizer.QuantizeFloat(15.f), 255);
}
TEST_F(QuantizationUtilsTest, TestDequantizer) {
  // Dequantizer is the inverse mapping: integers in [-255, 255] back to
  // floats in [-10, 10].
  Dequantizer dequantizer;
  dequantizer.Init(10.f, 255);
  EXPECT_EQ(dequantizer.DequantizeFloat(0), 0.f);
  EXPECT_EQ(dequantizer.DequantizeFloat(255), 10.f);
  EXPECT_EQ(dequantizer.DequantizeFloat(-255), -10.f);
  EXPECT_EQ(dequantizer.DequantizeFloat(128), 10.f * (128.f / 255.f));
}
} // namespace draco

View File

@ -40,7 +40,7 @@ void RAnsBitEncoder::EncodeBit(bool bit) {
} }
} }
void RAnsBitEncoder::EncodeBits32(int nbits, uint32_t value) { void RAnsBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);
@ -87,7 +87,6 @@ void RAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
zero_prob += (zero_prob == 0); zero_prob += (zero_prob == 0);
// Space for 32 bit integer and some extra space. // Space for 32 bit integer and some extra space.
// TODO(hemmer): Find out if this is really safe.
std::vector<uint8_t> buffer((bits_.size() + 8) * 8); std::vector<uint8_t> buffer((bits_.size() + 8) * 8);
AnsCoder ans_coder; AnsCoder ans_coder;
ans_write_init(&ans_coder, buffer.data()); ans_write_init(&ans_coder, buffer.data());
@ -142,7 +141,7 @@ bool RAnsBitDecoder::DecodeNextBit() {
return bit > 0; return bit > 0;
} }
void RAnsBitDecoder::DecodeBits32(int nbits, uint32_t *value) { void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
DCHECK_EQ(true, nbits <= 32); DCHECK_EQ(true, nbits <= 32);
DCHECK_EQ(true, nbits > 0); DCHECK_EQ(true, nbits > 0);

View File

@ -40,7 +40,7 @@ class RAnsBitEncoder {
// Encode |nibts| of |value|, starting from the least significant bit. // Encode |nibts| of |value|, starting from the least significant bit.
// |nbits| must be > 0 and <= 32. // |nbits| must be > 0 and <= 32.
void EncodeBits32(int nbits, uint32_t value); void EncodeLeastSignificantBits32(int nbits, uint32_t value);
// Ends the bit encoding and stores the result into the target_buffer. // Ends the bit encoding and stores the result into the target_buffer.
void EndEncoding(EncoderBuffer *target_buffer); void EndEncoding(EncoderBuffer *target_buffer);
@ -68,7 +68,7 @@ class RAnsBitDecoder {
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
// > 0 and <= 32. // > 0 and <= 32.
void DecodeBits32(int nbits, uint32_t *value); void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
void EndDecoding() {} void EndDecoding() {}

7
core/rans_coding_test.cc Normal file
View File

@ -0,0 +1,7 @@
#include "core/rans_coding.h"
#include "core/adaptive_rans_coding.h"
#include "core/draco_test_base.h"
// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error
// when compiling (blaze test :rans_coding_test --config=asan)
// The test body is deliberately empty: the check is that this translation
// unit compiles and links cleanly.
TEST(RansCodingTest, LinkerTest) {}

View File

@ -56,7 +56,7 @@ bool RAnsSymbolDecoder<max_symbol_bit_length_t>::Create(DecoderBuffer *buffer) {
return false; return false;
probability_table_.resize(num_symbols_); probability_table_.resize(num_symbols_);
// Decode the table. // Decode the table.
for (int i = 0; i < num_symbols_; ++i) { for (uint32_t i = 0; i < num_symbols_; ++i) {
uint32_t prob = 0; uint32_t prob = 0;
uint8_t byte_prob = 0; uint8_t byte_prob = 0;
// Decode the first byte and extract the number of extra bytes we need to // Decode the first byte and extract the number of extra bytes we need to

View File

@ -145,13 +145,13 @@ bool RAnsSymbolEncoder<max_symbol_bit_length_t>::Create(
return false; // Most frequent symbol would be empty. return false; // Most frequent symbol would be empty.
break; break;
} }
const int32_t new_prob = const int32_t new_prob = static_cast<int32_t>(
floor(act_rel_error_d * floor(act_rel_error_d *
static_cast<double>(probability_table_[symbol_id].prob)); static_cast<double>(probability_table_[symbol_id].prob)));
int32_t fix = probability_table_[symbol_id].prob - new_prob; int32_t fix = probability_table_[symbol_id].prob - new_prob;
if (fix == 0) if (fix == 0u)
fix = 1; fix = 1;
if (fix >= probability_table_[symbol_id].prob) if (fix >= static_cast<int32_t>(probability_table_[symbol_id].prob))
fix = probability_table_[symbol_id].prob - 1; fix = probability_table_[symbol_id].prob - 1;
if (fix > error) if (fix > error)
fix = error; fix = error;
@ -198,7 +198,7 @@ void RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
buffer->Encode(num_symbols_); buffer->Encode(num_symbols_);
// Use varint encoding for the probabilities (first two bits represent the // Use varint encoding for the probabilities (first two bits represent the
// number of bytes used - 1). // number of bytes used - 1).
for (int i = 0; i < num_symbols_; ++i) { for (uint32_t i = 0; i < num_symbols_; ++i) {
const uint32_t prob = probability_table_[i].prob; const uint32_t prob = probability_table_[i].prob;
int num_extra_bytes = 0; int num_extra_bytes = 0;
if (prob >= (1 << 6)) { if (prob >= (1 << 6)) {

119
core/symbol_coding_test.cc Normal file
View File

@ -0,0 +1,119 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "core/decoder_buffer.h"
#include "core/draco_test_base.h"
#include "core/encoder_buffer.h"
#include "core/symbol_decoding.h"
#include "core/symbol_encoding.h"
namespace draco {
// Round-trip tests for the symbol coder: everything encoded into an
// EncoderBuffer must decode back unchanged from a DecoderBuffer.
class SymbolCodingTest : public ::testing::Test {
 protected:
  SymbolCodingTest() {}
};
TEST_F(SymbolCodingTest, TestLargeNumbers) {
  // This test verifies that SymbolCoding successfully encodes an array of large
  // numbers.
  const uint32_t in[] = {12345678, 1223333, 111, 5};
  const int num_values = sizeof(in) / sizeof(uint32_t);
  EncoderBuffer eb;
  ASSERT_TRUE(EncodeSymbols(in, num_values, 1, &eb));
  std::vector<uint32_t> out;
  out.resize(num_values);
  DecoderBuffer db;
  db.Init(eb.data(), eb.size());
  ASSERT_TRUE(DecodeSymbols(num_values, 1, &db, &out[0]));
  // Decoded symbols must match the encoded input exactly.
  for (int i = 0; i < num_values; ++i) {
    EXPECT_EQ(in[i], out[i]);
  }
}
TEST_F(SymbolCodingTest, TestManyNumbers) {
  // This test verifies that SymbolCoding successfully encodes an array of
  // several numbers that repeat many times.
  // Value/frequency pairs.
  const std::pair<uint32_t, uint32_t> in[] = {
      {12, 1500}, {1025, 31000}, {7, 1}, {9, 5}, {0, 6432}};
  const int num_pairs = sizeof(in) / sizeof(std::pair<uint32_t, uint32_t>);
  std::vector<uint32_t> in_values;
  // Expand each {value, frequency} pair into |frequency| copies of |value|.
  for (int i = 0; i < num_pairs; ++i) {
    in_values.insert(in_values.end(), in[i].second, in[i].first);
  }
  EncoderBuffer eb;
  ASSERT_TRUE(EncodeSymbols(in_values.data(), in_values.size(), 1, &eb));
  std::vector<uint32_t> out_values;
  out_values.resize(in_values.size());
  DecoderBuffer db;
  db.Init(eb.data(), eb.size());
  ASSERT_TRUE(DecodeSymbols(in_values.size(), 1, &db, &out_values[0]));
  for (uint32_t i = 0; i < in_values.size(); ++i) {
    ASSERT_EQ(in_values[i], out_values[i]);
  }
}
TEST_F(SymbolCodingTest, TestEmpty) {
  // This test verifies that SymbolCoding successfully encodes an empty array.
  EncoderBuffer eb;
  ASSERT_TRUE(EncodeSymbols(nullptr, 0, 1, &eb));
  DecoderBuffer db;
  db.Init(eb.data(), eb.size());
  ASSERT_TRUE(DecodeSymbols(0, 1, &db, nullptr));
}
TEST_F(SymbolCodingTest, TestOneSymbol) {
  // This test verifies that SymbolCoding successfully encodes a single
  // symbol (repeated 1200 times).
  EncoderBuffer eb;
  const std::vector<uint32_t> in(1200, 0);
  ASSERT_TRUE(EncodeSymbols(in.data(), in.size(), 1, &eb));
  std::vector<uint32_t> out(in.size());
  DecoderBuffer db;
  db.Init(eb.data(), eb.size());
  ASSERT_TRUE(DecodeSymbols(in.size(), 1, &db, &out[0]));
  for (uint32_t i = 0; i < in.size(); ++i) {
    ASSERT_EQ(in[i], out[i]);
  }
}
TEST_F(SymbolCodingTest, TestBitLengthsl) {
  // This test verifies that SymbolCoding successfully encodes symbols of
  // various bitlengths
  EncoderBuffer eb;
  std::vector<uint32_t> in;
  constexpr int bit_lengths = 18;
  // Symbols 1, 2, 4, ..., 2^17 — one symbol per bit length.
  for (int i = 0; i < bit_lengths; ++i) {
    in.push_back(1 << i);
  }
  std::vector<uint32_t> out(in.size());
  // Encode and decode prefixes of increasing length.
  for (int i = 0; i < bit_lengths; ++i) {
    eb.Clear();
    ASSERT_TRUE(EncodeSymbols(in.data(), i + 1, 1, &eb));
    DecoderBuffer db;
    db.Init(eb.data(), eb.size());
    ASSERT_TRUE(DecodeSymbols(i + 1, 1, &db, &out[0]));
    for (int j = 0; j < i + 1; ++j) {
      ASSERT_EQ(in[j], out[j]);
    }
  }
}
} // namespace draco

View File

@ -83,7 +83,7 @@ bool DecodeTaggedSymbols(int num_values, int num_components,
// Decode the actual value. // Decode the actual value.
for (int j = 0; j < num_components; ++j) { for (int j = 0; j < num_components; ++j) {
uint32_t val; uint32_t val;
if (!src_buffer->DecodeBits32(bit_length, &val)) if (!src_buffer->DecodeLeastSignificantBits32(bit_length, &val))
return false; return false;
out_values[value_id++] = val; out_values[value_id++] = val;
} }

View File

@ -98,7 +98,7 @@ bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
// Compute the total bit length used by all values. This will be used for // Compute the total bit length used by all values. This will be used for
// computing a heuristic that chooses the optimal entropy encoding scheme. // computing a heuristic that chooses the optimal entropy encoding scheme.
uint64_t total_bit_length = 0; uint64_t total_bit_length = 0;
for (int64_t i = 0; i < bit_lengths.size(); ++i) { for (size_t i = 0; i < bit_lengths.size(); ++i) {
total_bit_length += bit_lengths[i]; total_bit_length += bit_lengths[i];
} }
@ -106,13 +106,13 @@ bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
// The average number of bits necessary for encoding a single entry value. // The average number of bits necessary for encoding a single entry value.
const int64_t average_bit_length = const int64_t average_bit_length =
ceil(static_cast<double>(total_bit_length) / static_cast<int64_t>(ceil(static_cast<double>(total_bit_length) /
static_cast<double>(num_component_values)); static_cast<double>(num_component_values)));
// The estimated average number of bits necessary for encoding a single // The estimated average number of bits necessary for encoding a single
// bit-length tag. // bit-length tag.
int64_t average_bits_per_tag = int64_t average_bits_per_tag = static_cast<int64_t>(
ceil(static_cast<float>(bits::MostSignificantBit(average_bit_length)) / ceil(static_cast<float>(bits::MostSignificantBit(average_bit_length)) /
static_cast<float>(num_components)); static_cast<float>(num_components)));
if (average_bits_per_tag <= 0) if (average_bits_per_tag <= 0)
average_bits_per_tag = 1; average_bits_per_tag = 1;
@ -162,7 +162,7 @@ bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
// Compute the frequencies from input data. // Compute the frequencies from input data.
// Maximum integer value for the values across all components. // Maximum integer value for the values across all components.
for (int i = 0; i < bit_lengths.size(); ++i) { for (size_t i = 0; i < bit_lengths.size(); ++i) {
// Update the frequency of the associated entry id. // Update the frequency of the associated entry id.
++frequencies[bit_lengths[i]]; ++frequencies[bit_lengths[i]];
} }
@ -193,7 +193,8 @@ bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
const int j = num_values - num_components - i; const int j = num_values - num_components - i;
const int value_bit_length = bit_lengths[j / num_components]; const int value_bit_length = bit_lengths[j / num_components];
for (int c = 0; c < num_components; ++c) { for (int c = 0; c < num_components; ++c) {
value_buffer.EncodeBits32(symbols[j + c], value_bit_length); value_buffer.EncodeLeastSignificantBits32(value_bit_length,
symbols[j + c]);
} }
} }
} else { } else {
@ -203,7 +204,7 @@ bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
tag_encoder.EncodeSymbol(bit_length); tag_encoder.EncodeSymbol(bit_length);
// Now encode all values using the stored bit_length. // Now encode all values using the stored bit_length.
for (int j = 0; j < num_components; ++j) { for (int j = 0; j < num_components; ++j) {
value_buffer.EncodeBits32(symbols[i + j], bit_length); value_buffer.EncodeLeastSignificantBits32(bit_length, symbols[i + j]);
} }
} }
} }

72
core/vector_d_test.cc Normal file
View File

@ -0,0 +1,72 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "core/vector_d.h"
#include "core/draco_test_base.h"
namespace {
using draco::Vector3f;
// Sanity checks for the fixed-size vector type, exercised via Vector3f.
class VectorDTest : public ::testing::Test {
 protected:
};
TEST_F(VectorDTest, TestOperators) {
  {
    // Default construction yields the zero vector.
    const Vector3f v;
    ASSERT_EQ(v[0], 0);
    ASSERT_EQ(v[1], 0);
    ASSERT_EQ(v[2], 0);
  }
  const Vector3f v(1, 2, 3);
  ASSERT_EQ(v[0], 1);
  ASSERT_EQ(v[1], 2);
  ASSERT_EQ(v[2], 3);
  // Copy and (in)equality comparison.
  Vector3f w = v;
  bool comp = (v == w);
  ASSERT_TRUE(comp);
  comp = (v != w);
  ASSERT_TRUE(!comp);
  ASSERT_EQ(w[0], 1);
  ASSERT_EQ(w[1], 2);
  ASSERT_EQ(w[2], 3);
  // Unary negation.
  w = -v;
  ASSERT_EQ(w[0], -1);
  ASSERT_EQ(w[1], -2);
  ASSERT_EQ(w[2], -3);
  // Component-wise addition and subtraction.
  w = v + v;
  ASSERT_EQ(w[0], 2);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 6);
  w = w - v;
  ASSERT_EQ(w[0], 1);
  ASSERT_EQ(w[1], 2);
  ASSERT_EQ(w[2], 3);
  // Scalar multiplication.
  w = v * float(2);
  ASSERT_EQ(w[0], 2);
  ASSERT_EQ(w[1], 4);
  ASSERT_EQ(w[2], 6);
  // 1*1 + 2*2 + 3*3 == 14 for both the squared norm and the self dot product.
  ASSERT_EQ(v.SquaredNorm(), 14);
  ASSERT_EQ(v.Dot(v), 14);
}
} // namespace

View File

@ -360,7 +360,7 @@ bool ObjDecoder::ParseMaterialLib(bool *error) {
return true; return true;
} }
bool ObjDecoder::ParseMaterial(bool *error) { bool ObjDecoder::ParseMaterial(bool * /* error */) {
if (counting_mode_) if (counting_mode_)
return false; // Skip when we are counting definitions. return false; // Skip when we are counting definitions.
if (material_att_id_ < 0) if (material_att_id_ < 0)
@ -458,7 +458,7 @@ bool ObjDecoder::ParseMaterialFile(const std::string &file_name, bool *error) {
return true; return true;
} }
bool ObjDecoder::ParseMaterialFileDefinition(bool *error) { bool ObjDecoder::ParseMaterialFileDefinition(bool * /* error */) {
char c; char c;
parser::SkipWhitespace(buffer()); parser::SkipWhitespace(buffer());
if (!buffer()->Peek(&c)) { if (!buffer()->Peek(&c)) {

52
io/obj_decoder_test.cc Normal file
View File

@ -0,0 +1,52 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include <sstream>
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/obj_decoder.h"
namespace draco {
class ObjDecoderTest : public ::testing::Test {
 protected:
  // Decodes |file_name| from the test data directory into a newly allocated
  // Geometry (Mesh or PointCloud). Returns nullptr when decoding fails.
  template <class Geometry>
  std::unique_ptr<Geometry> DecodeObj(const std::string &file_name) const {
    const std::string path = GetTestFileFullPath(file_name);
    ObjDecoder decoder;
    std::unique_ptr<Geometry> geometry(new Geometry());
    if (!decoder.DecodeFromFile(path, geometry.get()))
      return nullptr;
    return geometry;
  }
  // Checks that |file_name| decodes both as a Mesh (with at least one face)
  // and as a PointCloud (with at least one point).
  void test_decoding(const std::string &file_name) {
    const std::unique_ptr<Mesh> mesh(DecodeObj<Mesh>(file_name));
    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
    ASSERT_GT(mesh->num_faces(), 0);
    const std::unique_ptr<PointCloud> pc(DecodeObj<PointCloud>(file_name));
    ASSERT_NE(pc, nullptr) << "Failed to load test model " << file_name;
    ASSERT_GT(pc->num_points(), 0u);
  }
};
TEST_F(ObjDecoderTest, ExtraVertexOBJ) {
  const std::string file_name = "extra_vertex.obj";
  test_decoding(file_name);
}
} // namespace draco

View File

@ -17,6 +17,7 @@
#include <algorithm> #include <algorithm>
#include <cctype> #include <cctype>
#include <cmath> #include <cmath>
#include <iterator>
namespace draco { namespace draco {
namespace parser { namespace parser {

51
io/ply_decoder_test.cc Normal file
View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "io/ply_decoder.h"
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
namespace draco {
class PlyDecoderTest : public ::testing::Test {
 protected:
  // Decodes |file_name| from the test data directory into a newly allocated
  // Geometry (Mesh or PointCloud). Returns nullptr when decoding fails.
  template <class Geometry>
  std::unique_ptr<Geometry> DecodePly(const std::string &file_name) const {
    const std::string path = GetTestFileFullPath(file_name);
    PlyDecoder decoder;
    std::unique_ptr<Geometry> geometry(new Geometry());
    if (!decoder.DecodeFromFile(path, geometry.get()))
      return nullptr;
    return geometry;
  }
  // Checks that |file_name| decodes as a Mesh with exactly |num_faces| faces
  // and as a PointCloud with exactly |num_points| points.
  void test_decoding_method(const std::string &file_name, int num_faces,
                            uint32_t num_points) {
    const std::unique_ptr<Mesh> mesh(DecodePly<Mesh>(file_name));
    ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
    ASSERT_EQ(mesh->num_faces(), num_faces);
    const std::unique_ptr<PointCloud> pc(DecodePly<PointCloud>(file_name));
    ASSERT_NE(pc, nullptr) << "Failed to load test model " << file_name;
    ASSERT_EQ(pc->num_points(), num_points);
  }
};
TEST_F(PlyDecoderTest, TestPlyDecoding) {
  const std::string file_name = "test_pos_color.ply";
  test_decoding_method(file_name, 224, 114);
}
} // namespace draco

View File

@ -15,6 +15,8 @@
#ifndef DRACO_IO_PLY_PROPERTY_READER_H_ #ifndef DRACO_IO_PLY_PROPERTY_READER_H_
#define DRACO_IO_PLY_PROPERTY_READER_H_ #define DRACO_IO_PLY_PROPERTY_READER_H_
#include <functional>
#include "io/ply_reader.h" #include "io/ply_reader.h"
namespace draco { namespace draco {

View File

@ -172,7 +172,7 @@ bool PlyReader::ParseProperty(DecoderBuffer *buffer) {
} }
bool PlyReader::ParsePropertiesData(DecoderBuffer *buffer) { bool PlyReader::ParsePropertiesData(DecoderBuffer *buffer) {
for (int i = 0; i < elements_.size(); ++i) { for (int i = 0; i < static_cast<int>(elements_.size()); ++i) {
if (format_ == kLittleEndian) { if (format_ == kLittleEndian) {
if (!ParseElementData(buffer, i)) { if (!ParseElementData(buffer, i)) {
return false; return false;

142
io/ply_reader_test.cc Normal file
View File

@ -0,0 +1,142 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "io/ply_reader.h"
#include <fstream>
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/ply_property_reader.h"
namespace draco {
class PlyReaderTest : public ::testing::Test {
 protected:
  // Reads the contents of test file |file_name| into memory.
  // Returns an empty vector when the file cannot be opened, is empty, or the
  // read fails, so callers can treat "no data" uniformly.
  std::vector<char> ReadPlyFile(const std::string &file_name) const {
    const std::string path = GetTestFileFullPath(file_name);
    std::ifstream file(path.c_str(), std::ios::binary);
    if (!file)
      return std::vector<char>();
    // Determine the file size by seeking to the end.
    auto is_size = file.tellg();
    file.seekg(0, std::ios::end);
    is_size = file.tellg() - is_size;
    file.seekg(0, std::ios::beg);
    // Guard the empty case: the original &data[0] on an empty vector is
    // undefined behavior.
    if (is_size <= 0)
      return std::vector<char>();
    std::vector<char> data(is_size);
    // Check the read actually succeeded instead of returning garbage.
    if (!file.read(data.data(), is_size))
      return std::vector<char>();
    return data;
  }
};
TEST_F(PlyReaderTest, TestReader) {
  // Loads a binary PLY model and verifies its header layout.
  const std::vector<char> file_data = ReadPlyFile("test_pos_color.ply");
  DecoderBuffer buffer;
  buffer.Init(file_data.data(), file_data.size());
  PlyReader ply;
  ASSERT_TRUE(ply.Read(&buffer));
  ASSERT_EQ(ply.num_elements(), 2);
  ASSERT_EQ(ply.element(0).num_properties(), 7);
  ASSERT_EQ(ply.element(1).num_properties(), 1);
  ASSERT_TRUE(ply.element(1).property(0).is_list());
  const PlyProperty *const red_prop =
      ply.element(0).GetPropertyByName("red");
  ASSERT_TRUE(red_prop != nullptr);
  // The same property must read back to identical values regardless of the
  // value type used to access it.
  PlyPropertyReader<uint8_t> uint8_reader(red_prop);
  PlyPropertyReader<uint32_t> uint32_reader(red_prop);
  PlyPropertyReader<float> float_reader(red_prop);
  const int num_entries = ply.element(0).num_entries();
  for (int entry = 0; entry < num_entries; ++entry) {
    ASSERT_EQ(uint8_reader.ReadValue(entry), uint32_reader.ReadValue(entry));
    ASSERT_EQ(uint8_reader.ReadValue(entry), float_reader.ReadValue(entry));
  }
}
TEST_F(PlyReaderTest, TestReaderAscii) {
  // Decodes the binary and the ascii variant of the same model and checks
  // that they parse to matching structure and values.
  const std::vector<char> bin_data = ReadPlyFile("test_pos_color.ply");
  DecoderBuffer buf;
  buf.Init(bin_data.data(), bin_data.size());
  PlyReader reader;
  ASSERT_TRUE(reader.Read(&buf));
  const std::vector<char> ascii_data =
      ReadPlyFile("test_pos_color_ascii.ply");
  buf.Init(ascii_data.data(), ascii_data.size());
  PlyReader reader_ascii;
  ASSERT_TRUE(reader_ascii.Read(&buf));
  ASSERT_EQ(reader.num_elements(), reader_ascii.num_elements());
  ASSERT_EQ(reader.element(0).num_properties(),
            reader_ascii.element(0).num_properties());
  ASSERT_TRUE(reader.element(0).GetPropertyByName("x") != nullptr);
  PlyPropertyReader<float> bin_reader(
      reader.element(0).GetPropertyByName("x"));
  PlyPropertyReader<float> ascii_reader(
      reader_ascii.element(0).GetPropertyByName("x"));
  // Ascii encoding of floats is lossy, so compare with a small tolerance.
  for (int i = 0; i < reader.element(0).num_entries(); ++i) {
    ASSERT_NEAR(bin_reader.ReadValue(i), ascii_reader.ReadValue(i), 1e-4f);
  }
}
TEST_F(PlyReaderTest, TestReaderExtraWhitespace) {
  // A header containing extra whitespace must parse exactly like a clean one.
  const std::vector<char> data = ReadPlyFile("test_extra_whitespace.ply");
  DecoderBuffer decoder_buffer;
  decoder_buffer.Init(data.data(), data.size());
  PlyReader ply;
  ASSERT_TRUE(ply.Read(&decoder_buffer));
  ASSERT_EQ(ply.num_elements(), 2);
  ASSERT_EQ(ply.element(0).num_properties(), 7);
  ASSERT_EQ(ply.element(1).num_properties(), 1);
  ASSERT_TRUE(ply.element(1).property(0).is_list());
  const PlyProperty *const red = ply.element(0).GetPropertyByName("red");
  ASSERT_TRUE(red != nullptr);
  // The "red" channel must read back consistently through every value type.
  PlyPropertyReader<uint8_t> r8(red);
  PlyPropertyReader<uint32_t> r32(red);
  PlyPropertyReader<float> rf(red);
  const int entry_count = ply.element(0).num_entries();
  for (int i = 0; i < entry_count; ++i) {
    ASSERT_EQ(r8.ReadValue(i), r32.ReadValue(i));
    ASSERT_EQ(r8.ReadValue(i), rf.ReadValue(i));
  }
}
TEST_F(PlyReaderTest, TestReaderMoreDataTypes) {
  // Verifies decoding of a model whose header mixes additional scalar types.
  const std::vector<char> data = ReadPlyFile("test_more_datatypes.ply");
  DecoderBuffer buf;
  buf.Init(data.data(), data.size());
  PlyReader ply;
  ASSERT_TRUE(ply.Read(&buf));
  ASSERT_EQ(ply.num_elements(), 2);
  ASSERT_EQ(ply.element(0).num_properties(), 7);
  ASSERT_EQ(ply.element(1).num_properties(), 1);
  ASSERT_TRUE(ply.element(1).property(0).is_list());
  const PlyProperty *const red = ply.element(0).GetPropertyByName("red");
  ASSERT_TRUE(red != nullptr);
  // The same property must decode to identical values through all three
  // typed readers.
  PlyPropertyReader<uint8_t> u8(red);
  PlyPropertyReader<uint32_t> u32(red);
  PlyPropertyReader<float> f32(red);
  const int n = ply.element(0).num_entries();
  for (int i = 0; i < n; ++i) {
    ASSERT_EQ(u8.ReadValue(i), u32.ReadValue(i));
    ASSERT_EQ(u8.ReadValue(i), f32.ReadValue(i));
  }
}
} // namespace draco

64
io/point_cloud_io_test.cc Normal file
View File

@ -0,0 +1,64 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "io/point_cloud_io.h"
#include <sstream>
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/obj_decoder.h"
namespace draco {
class IoPointCloudIoTest : public ::testing::Test {
 protected:
  // Loads an obj file from the testdata directory into a new PointCloud.
  // Returns nullptr when decoding fails.
  std::unique_ptr<PointCloud> DecodeObj(const std::string &file_name) const {
    std::unique_ptr<PointCloud> point_cloud(new PointCloud());
    ObjDecoder decoder;
    if (!decoder.DecodeFromFile(GetTestFileFullPath(file_name),
                                point_cloud.get()))
      return nullptr;
    return point_cloud;
  }
  // Round-trips |file_name| through |method| via an in-memory stream and
  // checks the stream stays valid through both the write and the read.
  void test_compression_method(PointCloudEncodingMethod method,
                               const std::string &file_name) {
    const std::unique_ptr<PointCloud> input_pc = DecodeObj(file_name);
    ASSERT_NE(input_pc, nullptr) << "Failed to load test model " << file_name;
    std::stringstream ss;
    WritePointCloudIntoStream(input_pc.get(), ss, method);
    ASSERT_TRUE(ss.good());
    std::unique_ptr<PointCloud> decoded_pc;
    ReadPointCloudFromStream(&decoded_pc, ss);
    ASSERT_TRUE(ss.good());
  }
};
TEST_F(IoPointCloudIoTest, EncodeWithBinary) {
  // Exercise sequential point cloud encoding on both reference models.
  for (const char *model : {"test_nm.obj", "sphere.obj"}) {
    test_compression_method(POINT_CLOUD_SEQUENTIAL_ENCODING, model);
  }
}
TEST_F(IoPointCloudIoTest, ObjFileInput) {
  // Loading an obj point cloud straight from a file path should succeed and
  // yield the expected number of points.
  const std::string path = GetTestFileFullPath("test_nm.obj");
  const std::unique_ptr<PointCloud> point_cloud = ReadPointCloudFromFile(path);
  ASSERT_NE(point_cloud, nullptr) << "Failed to load the obj point cloud.";
  EXPECT_EQ(point_cloud->num_points(), 97u)
      << "Obj point cloud not loaded properly.";
}
} // namespace draco

View File

@ -67,7 +67,7 @@ bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
num_corners_on_vertices.reserve(num_corners()); num_corners_on_vertices.reserve(num_corners());
for (CornerIndex c(0); c < num_corners(); ++c) { for (CornerIndex c(0); c < num_corners(); ++c) {
const VertexIndex v1 = Vertex(c); const VertexIndex v1 = Vertex(c);
if (v1.value() >= num_corners_on_vertices.size()) if (v1.value() >= static_cast<int>(num_corners_on_vertices.size()))
num_corners_on_vertices.resize(v1.value() + 1, 0); num_corners_on_vertices.resize(v1.value() + 1, 0);
// For each corner there is always exactly one outgoing half-edge attached // For each corner there is always exactly one outgoing half-edge attached
// to its vertex. // to its vertex.
@ -93,7 +93,7 @@ bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
// vertices. // vertices.
std::vector<int> vertex_offset(num_corners_on_vertices.size()); std::vector<int> vertex_offset(num_corners_on_vertices.size());
int offset = 0; int offset = 0;
for (int i = 0; i < num_corners_on_vertices.size(); ++i) { for (size_t i = 0; i < num_corners_on_vertices.size(); ++i) {
vertex_offset[i] = offset; vertex_offset[i] = offset;
offset += num_corners_on_vertices[i]; offset += num_corners_on_vertices[i];
} }
@ -120,7 +120,7 @@ bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
// The maximum number of half-edges attached to the sink vertex. // The maximum number of half-edges attached to the sink vertex.
const int num_corners_on_vert = num_corners_on_vertices[sink_v.value()]; const int num_corners_on_vert = num_corners_on_vertices[sink_v.value()];
// Where to look for the first half-edge on the sink vertex. // Where to look for the first half-edge on the sink vertex.
int offset = vertex_offset[sink_v.value()]; offset = vertex_offset[sink_v.value()];
for (int i = 0; i < num_corners_on_vert; ++i, ++offset) { for (int i = 0; i < num_corners_on_vert; ++i, ++offset) {
const VertexIndex other_v = vertex_edges[offset].sink_vert; const VertexIndex other_v = vertex_edges[offset].sink_vert;
if (other_v < 0) if (other_v < 0)
@ -148,7 +148,7 @@ bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
// No opposite corner found. Insert the new edge // No opposite corner found. Insert the new edge
const int num_corners_on_source_vert = const int num_corners_on_source_vert =
num_corners_on_vertices[source_v.value()]; num_corners_on_vertices[source_v.value()];
int offset = vertex_offset[source_v.value()]; offset = vertex_offset[source_v.value()];
for (int i = 0; i < num_corners_on_source_vert; ++i, ++offset) { for (int i = 0; i < num_corners_on_source_vert; ++i, ++offset) {
// Find the first unused half-edge slot on the source vertex. // Find the first unused half-edge slot on the source vertex.
if (vertex_edges[offset].sink_vert < 0) { if (vertex_edges[offset].sink_vert < 0) {

View File

@ -211,7 +211,7 @@ class CornerTable {
const FaceIndex face = Face(corner_id); const FaceIndex face = Face(corner_id);
faces_[face][LocalIndex(corner_id)] = vert_id; faces_[face][LocalIndex(corner_id)] = vert_id;
if (vert_id >= 0) { if (vert_id >= 0) {
if (vertex_corners_.size() <= vert_id.value()) if (vertex_corners_.size() <= static_cast<size_t>(vert_id.value()))
vertex_corners_.resize(vert_id.value() + 1); vertex_corners_.resize(vert_id.value() + 1);
vertex_corners_[vert_id] = corner_id; vertex_corners_[vert_id] = corner_id;
} }

View File

@ -61,13 +61,13 @@ class Mesh : public PointCloud {
FaceIndex::ValueType num_faces() const { return faces_.size(); } FaceIndex::ValueType num_faces() const { return faces_.size(); }
const Face &face(FaceIndex face_id) const { const Face &face(FaceIndex face_id) const {
DCHECK_LE(0, face_id.value()); DCHECK_LE(0, face_id.value());
DCHECK_LT(face_id.value(), faces_.size()); DCHECK_LT(face_id.value(), static_cast<int>(faces_.size()));
return faces_[face_id]; return faces_[face_id];
} }
void SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) override { void SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) override {
PointCloud::SetAttribute(att_id, std::move(pa)); PointCloud::SetAttribute(att_id, std::move(pa));
if (attribute_data_.size() <= att_id) { if (static_cast<int>(attribute_data_.size()) <= att_id) {
attribute_data_.resize(att_id + 1); attribute_data_.resize(att_id + 1);
} }
} }

View File

@ -67,7 +67,7 @@ bool MeshCleanup::operator()(Mesh *mesh, const MeshCleanupOptions &options) {
const PointIndex::ValueType num_original_points = mesh->num_points(); const PointIndex::ValueType num_original_points = mesh->num_points();
// Map from old points to the new ones. // Map from old points to the new ones.
IndexTypeVector<PointIndex, PointIndex> point_map(num_original_points); IndexTypeVector<PointIndex, PointIndex> point_map(num_original_points);
if (num_new_points < mesh->num_points()) { if (num_new_points < static_cast<int>(mesh->num_points())) {
// Some of the points were removed. We need to remap the old points to the // Some of the points were removed. We need to remap the old points to the
// new ones. // new ones.
num_new_points = 0; num_new_points = 0;
@ -118,7 +118,7 @@ bool MeshCleanup::operator()(Mesh *mesh, const MeshCleanupOptions &options) {
bool att_indices_changed = false; bool att_indices_changed = false;
// If there are some unused attribute entries, remap the attribute values // If there are some unused attribute entries, remap the attribute values
// in the attribute buffer. // in the attribute buffer.
if (num_used_entries < att->size()) { if (num_used_entries < static_cast<int>(att->size())) {
att_index_map.resize(att->size()); att_index_map.resize(att->size());
num_used_entries = 0; num_used_entries = 0;
for (AttributeValueIndex i(0); i < att->size(); ++i) { for (AttributeValueIndex i(0); i < att->size(); ++i) {
@ -143,7 +143,7 @@ bool MeshCleanup::operator()(Mesh *mesh, const MeshCleanupOptions &options) {
if (att->is_mapping_identity()) { if (att->is_mapping_identity()) {
// The mapping was identity. It'll remain identity only if the // The mapping was identity. It'll remain identity only if the
// number of point and attribute indices is still the same. // number of point and attribute indices is still the same.
if (num_used_entries != mesh->num_points()) { if (num_used_entries != static_cast<int>(mesh->num_points())) {
// We need to create an explicit mapping. // We need to create an explicit mapping.
// First we need to initialize the explicit map to the original // First we need to initialize the explicit map to the original
// number of points to recreate the original identity map. // number of points to recreate the original identity map.

131
mesh/mesh_cleanup_test.cc Normal file
View File

@ -0,0 +1,131 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "mesh/mesh_cleanup.h"
#include "core/draco_test_base.h"
#include "core/vector_d.h"
#include "mesh/triangle_soup_mesh_builder.h"
namespace draco {
// Fixture for MeshCleanup tests; no shared setup or state is required.
class MeshCleanupTest : public ::testing::Test {};
TEST_F(MeshCleanupTest, TestDegneratedFaces) {
  // This test verifies that the mesh cleanup tools removes degenerated faces.
  TriangleSoupMeshBuilder builder;
  builder.Start(2);
  const int pos_att_id =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
  // Face 0 is a regular triangle; face 1 repeats a position for two of its
  // corners and is therefore degenerated.
  // clang-format off
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
                                    Vector3f(0.f, 0.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(0.f, 1.f, 0.f).data());
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
                                    Vector3f(0.f, 1.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data());
  // clang-format on
  std::unique_ptr<Mesh> mesh = builder.Finalize();
  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
  MeshCleanupOptions cleanup_options;
  MeshCleanup cleanup;
  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
      << "Failed to cleanup the mesh.";
  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
}
TEST_F(MeshCleanupTest, TestDegneratedFacesAndIsolatedVertices) {
  // This test verifies that the mesh cleanup tools removes degenerated faces
  // together with the points that become isolated once those faces are gone.
  TriangleSoupMeshBuilder builder;
  builder.Start(2);
  const int pos_att_id =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
  // Face 1 is degenerated: it repeats position (10, 1, 0), which no other
  // face references.
  // clang-format off
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
                                    Vector3f(0.f, 0.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(0.f, 1.f, 0.f).data());
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
                                    Vector3f(10.f, 1.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(10.f, 1.f, 0.f).data());
  // clang-format on
  std::unique_ptr<Mesh> mesh = builder.Finalize();
  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
  ASSERT_EQ(mesh->num_points(), 4u)
      << "Wrong number of point ids in the input mesh.";
  const MeshCleanupOptions cleanup_options;
  MeshCleanup cleanup;
  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
      << "Failed to cleanup the mesh.";
  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
  ASSERT_EQ(mesh->num_points(), 3u)
      << "Failed to remove isolated attribute indices.";
}
TEST_F(MeshCleanupTest, TestAttributes) {
  // Verifies that cleanup remaps both the position attribute and a secondary
  // generic attribute when degenerated faces and isolated points are removed.
  TriangleSoupMeshBuilder builder;
  builder.Start(2);
  const int pos_att_id =
      builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
  const int generic_att_id =
      builder.AddAttribute(GeometryAttribute::GENERIC, 2, DT_FLOAT32);
  // Face 1 is degenerated; removing it should also drop the generic entry
  // used only by that face.
  // clang-format off
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(0),
                                    Vector3f(0.f, 0.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(0.f, 1.f, 0.f).data());
  builder.SetAttributeValuesForFace(generic_att_id, FaceIndex(0),
                                    Vector2f(0.f, 0.f).data(),
                                    Vector2f(0.f, 0.f).data(),
                                    Vector2f(0.f, 0.f).data());
  builder.SetAttributeValuesForFace(pos_att_id, FaceIndex(1),
                                    Vector3f(10.f, 1.f, 0.f).data(),
                                    Vector3f(1.f, 0.f, 0.f).data(),
                                    Vector3f(10.f, 1.f, 0.f).data());
  builder.SetAttributeValuesForFace(generic_att_id, FaceIndex(1),
                                    Vector2f(1.f, 0.f).data(),
                                    Vector2f(1.f, 0.f).data(),
                                    Vector2f(1.f, 0.f).data());
  // clang-format on
  std::unique_ptr<Mesh> mesh = builder.Finalize();
  ASSERT_NE(mesh, nullptr) << "Failed to build the test mesh.";
  ASSERT_EQ(mesh->num_faces(), 2) << "Wrong number of faces in the input mesh.";
  ASSERT_EQ(mesh->num_points(), 5u)
      << "Wrong number of point ids in the input mesh.";
  ASSERT_EQ(mesh->attribute(1)->size(), 2u)
      << "Wrong number of generic attribute entries.";
  const MeshCleanupOptions cleanup_options;
  MeshCleanup cleanup;
  ASSERT_TRUE(cleanup(mesh.get(), cleanup_options))
      << "Failed to cleanup the mesh.";
  ASSERT_EQ(mesh->num_faces(), 1) << "Failed to remove degenerated faces.";
  ASSERT_EQ(mesh->num_points(), 3u)
      << "Failed to remove isolated attribute indices.";
  ASSERT_EQ(mesh->attribute(0)->size(), 3u)
      << "Wrong number of unique positions after cleanup.";
  ASSERT_EQ(mesh->attribute(1)->size(), 1u)
      << "Wrong number of generic attribute entries after cleanup.";
}
} // namespace draco

47
mesh/mesh_test.cc Normal file
View File

@ -0,0 +1,47 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "mesh/mesh.h"
#include "core/draco_test_base.h"
namespace draco {
// Fixture for basic Mesh tests; no shared setup or state is required.
class MeshTest : public ::testing::Test {};
TEST_F(MeshTest, CtorTest) {
  // This test verifies that Mesh Ctor does the job: a default-constructed
  // mesh must report no points, no attributes (named or otherwise), and no
  // faces.
  const Mesh mesh;
  ASSERT_EQ(mesh.num_points(), 0u);
  ASSERT_EQ(mesh.GetNamedAttributeId(GeometryAttribute::POSITION), -1);
  ASSERT_EQ(mesh.GetNamedAttribute(GeometryAttribute::POSITION), nullptr);
  ASSERT_EQ(mesh.num_attributes(), 0);
  ASSERT_EQ(mesh.num_faces(), 0);
}
TEST_F(MeshTest, GenTinyMesh) {
  // Placeholder test: intended to check properties of a tiny hand-initialized
  // Mesh, but currently empty because the builder interface is inaccessible.
  // TODO(hemmer): create a simple mesh builder class to facilitate testing.
  // TODO(hemmer): interface makes it impossible to do further testing --
  // builder functions are all protected, no access to the mesh from the
  // outside. Sketch of the intended body:
  // Mesh::Face f1 {{1,2,3}};
  // Mesh::Face f2 {{3, 4, 5}};
  // MeshBuilder builder;
  // builder.Start(2);
  // builder.SetNumVertices(6);
}
} // namespace draco

View File

@ -70,7 +70,7 @@ std::unique_ptr<Mesh> TriangleSoupMeshBuilder::Finalize() {
return nullptr; return nullptr;
// Also deduplicate vertex indices. // Also deduplicate vertex indices.
mesh_->DeduplicatePointIds(); mesh_->DeduplicatePointIds();
for (int i = 0; i < attribute_element_types_.size(); ++i) { for (size_t i = 0; i < attribute_element_types_.size(); ++i) {
if (attribute_element_types_[i] >= 0) { if (attribute_element_types_[i] >= 0) {
mesh_->SetAttributeElementType(i, static_cast<MeshAttributeElementType>( mesh_->SetAttributeElementType(i, static_cast<MeshAttributeElementType>(
attribute_element_types_[i])); attribute_element_types_[i]));

View File

@ -53,7 +53,8 @@ const PointAttribute *PointCloud::GetNamedAttribute(
const PointAttribute *PointCloud::GetNamedAttributeByCustomId( const PointAttribute *PointCloud::GetNamedAttributeByCustomId(
GeometryAttribute::Type type, uint16_t custom_id) const { GeometryAttribute::Type type, uint16_t custom_id) const {
for (int att_id = 0; att_id < named_attribute_index_[type].size(); ++att_id) { for (size_t att_id = 0; att_id < named_attribute_index_[type].size();
++att_id) {
if (attributes_[named_attribute_index_[type][att_id]]->custom_id() == if (attributes_[named_attribute_index_[type][att_id]]->custom_id() ==
custom_id) custom_id)
return attributes_[named_attribute_index_[type][att_id]].get(); return attributes_[named_attribute_index_[type][att_id]].get();
@ -90,7 +91,7 @@ int PointCloud::AddAttribute(
void PointCloud::SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) { void PointCloud::SetAttribute(int att_id, std::unique_ptr<PointAttribute> pa) {
DCHECK(att_id >= 0); DCHECK(att_id >= 0);
if (attributes_.size() <= att_id) { if (static_cast<int>(attributes_.size()) <= att_id) {
attributes_.resize(att_id + 1); attributes_.resize(att_id + 1);
} }
if (pa->attribute_type() < GeometryAttribute::NAMED_ATTRIBUTES_COUNT) { if (pa->attribute_type() < GeometryAttribute::NAMED_ATTRIBUTES_COUNT) {

View File

@ -52,7 +52,7 @@ class PointCloud {
int32_t num_attributes() const { return attributes_.size(); } int32_t num_attributes() const { return attributes_.size(); }
const PointAttribute *attribute(int32_t att_id) const { const PointAttribute *attribute(int32_t att_id) const {
DCHECK_LE(0, att_id); DCHECK_LE(0, att_id);
DCHECK_LT(att_id, attributes_.size()); DCHECK_LT(att_id, static_cast<int32_t>(attributes_.size()));
return attributes_[att_id].get(); return attributes_[att_id].get();
} }
@ -60,7 +60,7 @@ class PointCloud {
// maintain the attribute's consistency with draco::PointCloud. // maintain the attribute's consistency with draco::PointCloud.
PointAttribute *attribute(int32_t att_id) { PointAttribute *attribute(int32_t att_id) {
DCHECK_LE(0, att_id); DCHECK_LE(0, att_id);
DCHECK_LT(att_id, attributes_.size()); DCHECK_LT(att_id, static_cast<int32_t>(attributes_.size()));
return attributes_[att_id].get(); return attributes_[att_id].get();
} }
@ -116,12 +116,13 @@ struct PointCloudHasher {
hash = HashCombine(pc.attributes_.size(), hash); hash = HashCombine(pc.attributes_.size(), hash);
for (int i = 0; i < GeometryAttribute::NAMED_ATTRIBUTES_COUNT; ++i) { for (int i = 0; i < GeometryAttribute::NAMED_ATTRIBUTES_COUNT; ++i) {
hash = HashCombine(pc.named_attribute_index_[i].size(), hash); hash = HashCombine(pc.named_attribute_index_[i].size(), hash);
for (int j = 0; j < pc.named_attribute_index_[i].size(); ++j) { for (int j = 0; j < static_cast<int>(pc.named_attribute_index_[i].size());
++j) {
hash = HashCombine(pc.named_attribute_index_[i][j], hash); hash = HashCombine(pc.named_attribute_index_[i][j], hash);
} }
} }
// Hash attributes. // Hash attributes.
for (int i = 0; i < pc.attributes_.size(); ++i) { for (int i = 0; i < static_cast<int>(pc.attributes_.size()); ++i) {
PointAttributeHasher att_hasher; PointAttributeHasher att_hasher;
hash = HashCombine(att_hasher(*pc.attributes_[i]), hash); hash = HashCombine(att_hasher(*pc.attributes_[i]), hash);
} }

View File

@ -0,0 +1,71 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "point_cloud/point_cloud_builder.h"
namespace draco {
// Default constructor; the builder is unusable until Start() is called.
PointCloudBuilder::PointCloudBuilder() {}
// Begins a new point cloud with |num_points| points; any previously built
// state is discarded.
void PointCloudBuilder::Start(PointIndex::ValueType num_points) {
  point_cloud_.reset(new PointCloud());
  point_cloud_->set_num_points(num_points);
}
// Registers a new attribute on the point cloud being built and returns its
// attribute id.
int PointCloudBuilder::AddAttribute(GeometryAttribute::Type attribute_type,
                                    int8_t num_components, DataType data_type) {
  // Values are stored tightly packed, so the byte stride is simply the size
  // of one value times the number of components.
  const int64_t byte_stride = DataTypeLength(data_type) * num_components;
  GeometryAttribute attribute;
  attribute.Init(attribute_type, nullptr, num_components, data_type,
                 /* normalized= */ false, byte_stride, /* byte_offset= */ 0);
  return point_cloud_->AddAttribute(attribute, true,
                                    point_cloud_->num_points());
}
void PointCloudBuilder::SetAttributeValueForPoint(int att_id,
PointIndex point_index,
const void *attribute_value) {
PointAttribute *const att = point_cloud_->attribute(att_id);
att->SetAttributeValue(att->mapped_index(point_index), attribute_value);
}
// Copies attribute values for every point from |attribute_values|.
// |stride| is the byte distance between consecutive input values; 0 means
// the input is tightly packed in the attribute's native format.
void PointCloudBuilder::SetAttributeValuesForAllPoints(
    int att_id, const void *attribute_values, int stride) {
  PointAttribute *const att = point_cloud_->attribute(att_id);
  const int data_stride =
      DataTypeLength(att->data_type()) * att->components_count();
  const int input_stride = (stride == 0) ? data_stride : stride;
  if (input_stride == data_stride) {
    // The input layout matches the attribute buffer; copy in one shot.
    att->buffer()->Write(0, attribute_values,
                         point_cloud_->num_points() * data_stride);
  } else {
    // Strided input; copy attribute entries one by one.
    const uint8_t *const src =
        static_cast<const uint8_t *>(attribute_values);
    for (PointIndex i(0); i < point_cloud_->num_points(); ++i) {
      att->SetAttributeValue(att->mapped_index(i),
                             src + input_stride * i.value());
    }
  }
}
// Hands ownership of the built point cloud to the caller, optionally
// deduplicating attribute values and point ids first. The builder must be
// Start()ed again before reuse.
std::unique_ptr<PointCloud> PointCloudBuilder::Finalize(
    bool deduplicate_points) {
  if (deduplicate_points) {
    // Collapse identical attribute values first, then merge point ids that
    // now map to the same values.
    point_cloud_->DeduplicateAttributeValues();
    point_cloud_->DeduplicatePointIds();
  }
  return std::move(point_cloud_);
}
} // namespace draco

View File

@ -0,0 +1,80 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_
#define DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_
#include "point_cloud/point_cloud.h"
namespace draco {
// A helper class for constructing PointCloud instances from other data sources.
// Usage:
// PointCloudBuilder builder;
// // Initialize the builder for a given number of points (required).
// builder.Start(num_points);
// // Specify desired attributes.
// int pos_att_id =
// builder.AddAttribute(GeometryAttribute::POSITION, 3, DT_FLOAT32);
// // Add attribute values.
// for (PointIndex i(0); i < num_points; ++i) {
// builder.SetAttributeValueForPoint(pos_att_id, i, input_pos[i.value()]);
// }
// // Get the final PointCloud.
// constexpr bool deduplicate_points = false;
// std::unique_ptr<PointCloud> pc = builder.Finalize(deduplicate_points);
class PointCloudBuilder {
 public:
  PointCloudBuilder();
  // Starts collecting point cloud data.
  // The behavior of other functions is undefined before this method is called.
  void Start(PointIndex::ValueType num_points);
  // Registers a new attribute and returns its attribute id.
  int AddAttribute(GeometryAttribute::Type attribute_type,
                   int8_t num_components, DataType data_type);
  // Sets attribute value for a specific point.
  // |attribute_value| must contain data in the format specified by the
  // AddAttribute method.
  void SetAttributeValueForPoint(int att_id, PointIndex point_index,
                                 const void *attribute_value);
  // Sets attribute values for all points. All the values must be stored in the
  // input |attribute_values| buffer. |stride| can be used to define the byte
  // offset between two consecutive attribute values. If |stride| is set to 0,
  // the stride is automatically computed based on the format of the given
  // attribute.
  void SetAttributeValuesForAllPoints(int att_id, const void *attribute_values,
                                      int stride);
  // Finalizes the PointCloud or returns nullptr on error.
  // If |deduplicate_points| is set to true, the following happens:
  //   1. Attribute values with duplicate entries are deduplicated.
  //   2. Point ids that are mapped to the same attribute values are
  //      deduplicated.
  // Therefore, if |deduplicate_points| is true the final PointCloud can have
  // a different number of point from the value specified in the Start method.
  // Once this function is called, the builder becomes invalid and cannot be
  // used until the method Start() is called again.
  std::unique_ptr<PointCloud> Finalize(bool deduplicate_points);

 private:
  // The point cloud under construction; released to the caller by Finalize().
  std::unique_ptr<PointCloud> point_cloud_;
};
} // namespace draco
#endif // DRACO_POINT_CLOUD_POINT_CLOUD_BUILDER_H_

Some files were not shown because too many files have changed in this diff Show More