From 73bb3c8530f2863db55b2c93c293e2f2f07a067e Mon Sep 17 00:00:00 2001
From: Ondrej Stava
Date: Wed, 12 Apr 2017 12:09:14 -0700
Subject: [PATCH] Version 0.10.0 snapshot
- Improved compression for triangular meshes (~10%)
- Added WebAssembly decoder
- Code cleanup + robustness fixes
---
CMakeLists.txt | 76 ++-
Makefile.emcc | 102 ++++-
README.md | 66 +++
.../attributes/mesh_traversal_sequencer.h | 2 +
...n_scheme_constrained_multi_parallelogram.h | 432 ++++++++++++++++++
...sh_prediction_scheme_multi_parallelogram.h | 44 +-
.../mesh_prediction_scheme_parallelogram.h | 49 +-
...h_prediction_scheme_parallelogram_shared.h | 30 ++
.../mesh_prediction_scheme_tex_coords.h | 28 +-
.../prediction_scheme_decoder_factory.h | 2 +-
.../prediction_scheme_encoder_factory.cc | 8 +-
.../prediction_scheme_encoder_factory.h | 2 +-
.../prediction_scheme_factory.h | 18 +-
...ormal_octahedron_canonicalized_transform.h | 262 +++++++++++
...octahedron_canonicalized_transform_test.cc | 184 ++++++++
...ction_scheme_normal_octahedron_transform.h | 3 -
.../sequential_integer_attribute_decoder.cc | 15 +-
.../sequential_normal_attribute_decoder.h | 10 +
.../sequential_normal_attribute_encoder.h | 5 +-
compression/config/compression_shared.h | 45 +-
compression/decode.cc | 57 +--
compression/encode.cc | 22 -
compression/mesh/mesh_edgebreaker_decoder.cc | 5 +
.../mesh/mesh_edgebreaker_decoder_impl.cc | 161 +++++--
.../mesh/mesh_edgebreaker_decoder_impl.h | 6 +
compression/mesh/mesh_edgebreaker_encoder.cc | 15 +-
.../mesh/mesh_edgebreaker_encoder_impl.cc | 121 +++--
.../mesh/mesh_edgebreaker_encoder_impl.h | 14 +
.../mesh_edgebreaker_encoder_impl_interface.h | 4 +
.../mesh/mesh_edgebreaker_encoding_test.cc | 1 +
compression/mesh/mesh_edgebreaker_shared.h | 27 ++
.../mesh/mesh_edgebreaker_traversal_decoder.h | 2 +-
.../mesh/mesh_edgebreaker_traversal_encoder.h | 8 +-
...edgebreaker_traversal_predictive_decoder.h | 2 +-
...edgebreaker_traversal_predictive_encoder.h | 6 +-
...sh_edgebreaker_traversal_valence_decoder.h | 163 +++++++
...sh_edgebreaker_traversal_valence_encoder.h | 240 ++++++++++
compression/mesh/mesh_encoder.h | 11 -
compression/mesh/mesh_encoder_test.cc | 12 +-
compression/mesh/mesh_sequential_decoder.cc | 2 +-
.../algorithms/float_points_tree_decoder.cc | 3 +-
.../algorithms/float_points_tree_decoder.h | 14 +-
.../algorithms/float_points_tree_encoder.h | 8 +-
.../integer_points_kd_tree_decoder.h | 8 +-
.../integer_points_kd_tree_encoder.h | 8 +-
.../point_cloud_compression_method.h | 5 +-
.../point_cloud/point_cloud_decoder.cc | 44 +-
compression/point_cloud/point_cloud_decoder.h | 13 +
.../point_cloud/point_cloud_encoder.cc | 21 +
compression/point_cloud/point_cloud_encoder.h | 3 +
core/adaptive_rans_bit_coding_shared.h | 43 ++
...coding.cc => adaptive_rans_bit_decoder.cc} | 58 +--
core/adaptive_rans_bit_decoder.h | 54 +++
core/adaptive_rans_bit_encoder.cc | 59 +++
...s_coding.h => adaptive_rans_bit_encoder.h} | 35 +-
core/ans.h | 23 +-
...ct_bit_coding.cc => direct_bit_decoder.cc} | 23 +-
core/direct_bit_decoder.h | 79 ++++
core/direct_bit_encoder.cc | 39 ++
...rect_bit_coding.h => direct_bit_encoder.h} | 59 +--
core/draco_version.h | 2 +-
core/folded_integer_bit_decoder.h | 76 +++
..._coding.h => folded_integer_bit_encoder.h} | 56 +--
core/macros.h | 6 +-
core/options.cc | 2 +-
core/rans_bit_decoder.cc | 66 +++
core/rans_bit_decoder.h | 54 +++
core/{rans_coding.cc => rans_bit_encoder.cc} | 48 +-
core/{rans_coding.h => rans_bit_encoder.h} | 34 +-
core/rans_coding_test.cc | 6 +-
core/rans_symbol_coding.h | 11 +
core/rans_symbol_decoder.h | 40 +-
core/rans_symbol_encoder.h | 40 +-
core/shannon_entropy.cc | 33 ++
core/shannon_entropy.h | 38 ++
core/symbol_bit_decoder.cc | 47 ++
core/symbol_bit_decoder.h | 36 ++
core/symbol_bit_encoder.cc | 29 ++
core/symbol_bit_encoder.h | 36 ++
core/symbol_coding_test.cc | 17 +
core/symbol_decoding.cc | 9 -
core/symbol_decoding.h | 15 +-
core/symbol_encoding.cc | 81 ++--
core/symbol_encoding.h | 18 +-
core/varint_decoding.h | 55 +++
core/varint_encoding.h | 52 +++
javascript/draco_decoder.js | 15 +-
javascript/draco_decoder.wasm | Bin 0 -> 363294 bytes
javascript/draco_mesh_decoder.js | 15 +-
javascript/draco_point_cloud_decoder.js | 14 +-
javascript/draco_wasm_wrapper.js | 85 ++++
javascript/emscripten/finalize.js | 22 +
javascript/emscripten/prepareCallbacks.js | 38 ++
javascript/example/DRACOLoader.js | 55 ++-
javascript/example/README | 6 +-
javascript/example/webgl_loader_draco.html | 132 ++++--
javascript/time_draco_decode.html | 119 ++++-
mesh/corner_table.cc | 5 +-
mesh/corner_table.h | 2 +-
mesh/edgebreaker_traverser.h | 1 +
mesh/mesh_test.cc | 47 --
mesh/prediction_degree_traverser.h | 237 ++++++++++
testdata/test_nm.obj.edgebreaker.0.10.0.drc | Bin 0 -> 2529 bytes
....out => test_nm.obj.edgebreaker.0.9.1.drc} | Bin
testdata/test_nm.obj.sequential.0.10.0.drc | Bin 0 -> 2877 bytes
testdata/test_nm.obj.sequential.0.9.1.drc | Bin 0 -> 2877 bytes
testdata/test_nm.obj.sequential.out | Bin 2546 -> 0 bytes
tools/draco_decoder.cc | 2 +-
108 files changed, 3710 insertions(+), 853 deletions(-)
create mode 100644 compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h
create mode 100644 compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h
create mode 100644 compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
create mode 100644 compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
create mode 100644 compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
create mode 100644 core/adaptive_rans_bit_coding_shared.h
rename core/{adaptive_rans_coding.cc => adaptive_rans_bit_decoder.cc} (54%)
create mode 100644 core/adaptive_rans_bit_decoder.h
create mode 100644 core/adaptive_rans_bit_encoder.cc
rename core/{adaptive_rans_coding.h => adaptive_rans_bit_encoder.h} (64%)
rename core/{direct_bit_coding.cc => direct_bit_decoder.cc} (68%)
create mode 100644 core/direct_bit_decoder.h
create mode 100644 core/direct_bit_encoder.cc
rename core/{direct_bit_coding.h => direct_bit_encoder.h} (61%)
create mode 100644 core/folded_integer_bit_decoder.h
rename core/{folded_bit32_coding.h => folded_integer_bit_encoder.h} (63%)
create mode 100644 core/rans_bit_decoder.cc
create mode 100644 core/rans_bit_decoder.h
rename core/{rans_coding.cc => rans_bit_encoder.cc} (75%)
rename core/{rans_coding.h => rans_bit_encoder.h} (66%)
create mode 100644 core/shannon_entropy.cc
create mode 100644 core/shannon_entropy.h
create mode 100644 core/symbol_bit_decoder.cc
create mode 100644 core/symbol_bit_decoder.h
create mode 100644 core/symbol_bit_encoder.cc
create mode 100644 core/symbol_bit_encoder.h
create mode 100644 core/varint_decoding.h
create mode 100644 core/varint_encoding.h
create mode 100644 javascript/draco_decoder.wasm
create mode 100644 javascript/draco_wasm_wrapper.js
create mode 100644 javascript/emscripten/finalize.js
create mode 100644 javascript/emscripten/prepareCallbacks.js
delete mode 100644 mesh/mesh_test.cc
create mode 100644 mesh/prediction_degree_traverser.h
create mode 100644 testdata/test_nm.obj.edgebreaker.0.10.0.drc
rename testdata/{test_nm.obj.edgebreaker.out => test_nm.obj.edgebreaker.0.9.1.drc} (100%)
create mode 100644 testdata/test_nm.obj.sequential.0.10.0.drc
create mode 100644 testdata/test_nm.obj.sequential.0.9.1.drc
delete mode 100644 testdata/test_nm.obj.sequential.out
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 71c80a3..15656f2 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -13,23 +13,26 @@ option(ENABLE_POINT_CLOUD_COMPRESSION "" ON)
option(ENABLE_MESH_COMPRESSION "" ON)
option(ENABLE_STANDARD_EDGEBREAKER "" ON)
option(ENABLE_PREDICTIVE_EDGEBREAKER "" ON)
+option(ENABLE_EXTRA_SPEED "" OFF)
option(ENABLE_EXTRA_WARNINGS "" OFF)
option(ENABLE_TESTS "Enables tests." OFF)
option(ENABLE_WERROR "" OFF)
option(ENABLE_WEXTRA "" OFF)
option(IGNORE_EMPTY_BUILD_TYPE "" OFF)
+option(ENABLE_WASM "" OFF)
if (ENABLE_POINT_CLOUD_COMPRESSION)
add_cxx_preproc_definition("DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED")
endif ()
if (ENABLE_MESH_COMPRESSION)
add_cxx_preproc_definition("DRACO_MESH_COMPRESSION_SUPPORTED")
-endif ()
-if (ENABLE_STANDARD_EDGEBREAKER)
- add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
-endif ()
-if (ENABLE_PREDICTIVE_EDGEBREAKER)
- add_cxx_preproc_definition("DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
+
+ if (ENABLE_STANDARD_EDGEBREAKER)
+ add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
+ endif ()
+ if (ENABLE_PREDICTIVE_EDGEBREAKER)
+ add_cxx_preproc_definition("DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
+ endif ()
endif ()
# Turn on more compiler warnings.
@@ -187,6 +190,7 @@ set(draco_compression_attributes_pred_schemes_sources
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
+ "${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h"
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h"
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h"
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
@@ -197,6 +201,7 @@ set(draco_compression_attributes_pred_schemes_sources
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
+ "${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_transform.h"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform.h")
@@ -227,6 +232,7 @@ set(draco_compression_mesh_decoder_sources
"${draco_root}/compression/mesh/mesh_edgebreaker_shared.h"
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_decoder.h"
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
+ "${draco_root}/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
"${draco_root}/compression/mesh/mesh_sequential_decoder.cc"
"${draco_root}/compression/mesh/mesh_sequential_decoder.h")
@@ -239,6 +245,7 @@ set(draco_compression_mesh_encoder_sources
"${draco_root}/compression/mesh/mesh_edgebreaker_shared.h"
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_encoder.h"
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
+ "${draco_root}/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
"${draco_root}/compression/mesh/mesh_encoder.cc"
"${draco_root}/compression/mesh/mesh_encoder.h"
"${draco_root}/compression/mesh/mesh_encoder_helpers.h"
@@ -262,8 +269,11 @@ set(draco_compression_point_cloud_encoder_sources
"${draco_root}/compression/point_cloud/point_cloud_sequential_encoder.h")
set(draco_core_sources
- "${draco_root}/core/adaptive_rans_coding.cc"
- "${draco_root}/core/adaptive_rans_coding.h"
+ "${draco_root}/core/adaptive_rans_bit_coding_shared.h"
+ "${draco_root}/core/adaptive_rans_bit_decoder.h"
+ "${draco_root}/core/adaptive_rans_bit_decoder.cc"
+ "${draco_root}/core/adaptive_rans_bit_encoder.h"
+ "${draco_root}/core/adaptive_rans_bit_encoder.cc"
"${draco_root}/core/ans.h"
"${draco_root}/core/bit_coder.cc"
"${draco_root}/core/bit_coder.h"
@@ -274,8 +284,10 @@ set(draco_core_sources
"${draco_root}/core/data_buffer.h"
"${draco_root}/core/decoder_buffer.cc"
"${draco_root}/core/decoder_buffer.h"
- "${draco_root}/core/direct_bit_coding.cc"
- "${draco_root}/core/direct_bit_coding.h"
+ "${draco_root}/core/direct_bit_decoder.h"
+ "${draco_root}/core/direct_bit_decoder.cc"
+ "${draco_root}/core/direct_bit_encoder.h"
+ "${draco_root}/core/direct_bit_encoder.cc"
"${draco_root}/core/divide.cc"
"${draco_root}/core/divide.h"
"${draco_root}/core/draco_index_type.h"
@@ -284,7 +296,8 @@ set(draco_core_sources
"${draco_root}/core/draco_types.h"
"${draco_root}/core/encoder_buffer.cc"
"${draco_root}/core/encoder_buffer.h"
- "${draco_root}/core/folded_bit32_coding.h"
+ "${draco_root}/core/folded_integer_bit_decoder.h"
+ "${draco_root}/core/folded_integer_bit_encoder.h"
"${draco_root}/core/hash_utils.cc"
"${draco_root}/core/hash_utils.h"
"${draco_root}/core/macros.h"
@@ -293,15 +306,25 @@ set(draco_core_sources
"${draco_root}/core/options.h"
"${draco_root}/core/quantization_utils.cc"
"${draco_root}/core/quantization_utils.h"
- "${draco_root}/core/rans_coding.cc"
- "${draco_root}/core/rans_coding.h"
+ "${draco_root}/core/rans_bit_decoder.h"
+ "${draco_root}/core/rans_bit_decoder.cc"
+ "${draco_root}/core/rans_bit_encoder.h"
+ "${draco_root}/core/rans_bit_encoder.cc"
"${draco_root}/core/rans_symbol_coding.h"
"${draco_root}/core/rans_symbol_decoder.h"
"${draco_root}/core/rans_symbol_encoder.h"
+ "${draco_root}/core/shannon_entropy.h"
+ "${draco_root}/core/shannon_entropy.cc"
+ "${draco_root}/core/symbol_bit_decoder.h"
+ "${draco_root}/core/symbol_bit_decoder.cc"
+ "${draco_root}/core/symbol_bit_encoder.h"
+ "${draco_root}/core/symbol_bit_encoder.cc"
"${draco_root}/core/symbol_decoding.cc"
"${draco_root}/core/symbol_decoding.h"
"${draco_root}/core/symbol_encoding.cc"
"${draco_root}/core/symbol_encoding.h"
+ "${draco_root}/core/varint_decoding.h"
+ "${draco_root}/core/varint_encoding.h"
"${draco_root}/core/vector_d.h")
set(draco_io_sources
@@ -384,6 +407,7 @@ set(draco_test_sources
"${draco_root}/core/draco_test_base.h"
"${draco_root}/core/draco_test_utils.cc"
"${draco_root}/core/draco_test_utils.h"
+ "${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc"
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc"
"${draco_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc"
"${draco_root}/compression/mesh/mesh_encoder_test.cc"
@@ -402,7 +426,6 @@ set(draco_test_sources
"${draco_root}/io/point_cloud_io_test.cc"
"${draco_root}/mesh/mesh_are_equivalent_test.cc"
"${draco_root}/mesh/mesh_cleanup_test.cc"
- "${draco_root}/mesh/mesh_test.cc"
"${draco_root}/mesh/triangle_soup_mesh_builder_test.cc"
"${draco_root}/point_cloud/point_cloud_builder_test.cc")
@@ -420,11 +443,19 @@ if (EMSCRIPTEN)
add_compiler_flag_if_supported("-s ALLOW_MEMORY_GROWTH=1")
add_compiler_flag_if_supported("--memory-init-file 0")
add_compiler_flag_if_supported("-fno-omit-frame-pointer")
- add_compiler_flag_if_supported(-s MODULARIZE=1)
- add_compiler_flag_if_supported(-s EXPORT_NAME="'DracoModule'")
- add_compiler_flag_if_supported(--llvm-lto 1)
- add_compiler_flag_if_supported(-s NO_FILESYSTEM=1)
- add_compiler_flag_if_supported(-s ELIMINATE_DUPLICATE_FUNCTIONS=1)
+ add_compiler_flag_if_supported("-s MODULARIZE=1")
+ add_compiler_flag_if_supported("-s EXPORT_NAME=\"'DracoModule'\"")
+ if (ENABLE_EXTRA_SPEED)
+ add_compiler_flag_if_supported("--llvm-lto 1")
+ endif ()
+ add_compiler_flag_if_supported("-s NO_FILESYSTEM=1")
+ add_compiler_flag_if_supported("-s ELIMINATE_DUPLICATE_FUNCTIONS=1")
+ add_compiler_flag_if_supported("-s EXPORTED_RUNTIME_METHODS=[]")
+ add_compiler_flag_if_supported("-s PRECISE_F32=1")
+ if (ENABLE_WASM)
+ add_compiler_flag_if_supported("-s WASM=1")
+ add_compiler_flag_if_supported("-s BINARYEN_IMPRECISE=1")
+ endif ()
if (CMAKE_BUILD_TYPE STREQUAL "")
# Force -O3 when no build type is specified.
@@ -470,7 +501,12 @@ if (EMSCRIPTEN)
# Make $draco_js_sources source files depend on glue.cpp.
set_property(SOURCE ${draco_js_sources} APPEND PROPERTY OBJECT_DEPENDS
${draco_build_dir}/glue.cpp)
- em_link_post_js(draco_decoder "${draco_build_dir}/glue.js")
+ em_link_pre_js(draco_decoder
+ "${draco_root}/javascript/emscripten/prepareCallbacks.js"
+ "${draco_root}/javascript/emscripten/version.js")
+ em_link_post_js(draco_decoder
+ "${draco_build_dir}/glue.js"
+ "${draco_root}/javascript/emscripten/finalize.js")
else ()
# Standard Draco libs, encoder and decoder.
# Object collections that mirror the Draco directory structure.
diff --git a/Makefile.emcc b/Makefile.emcc
index 2113d58..f7a9e43 100644
--- a/Makefile.emcc
+++ b/Makefile.emcc
@@ -59,8 +59,15 @@ ALL_C_OPTS := -std=c++11
# Options for speed.
ALL_C_OPTS += -O3
-ALL_C_OPTS += --llvm-lto 1 -s NO_FILESYSTEM=1 -s ELIMINATE_DUPLICATE_FUNCTIONS=1
+ALL_C_OPTS += -s NO_FILESYSTEM=1 -s ELIMINATE_DUPLICATE_FUNCTIONS=1
ALL_C_OPTS += -s EXPORTED_RUNTIME_METHODS=[]
+ALL_C_OPTS += -s PRECISE_F32=1
+
+# Option to get about a 10% speed increase at the cost of about 10% in size.
+# Use "make target DRACO_PERFORMANCE_TYPE=extra_speed"
+ifeq ($(DRACO_PERFORMANCE_TYPE), extra_speed)
+ ALL_C_OPTS += --llvm-lto 1
+endif
# Options for debug
#ALL_C_OPTS += -g -s DEMANGLE_SUPPORT=1
@@ -79,6 +86,12 @@ ALL_C_OPTS += --memory-init-file 0
# Options to separate asm.js and mem file.
#ALL_C_OPTS += --separate-asm --memory-init-file 1
+# Options to output WebAssembly code.
+# Use "make target DRACO_BUILD_TYPE=wasm"
+ifeq ($(DRACO_BUILD_TYPE), wasm)
+ ALL_C_OPTS += -s WASM=1 -s BINARYEN_IMPRECISE=1
+endif
+
CFLAGS := $(ALL_C_OPTS)
CXXFLAGS := $(ALL_C_OPTS)
CFLAGS += -Wno-sign-compare -fno-omit-frame-pointer
@@ -100,20 +113,32 @@ ENCODER_BUFFER_OBJS := core/encoder_buffer.o
DECODER_BUFFER_A := libdecoder_buffer.a
DECODER_BUFFER_OBJS := core/bit_coder.o core/decoder_buffer.o
-RANS_CODING_A := librans_coding.a
-RANS_CODING_OBJS := core/divide.o core/rans_coding.o
+RANS_BIT_DECODER_A := librans_bit_decoder.a
+RANS_BIT_DECODER_OBJS := core/divide.o core/rans_bit_decoder.o
-ADAPTIVE_RANS_CODING_A := libadaptive_rans_coding.a
-ADAPTIVE_RANS_CODING_OBJS := core/adaptive_rans_coding.o
+RANS_BIT_ENCODER_A := librans_bit_encoder.a
+RANS_BIT_ENCODER_OBJS := core/divide.o core/rans_bit_encoder.o
+
+ADAPTIVE_RANS_BIT_DECODER_A := libadaptive_rans_bit_decoder.a
+ADAPTIVE_RANS_BIT_DECODER_OBJS := core/adaptive_rans_bit_decoder.o
+
+ADAPTIVE_RANS_BIT_ENCODER_A := libadaptive_rans_bit_encoder.a
+ADAPTIVE_RANS_BIT_ENCODER_OBJS := core/adaptive_rans_bit_encoder.o
CORNER_TABLE_A := libcorner_table.a
-CORNER_TABLE_OBJS := mesh/corner_table.o mesh/corner_table.o
+CORNER_TABLE_OBJS := mesh/corner_table.o
+
+SHANNON_ENTROPY_A := libshannon_entropy.a
+SHANNON_ENTROPY_OBJS := core/shannon_entropy.o
SYMBOL_CODING_A := libsymbol_coding.a
SYMBOL_CODING_OBJS := core/symbol_decoding.o core/symbol_encoding.o
-DIRECT_BIT_CODING_A := libdirect_bit_coding.a
-DIRECT_BIT_CODING_OBJS := core/direct_bit_coding.o
+DIRECT_BIT_DECODER_A := libdirect_bit_decoder.a
+DIRECT_BIT_DECODER_OBJS := core/direct_bit_decoder.o
+
+DIRECT_BIT_ENCODER_A := libdirect_bit_encoder.a
+DIRECT_BIT_ENCODER_OBJS := core/direct_bit_encoder.o
DRACO_TYPES_A := libdraco_types.a
DRACO_TYPES_OBJS := core/draco_types.o
@@ -256,9 +281,12 @@ INTEGER_POINTS_KD_TREE_ENCODER_OBJS := \
compression/point_cloud/algorithms/integer_points_kd_tree_encoder.o
CORNER_TABLE_OBJSA := $(addprefix $(OBJDIR)/,$(CORNER_TABLE_OBJS:.o=_a.o))
+SHANNON_ENTROPY_OBJSA := $(addprefix $(OBJDIR)/,$(SHANNON_ENTROPY_OBJS:.o=_a.o))
SYMBOL_CODING_OBJSA := $(addprefix $(OBJDIR)/,$(SYMBOL_CODING_OBJS:.o=_a.o))
-DIRECT_BIT_CODING_OBJSA := \
- $(addprefix $(OBJDIR)/,$(DIRECT_BIT_CODING_OBJS:.o=_a.o))
+DIRECT_BIT_DECODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(DIRECT_BIT_DECODER_OBJS:.o=_a.o))
+DIRECT_BIT_ENCODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(DIRECT_BIT_ENCODER_OBJS:.o=_a.o))
DECODER_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(DECODER_BUFFER_OBJS:.o=_a.o))
DATA_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(DATA_BUFFER_OBJS:.o=_a.o))
DRACO_TYPES_OBJSA := $(addprefix $(OBJDIR)/,$(DRACO_TYPES_OBJS:.o=_a.o))
@@ -339,9 +367,14 @@ QUANTIZATION_UTILS_OBJSA := \
CYCLE_TIMER_OBJSA := $(addprefix $(OBJDIR)/,$(CYCLE_TIMER_OBJS:.o=_a.o))
ENCODER_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(ENCODER_BUFFER_OBJS:.o=_a.o))
-RANS_CODING_OBJSA := $(addprefix $(OBJDIR)/,$(RANS_CODING_OBJS:.o=_a.o))
-ADAPTIVE_RANS_CODING_OBJSA := \
- $(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_CODING_OBJS:.o=_a.o))
+RANS_BIT_DECODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(RANS_BIT_DECODER_OBJS:.o=_a.o))
+RANS_BIT_ENCODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(RANS_BIT_ENCODER_OBJS:.o=_a.o))
+ADAPTIVE_RANS_BIT_DECODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_BIT_DECODER_OBJS:.o=_a.o))
+ADAPTIVE_RANS_BIT_ENCODER_OBJSA := \
+ $(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_BIT_ENCODER_OBJS:.o=_a.o))
OBJ_DECODER_OBJSA := $(addprefix $(OBJDIR)/,$(OBJ_DECODER_OBJS:.o=_a.o))
MESH_IO_OBJSA := $(addprefix $(OBJDIR)/,$(MESH_IO_OBJS:.o=_a.o))
PLY_ENCODER_OBJSA := $(addprefix $(OBJDIR)/,$(PLY_ENCODER_OBJS:.o=_a.o))
@@ -358,12 +391,16 @@ INTEGER_POINTS_KD_TREE_ENCODER_OBJSA := \
# Core objs
DRACO_CORE_OBJSA := $(DRACO_TYPES_OBJSA)
-DRACO_CORE_OBJSA += $(DIRECT_BIT_CODING_OBJSA)
-DRACO_CORE_OBJSA += $(RANS_CODING_OBJSA)
-DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_CODING_OBJSA)
+DRACO_CORE_OBJSA += $(DIRECT_BIT_DECODER_OBJSA)
+DRACO_CORE_OBJSA += $(DIRECT_BIT_ENCODER_OBJSA)
+DRACO_CORE_OBJSA += $(RANS_BIT_DECODER_OBJSA)
+DRACO_CORE_OBJSA += $(RANS_BIT_ENCODER_OBJSA)
+DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_BIT_DECODER_OBJSA)
+DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_BIT_ENCODER_OBJSA)
# Shared objs needed for both encoder and decoder
DRACO_SHARED_OBJSA := $(CORNER_TABLE_OBJSA) $(SYMBOL_CODING_OBJSA)
+DRACO_SHARED_OBJSA += $(SHANNON_ENTROPY_OBJSA)
DRACO_SHARED_OBJSA += $(DATA_BUFFER_OBJSA) $(DRACO_CORE_OBJSA)
DRACO_SHARED_OBJSA += $(GEOMETRY_ATTRIBUTE_OBJSA)
DRACO_SHARED_OBJSA += $(POINT_ATTRIBUTE_OBJSA)
@@ -371,7 +408,8 @@ DRACO_SHARED_OBJSA += $(POINT_CLOUD_OBJSA)
DRACO_SHARED_OBJSA += $(MESH_OBJSA)
DRACO_SHARED_OBJSA += $(MESH_MISC_OBJSA) $(MESH_ATTRIBUTE_CORNER_TABLE_OBJSA)
DRACO_SHARED_OBJSA += $(CYCLE_TIMER_OBJSA)
-DRACO_SHARED_OBJSA += $(RANS_CODING_OBJSA)
+DRACO_SHARED_OBJSA += $(RANS_BIT_DECODER_OBJSA)
+DRACO_SHARED_OBJSA += $(RANS_BIT_ENCODER_OBJSA)
DRACO_SHARED_OBJSA += $(QUANTIZATION_UTILS_OBJSA)
# Encoder specific objs
@@ -464,8 +502,10 @@ LIBS += $(LIBDIR)/libsequential_normal_attribute_encoder.a
LIBS += $(LIBDIR)/libcorner_table.a
LIBS += $(LIBDIR)/libmesh_attribute_corner_table.a
LIBS += $(LIBDIR)/libmesh_misc.a
+LIBS += $(LIBDIR)/libshannon_entropy.a
LIBS += $(LIBDIR)/libsymbol_coding.a
-LIBS += $(LIBDIR)/librans_coding.a
+LIBS += $(LIBDIR)/librans_bit_decoder.a
+LIBS += $(LIBDIR)/librans_bit_encoder.a
LIBS += $(LIBDIR)/libdata_buffer.a
LIBS += $(LIBDIR)/libdraco_types.a
LIBS += $(LIBDIR)/libdecoder_buffer.a
@@ -476,8 +516,10 @@ POINTS_LIBS := $(LIBDIR)/libfloat_points_tree_decoder.a
POINTS_LIBS += $(LIBDIR)/libfloat_points_tree_encoder.a
POINTS_LIBS += $(LIBDIR)/libinteger_points_kd_tree_decoder.a
POINTS_LIBS += $(LIBDIR)/libinteger_points_kd_tree_encoder.a
-POINTS_LIBS += $(LIBDIR)/libdirect_bit_coding.a
-POINTS_LIBS += $(LIBDIR)/libadaptive_rans_coding.a
+POINTS_LIBS += $(LIBDIR)/libdirect_bit_decoder.a
+POINTS_LIBS += $(LIBDIR)/libdirect_bit_encoder.a
+POINTS_LIBS += $(LIBDIR)/libadaptive_rans_bit_decoder.a
+POINTS_LIBS += $(LIBDIR)/libadaptive_rans_bit_encoder.a
DEPS := $(DRACO_OBJSA:_a.o=.d)
CLEAN := $(DEPS) $(OBJSA) $(LIBS) $(POINTS_LIBS)
@@ -493,15 +535,21 @@ build_glue:
python $(BINDER) $(IDL) glue
draco_decoder: $(OBJDIR)/javascript/emscripten/draco_glue_wrapper.o $(OBJDIR)/javascript/emscripten/webidl_wrapper.o $(DRACO_CORE_OBJSA) $(DRACO_SHARED_OBJSA) $(DRACO_DECODER_OBJSA)
- $(CXX) $(ALL_C_OPTS) $^ --post-js glue.js -o $@.js
+ $(CXX) $(ALL_C_OPTS) $^ --pre-js javascript/emscripten/prepareCallbacks.js --pre-js javascript/emscripten/version.js --post-js glue.js --post-js javascript/emscripten/finalize.js -o $@.js
$(LIBDIR)/libcorner_table.a: $(CORNER_TABLE_OBJSA)
$(AR) rcs $@ $^
+$(LIBDIR)/libshannon_entropy.a: $(SHANNON_ENTROPY_OBJSA)
+ $(AR) rcs $@ $^
+
$(LIBDIR)/libsymbol_coding.a: $(SYMBOL_CODING_OBJSA)
$(AR) rcs $@ $^
-$(LIBDIR)/libdirect_bit_coding.a: $(DIRECT_BIT_CODING_OBJSA)
+$(LIBDIR)/libdirect_bit_decoder.a: $(DIRECT_BIT_DECODER_OBJSA)
+ $(AR) rcs $@ $^
+
+$(LIBDIR)/libdirect_bit_encoder.a: $(DIRECT_BIT_ENCODER_OBJSA)
$(AR) rcs $@ $^
$(LIBDIR)/libdecoder_buffer.a: $(DECODER_BUFFER_OBJSA)
@@ -608,10 +656,16 @@ $(LIBDIR)/libquantization_utils.a: $(QUANTIZATION_UTILS_OBJSA)
$(LIBDIR)/libcycle_timer.a: $(CYCLE_TIMER_OBJSA)
$(AR) rcs $@ $^
-$(LIBDIR)/librans_coding.a: $(RANS_CODING_OBJSA)
+$(LIBDIR)/librans_bit_decoder.a: $(RANS_BIT_DECODER_OBJSA)
$(AR) rcs $@ $^
-$(LIBDIR)/libadaptive_rans_coding.a: $(ADAPTIVE_RANS_CODING_OBJSA)
+$(LIBDIR)/librans_bit_encoder.a: $(RANS_BIT_ENCODER_OBJSA)
+ $(AR) rcs $@ $^
+
+$(LIBDIR)/libadaptive_rans_bit_decoder.a: $(ADAPTIVE_RANS_BIT_DECODER_OBJSA)
+ $(AR) rcs $@ $^
+
+$(LIBDIR)/libadaptive_rans_bit_encoder.a: $(ADAPTIVE_RANS_BIT_ENCODER_OBJSA)
$(AR) rcs $@ $^
$(LIBDIR)/libobj_decoder.a: $(OBJ_DECODER_OBJSA)
diff --git a/README.md b/README.md
index 6d2d6a4..d08ea1a 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,21 @@
+News
+=======
+### Version 0.10.0 released
+This release brings improved mesh compression and faster decoding in browsers:
+* On average 10% better compression of triangular meshes (up to 20% for purely
+ spatial meshes without any extra attributes).
+* Up to 2X faster decoding in browsers with our newly provided WebAssembly
+ decoder.
+ * Supported in most modern browsers including Chrome, Firefox, and Edge.
+  * Decoder size is about 50% smaller compared to the Javascript version.
+* The new version is backward compatible with 0.9.x encoders.
+  * Note that 0.10.0 is not forward compatible. I.e., files encoded with 0.10.0
+    cannot be decoded with 0.9.x decoders.
+
+
Description
===========
@@ -174,6 +189,57 @@ $ export EMSCRIPTEN=/path/to/emscripten/tools/parent
# Emscripten.cmake can be found within your Emscripten installation directory,
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake
+
+# Build the Javascript decoder.
+$ make
+~~~~~
+
+WebAssembly Decoder
+-------------------
+
+The WebAssembly decoder can be built using the existing cmake build file by
+passing the path to Emscripten's cmake toolchain file at cmake generation time
+in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option.
+In addition, the EMSCRIPTEN environment variable must be set to the local path
+of the parent directory of the Emscripten tools directory.
+
+Make sure to have the correct version of Emscripten installed for WebAssembly
+builds. See https://developer.mozilla.org/en-US/docs/WebAssembly.
+
+~~~~~ bash
+# Make the path to emscripten available to cmake.
+$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
+
+# Emscripten.cmake can be found within your Emscripten installation directory,
+# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
+$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON
+
+# Build the WebAssembly decoder.
+$ make
+
+# Run the Javascript wrapper through Closure.
+$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js
+
+~~~~~
+
+WebAssembly Mesh Only Decoder
+-----------------------------
+
+~~~~~ bash
+
+# cmake command line for mesh only WebAssembly decoder.
+$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON -DENABLE_POINT_CLOUD_COMPRESSION=OFF
+
+~~~~~
+
+WebAssembly Point Cloud Only Decoder
+-----------------------------
+
+~~~~~ bash
+
+# cmake command line for point cloud only WebAssembly decoder.
+$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON -DENABLE_MESH_COMPRESSION=OFF
+
~~~~~
diff --git a/compression/attributes/mesh_traversal_sequencer.h b/compression/attributes/mesh_traversal_sequencer.h
index 04adf56..3fb1c22 100644
--- a/compression/attributes/mesh_traversal_sequencer.h
+++ b/compression/attributes/mesh_traversal_sequencer.h
@@ -70,6 +70,7 @@ class MeshTraversalSequencer : public PointsSequencer {
protected:
bool GenerateSequenceInternal() override {
+ traverser_.OnTraversalStart();
if (corner_order_) {
for (uint32_t i = 0; i < corner_order_->size(); ++i) {
ProcessCorner(corner_order_->at(i));
@@ -80,6 +81,7 @@ class MeshTraversalSequencer : public PointsSequencer {
ProcessCorner(CornerIndex(3 * i));
}
}
+ traverser_.OnTraversalEnd();
return true;
}
diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h
new file mode 100644
index 0000000..6acb3a2
--- /dev/null
+++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h
@@ -0,0 +1,432 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
+
+#include <algorithm>
+#include <cmath>
+
+#include "compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
+#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
+#include "core/rans_bit_decoder.h"
+#include "core/rans_bit_encoder.h"
+#include "core/varint_decoding.h"
+#include "core/varint_encoding.h"
+
+namespace draco {
+
+// Compared to standard multi parallelogram, constrained multi parallelogram can
+// explicitly select which of the available parallelograms are going to be used
+// for the prediction by marking crease edges between two triangles. This
+// requires storing extra data, but it allows the predictor to avoid using
+// parallelograms that would lead to poor predictions. For improved efficiency,
+// our current implementation limits the maximum number of used parallelograms
+// to four, which covers >95% of the cases (on average, there are only two
+// parallelograms available for any given vertex).
+// TODO(ostava): Split this into two classes (encoder x decoder).
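+// As a quick illustration (example values only): if a vertex has two usable
+// parallelograms that predict (10, 12, 9) and (14, 12, 11), the combined
+// prediction is their per-component average (12, 12, 10), and only the
+// correction against that average is stored. Marking one of the two edges as
+// a crease removes its parallelogram from the average.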
+template <typename DataTypeT, class TransformT, class MeshDataT>
+class MeshPredictionSchemeConstrainedMultiParallelogram
+    : public MeshPredictionScheme<DataTypeT, TransformT, MeshDataT> {
+ public:
+  using CorrType = typename PredictionScheme<DataTypeT, TransformT>::CorrType;
+ using CornerTable = typename MeshDataT::CornerTable;
+
+ explicit MeshPredictionSchemeConstrainedMultiParallelogram(
+ const PointAttribute *attribute)
+      : MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(attribute),
+ selected_mode_(OPTIMAL_MULTI_PARALLELOGRAM) {}
+ MeshPredictionSchemeConstrainedMultiParallelogram(
+ const PointAttribute *attribute, const TransformT &transform,
+ const MeshDataT &mesh_data)
+      : MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(
+ attribute, transform, mesh_data),
+ selected_mode_(OPTIMAL_MULTI_PARALLELOGRAM) {}
+
+ bool Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+ bool Decode(const CorrType *in_corr, DataTypeT *out_data, int size,
+ int num_components,
+ const PointIndex *entry_to_point_id_map) override;
+
+ bool EncodePredictionData(EncoderBuffer *buffer) override;
+ bool DecodePredictionData(DecoderBuffer *buffer) override;
+
+ PredictionSchemeMethod GetPredictionMethod() const override {
+ return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
+ }
+
+ bool IsInitialized() const override {
+ return this->mesh_data().IsInitialized();
+ }
+
+ private:
+ enum Mode {
+ // Selects the optimal multi-parallelogram from up to 4 available
+ // parallelograms.
+ OPTIMAL_MULTI_PARALLELOGRAM = 0,
+ };
+
+ static constexpr int kMaxNumParallelograms = 4;
+ // Crease edges are used to store whether any given edge should be used for
+ // parallelogram prediction or not. New values are added in the order in which
+  // the edges are processed. For better compression, the flags are stored in
+  // separate contexts based on the number of available parallelograms at a
+ // given vertex.
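+  // For example, every vertex that had exactly three parallelograms available
+  // contributes its three flags to |is_crease_edge_[2]|, so each context can
+  // be coded with a bit probability estimated from its own flags.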
+  std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
+ Mode selected_mode_;
+};
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogram<
+ DataTypeT, TransformT,
+ MeshDataT>::Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
+ int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().InitializeEncoding(in_data, size, num_components);
+ const CornerTable *const table = this->mesh_data().corner_table();
+  const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // Predicted values for all simple parallelograms encountered at any given
+ // vertex.
+  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ pred_vals[i].resize(num_components);
+ }
+ // Used to store predicted value for various multi-parallelogram predictions
+ // (combinations of simple parallelogram predictions).
+  std::vector<DataTypeT> multi_pred_vals(num_components);
+
+ // Struct for holding data about prediction configuration for different sets
+ // of used parallelograms.
+ struct PredictionConfiguration {
+ PredictionConfiguration()
+        : error(std::numeric_limits<int>::max()),
+ configuration(0),
+ num_used_parallelograms(0) {}
+ int error;
+ uint8_t configuration; // Bitfield, 1 use parallelogram, 0 don't use it.
+ int num_used_parallelograms;
+    std::vector<DataTypeT> predicted_value;
+ };
+
+  // Bit-field used for computing permutations of excluded edges
+ // (parallelograms).
+ bool exluded_parallelograms[kMaxNumParallelograms];
+
+ // We start processing the vertices from the end because this prediction uses
+ // data from previous entries that could be overwritten when an entry is
+ // processed.
+ for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ // Go over all corners attached to the vertex and compute the predicted
+ // value from the parallelograms defined by their opposite faces.
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ bool first_pass = true;
+ while (corner_id >= 0) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, in_data, num_components,
+ &(pred_vals[num_parallelograms][0]))) {
+ // Parallelogram prediction applied and stored in
+ // |pred_vals[num_parallelograms]|
+ ++num_parallelograms;
+ // Stop processing when we reach the maximum number of allowed
+ // parallelograms.
+ if (num_parallelograms == kMaxNumParallelograms)
+ break;
+ }
+
+ // Proceed to the next corner attached to the vertex. First swing left
+ // and if we reach a boundary, swing right from the start corner.
+ if (first_pass) {
+ corner_id = table->SwingLeft(corner_id);
+ } else {
+ corner_id = table->SwingRight(corner_id);
+ }
+ if (corner_id == start_corner_id) {
+ break;
+ }
+ if (corner_id < 0 && first_pass) {
+ first_pass = false;
+ corner_id = table->SwingRight(start_corner_id);
+ }
+ }
+
+ // Offset to the target (destination) vertex.
+ const int dst_offset = p * num_components;
+ int error = 0;
+
+ // Compute all prediction errors for all possible configurations of
+ // available parallelograms.
+
+ // Variable for holding the best configuration that has been found so far.
+ PredictionConfiguration best_prediction;
+
+ // Compute delta coding error (configuration when no parallelogram is
+ // selected).
+ const int src_offset = (p - 1) * num_components;
+ for (int i = 0; i < num_components; ++i) {
+ error += (std::abs(in_data[dst_offset + i] - in_data[src_offset + i]));
+ }
+
+ best_prediction.error = error;
+ best_prediction.configuration = 0;
+ best_prediction.num_used_parallelograms = 0;
+ best_prediction.predicted_value.assign(
+ in_data + src_offset, in_data + src_offset + num_components);
+
+ // Compute prediction error for different cases of used parallelograms.
+ for (int num_used_parallelograms = 1;
+ num_used_parallelograms <= num_parallelograms;
+ ++num_used_parallelograms) {
+ // Mark all parallelograms as excluded.
+ std::fill(exluded_parallelograms,
+ exluded_parallelograms + num_parallelograms, true);
+ // Mark the first |num_used_parallelograms| as not excluded.
+ for (int j = 0; j < num_used_parallelograms; ++j) {
+ exluded_parallelograms[j] = false;
+ }
+ // Permute over the excluded edges and compute error for each
+ // configuration (permutation of excluded parallelograms).
+ do {
+ // Reset the multi-parallelogram predicted values.
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] = 0;
+ }
+ uint8_t configuration = 0;
+ for (int j = 0; j < num_parallelograms; ++j) {
+ if (exluded_parallelograms[j])
+ continue;
+ for (int c = 0; c < num_components; ++c) {
+ multi_pred_vals[c] += pred_vals[j][c];
+ }
+ // Set j'th bit of the configuration.
+ configuration |= (1 << j);
+ }
+ error = 0;
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] /= num_used_parallelograms;
+ error += std::abs(multi_pred_vals[j] - in_data[dst_offset + j]);
+ }
+ if (error < best_prediction.error) {
+ best_prediction.error = error;
+ best_prediction.configuration = configuration;
+ best_prediction.num_used_parallelograms = num_used_parallelograms;
+ best_prediction.predicted_value.assign(multi_pred_vals.begin(),
+ multi_pred_vals.end());
+ }
+ } while (std::next_permutation(
+ exluded_parallelograms, exluded_parallelograms + num_parallelograms));
+ }
+
+ for (int i = 0; i < num_parallelograms; ++i) {
+ if ((best_prediction.configuration & (1 << i)) == 0) {
+ // Parallelogram not used, mark the edge as crease.
+ is_crease_edge_[num_parallelograms - 1].push_back(true);
+ } else {
+ // Parallelogram used. Add it to the predicted value and mark the
+ // edge as not a crease.
+ is_crease_edge_[num_parallelograms - 1].push_back(false);
+ }
+ }
+ this->transform().ComputeCorrection(in_data + dst_offset,
+ best_prediction.predicted_value.data(),
+ out_corr, dst_offset);
+ }
+ // First element is always fixed because it cannot be predicted.
+ for (int i = 0; i < num_components; ++i) {
+    pred_vals[0][i] = static_cast<DataTypeT>(0);
+ }
+ this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr,
+ 0);
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogram<
+ DataTypeT, TransformT,
+ MeshDataT>::Decode(const CorrType *in_corr, DataTypeT *out_data,
+ int /* size */, int num_components,
+ const PointIndex * /* entry_to_point_id_map */) {
+ this->transform().InitializeDecoding(num_components);
+
+ // Predicted values for all simple parallelograms encountered at any given
+ // vertex.
+  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ pred_vals[i].resize(num_components, 0);
+ }
+ this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr, out_data,
+ 0);
+
+ const CornerTable *const table = this->mesh_data().corner_table();
+  const std::vector<int32_t> *const vertex_to_data_map =
+ this->mesh_data().vertex_to_data_map();
+
+ // Current position in the |is_crease_edge_| array for each context.
+  std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);
+
+ // Used to store predicted value for multi-parallelogram prediction.
+  std::vector<DataTypeT> multi_pred_vals(num_components);
+
+ const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
+ for (int p = 1; p < corner_map_size; ++p) {
+ const CornerIndex start_corner_id =
+ this->mesh_data().data_to_corner_map()->at(p);
+
+ CornerIndex corner_id(start_corner_id);
+ int num_parallelograms = 0;
+ bool first_pass = true;
+ while (corner_id >= 0) {
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, out_data,
+ num_components, &(pred_vals[num_parallelograms][0]))) {
+ // Parallelogram prediction applied and stored in
+ // |pred_vals[num_parallelograms]|
+ ++num_parallelograms;
+ // Stop processing when we reach the maximum number of allowed
+ // parallelograms.
+ if (num_parallelograms == kMaxNumParallelograms)
+ break;
+ }
+
+ // Proceed to the next corner attached to the vertex. First swing left
+ // and if we reach a boundary, swing right from the start corner.
+ if (first_pass) {
+ corner_id = table->SwingLeft(corner_id);
+ } else {
+ corner_id = table->SwingRight(corner_id);
+ }
+ if (corner_id == start_corner_id) {
+ break;
+ }
+ if (corner_id < 0 && first_pass) {
+ first_pass = false;
+ corner_id = table->SwingRight(start_corner_id);
+ }
+ }
+
+ // Check which of the available parallelograms are actually used and compute
+ // the final predicted value.
+ int num_used_parallelograms = 0;
+ if (num_parallelograms > 0) {
+ for (int i = 0; i < num_components; ++i) {
+ multi_pred_vals[i] = 0;
+ }
+ // Check which parallelograms are actually used.
+ for (int i = 0; i < num_parallelograms; ++i) {
+ const int context = num_parallelograms - 1;
+ const bool is_crease =
+ is_crease_edge_[context][is_crease_edge_pos[context]++];
+ if (!is_crease) {
+ ++num_used_parallelograms;
+ for (int j = 0; j < num_components; ++j) {
+ multi_pred_vals[j] += pred_vals[i][j];
+ }
+ }
+ }
+ }
+ const int dst_offset = p * num_components;
+ if (num_used_parallelograms == 0) {
+ // No parallelogram was valid.
+ // We use the last decoded point as a reference.
+ const int src_offset = (p - 1) * num_components;
+ this->transform().ComputeOriginalValue(out_data + src_offset, in_corr,
+ out_data + dst_offset, dst_offset);
+ } else {
+ // Compute the correction from the predicted value.
+ for (int c = 0; c < num_components; ++c) {
+ multi_pred_vals[c] /= num_used_parallelograms;
+ }
+ this->transform().ComputeOriginalValue(multi_pred_vals.data(), in_corr,
+ out_data + dst_offset, dst_offset);
+ }
+ }
+ return true;
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogram<
+ DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
+ *buffer) {
+ // Encode prediction mode.
+  buffer->Encode(static_cast<uint8_t>(selected_mode_));
+
+ // Encode selected edges using separate rans bit coder for each context.
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ // |i| is the context based on the number of available parallelograms, which
+ // is always equal to |i + 1|.
+ const int num_used_parallelograms = i + 1;
+ EncodeVarint(is_crease_edge_[i].size(), buffer);
+ if (is_crease_edge_[i].size()) {
+ RAnsBitEncoder encoder;
+ encoder.StartEncoding();
+ // Encode the crease edge flags in the reverse vertex order that is needed
+      // by the decoder. Note that for the currently supported mode, each vertex
+ // has exactly |num_used_parallelograms| edges that need to be encoded.
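+      // For example, because Encode() walks the vertices backwards, the flags
+      // of the lowest-indexed vertex sit at the end of |is_crease_edge_[i]|;
+      // writing the groups back to front therefore emits them in the forward
+      // order in which the decoder consumes them.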
+ for (int j = is_crease_edge_[i].size() - num_used_parallelograms; j >= 0;
+ j -= num_used_parallelograms) {
+ // Go over all edges of the current vertex.
+ for (int k = 0; k < num_used_parallelograms; ++k) {
+ encoder.EncodeBit(is_crease_edge_[i][j + k]);
+ }
+ }
+ encoder.EndEncoding(buffer);
+ }
+ }
+  return MeshPredictionScheme<DataTypeT, TransformT,
+                              MeshDataT>::EncodePredictionData(buffer);
+}
+
+template <typename DataTypeT, class TransformT, class MeshDataT>
+bool MeshPredictionSchemeConstrainedMultiParallelogram<
+ DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
+ *buffer) {
+ // Decode prediction mode.
+ uint8_t mode;
+ if (!buffer->Decode(&mode)) {
+ return false;
+ }
+
+ if (mode != OPTIMAL_MULTI_PARALLELOGRAM) {
+ // Unsupported mode.
+ return false;
+ }
+
+  // Decode selected edges using separate rans bit coder for each context.
+ for (int i = 0; i < kMaxNumParallelograms; ++i) {
+ uint32_t num_flags;
+ DecodeVarint(&num_flags, buffer);
+ if (num_flags > 0) {
+ is_crease_edge_[i].resize(num_flags);
+ RAnsBitDecoder decoder;
+ decoder.StartDecoding(buffer);
+ for (int j = 0; j < num_flags; ++j) {
+ is_crease_edge_[i][j] = decoder.DecodeNextBit();
+ }
+ decoder.EndDecoding();
+ }
+ }
+  return MeshPredictionScheme<DataTypeT, TransformT,
+                              MeshDataT>::DecodePredictionData(buffer);
+}
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h
index f25de07..34ce431 100644
--- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h
+++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h
@@ -70,6 +70,8 @@ bool MeshPredictionSchemeMultiParallelogram::
this->mesh_data().vertex_to_data_map();
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+  std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
+ new DataTypeT[num_components]());
// We start processing from the end because this prediction uses data from
// previous entries that could be overwritten when an entry is processed.
@@ -85,30 +87,16 @@ bool MeshPredictionSchemeMultiParallelogram::
pred_vals[i] = static_cast<DataTypeT>(0);
}
while (corner_id >= 0) {
- // TODO(ostava): Move code shared between multiple predictors into a new
- // file.
- int vert_opp = p, vert_next = p, vert_prev = p;
- const CornerIndex opp_corner = table->Opposite(corner_id);
- if (opp_corner >= 0) {
- GetParallelogramEntries(opp_corner, table, *vertex_to_data_map,
- &vert_opp, &vert_next, &vert_prev);
- }
- if (vert_opp < p && vert_next < p && vert_prev < p) {
- // Apply the parallelogram prediction.
- const int v_opp_off = vert_opp * num_components;
- const int v_next_off = vert_next * num_components;
- const int v_prev_off = vert_prev * num_components;
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, in_data, num_components,
+ parallelogram_pred_vals.get())) {
for (int c = 0; c < num_components; ++c) {
- pred_vals[c] += (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
- in_data[v_opp_off + c];
+ pred_vals[c] += parallelogram_pred_vals[c];
}
++num_parallelograms;
}
// Proceed to the next corner attached to the vertex.
- // TODO(ostava): This will not go around the whole neighborhood on
- // vertices on a mesh boundary. We need to SwingLeft from the start vertex
- // again to get the full coverage.
corner_id = table->SwingRight(corner_id);
if (corner_id == start_corner_id) {
corner_id = kInvalidCornerIndex;
@@ -145,6 +133,8 @@ bool MeshPredictionSchemeMultiParallelogram::
this->transform().InitializeDecoding(num_components);
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
+  std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
+ new DataTypeT[num_components]());
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data, 0);
@@ -163,21 +153,11 @@ bool MeshPredictionSchemeMultiParallelogram::
pred_vals[i] = static_cast<DataTypeT>(0);
}
while (corner_id >= 0) {
- int vert_opp = p, vert_next = p, vert_prev = p;
- const CornerIndex opp_corner = table->Opposite(corner_id);
- if (opp_corner >= 0) {
- GetParallelogramEntries(opp_corner, table, *vertex_to_data_map,
- &vert_opp, &vert_next, &vert_prev);
- }
- if (vert_opp < p && vert_next < p && vert_prev < p) {
- // Apply the parallelogram prediction.
- const int v_opp_off = vert_opp * num_components;
- const int v_next_off = vert_next * num_components;
- const int v_prev_off = vert_prev * num_components;
+ if (ComputeParallelogramPrediction(
+ p, corner_id, table, *vertex_to_data_map, out_data,
+ num_components, parallelogram_pred_vals.get())) {
for (int c = 0; c < num_components; ++c) {
- pred_vals[c] +=
- (out_data[v_next_off + c] + out_data[v_prev_off + c]) -
- out_data[v_opp_off + c];
+ pred_vals[c] += parallelogram_pred_vals[c];
}
++num_parallelograms;
}
diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h
index 4e5de17..b3a91e9 100644
--- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h
+++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h
@@ -77,32 +77,18 @@ bool MeshPredictionSchemeParallelogram::
this->mesh_data().vertex_to_data_map();
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
- // Initialize the vertex ids to "p" which ensures that if the opposite
- // corner does not exist we will not use the vertices to predict the
- // encoded value.
- int vert_opp = p, vert_next = p, vert_prev = p;
- const CornerIndex opp_corner = table->Opposite(corner_id);
- if (opp_corner >= 0) {
- // Get vertices on the opposite face.
- GetParallelogramEntries(opp_corner, table, *vertex_to_data_map, &vert_opp,
- &vert_next, &vert_prev);
- }
const int dst_offset = p * num_components;
- if (vert_opp >= p || vert_next >= p || vert_prev >= p) {
- // Some of the vertices are not valid (not encoded yet).
- // We use the last encoded point as a reference.
+ if (!ComputeParallelogramPrediction(p, corner_id, table,
+ *vertex_to_data_map, in_data,
+ num_components, pred_vals.get())) {
+      // Parallelogram could not be computed, possibly because some of the
+      // vertices are not valid (not encoded yet).
+      // We use the last encoded point as a reference (delta coding).
const int src_offset = (p - 1) * num_components;
this->transform().ComputeCorrection(
in_data + dst_offset, in_data + src_offset, out_corr, dst_offset);
} else {
// Apply the parallelogram prediction.
- const int v_opp_off = vert_opp * num_components;
- const int v_next_off = vert_next * num_components;
- const int v_prev_off = vert_prev * num_components;
- for (int c = 0; c < num_components; ++c) {
- pred_vals[c] = (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
- in_data[v_opp_off + c];
- }
this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
out_corr, dst_offset);
}
@@ -133,29 +119,18 @@ bool MeshPredictionSchemeParallelogram::
const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
for (int p = 1; p < corner_map_size; ++p) {
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
- int vert_opp = p, vert_next = p, vert_prev = p;
- const CornerIndex opp_corner = table->Opposite(corner_id);
- if (opp_corner >= 0) {
- // Get vertices on the opposite face.
- GetParallelogramEntries(opp_corner, table, *vertex_to_data_map, &vert_opp,
- &vert_next, &vert_prev);
- }
const int dst_offset = p * num_components;
- if (vert_opp >= p || vert_next >= p || vert_prev >= p) {
- // Some of the vertices are not valid (not decoded yet).
- // We use the last decoded point as a reference.
+ if (!ComputeParallelogramPrediction(p, corner_id, table,
+ *vertex_to_data_map, out_data,
+ num_components, pred_vals.get())) {
+      // Parallelogram could not be computed, possibly because some of the
+      // vertices are not valid (not decoded yet).
+      // We use the last decoded point as a reference (delta coding).
const int src_offset = (p - 1) * num_components;
this->transform().ComputeOriginalValue(out_data + src_offset, in_corr,
out_data + dst_offset, dst_offset);
} else {
// Apply the parallelogram prediction.
- const int v_opp_off = vert_opp * num_components;
- const int v_next_off = vert_next * num_components;
- const int v_prev_off = vert_prev * num_components;
- for (int c = 0; c < num_components; ++c) {
- pred_vals[c] = (out_data[v_next_off + c] + out_data[v_prev_off + c]) -
- out_data[v_opp_off + c];
- }
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr,
out_data + dst_offset, dst_offset);
}
diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
index b392dc2..e8610a9 100644
--- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
+++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
@@ -35,6 +35,36 @@ inline void GetParallelogramEntries(
*prev_entry = vertex_to_data_map[table->Vertex(table->Previous(ci)).value()];
}
+// Computes parallelogram prediction for a given corner and data entry id.
+// The prediction is stored in |out_prediction|.
+// Function returns false when the prediction couldn't be computed, e.g. because
+// not all entry points were available.
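+// In other words, with the parallelogram entries denoted next (N), previous
+// (P) and opposite (O), each component of the prediction is computed as
+// N + P - O, i.e. the point that completes the parallelogram spanned by the
+// three already processed entries.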
+template <class CornerTableT, typename DataTypeT>
+inline bool ComputeParallelogramPrediction(
+ int data_entry_id, const CornerIndex ci, const CornerTableT *table,
+    const std::vector<int32_t> &vertex_to_data_map, const DataTypeT *in_data,
+ int num_components, DataTypeT *out_prediction) {
+ const CornerIndex oci = table->Opposite(ci);
+ if (oci < 0)
+ return false;
+ int vert_opp, vert_next, vert_prev;
+ GetParallelogramEntries(oci, table, vertex_to_data_map,
+ &vert_opp, &vert_next, &vert_prev);
+ if (vert_opp < data_entry_id && vert_next < data_entry_id &&
+ vert_prev < data_entry_id) {
+ // Apply the parallelogram prediction.
+ const int v_opp_off = vert_opp * num_components;
+ const int v_next_off = vert_next * num_components;
+ const int v_prev_off = vert_prev * num_components;
+ for (int c = 0; c < num_components; ++c) {
+ out_prediction[c] = (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
+ in_data[v_opp_off + c];
+ }
+ return true;
+ }
+ return false; // Not all data is available for prediction
+}
+
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
diff --git a/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h b/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h
index f9ef843..d2eca2c 100644
--- a/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h
+++ b/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h
@@ -18,7 +18,8 @@
#include <math.h>
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
-#include "core/rans_coding.h"
+#include "core/rans_bit_decoder.h"
+#include "core/rans_bit_encoder.h"
#include "core/vector_d.h"
#include "mesh/corner_table.h"
@@ -40,12 +41,13 @@ class MeshPredictionSchemeTexCoords
typename MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>::CorrType;
MeshPredictionSchemeTexCoords(const PointAttribute *attribute,
const TransformT &transform,
- const MeshDataT &mesh_data)
+ const MeshDataT &mesh_data, int version)
: MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(
attribute, transform, mesh_data),
pos_attribute_(nullptr),
entry_to_point_id_map_(nullptr),
- num_components_(0) {}
+ num_components_(0),
+ version_(version) {}
bool Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components,
@@ -111,6 +113,7 @@ class MeshPredictionSchemeTexCoords
int num_components_;
// Encoded / decoded array of UV flips.
std::vector<bool> orientations_;
+ int version_;
};
template <typename DataTypeT, class TransformT, class MeshDataT>
@@ -181,7 +184,7 @@ bool MeshPredictionSchemeTexCoords::
DecodePredictionData(DecoderBuffer *buffer) {
// Decode the delta coded orientations.
int32_t num_orientations = 0;
- if (!buffer->Decode(&num_orientations))
+ if (!buffer->Decode(&num_orientations) || num_orientations < 0)
return false;
orientations_.resize(num_orientations);
bool last_orientation = true;
@@ -270,10 +273,19 @@ void MeshPredictionSchemeTexCoords::
// normalization explicitly and instead we can just use the squared norm
// of |pn| as a denominator of the resulting dot product of non normalized
// vectors.
- const float s = pn.Dot(cn) / pn_norm2_squared;
- // To get the coordinate t, we can use formula:
- // t = |C-N - (P-N) * s| / |P-N|
- const float t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
+ float s, t;
+ // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
+ // the same positions (e.g. because they were quantized to the same
+ // location).
+ if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) {
+ s = pn.Dot(cn) / pn_norm2_squared;
+ // To get the coordinate t, we can use formula:
+ // t = |C-N - (P-N) * s| / |P-N|
+ t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
+ } else {
+ s = 0;
+ t = 0;
+ }
// Now we need to transform the point (s, t) to the texture coordinate space
// UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h b/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h
index 493840b..e152e4a 100644
--- a/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h
+++ b/compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h
@@ -42,7 +42,7 @@ CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
const MeshDecoder *const mesh_decoder =
static_cast<const MeshDecoder *>(decoder);
auto ret = CreateMeshPredictionScheme(
- mesh_decoder, method, att_id, transform);
+ mesh_decoder, method, att_id, transform, decoder->bitstream_version());
if (ret)
return ret;
// Otherwise try to create another prediction scheme.
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
index c6d2304..c3e1f93 100644
--- a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
+++ b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
@@ -35,12 +35,14 @@ PredictionSchemeMethod SelectPredictionMethod(
if (encoder->options()->GetSpeed() >= 8) {
return PREDICTION_DIFFERENCE;
}
- if (encoder->options()->GetSpeed() >= 2) {
- // Parallelogram prediction is used for speeds 2 - 7.
+ if (encoder->options()->GetSpeed() >= 2 ||
+ encoder->point_cloud()->num_points() < 40) {
+ // Parallelogram prediction is used for speeds 2 - 7, or when the overhead
+ // of using the constrained multi-parallelogram predictor would be too high.
return MESH_PREDICTION_PARALLELOGRAM;
}
// Multi-parallelogram is used for speeds 0, 1.
- return MESH_PREDICTION_MULTI_PARALLELOGRAM;
+ return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
}
// Default option is delta coding.
return PREDICTION_DIFFERENCE;
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
index 975cbd7..af94f4e 100644
--- a/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
+++ b/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
@@ -52,7 +52,7 @@ CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
const MeshEncoder *const mesh_encoder =
static_cast<const MeshEncoder *>(encoder);
auto ret = CreateMeshPredictionScheme(
- mesh_encoder, method, att_id, transform);
+ mesh_encoder, method, att_id, transform, kDracoBitstreamVersion);
if (ret)
return ret;
// Otherwise try to create another prediction scheme.
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_factory.h b/compression/attributes/prediction_schemes/prediction_scheme_factory.h
index 17d1838..d39d6a7 100644
--- a/compression/attributes/prediction_schemes/prediction_scheme_factory.h
+++ b/compression/attributes/prediction_schemes/prediction_scheme_factory.h
@@ -23,6 +23,7 @@
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h"
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h"
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h"
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h"
@@ -50,7 +51,8 @@ std::unique_ptr>
CreateMeshPredictionSchemeInternal(PredictionSchemeMethod method,
const PointAttribute *attribute,
const TransformT &transform,
- const MeshDataT &mesh_data) {
+ const MeshDataT &mesh_data,
+ uint16_t bitstream_version) {
if (method == MESH_PREDICTION_PARALLELOGRAM) {
return std::unique_ptr>(
new MeshPredictionSchemeParallelogram(
@@ -60,10 +62,14 @@ CreateMeshPredictionSchemeInternal(PredictionSchemeMethod method,
new MeshPredictionSchemeMultiParallelogram(
attribute, transform, mesh_data));
+ } else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) {
+ return std::unique_ptr>(
+ new MeshPredictionSchemeConstrainedMultiParallelogram<
+ DataTypeT, TransformT, MeshDataT>(attribute, transform, mesh_data));
} else if (method == MESH_PREDICTION_TEX_COORDS) {
return std::unique_ptr>(
new MeshPredictionSchemeTexCoords(
- attribute, transform, mesh_data));
+ attribute, transform, mesh_data, bitstream_version));
}
return nullptr;
}
@@ -72,11 +78,13 @@ template
std::unique_ptr>
CreateMeshPredictionScheme(const EncodingDataSourceT *source,
PredictionSchemeMethod method, int att_id,
- const TransformT &transform) {
+ const TransformT &transform,
+ uint16_t bitstream_version) {
const PointAttribute *const att = source->point_cloud()->attribute(att_id);
if (source->GetGeometryType() == TRIANGULAR_MESH &&
(method == MESH_PREDICTION_PARALLELOGRAM ||
method == MESH_PREDICTION_MULTI_PARALLELOGRAM ||
+ method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM ||
method == MESH_PREDICTION_TEX_COORDS)) {
const CornerTable *const ct = source->GetCornerTable();
const MeshAttributeIndicesEncodingData *const encoding_data =
@@ -96,7 +104,7 @@ CreateMeshPredictionScheme(const EncodingDataSourceT *source,
&encoding_data->vertex_to_encoded_attribute_value_index_map);
auto ret =
CreateMeshPredictionSchemeInternal(
- method, att, transform, md);
+ method, att, transform, md, bitstream_version);
if (ret)
return ret;
} else {
@@ -107,7 +115,7 @@ CreateMeshPredictionScheme(const EncodingDataSourceT *source,
&encoding_data->vertex_to_encoded_attribute_value_index_map);
auto ret =
CreateMeshPredictionSchemeInternal(
- method, att, transform, md);
+ method, att, transform, md, bitstream_version);
if (ret)
return ret;
}
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h
new file mode 100644
index 0000000..8cee145
--- /dev/null
+++ b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h
@@ -0,0 +1,262 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
+#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
+
+#include
+
+#include "compression/attributes/normal_compression_utils.h"
+#include "compression/attributes/prediction_schemes/prediction_scheme.h"
+#include "core/macros.h"
+#include "core/vector_d.h"
+
+namespace draco {
+
+// The transform works on octahedral coordinates for normals. The square is
+// subdivided into four inner triangles (diamond) and four outer triangles. The
+// inner triangles are associated with the upper part of the octahedron and the
+// outer triangles are associated with the lower part.
+// Given a prediction value P and the actual value Q that should be encoded,
+// this transform first checks if P is outside the diamond. If so, the outer
+// triangles are flipped towards the inside and vice versa. Then it checks if P
+// is in the bottom left quadrant. If it is not, it rotates P and Q accordingly.
+// The actual correction value is then based on the mapped and rotated P and Q
+// values. The inversion tends to result in shorter correction vectors and the
+// rotation makes it so that all long correction values are positive, reducing
+// the possible value range of the correction values and increasing the
+// occurrence of large positive correction values, which helps the entropy
+// encoder. This is possible since P is also known by the decoder, see also
+// ComputeCorrection and ComputeOriginalValue functions.
+// Note that the tile is not periodic, which implies that the outer edges can
+// not be identified, which requires us to use an odd number of values on each
+// axis.
+// DataTypeT is expected to be some integral type.
+//
+template <typename DataTypeT>
+class PredictionSchemeNormalOctahedronCanonicalizedTransform
+ : public PredictionSchemeTransform {
+ public:
+ typedef VectorD<DataTypeT, 2> Point2;
+ typedef DataTypeT CorrType;
+ typedef DataTypeT DataType;
+
+ PredictionSchemeNormalOctahedronCanonicalizedTransform()
+ : mod_value_(0), max_value_(0) {}
+ // We expect the mod value to be of the form 2^b-1.
+ PredictionSchemeNormalOctahedronCanonicalizedTransform(DataType mod_value)
+ : mod_value_(mod_value), max_value_((mod_value - 1) / 2) {}
+
+ PredictionSchemeTransformType GetType() const {
+ return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED;
+ }
+
+ // We can return true as we keep correction values positive.
+ bool AreCorrectionsPositive() const { return true; }
+
+ bool EncodeTransformData(EncoderBuffer *buffer) {
+ buffer->Encode(mod_value_);
+ buffer->Encode(max_value_);
+ return true;
+ }
+
+ bool DecodeTransformData(DecoderBuffer *buffer) {
+ if (!buffer->Decode(&mod_value_))
+ return false;
+ if (!buffer->Decode(&max_value_))
+ return false;
+ return true;
+ }
+
+ inline void ComputeCorrection(const DataType *orig_vals,
+ const DataType *pred_vals,
+ CorrType *out_corr_vals, int val_id) const {
+ DCHECK_LE(pred_vals[0], max_value_ * 2);
+ DCHECK_LE(pred_vals[1], max_value_ * 2);
+ DCHECK_LE(orig_vals[0], max_value_ * 2);
+ DCHECK_LE(orig_vals[1], max_value_ * 2);
+ DCHECK_LE(0, pred_vals[0]);
+ DCHECK_LE(0, pred_vals[1]);
+ DCHECK_LE(0, orig_vals[0]);
+ DCHECK_LE(0, orig_vals[1]);
+
+ const Point2 orig = Point2(orig_vals[0], orig_vals[1]);
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = ComputeCorrection(orig, pred);
+ DCHECK_EQ(true, Verify(orig, pred, corr));
+
+ out_corr_vals[val_id] = corr[0];
+ out_corr_vals[val_id + 1] = corr[1];
+ }
+
+ inline void ComputeOriginalValue(const DataType *pred_vals,
+ const CorrType *corr_vals,
+ DataType *out_orig_vals, int val_id) const {
+ DCHECK_LE(pred_vals[0], 2 * max_value_);
+ DCHECK_LE(pred_vals[1], 2 * max_value_);
+ DCHECK_LE(corr_vals[val_id], 2 * max_value_);
+ DCHECK_LE(corr_vals[val_id + 1], 2 * max_value_);
+
+ DCHECK_LE(0, pred_vals[0]);
+ DCHECK_LE(0, pred_vals[1]);
+ DCHECK_LE(0, corr_vals[val_id]);
+ DCHECK_LE(0, corr_vals[val_id + 1]);
+
+ const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
+ const Point2 corr = Point2(corr_vals[val_id], corr_vals[val_id + 1]);
+ const Point2 orig = ComputeOriginalValue(pred, corr);
+
+ out_orig_vals[0] = orig[0];
+ out_orig_vals[1] = orig[1];
+ }
+
+ int32_t GetRotationCount(Point2 pred) const {
+ const DataType sign_x = pred[0];
+ const DataType sign_y = pred[1];
+
+ int32_t rotation_count = 0;
+ if (sign_x == 0) {
+ if (sign_y == 0) {
+ rotation_count = 0;
+ } else if (sign_y > 0) {
+ rotation_count = 3;
+ } else {
+ rotation_count = 1;
+ }
+ } else if (sign_x > 0) {
+ if (sign_y >= 0) {
+ rotation_count = 2;
+ } else {
+ rotation_count = 1;
+ }
+ } else {
+ if (sign_y <= 0) {
+ rotation_count = 0;
+ } else {
+ rotation_count = 3;
+ }
+ }
+ return rotation_count;
+ }
+
+ Point2 RotatePoint(Point2 p, int32_t rotation_count) const {
+ switch (rotation_count) {
+ case 1:
+ return Point2(p[1], -p[0]);
+ case 2:
+ return Point2(-p[0], -p[1]);
+ case 3:
+ return Point2(-p[1], p[0]);
+ default:
+ return p;
+ }
+ }
+
+ bool IsInBottomLeft(const Point2 &p) const {
+ if (p[0] == 0 && p[1] == 0)
+ return true;
+ return (p[0] < 0 && p[1] <= 0);
+ }
+
+ private:
+ Point2 ComputeCorrection(Point2 orig, Point2 pred) const {
+ const Point2 t(max_value_, max_value_);
+ orig = orig - t;
+ pred = pred - t;
+ if (!IsInDiamond(max_value_, pred[0], pred[1])) {
+ InvertRepresentation(max_value_, &orig[0], &orig[1]);
+ InvertRepresentation(max_value_, &pred[0], &pred[1]);
+ }
+ if (!IsInBottomLeft(pred)) {
+ int32_t rotation_count = GetRotationCount(pred);
+ orig = RotatePoint(orig, rotation_count);
+ pred = RotatePoint(pred, rotation_count);
+ }
+ Point2 corr = orig - pred;
+ corr[0] = MakePositive(corr[0]);
+ corr[1] = MakePositive(corr[1]);
+ return corr;
+ }
+
+ Point2 ComputeOriginalValue(Point2 pred, Point2 corr) const {
+ const Point2 t(max_value_, max_value_);
+ pred = pred - t;
+ const bool pred_is_in_diamond = IsInDiamond(max_value_, pred[0], pred[1]);
+ if (!pred_is_in_diamond) {
+ InvertRepresentation(max_value_, &pred[0], &pred[1]);
+ }
+ const bool pred_is_in_bottom_left = IsInBottomLeft(pred);
+ const int32_t rotation_count = GetRotationCount(pred);
+ if (!pred_is_in_bottom_left) {
+ pred = RotatePoint(pred, rotation_count);
+ }
+ Point2 orig = pred + corr;
+ orig[0] = ModMax(orig[0]);
+ orig[1] = ModMax(orig[1]);
+ if (!pred_is_in_bottom_left) {
+ const int32_t reverse_rotation_count = (4 - rotation_count) % 4;
+ orig = RotatePoint(orig, reverse_rotation_count);
+ }
+ if (!pred_is_in_diamond) {
+ InvertRepresentation(max_value_, &orig[0], &orig[1]);
+ }
+ orig = orig + t;
+ return orig;
+ }
+
+ // For correction values.
+ DataType MakePositive(DataType x) const {
+ DCHECK_LE(x, max_value_ * 2);
+ if (x < 0)
+ return x + mod_value_;
+ return x;
+ }
+
+ DataType ModMax(DataType x) const {
+ if (x > max_value_)
+ return x - mod_value_;
+ if (x < -max_value_)
+ return x + mod_value_;
+ return x;
+ }
+
+ // Only called in debug mode.
+ bool Verify(const Point2 &orig, const Point2 pred, const Point2 corr) const {
+ const Point2 veri = ComputeOriginalValue(pred, corr);
+ return AreEquivalent(orig, veri);
+ }
+
+ // Only called in debug mode
+ bool AreEquivalent(Point2 p, Point2 q) const {
+ const Point2 t(max_value_, max_value_);
+ p = p - t;
+ q = q - t;
+ if (std::abs(p[0]) == max_value_ && p[1] < 0)
+ p[1] = -p[1];
+ if (std::abs(p[1]) == max_value_ && p[0] < 0)
+ p[0] = -p[0];
+ if (std::abs(q[0]) == max_value_ && q[1] < 0)
+ q[1] = -q[1];
+ if (std::abs(q[1]) == max_value_ && q[0] < 0)
+ q[0] = -q[0];
+ return (p[0] == q[0] && p[1] == q[1]);
+ }
+
+ DataType mod_value_;
+ DataType max_value_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
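ComputeOriginalValue() above undoes the canonicalization by applying (4 - rotation_count) % 4 additional 90-degree rotations. A tiny self-contained check of that inverse property (plain C++, not part of the patch; RotatePoint's clockwise convention is reproduced in Rotate90):

#include <cassert>
#include <utility>

using P2 = std::pair<int, int>;

// One step matches RotatePoint() above: (x, y) -> (y, -x).
P2 Rotate90(P2 p, int count) {
  for (int i = 0; i < count; ++i)
    p = P2(p.second, -p.first);
  return p;
}

int main() {
  const P2 q(3, -4);
  for (int k = 0; k < 4; ++k) {
    const P2 rotated = Rotate90(q, k);
    const P2 back = Rotate90(rotated, (4 - k) % 4);
    assert(back == q);  // k rotations plus (4 - k) % 4 rotations are a no-op.
  }
  return 0;
}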
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
new file mode 100644
index 0000000..76194bc
--- /dev/null
+++ b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc
@@ -0,0 +1,184 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
+#include "core/draco_test_base.h"
+
+namespace {
+
+class PredictionSchemeNormalOctahedronCanonicalizedTransformTest
+ : public ::testing::Test {
+ protected:
+ typedef draco::PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
+ Transform;
+ typedef Transform::Point2 Point2;
+
+ void TestComputeCorrection(const Transform &transform, const int32_t &ox,
+ const int32_t &oy, const int32_t &px,
+ const int32_t &py, const int32_t &cx,
+ const int32_t &cy) {
+ const int32_t o[2] = {ox + 7, oy + 7};
+ const int32_t p[2] = {px + 7, py + 7};
+ int32_t corr[2] = {500, 500};
+ transform.ComputeCorrection(o, p, corr, 0);
+ ASSERT_EQ(corr[0], (cx + 15) % 15);
+ ASSERT_EQ(corr[1], (cy + 15) % 15);
+ }
+
+ void TestGetRotationCount(const Transform &transform, const Point2 &pred,
+ const int32_t &rot_dir) {
+ const int32_t rotation_count = transform.GetRotationCount(pred);
+ ASSERT_EQ(rot_dir, rotation_count);
+ }
+
+ void TestRotateRepresentation(const Transform &transform, const Point2 &org,
+ const Point2 &pred, const Point2 &rot_org,
+ const Point2 &rot_pred) {
+ const int32_t rotation_count = transform.GetRotationCount(pred);
+ const Point2 res_org = transform.RotatePoint(org, rotation_count);
+ const Point2 res_pred = transform.RotatePoint(pred, rotation_count);
+ ASSERT_EQ(rot_org[0], res_org[0]);
+ ASSERT_EQ(rot_org[1], res_org[1]);
+ ASSERT_EQ(rot_pred[0], res_pred[0]);
+ ASSERT_EQ(rot_pred[1], res_pred[1]);
+ }
+};
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Init) {
+ const Transform transform(15);
+ ASSERT_TRUE(transform.AreCorrectionsPositive());
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ IsInBottomLeft) {
+ const Transform transform(15);
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(0, 0)));
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(-1, -1)));
+ ASSERT_TRUE(transform.IsInBottomLeft(Point2(-7, -7)));
+
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, 1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, 7)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(-1, 1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(-7, 7)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, -1)));
+ ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, -7)));
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ GetRotationCount) {
+ const Transform transform(15);
+ TestGetRotationCount(transform, Point2(1, 2), 2); // top right
+ TestGetRotationCount(transform, Point2(-1, 2), 3); // top left
+ TestGetRotationCount(transform, Point2(1, -2), 1); // bottom right
+ TestGetRotationCount(transform, Point2(-1, -2), 0); // bottom left
+ TestGetRotationCount(transform, Point2(0, 2), 3); // top left
+ TestGetRotationCount(transform, Point2(0, -2), 1); // bottom right
+ TestGetRotationCount(transform, Point2(2, 0), 2); // top right
+ TestGetRotationCount(transform, Point2(-2, 0), 0); // bottom left
+ TestGetRotationCount(transform, Point2(0, 0), 0); // bottom left
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ RotateRepresentation) {
+ const Transform transform(15);
+ // p top left; shift clockwise by 3
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(-3, 1),
+ Point2(-2, 1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(-3, 1),
+ Point2(2, -1), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(-3, 1),
+ Point2(2, 1), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(-3, 1),
+ Point2(-2, -1), Point2(-1, -3)); // q top left
+ // p top right; shift clockwise by 2 (flip)
+ TestRotateRepresentation(transform, Point2(1, 1), Point2(1, 3),
+ Point2(-1, -1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(1, 3),
+ Point2(1, 2), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(1, 3),
+ Point2(1, -2), Point2(-1, -3)); // q top left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(1, 3),
+ Point2(-1, 2), Point2(-1, -3)); // q bottom right
+ // p bottom right; shift clockwise by 1
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(3, -1),
+ Point2(2, -1), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(3, -1),
+ Point2(-2, -1), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(3, -1),
+ Point2(-2, 1), Point2(-1, -3)); // q bottom left
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(3, -1),
+ Point2(2, 1), Point2(-1, -3)); // q top left
+ // p bottom left; no change
+ TestRotateRepresentation(transform, Point2(1, 2), Point2(-1, -3),
+ Point2(1, 2), Point2(-1, -3)); // q top right
+ TestRotateRepresentation(transform, Point2(-1, 2), Point2(-1, -3),
+ Point2(-1, 2), Point2(-1, -3)); // q top left
+ TestRotateRepresentation(transform, Point2(1, -2), Point2(-1, -3),
+ Point2(1, -2), Point2(-1, -3)); // q bottom right
+ TestRotateRepresentation(transform, Point2(-1, -2), Point2(-1, -3),
+ Point2(-1, -2), Point2(-1, -3)); // q bottom left
+}
+
+TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
+ ComputeCorrection) {
+ const Transform transform(15);
+ TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0);
+ TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0);
+ // inside diamond; p top right
+ TestComputeCorrection(transform, 3, 4, 1, 2, -2, -2); // q top right
+ TestComputeCorrection(transform, -3, 4, 1, 2, 4, -2); // q top left
+ TestComputeCorrection(transform, 3, -4, 1, 2, -2, 6); // q bottom right
+ TestComputeCorrection(transform, -3, -4, 1, 2, 4, 6); // q bottom left
+ // inside diamond; p top left
+ TestComputeCorrection(transform, 3, 4, -1, 2, -2, 4); // q top right
+ TestComputeCorrection(transform, -3, 4, -1, 2, -2, -2); // q top left
+ TestComputeCorrection(transform, 3, -4, -1, 2, 6, 4); // q bottom right
+ TestComputeCorrection(transform, -3, -4, -1, 2, 6, -2); // q bottom left
+ // inside diamond; p bottom right
+ TestComputeCorrection(transform, 3, 4, 1, -2, 6, -2); // q top right
+ TestComputeCorrection(transform, -3, 4, 1, -2, 6, 4); // q top left
+ TestComputeCorrection(transform, 3, -4, 1, -2, -2, -2); // q bottom right
+ TestComputeCorrection(transform, -3, -4, 1, -2, -2, 4); // q bottom left
+ // inside diamond; p bottom left
+ TestComputeCorrection(transform, 3, 4, -1, -2, 4, 6); // q top right
+ TestComputeCorrection(transform, -3, 4, -1, -2, -2, 6); // q top left
+ TestComputeCorrection(transform, 3, -4, -1, -2, 4, -2); // q bottom right
+ TestComputeCorrection(transform, -3, -4, -1, -2, -2, -2); // q bottom left
+ // outside diamond; p top right
+ TestComputeCorrection(transform, 1, 2, 5, 4, -2, -4); // q top right
+ TestComputeCorrection(transform, -1, 2, 5, 4, -7, -4); // q top left
+ TestComputeCorrection(transform, 1, -2, 5, 4, -2, -7); // q bottom right
+ TestComputeCorrection(transform, -1, -2, 5, 4, -7, -7); // q bottom left
+ // outside diamond; p top left
+ TestComputeCorrection(transform, 1, 2, -5, 4, -4, -7); // q top right
+ TestComputeCorrection(transform, -1, 2, -5, 4, -4, -2); // q top left
+ TestComputeCorrection(transform, 1, -2, -5, 4, -7, -7); // q bottom right
+ TestComputeCorrection(transform, -1, -2, -5, 4, -7, -2); // q bottom left
+ // outside diamond; p bottom right
+ TestComputeCorrection(transform, 1, 2, 5, -4, -7, -2); // q top right
+ TestComputeCorrection(transform, -1, 2, 5, -4, -7, -7); // q top left
+ TestComputeCorrection(transform, 1, -2, 5, -4, -4, -2); // q bottom right
+ TestComputeCorrection(transform, -1, -2, 5, -4, -4, -7); // q bottom left
+ // outside diamond; p bottom left
+ TestComputeCorrection(transform, 1, 2, -5, -4, -7, -7); // q top right
+ TestComputeCorrection(transform, -1, 2, -5, -4, -2, -7); // q top left
+ TestComputeCorrection(transform, 1, -2, -5, -4, -7, -4); // q bottom right
+ TestComputeCorrection(transform, -1, -2, -5, -4, -2, -4); // q bottom left
+
+ TestComputeCorrection(transform, -1, -2, 7, 7, -5, -6);
+ TestComputeCorrection(transform, 0, 0, 7, 7, 7, 7);
+ TestComputeCorrection(transform, -1, -2, 0, -2, 0, 1);
+}
+
+} // namespace
diff --git a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h
index 3e2a71f..5cfb1c4 100644
--- a/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h
+++ b/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h
@@ -40,9 +40,6 @@ namespace draco {
// axis.
// DataTypeT is expected to be some integral type.
//
-// This relates
-// * IDF# 44535
-// * Patent Application: GP-200957-00-US
template
class PredictionSchemeNormalOctahedronTransform
: public PredictionSchemeTransform {
diff --git a/compression/attributes/sequential_integer_attribute_decoder.cc b/compression/attributes/sequential_integer_attribute_decoder.cc
index 60c2ad6..36daa17 100644
--- a/compression/attributes/sequential_integer_attribute_decoder.cc
+++ b/compression/attributes/sequential_integer_attribute_decoder.cc
@@ -70,6 +70,8 @@ SequentialIntegerAttributeDecoder::CreateIntPredictionScheme(
bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
const std::vector &point_ids, DecoderBuffer *in_buffer) {
const int num_components = GetNumValueComponents();
+ if (num_components <= 0)
+ return false;
const int32_t num_values = point_ids.size();
values_.resize(num_values * num_components);
uint8_t compressed;
@@ -96,8 +98,8 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
}
}
- if (prediction_scheme_ == nullptr ||
- !prediction_scheme_->AreCorrectionsPositive()) {
+ if (!values_.empty() && (prediction_scheme_ == nullptr ||
+ !prediction_scheme_->AreCorrectionsPositive())) {
// Convert the values back to the original signed format.
ConvertSymbolsToSignedInts(
reinterpret_cast(values_.data()), values_.size(),
@@ -109,9 +111,12 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
if (!prediction_scheme_->DecodePredictionData(in_buffer))
return false;
- if (!prediction_scheme_->Decode(values_.data(), &values_[0], values_.size(),
- num_components, point_ids.data())) {
- return false;
+ if (!values_.empty()) {
+ if (!prediction_scheme_->Decode(values_.data(), &values_[0],
+ values_.size(), num_components,
+ point_ids.data())) {
+ return false;
+ }
}
}
return true;
diff --git a/compression/attributes/sequential_normal_attribute_decoder.h b/compression/attributes/sequential_normal_attribute_decoder.h
index 192fe55..ff7b304 100644
--- a/compression/attributes/sequential_normal_attribute_decoder.h
+++ b/compression/attributes/sequential_normal_attribute_decoder.h
@@ -16,6 +16,7 @@
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
#include "compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
+#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
#include "compression/attributes/sequential_integer_attribute_decoder.h"
@@ -52,6 +53,15 @@ class SequentialNormalAttributeDecoder
return CreatePredictionSchemeForDecoder(
method, attribute_id(), decoder());
}
+ case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED: {
+ typedef PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
+ Transform;
+ // At this point the decoder has not read the quantization bits,
+ // which is why we default-construct the transform here.
+ // See Transform::DecodeTransformData() for more details.
+ return CreatePredictionSchemeForDecoder(
+ method, attribute_id(), decoder());
+ }
default:
return nullptr; // Currently, we support only octahedron transform and
// octahedron transform canonicalized.
diff --git a/compression/attributes/sequential_normal_attribute_encoder.h b/compression/attributes/sequential_normal_attribute_encoder.h
index 62d35cb..9514564 100644
--- a/compression/attributes/sequential_normal_attribute_encoder.h
+++ b/compression/attributes/sequential_normal_attribute_encoder.h
@@ -16,7 +16,7 @@
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
#include "compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
-#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
+#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
#include "compression/attributes/sequential_integer_attribute_encoder.h"
namespace draco {
@@ -41,7 +41,8 @@ class SequentialNormalAttributeEncoder
std::unique_ptr>
CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override {
- typedef PredictionSchemeNormalOctahedronTransform Transform;
+ typedef PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
+ Transform;
const int32_t quantization_bits = encoder()->options()->GetAttributeInt(
attribute_id(), "quantization_bits", -1);
const int32_t max_value = (1 << quantization_bits) - 1;
diff --git a/compression/config/compression_shared.h b/compression/config/compression_shared.h
index 9b41587..67bbd25 100644
--- a/compression/config/compression_shared.h
+++ b/compression/config/compression_shared.h
@@ -15,8 +15,23 @@
#ifndef DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
#define DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
+#include
+
namespace draco {
+// Latest Draco bit-stream version.
+static constexpr uint8_t kDracoBitstreamVersionMajor = 1;
+static constexpr uint8_t kDracoBitstreamVersionMinor = 2;
+
+// Macro that converts the Draco bit-stream version (major, minor) into a
+// single uint16_t number. Useful mostly when checking version numbers.
+#define DRACO_BITSTREAM_VERSION(MAJOR, MINOR) \
+ ((static_cast<uint16_t>(MAJOR) << 8) | MINOR)
+
+// Concatenated latest bit-stream version.
+static constexpr uint16_t kDracoBitstreamVersion = DRACO_BITSTREAM_VERSION(
+ kDracoBitstreamVersionMajor, kDracoBitstreamVersionMinor);
+
// Currently, we support point cloud and triangular mesh encoding.
enum EncodedGeometryType {
INVALID_GEOMETRY_TYPE = -1,
@@ -62,9 +77,10 @@ enum PredictionSchemeMethod {
// Used when no specific prediction scheme is required.
PREDICTION_UNDEFINED = -1,
PREDICTION_DIFFERENCE = 0,
- MESH_PREDICTION_PARALLELOGRAM,
- MESH_PREDICTION_MULTI_PARALLELOGRAM,
- MESH_PREDICTION_TEX_COORDS,
+ MESH_PREDICTION_PARALLELOGRAM = 1,
+ MESH_PREDICTION_MULTI_PARALLELOGRAM = 2,
+ MESH_PREDICTION_TEX_COORDS = 3,
+ MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM = 4,
NUM_PREDICTION_SCHEMES
};
@@ -79,8 +95,27 @@ enum PredictionSchemeTransformType {
PREDICTION_TRANSFORM_WRAP = 1,
// Specialized transform for normal coordinates using inverted tiles.
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON = 2,
- // Reserved for internal use.
- PREDICTION_TRANSFORM_RESERVED_0 = 3,
+ // Specialized transform for normal coordinates using canonicalized inverted
+ // tiles.
+ PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3,
+};
+
+// List of all mesh traversal methods supported by Draco framework.
+enum MeshTraversalMethod {
+ MESH_TRAVERSAL_DEPTH_FIRST = 0,
+ MESH_TRAVERSAL_PREDICTION_DEGREE = 1,
+ MESH_TRAVERSAL_RESERVED_1 = 2,
+ MESH_TRAVERSAL_RESERVED_2 = 3,
+};
+
+// Draco header V1
+struct DracoHeader {
+ int8_t draco_string[5];
+ uint8_t version_major;
+ uint8_t version_minor;
+ uint8_t encoder_type;
+ uint8_t encoder_method;
+ uint16_t flags;
};
} // namespace draco
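A short sketch of how the concatenated value is meant to be used when gating bitstream features (this mirrors the DRACO_BITSTREAM_VERSION(1, 2) checks added elsewhere in this patch; the helper function name is illustrative only):

#include <cstdint>

#define DRACO_BITSTREAM_VERSION(MAJOR, MINOR) \
  ((static_cast<uint16_t>(MAJOR) << 8) | (MINOR))

// Version 1.1 concatenates to 0x0101 (257) and 1.2 to 0x0102 (258), so plain
// integer comparison orders versions correctly while the minor version < 256.
bool IsBitstreamVersionAtLeast1_2(uint16_t bitstream_version) {
  return bitstream_version >= DRACO_BITSTREAM_VERSION(1, 2);
}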
diff --git a/compression/decode.cc b/compression/decode.cc
index 099b509..d2ed109 100644
--- a/compression/decode.cc
+++ b/compression/decode.cc
@@ -28,38 +28,12 @@
namespace draco {
-bool ParseHeader(DecoderBuffer *in_buffer, EncodedGeometryType *out_type,
- int8_t *out_method) {
- char draco_str[6] = {0};
- if (!in_buffer->Decode(draco_str, 5))
- return false;
- if (strcmp(draco_str, "DRACO") != 0)
- return false; // Wrong file format?
- uint8_t major_version, minor_version;
- if (!in_buffer->Decode(&major_version))
- return false;
- if (!in_buffer->Decode(&minor_version))
- return false;
- uint8_t encoder_type, encoder_method;
- if (!in_buffer->Decode(&encoder_type))
- return false;
- if (!in_buffer->Decode(&encoder_method))
- return false;
- uint16_t flags;
- if (!in_buffer->Decode(&flags))
- return false;
- *out_type = static_cast<EncodedGeometryType>(encoder_type);
- *out_method = encoder_method;
- return true;
-}
-
EncodedGeometryType GetEncodedGeometryType(DecoderBuffer *in_buffer) {
DecoderBuffer temp_buffer(*in_buffer);
- EncodedGeometryType geom_type;
- int8_t method;
- if (!ParseHeader(&temp_buffer, &geom_type, &method))
+ DracoHeader header;
+ if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
return INVALID_GEOMETRY_TYPE;
- return geom_type;
+ return static_cast<EncodedGeometryType>(header.encoder_type);
}
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
@@ -87,14 +61,14 @@ std::unique_ptr CreateMeshDecoder(uint8_t method) {
std::unique_ptr DecodePointCloudFromBuffer(
DecoderBuffer *in_buffer) {
- EncodedGeometryType encoder_type;
- int8_t method;
- if (!ParseHeader(in_buffer, &encoder_type, &method))
+ DecoderBuffer temp_buffer(*in_buffer);
+ DracoHeader header;
+ if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
return nullptr;
- if (encoder_type == POINT_CLOUD) {
+ if (header.encoder_type == POINT_CLOUD) {
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
std::unique_ptr decoder =
- CreatePointCloudDecoder(method);
+ CreatePointCloudDecoder(header.encoder_method);
if (!decoder)
return nullptr;
std::unique_ptr point_cloud(new PointCloud());
@@ -102,9 +76,10 @@ std::unique_ptr DecodePointCloudFromBuffer(
return nullptr;
return point_cloud;
#endif
- } else if (encoder_type == TRIANGULAR_MESH) {
+ } else if (header.encoder_type == TRIANGULAR_MESH) {
#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
- std::unique_ptr decoder = CreateMeshDecoder(method);
+ std::unique_ptr decoder =
+ CreateMeshDecoder(header.encoder_method);
if (!decoder)
return nullptr;
std::unique_ptr mesh(new Mesh());
@@ -118,13 +93,13 @@ std::unique_ptr DecodePointCloudFromBuffer(
std::unique_ptr DecodeMeshFromBuffer(DecoderBuffer *in_buffer) {
#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
- EncodedGeometryType encoder_type;
- int8_t method;
- if (!ParseHeader(in_buffer, &encoder_type, &method))
+ DecoderBuffer temp_buffer(*in_buffer);
+ DracoHeader header;
+ if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
return nullptr;
std::unique_ptr decoder;
- if (encoder_type == TRIANGULAR_MESH) {
- decoder = CreateMeshDecoder(method);
+ if (header.encoder_type == TRIANGULAR_MESH) {
+ decoder = CreateMeshDecoder(header.encoder_method);
}
if (!decoder)
return nullptr;
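With ParseHeader() gone, callers branch on geometry type via GetEncodedGeometryType(). A minimal usage sketch (hedged: it assumes only the entry points visible in this file plus a compression/decode.h header and DecoderBuffer::Init(), and it relies on Mesh deriving from PointCloud):

#include <cstddef>
#include <memory>

#include "compression/decode.h"

std::unique_ptr<draco::PointCloud> DecodeAnyGeometry(const char *data,
                                                     size_t size) {
  draco::DecoderBuffer buffer;
  buffer.Init(data, size);
  const draco::EncodedGeometryType type =
      draco::GetEncodedGeometryType(&buffer);  // Peeks at the header only.
  if (type == draco::TRIANGULAR_MESH)
    return draco::DecodeMeshFromBuffer(&buffer);
  if (type == draco::POINT_CLOUD)
    return draco::DecodePointCloudFromBuffer(&buffer);
  return nullptr;  // INVALID_GEOMETRY_TYPE or unsupported method.
}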
diff --git a/compression/encode.cc b/compression/encode.cc
index 45c24ef..26018ab 100644
--- a/compression/encode.cc
+++ b/compression/encode.cc
@@ -21,33 +21,11 @@
namespace draco {
-// Encodes header common to all methods.
-bool EncodeHeader(const PointCloudEncoder &encoder, EncoderBuffer *out_buffer) {
- // Encode the header according to our v1 specification.
- // Five bytes for Draco format.
- out_buffer->Encode("DRACO", 5);
- // Version (major, minor).
- const uint8_t major_version = 1;
- const uint8_t minor_version = 1;
- out_buffer->Encode(major_version);
- out_buffer->Encode(minor_version);
- // Type of the encoder (point cloud, mesh, ...).
- const uint8_t encoder_type = encoder.GetGeometryType();
- out_buffer->Encode(encoder_type);
- // Unique identifier for the selected encoding method (edgebreaker, etc...).
- out_buffer->Encode(encoder.GetEncodingMethod());
- // Reserved for flags.
- out_buffer->Encode(static_cast<uint16_t>(0));
- return true;
-}
-
bool EncodeGeometryToBuffer(PointCloudEncoder *encoder,
const EncoderOptions &options,
EncoderBuffer *out_buffer) {
if (!encoder)
return false;
- if (!EncodeHeader(*encoder, out_buffer))
- return false;
if (!encoder->Encode(options, out_buffer))
return false;
return true;
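The removed EncodeHeader() together with the new DracoHeader struct pins down the 10-byte header layout that the point cloud encoder/decoder now own. A minimal standalone sketch of producing that layout into a raw byte vector (illustrative only; the real code writes through EncoderBuffer, and multi-byte fields follow the platform byte order used by buffer->Encode()):

#include <cstdint>
#include <vector>

// "DRACO" magic, version major/minor, encoder type, encoder method, flags.
std::vector<uint8_t> WriteDracoHeaderV1(uint8_t encoder_type,
                                        uint8_t encoder_method) {
  std::vector<uint8_t> out;
  const char magic[5] = {'D', 'R', 'A', 'C', 'O'};
  out.insert(out.end(), magic, magic + 5);
  out.push_back(1);  // version_major
  out.push_back(2);  // version_minor (this patch bumps the bitstream to 1.2)
  out.push_back(encoder_type);
  out.push_back(encoder_method);
  out.push_back(0);  // flags, low byte (reserved, written little-endian here)
  out.push_back(0);  // flags, high byte
  return out;
}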
diff --git a/compression/mesh/mesh_edgebreaker_decoder.cc b/compression/mesh/mesh_edgebreaker_decoder.cc
index 00a6f4f..16424d3 100644
--- a/compression/mesh/mesh_edgebreaker_decoder.cc
+++ b/compression/mesh/mesh_edgebreaker_decoder.cc
@@ -15,6 +15,7 @@
#include "compression/mesh/mesh_edgebreaker_decoder.h"
#include "compression/mesh/mesh_edgebreaker_decoder_impl.h"
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
+#include "compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
namespace draco {
@@ -40,6 +41,10 @@ bool MeshEdgeBreakerDecoder::InitializeDecoder() {
new MeshEdgeBreakerDecoderImpl<
MeshEdgeBreakerTraversalPredictiveDecoder>());
#endif
+ } else if (traversal_decoder_type == 2) {
+ impl_ = std::unique_ptr(
+ new MeshEdgeBreakerDecoderImpl<
+ MeshEdgeBreakerTraversalValenceDecoder>());
}
if (!impl_) {
return false;
diff --git a/compression/mesh/mesh_edgebreaker_decoder_impl.cc b/compression/mesh/mesh_edgebreaker_decoder_impl.cc
index 73e321c..350bcca 100644
--- a/compression/mesh/mesh_edgebreaker_decoder_impl.cc
+++ b/compression/mesh/mesh_edgebreaker_decoder_impl.cc
@@ -17,12 +17,13 @@
#include
#include "compression/attributes/mesh_attribute_indices_encoding_observer.h"
-#include "compression/attributes/mesh_traversal_sequencer.h"
#include "compression/attributes/sequential_attribute_decoders_controller.h"
#include "compression/mesh/mesh_edgebreaker_decoder.h"
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
+#include "compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
#include "mesh/corner_table_traversal_processor.h"
#include "mesh/edgebreaker_traverser.h"
+#include "mesh/prediction_degree_traverser.h"
namespace draco {
@@ -61,8 +62,11 @@ const MeshAttributeCornerTable *
MeshEdgeBreakerDecoderImpl::GetAttributeCornerTable(
int att_id) const {
for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+ const int decoder_id = attribute_data_[i].decoder_id;
+ if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders())
+ continue;
const AttributesDecoder *const dec =
- decoder_->attributes_decoder(attribute_data_[i].decoder_id);
+ decoder_->attributes_decoder(decoder_id);
for (int j = 0; j < dec->num_attributes(); ++j) {
if (dec->GetAttributeId(j) == att_id) {
if (attribute_data_[i].is_connectivity_used)
@@ -79,8 +83,11 @@ const MeshAttributeIndicesEncodingData *
MeshEdgeBreakerDecoderImpl::GetAttributeEncodingData(
int att_id) const {
for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
+ const int decoder_id = attribute_data_[i].decoder_id;
+ if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders())
+ continue;
const AttributesDecoder *const dec =
- decoder_->attributes_decoder(attribute_data_[i].decoder_id);
+ decoder_->attributes_decoder(decoder_id);
for (int j = 0; j < dec->num_attributes(); ++j) {
if (dec->GetAttributeId(j) == att_id)
return &attribute_data_[i].encoding_data;
@@ -89,6 +96,30 @@ MeshEdgeBreakerDecoderImpl::GetAttributeEncodingData(
return &pos_encoding_data_;
}
+template
+template
+std::unique_ptr
+MeshEdgeBreakerDecoderImpl::CreateVertexTraversalSequencer(
+ MeshAttributeIndicesEncodingData *encoding_data) {
+ typedef typename TraverserT::TraversalObserver AttObserver;
+ typedef typename TraverserT::TraversalProcessor AttProcessor;
+
+ const Mesh *mesh = decoder_->mesh();
+ std::unique_ptr> traversal_sequencer(
+ new MeshTraversalSequencer(mesh, encoding_data));
+
+ TraverserT att_traverser;
+ AttObserver att_observer(corner_table_.get(), mesh, traversal_sequencer.get(),
+ encoding_data);
+ AttProcessor att_processor;
+
+ att_processor.ResetProcessor(corner_table_.get());
+ att_traverser.Init(std::move(att_processor), att_observer);
+
+ traversal_sequencer->SetTraverser(att_traverser);
+ return std::move(traversal_sequencer);
+}
+
template
bool MeshEdgeBreakerDecoderImpl::CreateAttributesDecoder(
int32_t att_decoder_id) {
@@ -106,6 +137,15 @@ bool MeshEdgeBreakerDecoderImpl::CreateAttributesDecoder(
attribute_data_[att_data_id].decoder_id = att_decoder_id;
}
+ MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
+ if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
+ uint8_t traversal_method_encoded;
+ if (!decoder_->buffer()->Decode(&traversal_method_encoded))
+ return false;
+ traversal_method =
+ static_cast<MeshTraversalMethod>(traversal_method_encoded);
+ }
+
const Mesh *mesh = decoder_->mesh();
std::unique_ptr sequencer;
@@ -113,9 +153,6 @@ bool MeshEdgeBreakerDecoderImpl::CreateAttributesDecoder(
// Per-vertex attribute decoder.
typedef CornerTableTraversalProcessor AttProcessor;
typedef MeshAttributeIndicesEncodingObserver AttObserver;
- // Traverser that is used to generate the encoding order of each attribute.
- typedef EdgeBreakerTraverser AttTraverser;
-
MeshAttributeIndicesEncodingData *encoding_data = nullptr;
if (att_data_id < 0) {
encoding_data = &pos_encoding_data_;
@@ -125,22 +162,20 @@ bool MeshEdgeBreakerDecoderImpl::CreateAttributesDecoder(
// later on.
attribute_data_[att_data_id].is_connectivity_used = false;
}
-
- std::unique_ptr> traversal_sequencer(
- new MeshTraversalSequencer(mesh, encoding_data));
-
- AttTraverser att_traverser;
- AttObserver att_observer(corner_table_.get(), mesh,
- traversal_sequencer.get(), encoding_data);
- AttProcessor att_processor;
-
- att_processor.ResetProcessor(corner_table_.get());
- att_traverser.Init(att_processor, att_observer);
-
- traversal_sequencer->SetTraverser(att_traverser);
- sequencer = std::move(traversal_sequencer);
-
+ if (traversal_method == MESH_TRAVERSAL_DEPTH_FIRST) {
+ // Traverser that is used to generate the encoding order of each
+ // attribute.
+ typedef EdgeBreakerTraverser AttTraverser;
+ sequencer = CreateVertexTraversalSequencer(encoding_data);
+ } else if (traversal_method == MESH_TRAVERSAL_PREDICTION_DEGREE) {
+ typedef PredictionDegreeTraverser AttTraverser;
+ sequencer = CreateVertexTraversalSequencer(encoding_data);
+ } else {
+ return false; // Unsupported method
+ }
} else {
+ if (traversal_method != MESH_TRAVERSAL_DEPTH_FIRST)
+ return false; // Unsupported method.
if (att_data_id < 0)
return false; // Attribute data must be specified.
@@ -203,7 +238,8 @@ bool MeshEdgeBreakerDecoderImpl::DecodeConnectivity() {
corner_table_ = std::unique_ptr(new CornerTable());
if (corner_table_ == nullptr)
return false;
- corner_table_->Reset(num_faces);
+ if (!corner_table_->Reset(num_faces))
+ return false;
processed_corner_ids_.clear();
processed_corner_ids_.reserve(num_faces);
processed_connectivity_corners_.clear();
@@ -637,27 +673,71 @@ MeshEdgeBreakerDecoderImpl::DecodeHoleAndTopologySplitEvents(
uint32_t num_topology_splits;
if (!decoder_buffer->Decode(&num_topology_splits))
return -1;
- for (uint32_t i = 0; i < num_topology_splits; ++i) {
- TopologySplitEventData event_data;
- if (!decoder_buffer->Decode(&event_data.split_symbol_id))
- return -1;
- if (!decoder_buffer->Decode(&event_data.source_symbol_id))
- return -1;
- uint8_t edge_data;
- if (!decoder_buffer->Decode(&edge_data))
- return -1;
- event_data.source_edge = edge_data & 1;
- event_data.split_edge = (edge_data >> 1) & 1;
- topology_split_data_.push_back(event_data);
+ if (num_topology_splits > 0) {
+ if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
+ // Decode source and split symbol ids using delta and varint coding. See
+ // description in mesh_edgebreaker_encoder_impl.cc for more details.
+ int last_source_symbol_id = 0;
+ for (uint32_t i = 0; i < num_topology_splits; ++i) {
+ TopologySplitEventData event_data;
+ uint32_t delta;
+ DecodeVarint(&delta, decoder_buffer);
+ event_data.source_symbol_id = delta + last_source_symbol_id;
+ DecodeVarint(&delta, decoder_buffer);
+ event_data.split_symbol_id =
+ event_data.source_symbol_id - static_cast(delta);
+ last_source_symbol_id = event_data.source_symbol_id;
+ topology_split_data_.push_back(event_data);
+ }
+ // Split edges are decoded from a direct bit decoder.
+ decoder_buffer->StartBitDecoding(false, nullptr);
+ for (uint32_t i = 0; i < num_topology_splits; ++i) {
+ uint32_t edge_data;
+ decoder_buffer->DecodeLeastSignificantBits32(2, &edge_data);
+ TopologySplitEventData &event_data = topology_split_data_[i];
+ event_data.source_edge = edge_data & 1;
+ event_data.split_edge = (edge_data >> 1) & 1;
+ }
+ decoder_buffer->EndBitDecoding();
+ } else {
+ for (uint32_t i = 0; i < num_topology_splits; ++i) {
+ TopologySplitEventData event_data;
+ if (!decoder_buffer->Decode(&event_data.split_symbol_id))
+ return -1;
+ if (!decoder_buffer->Decode(&event_data.source_symbol_id))
+ return -1;
+ uint8_t edge_data;
+ if (!decoder_buffer->Decode(&edge_data))
+ return -1;
+ event_data.source_edge = edge_data & 1;
+ event_data.split_edge = (edge_data >> 1) & 1;
+ topology_split_data_.push_back(event_data);
+ }
+ }
}
uint32_t num_hole_events;
if (!decoder_buffer->Decode(&num_hole_events))
return -1;
- for (uint32_t i = 0; i < num_hole_events; ++i) {
- HoleEventData event_data;
- if (!decoder_buffer->Decode(&event_data))
- return -1;
- hole_event_data_.push_back(event_data);
+ if (num_hole_events > 0) {
+ if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
+ // Decode hole symbol ids using delta and varint coding.
+ int last_symbol_id = 0;
+ for (uint32_t i = 0; i < num_hole_events; ++i) {
+ HoleEventData event_data;
+ uint32_t delta;
+ DecodeVarint(&delta, decoder_buffer);
+ event_data.symbol_id = delta + last_symbol_id;
+ last_symbol_id = event_data.symbol_id;
+ hole_event_data_.push_back(event_data);
+ }
+ } else {
+ for (uint32_t i = 0; i < num_hole_events; ++i) {
+ HoleEventData event_data;
+ if (!decoder_buffer->Decode(&event_data))
+ return -1;
+ hole_event_data_.push_back(event_data);
+ }
+ }
}
return decoder_buffer->decoded_size();
}
@@ -813,5 +893,6 @@ bool MeshEdgeBreakerDecoderImpl::AssignPointsToCorners() {
template class MeshEdgeBreakerDecoderImpl;
template class MeshEdgeBreakerDecoderImpl<
MeshEdgeBreakerTraversalPredictiveDecoder>;
-
+template class MeshEdgeBreakerDecoderImpl<
+ MeshEdgeBreakerTraversalValenceDecoder>;
} // namespace draco
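The "delta and varint coding" referenced above (and described on the encoder side in mesh_edgebreaker_encoder_impl.cc) stores the increasing symbol ids as small non-negative deltas, each written as a base-128 varint. A minimal standalone sketch of the encoding half (an illustration of the idea only; the exact byte layout of Draco's EncodeVarint/DecodeVarint is not shown in this patch):

#include <cstdint>
#include <vector>

// Little-endian base-128 varint: 7 payload bits per byte, top bit = "more".
void AppendVarint(uint32_t value, std::vector<uint8_t> *out) {
  while (value >= 0x80) {
    out->push_back(static_cast<uint8_t>((value & 0x7f) | 0x80));
    value >>= 7;
  }
  out->push_back(static_cast<uint8_t>(value));
}

// Ids arrive sorted, so each delta is non-negative and usually tiny,
// which keeps most varints to a single byte.
std::vector<uint8_t> EncodeIncreasingIds(const std::vector<uint32_t> &ids) {
  std::vector<uint8_t> out;
  uint32_t last = 0;
  for (const uint32_t id : ids) {
    AppendVarint(id - last, &out);
    last = id;
  }
  return out;
}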
diff --git a/compression/mesh/mesh_edgebreaker_decoder_impl.h b/compression/mesh/mesh_edgebreaker_decoder_impl.h
index 358d78e..8c4f272 100644
--- a/compression/mesh/mesh_edgebreaker_decoder_impl.h
+++ b/compression/mesh/mesh_edgebreaker_decoder_impl.h
@@ -19,6 +19,7 @@
#include
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "compression/attributes/mesh_traversal_sequencer.h"
#include "compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
#include "compression/mesh/mesh_edgebreaker_shared.h"
#include "core/decoder_buffer.h"
@@ -66,6 +67,11 @@ class MeshEdgeBreakerDecoderImpl : public MeshEdgeBreakerDecoderImplInterface {
}
private:
+ // Creates a vertex traversal sequencer for the specified |TraverserT| type.
+ template <class TraverserT>
+ std::unique_ptr<PointsSequencer> CreateVertexTraversalSequencer(
+ MeshAttributeIndicesEncodingData *encoding_data);
+
// Decodes connectivty between vertices (vertex indices).
// Returns the number of vertices created by the decoder or -1 on error.
int DecodeConnectivity(int num_symbols);
diff --git a/compression/mesh/mesh_edgebreaker_encoder.cc b/compression/mesh/mesh_edgebreaker_encoder.cc
index e7b84be..ca5c9e4 100644
--- a/compression/mesh/mesh_edgebreaker_encoder.cc
+++ b/compression/mesh/mesh_edgebreaker_encoder.cc
@@ -16,6 +16,7 @@
#include "compression/mesh/mesh_edgebreaker_encoder_impl.h"
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
+#include "compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
namespace draco {
@@ -28,16 +29,24 @@ bool MeshEdgeBreakerEncoder::InitializeEncoder() {
options()->IsFeatureSupported(features::kPredictiveEdgebreaker);
impl_ = nullptr;
+ // For tiny meshes it's usually better to use the basic edgebreaker as the
+ // overhead of the predictive one may turn out to be too big.
+ // TODO(ostava): For now we have a set limit for forcing the basic edgebreaker
+ // based on the number of faces, but a more complex heuristic may be used if
+ // needed.
+ const bool is_tiny_mesh = mesh()->num_faces() < 1000;
+
if (is_standard_edgebreaker_avaialable &&
- (options()->GetSpeed() >= 5 || !is_predictive_edgebreaker_avaialable)) {
+ (options()->GetSpeed() >= 5 || !is_predictive_edgebreaker_avaialable ||
+ is_tiny_mesh)) {
buffer()->Encode(static_cast(0));
impl_ = std::unique_ptr(
new MeshEdgeBreakerEncoderImpl());
} else if (is_predictive_edgebreaker_avaialable) {
- buffer()->Encode(static_cast(1));
+ buffer()->Encode(static_cast<uint8_t>(2));
impl_ = std::unique_ptr(
new MeshEdgeBreakerEncoderImpl<
- MeshEdgeBreakerTraversalPredictiveEncoder>());
+ MeshEdgeBreakerTraversalValenceEncoder>());
}
if (!impl_)
return false;
diff --git a/compression/mesh/mesh_edgebreaker_encoder_impl.cc b/compression/mesh/mesh_edgebreaker_encoder_impl.cc
index e6c372e..031b94f 100644
--- a/compression/mesh/mesh_edgebreaker_encoder_impl.cc
+++ b/compression/mesh/mesh_edgebreaker_encoder_impl.cc
@@ -17,14 +17,15 @@
#include
#include "compression/attributes/mesh_attribute_indices_encoding_observer.h"
-#include "compression/attributes/mesh_traversal_sequencer.h"
#include "compression/attributes/sequential_attribute_encoders_controller.h"
#include "compression/mesh/mesh_edgebreaker_encoder.h"
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
+#include "compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
#include "mesh/corner_table_iterators.h"
#include "mesh/corner_table_traversal_processor.h"
#include "mesh/edgebreaker_traverser.h"
#include "mesh/mesh_misc_functions.h"
+#include "mesh/prediction_degree_traverser.h"
namespace draco {
@@ -73,6 +74,30 @@ MeshEdgeBreakerEncoderImpl::GetAttributeEncodingData(
return &pos_encoding_data_;
}
+template
+template
+std::unique_ptr
+MeshEdgeBreakerEncoderImpl::CreateVertexTraversalSequencer(
+ MeshAttributeIndicesEncodingData *encoding_data) {
+ typedef typename TraverserT::TraversalObserver AttObserver;
+ typedef typename TraverserT::TraversalProcessor AttProcessor;
+
+ std::unique_ptr> traversal_sequencer(
+ new MeshTraversalSequencer(mesh_, encoding_data));
+
+ AttProcessor att_processor;
+ AttObserver att_observer(corner_table_.get(), mesh_,
+ traversal_sequencer.get(), encoding_data);
+ TraverserT att_traverser;
+
+ att_processor.ResetProcessor(corner_table_.get());
+ att_traverser.Init(std::move(att_processor), att_observer);
+
+ traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
+ traversal_sequencer->SetTraverser(att_traverser);
+ return std::move(traversal_sequencer);
+}
+
template
bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder(
int32_t att_id) {
@@ -90,6 +115,7 @@ bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder(
break;
}
}
+ MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
std::unique_ptr sequencer;
if (att->attribute_type() == GeometryAttribute::POSITION ||
element_type == MESH_VERTEX_ATTRIBUTE ||
@@ -99,8 +125,6 @@ bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder(
// mesh.
typedef CornerTableTraversalProcessor AttProcessor;
typedef MeshAttributeIndicesEncodingObserver AttObserver;
- // Traverser that is used to generate the encoding order of each attribute.
- typedef EdgeBreakerTraverser AttTraverser;
MeshAttributeIndicesEncodingData *encoding_data;
if (att->attribute_type() == GeometryAttribute::POSITION) {
@@ -110,20 +134,19 @@ bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder(
attribute_data_[att_data_id].is_connectivity_used = false;
}
- std::unique_ptr> traversal_sequencer(
- new MeshTraversalSequencer(mesh_, encoding_data));
-
- AttProcessor att_processor;
- AttObserver att_observer(corner_table_.get(), mesh_,
- traversal_sequencer.get(), encoding_data);
- AttTraverser att_traverser;
-
- att_processor.ResetProcessor(corner_table_.get());
- att_traverser.Init(att_processor, att_observer);
-
- traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
- traversal_sequencer->SetTraverser(att_traverser);
- sequencer = std::move(traversal_sequencer);
+ if (att->attribute_type() == GeometryAttribute::POSITION &&
+ GetEncoder()->options()->GetSpeed() == 0) {
+ // Traverser that is used to generate the encoding order of each
+ // attribute.
+ typedef PredictionDegreeTraverser AttTraverser;
+ sequencer = CreateVertexTraversalSequencer(encoding_data);
+ traversal_method = MESH_TRAVERSAL_PREDICTION_DEGREE;
+ } else {
+ // Traverser that is used to generate the encoding order of each
+ // attribute.
+ typedef EdgeBreakerTraverser AttTraverser;
+ sequencer = CreateVertexTraversalSequencer(encoding_data);
+ }
} else {
// Else use a general per-corner encoder.
typedef CornerTableTraversalProcessor
@@ -155,6 +178,12 @@ bool MeshEdgeBreakerEncoderImpl::GenerateAttributesEncoder(
if (!sequencer)
return false;
+ if (att_data_id == -1) {
+ pos_traversal_method_ = traversal_method;
+ } else {
+ attribute_data_[att_data_id].traversal_method = traversal_method;
+ }
+
std::unique_ptr att_controller(
new SequentialAttributeEncodersController(std::move(sequencer), att_id));
@@ -174,9 +203,13 @@ bool MeshEdgeBreakerEncoderImpl::
// Also encode the type of the encoder that we used.
int32_t element_type = MESH_VERTEX_ATTRIBUTE;
+ MeshTraversalMethod traversal_method;
if (att_data_id >= 0) {
const int32_t att_id = attribute_data_[att_data_id].attribute_index;
element_type = GetEncoder()->mesh()->GetAttributeElementType(att_id);
+ traversal_method = attribute_data_[att_data_id].traversal_method;
+ } else {
+ traversal_method = pos_traversal_method_;
}
if (element_type == MESH_VERTEX_ATTRIBUTE ||
(element_type == MESH_CORNER_ATTRIBUTE &&
@@ -187,6 +220,8 @@ bool MeshEdgeBreakerEncoderImpl::
// Per-corner encoder.
encoder_->buffer()->Encode(static_cast(MESH_CORNER_ATTRIBUTE));
}
+ // Encode the mesh traversal method.
+ encoder_->buffer()->Encode(static_cast<uint8_t>(traversal_method));
return true;
}
@@ -348,24 +383,46 @@ bool MeshEdgeBreakerEncoderImpl::EncodeConnectivity() {
// Encode topology split events.
uint32_t num_events = topology_split_event_data_.size();
encoder_->buffer()->Encode(num_events);
- for (uint32_t i = 0; i < num_events; ++i) {
- // TODO(ostava): We can do a better encoding of the event data but it's not
- // really needed for now.
- const TopologySplitEventData &event_data = topology_split_event_data_[i];
- encoder_->buffer()->Encode(event_data.split_symbol_id);
- encoder_->buffer()->Encode(event_data.source_symbol_id);
- const uint8_t edge_data =
- (event_data.source_edge | (event_data.split_edge << 1));
- encoder_->buffer()->Encode(edge_data);
+ if (num_events > 0) {
+ // Encode split symbols using delta and varint coding. Split edges are
+ // encoded using direct bit coding.
+ int last_source_symbol_id = 0; // Used for delta coding.
+ for (uint32_t i = 0; i < num_events; ++i) {
+ const TopologySplitEventData &event_data = topology_split_event_data_[i];
+ // Encode source symbol id as delta from the previous source symbol id.
+ // Source symbol ids are always stored in increasing order so the delta is
+ // going to be positive.
+ EncodeVarint(
+ event_data.source_symbol_id - last_source_symbol_id,
+ encoder_->buffer());
+ // Encode split symbol id as delta from the current source symbol id.
+ // Split symbol id is always smaller than the source symbol id, so the
+ // delta below is going to be positive.
+ EncodeVarint(
+ event_data.source_symbol_id - event_data.split_symbol_id,
+ encoder_->buffer());
+ last_source_symbol_id = event_data.source_symbol_id;
+ }
+ encoder_->buffer()->StartBitEncoding(num_events * 2, false);
+ for (uint32_t i = 0; i < num_events; ++i) {
+ const TopologySplitEventData &event_data = topology_split_event_data_[i];
+ encoder_->buffer()->EncodeLeastSignificantBits32(
+ 2, event_data.source_edge | (event_data.split_edge << 1));
+ }
+ encoder_->buffer()->EndBitEncoding();
}
// Encode hole events data.
num_events = hole_event_data_.size();
encoder_->buffer()->Encode(num_events);
- for (uint32_t i = 0; i < num_events; ++i) {
- // TODO(ostava): We can do a better encoding of the event data but it's not
- // really needed for now.
- // This should be also made platform independent.
- encoder_->buffer()->Encode((hole_event_data_[i]));
+ if (num_events > 0) {
+ // Encode hole symbol ids using delta and varint coding. The symbol ids are
+ // always stored in increasing order so the deltas are going to be positive.
+ int last_symbol_id = 0;
+ for (uint32_t i = 0; i < num_events; ++i) {
+ EncodeVarint(hole_event_data_[i].symbol_id - last_symbol_id,
+ encoder_->buffer());
+ last_symbol_id = hole_event_data_[i].symbol_id;
+ }
}
return true;
}
@@ -734,5 +791,7 @@ bool MeshEdgeBreakerEncoderImpl<
template class MeshEdgeBreakerEncoderImpl;
template class MeshEdgeBreakerEncoderImpl<
MeshEdgeBreakerTraversalPredictiveEncoder>;
+template class MeshEdgeBreakerEncoderImpl<
+ MeshEdgeBreakerTraversalValenceEncoder>;
} // namespace draco
diff --git a/compression/mesh/mesh_edgebreaker_encoder_impl.h b/compression/mesh/mesh_edgebreaker_encoder_impl.h
index 6bd0b83..f232e1d 100644
--- a/compression/mesh/mesh_edgebreaker_encoder_impl.h
+++ b/compression/mesh/mesh_edgebreaker_encoder_impl.h
@@ -18,6 +18,8 @@
#include
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
+#include "compression/attributes/mesh_traversal_sequencer.h"
+#include "compression/config/compression_shared.h"
#include "compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
#include "compression/mesh/mesh_edgebreaker_shared.h"
#include "core/encoder_buffer.h"
@@ -32,6 +34,7 @@ template
class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
public:
MeshEdgeBreakerEncoderImpl();
+ MeshEdgeBreakerEncoderImpl(const TraversalEncoderT &traversal_encoder);
bool Init(MeshEdgeBreakerEncoder *encoder) override;
const MeshAttributeCornerTable *GetAttributeCornerTable(
@@ -46,6 +49,7 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
const CornerTable *GetCornerTable() const override {
return corner_table_.get();
}
+ bool IsFaceEncoded(FaceIndex fi) const { return visited_faces_[fi.value()]; }
MeshEdgeBreakerEncoder *GetEncoder() const override { return encoder_; }
private:
@@ -53,6 +57,11 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
// Returns false on error.
bool InitAttributeData();
+ // Creates a vertex traversal sequencer for the specified |TraverserT| type.
+ template
+ std::unique_ptr CreateVertexTraversalSequencer(
+ MeshAttributeIndicesEncodingData *encoding_data);
+
// Finds the configuration of the initial face that starts the traversal.
// Configurations are determined by location of holes around the init face
// and they are described in mesh_edgebreaker_shared.h.
@@ -128,6 +137,9 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
// Attribute data for position encoding.
MeshAttributeIndicesEncodingData pos_encoding_data_;
+ // Traversal method used for the position attribute.
+ MeshTraversalMethod pos_traversal_method_;
+
// Array storing corners in the order they were visited during the
// connectivity encoding (always storing the tip corner of each newly visited
// face).
@@ -171,6 +183,8 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
bool is_connectivity_used;
// Data about attribute encoding order.
MeshAttributeIndicesEncodingData encoding_data;
+ // Traversal method used to generate the encoding data for this attribute.
+ MeshTraversalMethod traversal_method;
};
std::vector attribute_data_;
diff --git a/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h b/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
index f173a91..cf2a9e2 100644
--- a/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
+++ b/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
@@ -45,6 +45,10 @@ class MeshEdgeBreakerEncoderImplInterface {
// Returns corner table of the encoded mesh.
virtual const CornerTable *GetCornerTable() const = 0;
+
+ // Returns true if a given face has been already encoded.
+ virtual bool IsFaceEncoded(FaceIndex fi) const = 0;
+
virtual MeshEdgeBreakerEncoder *GetEncoder() const = 0;
};
diff --git a/compression/mesh/mesh_edgebreaker_encoding_test.cc b/compression/mesh/mesh_edgebreaker_encoding_test.cc
index b4b642c..164839b 100644
--- a/compression/mesh/mesh_edgebreaker_encoding_test.cc
+++ b/compression/mesh/mesh_edgebreaker_encoding_test.cc
@@ -147,4 +147,5 @@ TEST_F(MeshEdgebreakerEncodingTest, TestDecoderReuse) {
<< "Decoded meshes are not the same";
}
+
} // namespace draco
diff --git a/compression/mesh/mesh_edgebreaker_shared.h b/compression/mesh/mesh_edgebreaker_shared.h
index c7d4d67..4880974 100644
--- a/compression/mesh/mesh_edgebreaker_shared.h
+++ b/compression/mesh/mesh_edgebreaker_shared.h
@@ -50,6 +50,8 @@ namespace draco {
// \ / S \ / / E \
// *-------* *-------*
//
+// TODO(ostava): Get rid of the topology bit pattern. It's needed only for
+// encoding; the algorithms should use EdgeBreakerSymbol instead.
enum EdgeBreakerTopologyBitPattern {
TOPOLOGY_C = 0x0, // 0
TOPOLOGY_S = 0x1, // 1 0 0
@@ -64,11 +66,31 @@ enum EdgeBreakerTopologyBitPattern {
TOPOLOGY_INVALID
};
+enum EdgeBreakerSymbol {
+ EDGEBREAKER_SYMBOL_C = 0,
+ EDGEBREAKER_SYMBOL_S,
+ EDGEBREAKER_SYMBOL_L,
+ EDGEBREAKER_SYMBOL_R,
+ EDGEBREAKER_SYMBOL_E,
+ EDGEBREAKER_SYMBOL_INVALID
+};
+
// Bit-length of symbols in the EdgeBreakerTopologyBitPattern stored as a
// look up table for faster indexing.
constexpr int32_t edge_breaker_topology_bit_pattern_length[] = {1, 3, 0, 3,
0, 3, 0, 3};
+// Zero-indexed symbol id for each topology pattern.
+constexpr EdgeBreakerSymbol edge_breaker_topology_to_symbol_id[] = {
+ EDGEBREAKER_SYMBOL_C, EDGEBREAKER_SYMBOL_S,
+ EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_L,
+ EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_R,
+ EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_E};
+
+// Reverse mapping between symbol id and topology pattern symbol.
+constexpr EdgeBreakerTopologyBitPattern edge_breaker_symbol_to_topology_id[] = {
+ TOPOLOGY_C, TOPOLOGY_S, TOPOLOGY_L, TOPOLOGY_R, TOPOLOGY_E};
+
// Types of edges used during mesh traversal relative to the tip vertex of a
// visited triangle.
enum EdgeFaceName : uint8_t { LEFT_FACE_EDGE = 0, RIGHT_FACE_EDGE = 1 };
@@ -100,6 +122,11 @@ struct HoleEventData {
explicit HoleEventData(int32_t sym_id) : symbol_id(sym_id) {}
};
+// List of supported modes for valence based edgebreaker coding.
+enum EdgeBreakerValenceCodingMode {
+ EDGEBREAKER_VALENCE_MODE_2_7 = 0, // Use contexts for valences in range 2-7.
+};
+
} // namespace draco
#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_SHARED_H_
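The two lookup tables added above are inverses of each other over the five valid symbols, so mapping a symbol id to its topology pattern and back must return the original id. A small sanity-check sketch (illustrative only, using the DCHECK macros from core/macros.h):

    for (int s = EDGEBREAKER_SYMBOL_C; s <= EDGEBREAKER_SYMBOL_E; ++s) {
      const EdgeBreakerTopologyBitPattern topology =
          edge_breaker_symbol_to_topology_id[s];
      // The pattern value indexes the reverse table, e.g. TOPOLOGY_R maps back
      // to EDGEBREAKER_SYMBOL_R.
      DCHECK_EQ(edge_breaker_topology_to_symbol_id[topology], s);
    }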
diff --git a/compression/mesh/mesh_edgebreaker_traversal_decoder.h b/compression/mesh/mesh_edgebreaker_traversal_decoder.h
index 7a486e3..20f6725 100644
--- a/compression/mesh/mesh_edgebreaker_traversal_decoder.h
+++ b/compression/mesh/mesh_edgebreaker_traversal_decoder.h
@@ -18,7 +18,7 @@
#include "compression/mesh/mesh_edgebreaker_decoder.h"
#include "compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
#include "compression/mesh/mesh_edgebreaker_shared.h"
-#include "core/rans_coding.h"
+#include "core/rans_bit_decoder.h"
namespace draco {
diff --git a/compression/mesh/mesh_edgebreaker_traversal_encoder.h b/compression/mesh/mesh_edgebreaker_traversal_encoder.h
index 3bc560e..ae12e50 100644
--- a/compression/mesh/mesh_edgebreaker_traversal_encoder.h
+++ b/compression/mesh/mesh_edgebreaker_traversal_encoder.h
@@ -18,7 +18,7 @@
#include "compression/mesh/mesh_edgebreaker_encoder.h"
#include "compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
#include "core/macros.h"
-#include "core/rans_coding.h"
+#include "core/rans_bit_encoder.h"
namespace draco {
@@ -32,8 +32,9 @@ class MeshEdgeBreakerTraversalEncoder {
public:
MeshEdgeBreakerTraversalEncoder()
: encoder_impl_(nullptr), attribute_connectivity_encoders_(nullptr) {}
- void Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
+ bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
encoder_impl_ = encoder;
+ return true;
}
// Called before the traversal encoding is started.
@@ -105,6 +106,9 @@ class MeshEdgeBreakerTraversalEncoder {
protected:
EncoderBuffer *GetOutputBuffer() { return &traversal_buffer_; }
+ const MeshEdgeBreakerEncoderImplInterface *encoder_impl() const {
+ return encoder_impl_;
+ }
private:
// Buffers for storing encoded data.
diff --git a/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h b/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h
index 53474f3..7555fec 100644
--- a/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h
+++ b/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h
@@ -41,7 +41,7 @@ class MeshEdgeBreakerTraversalPredictiveDecoder
if (!MeshEdgeBreakerTraversalDecoder::Start(out_buffer))
return false;
int32_t num_split_symbols;
- if (!out_buffer->Decode(&num_split_symbols))
+ if (!out_buffer->Decode(&num_split_symbols) || num_split_symbols < 0)
return false;
// Add one vertex for each split symbol.
num_vertices_ += num_split_symbols;
diff --git a/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h b/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
index 3a19faf..508ca9b 100644
--- a/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
+++ b/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
@@ -35,14 +35,16 @@ class MeshEdgeBreakerTraversalPredictiveEncoder
last_corner_(kInvalidCornerIndex),
num_symbols_(0) {}
- void Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
- MeshEdgeBreakerTraversalEncoder::Init(encoder);
+ bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
+ if (!MeshEdgeBreakerTraversalEncoder::Init(encoder))
+ return false;
corner_table_ = encoder->GetCornerTable();
// Initialize valences of all vertices.
vertex_valences_.resize(corner_table_->num_vertices());
for (uint32_t i = 0; i < vertex_valences_.size(); ++i) {
vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
}
+ return true;
}
inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }
diff --git a/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h b/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
new file mode 100644
index 0000000..201e22e
--- /dev/null
+++ b/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
@@ -0,0 +1,163 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
+
+#include "compression/mesh/mesh_edgebreaker_traversal_decoder.h"
+#include "core/symbol_decoding.h"
+#include "core/varint_decoding.h"
+
+namespace draco {
+
+// Decoder for traversal encoded with MeshEdgeBreakerTraversalValenceEncoder.
+// The decoder maintains valences of the decoded portion of the traversed mesh
+// and uses them to select the entropy context used for decoding the actual
+// symbols.
+class MeshEdgeBreakerTraversalValenceDecoder
+ : public MeshEdgeBreakerTraversalDecoder {
+ public:
+ MeshEdgeBreakerTraversalValenceDecoder()
+ : corner_table_(nullptr),
+ num_vertices_(0),
+ last_symbol_(-1),
+ active_context_(-1),
+ min_valence_(2),
+ max_valence_(7) {}
+ void Init(MeshEdgeBreakerDecoderImplInterface *decoder) {
+ MeshEdgeBreakerTraversalDecoder::Init(decoder);
+ corner_table_ = decoder->GetCornerTable();
+ }
+ void SetNumEncodedVertices(int num_vertices) { num_vertices_ = num_vertices; }
+
+ bool Start(DecoderBuffer *out_buffer) {
+ if (!MeshEdgeBreakerTraversalDecoder::Start(out_buffer))
+ return false;
+ int32_t num_split_symbols;
+ if (!out_buffer->Decode(&num_split_symbols))
+ return false;
+ // Add one extra vertex for each split symbol.
+ num_vertices_ += num_split_symbols;
+ // Set the valences of all initial vertices to 0.
+ vertex_valences_.resize(num_vertices_, 0);
+
+ int8_t mode;
+ if (!out_buffer->Decode(&mode))
+ return false;
+ if (mode == EDGEBREAKER_VALENCE_MODE_2_7) {
+ min_valence_ = 2;
+ max_valence_ = 7;
+ } else {
+ // Unsupported mode.
+ return false;
+ }
+
+ const int num_unique_valences = max_valence_ - min_valence_ + 1;
+
+ // Decode all symbols for all contexts.
+ context_symbols_.resize(num_unique_valences);
+ context_counters_.resize(context_symbols_.size());
+ for (int i = 0; i < context_symbols_.size(); ++i) {
+ uint32_t num_symbols;
+ DecodeVarint(&num_symbols, out_buffer);
+ if (num_symbols > 0) {
+ context_symbols_[i].resize(num_symbols);
+ DecodeSymbols(num_symbols, 1, out_buffer, context_symbols_[i].data());
+ // All symbols are going to be processed from the back.
+ context_counters_[i] = num_symbols;
+ }
+ }
+ return true;
+ }
+
+ inline uint32_t DecodeSymbol() {
+ // First check if we have a valid context.
+ if (active_context_ != -1) {
+ const int symbol_id =
+ context_symbols_[active_context_]
+ [--context_counters_[active_context_]];
+ last_symbol_ = edge_breaker_symbol_to_topology_id[symbol_id];
+ } else {
+ // We don't have a predicted symbol or the symbol was mis-predicted.
+ // Decode it directly.
+ last_symbol_ = MeshEdgeBreakerTraversalDecoder::DecodeSymbol();
+ }
+ return last_symbol_;
+ }
+
+ inline void NewActiveCornerReached(CornerIndex corner) {
+ const CornerIndex next = corner_table_->Next(corner);
+ const CornerIndex prev = corner_table_->Previous(corner);
+ // Update valences.
+ switch (last_symbol_) {
+ case TOPOLOGY_C:
+ case TOPOLOGY_S:
+ vertex_valences_[corner_table_->Vertex(next)] += 1;
+ vertex_valences_[corner_table_->Vertex(prev)] += 1;
+ break;
+ case TOPOLOGY_R:
+ vertex_valences_[corner_table_->Vertex(corner)] += 1;
+ vertex_valences_[corner_table_->Vertex(next)] += 1;
+ vertex_valences_[corner_table_->Vertex(prev)] += 2;
+ break;
+ case TOPOLOGY_L:
+ vertex_valences_[corner_table_->Vertex(corner)] += 1;
+ vertex_valences_[corner_table_->Vertex(next)] += 2;
+ vertex_valences_[corner_table_->Vertex(prev)] += 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_table_->Vertex(corner)] += 2;
+ vertex_valences_[corner_table_->Vertex(next)] += 2;
+ vertex_valences_[corner_table_->Vertex(prev)] += 2;
+ break;
+ default:
+ break;
+ }
+ // Compute the new context that is going to be used to decode the next
+ // symbol.
+ const int active_valence = vertex_valences_[corner_table_->Vertex(next)];
+ int clamped_valence;
+ if (active_valence < min_valence_) {
+ clamped_valence = min_valence_;
+ } else if (active_valence > max_valence_) {
+ clamped_valence = max_valence_;
+ } else {
+ clamped_valence = active_valence;
+ }
+
+ active_context_ = (clamped_valence - min_valence_);
+ }
+
+ inline void MergeVertices(VertexIndex dest, VertexIndex source) {
+ // Update valences on the merged vertices.
+ vertex_valences_[dest] += vertex_valences_[source];
+ }
+
+ private:
+ const CornerTable *corner_table_;
+ int num_vertices_;
+ IndexTypeVector vertex_valences_;
+ int last_symbol_;
+ int active_context_;
+
+ int min_valence_;
+ int max_valence_;
+ std::vector<std::vector<uint32_t>> context_symbols_;
+ // Points to the active symbol in each context.
+ std::vector context_counters_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
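To make the context selection above concrete: in EDGEBREAKER_VALENCE_MODE_2_7 there are max_valence_ - min_valence_ + 1 = 6 contexts, and the valence of the active vertex is clamped into [2, 7] before being turned into a context index. A short sketch of the mapping (illustrative only):

    // Context index used for a given vertex valence in the 2-7 mode.
    int clamped = valence;
    if (clamped < 2) clamped = 2;
    if (clamped > 7) clamped = 7;
    const int context = clamped - 2;  // 0..5
    // Examples: valence 1 -> context 0, valence 4 -> context 2,
    // valence 12 -> context 5.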
diff --git a/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h b/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
new file mode 100644
index 0000000..f4b0278
--- /dev/null
+++ b/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
@@ -0,0 +1,240 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
+#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
+
+#include "compression/mesh/mesh_edgebreaker_traversal_encoder.h"
+#include "core/symbol_encoding.h"
+#include "core/varint_encoding.h"
+
+namespace draco {
+
+// Predictive encoder for the Edgebreaker symbols based on valences of the
+// previously encoded vertices, following the method described in: Szymczak'02,
+// "Optimized Edgebreaker Encoding for Large and Regular Triangle Meshes". Each
+// valence is used to specify a different entropy context for encoding the
+// symbols.
+// The encoder can operate in various predefined modes that can be used to select
+// the way in which the entropy contexts are computed (e.g. using different
+// clamping for valences, or even using different inputs to compute the
+// contexts), see EdgeBreakerValenceCodingMode in mesh_edgebreaker_shared.h for
+// a list of supported modes.
+class MeshEdgeBreakerTraversalValenceEncoder
+ : public MeshEdgeBreakerTraversalEncoder {
+ public:
+ MeshEdgeBreakerTraversalValenceEncoder()
+ : corner_table_(nullptr),
+ prev_symbol_(-1),
+ num_split_symbols_(0),
+ last_corner_(kInvalidCornerIndex),
+ num_symbols_(0),
+ min_valence_(2),
+ max_valence_(7),
+ mode_(EDGEBREAKER_VALENCE_MODE_2_7) {}
+
+ bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
+ if (!MeshEdgeBreakerTraversalEncoder::Init(encoder))
+ return false;
+ if (mode_ == EDGEBREAKER_VALENCE_MODE_2_7) {
+ min_valence_ = 2;
+ max_valence_ = 7;
+ } else {
+ return false; // Unsupported mode.
+ }
+ corner_table_ = encoder->GetCornerTable();
+
+ // Initialize valences of all vertices.
+ vertex_valences_.resize(corner_table_->num_vertices());
+ for (VertexIndex i(0); i < vertex_valences_.size(); ++i) {
+ vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
+ }
+
+ // Replicate the corner to vertex map from the corner table. We need our own
+ // copy because the map may get updated during encoding when we add new
+ // vertices for split symbols.
+ corner_to_vertex_map_.resize(corner_table_->num_corners());
+ for (CornerIndex i(0); i < corner_table_->num_corners(); ++i) {
+ corner_to_vertex_map_[i] = corner_table_->Vertex(i);
+ }
+ const int32_t num_unique_valences = max_valence_ - min_valence_ + 1;
+
+ context_symbols_.resize(num_unique_valences);
+ return true;
+ }
+
+ inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }
+
+ inline void EncodeSymbol(EdgeBreakerTopologyBitPattern symbol) {
+ ++num_symbols_;
+ // Update valences on the mesh and compute the context that is going to be
+ // used to encode the processed symbol.
+ // Note that the valences are computed for the so-far-unencoded part of the
+ // mesh (i.e., the decoder processes symbols in the reverse order). Adding a
+ // new symbol either reduces valences on the vertices or leaves them unchanged.
+
+ const CornerIndex next = corner_table_->Next(last_corner_);
+ const CornerIndex prev = corner_table_->Previous(last_corner_);
+
+ // Get valence on the tip corner of the active edge (outgoing edge that is
+ // going to be used in reverse decoding of the connectivity to predict the
+ // next symbol).
+ const int active_valence = vertex_valences_[corner_to_vertex_map_[next]];
+ switch (symbol) {
+ case TOPOLOGY_C:
+ // Compute prediction.
+ FALLTHROUGH_INTENDED;
+ case TOPOLOGY_S:
+ // Update valences.
+ vertex_valences_[corner_to_vertex_map_[next]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
+ if (symbol == TOPOLOGY_S) {
+ // Whenever we reach a split symbol, we need to split the vertex into
+ // two and attach all corners on the left and right sides of the split
+ // vertex to the respective vertices (see image below). This is
+ // necessary since the decoder works in the reverse order and it
+ // merges the two vertices only after the split symbol is processed.
+ //
+ // * -----
+ // / \--------
+ // / \--------
+ // / \-------
+ // *-------v-------*
+ // \ /c\ /
+ // \ / \ /
+ // \ /n S p\ /
+ // *.......*
+ //
+
+ // Count the number of faces on the left side of the split vertex and
+ // update the valence on the "left vertex".
+ int num_left_faces = 0;
+ CornerIndex act_c = corner_table_->Opposite(prev);
+ while (act_c >= 0) {
+ if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c)))
+ break; // Stop when we reach the first visited face.
+ ++num_left_faces;
+ act_c = corner_table_->Opposite(corner_table_->Next(act_c));
+ }
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] =
+ num_left_faces + 1;
+
+ // Create a new vertex for the right side and count the number of
+ // faces that should be attached to this vertex.
+ const int new_vert_id = vertex_valences_.size();
+ int num_right_faces = 0;
+
+ act_c = corner_table_->Opposite(next);
+ while (act_c >= 0) {
+ if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c)))
+ break; // Stop when we reach the first visited face.
+ ++num_right_faces;
+ // Map corners on the right side to the newly created vertex.
+ corner_to_vertex_map_[corner_table_->Next(act_c)] = new_vert_id;
+ act_c = corner_table_->Opposite(corner_table_->Previous(act_c));
+ }
+ vertex_valences_.push_back(num_right_faces + 1);
+
+ ++num_split_symbols_;
+ }
+ break;
+ case TOPOLOGY_R:
+ // Update valences.
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
+ break;
+ case TOPOLOGY_L:
+
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
+ break;
+ case TOPOLOGY_E:
+ vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[next]] -= 2;
+ vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
+ break;
+ default:
+ break;
+ }
+
+ if (prev_symbol_ != -1) {
+ int clamped_valence;
+ if (active_valence < min_valence_) {
+ clamped_valence = min_valence_;
+ } else if (active_valence > max_valence_) {
+ clamped_valence = max_valence_;
+ } else {
+ clamped_valence = active_valence;
+ }
+
+ const int context = clamped_valence - min_valence_;
+ context_symbols_[context].push_back(
+ edge_breaker_topology_to_symbol_id[prev_symbol_]);
+ }
+
+ prev_symbol_ = symbol;
+ }
+
+ void Done() {
+ // We still need to store the last encoded symbol.
+ if (prev_symbol_ != -1) {
+ MeshEdgeBreakerTraversalEncoder::EncodeSymbol(
+ static_cast<EdgeBreakerTopologyBitPattern>(prev_symbol_));
+ }
+ // Store the init face configurations and the explicitly encoded symbols.
+ MeshEdgeBreakerTraversalEncoder::Done();
+ // Encode the number of split symbols (needed to set the correct number of
+ // vertices on the decoder side).
+ GetOutputBuffer()->Encode(num_split_symbols_);
+ // Encode the valence encoder mode.
+ GetOutputBuffer()->Encode(static_cast<int8_t>(mode_));
+ // Store the contexts.
+ for (int i = 0; i < context_symbols_.size(); ++i) {
+ EncodeVarint(context_symbols_[i].size(), GetOutputBuffer());
+ if (context_symbols_[i].size() > 0) {
+ EncodeSymbols(context_symbols_[i].data(), context_symbols_[i].size(), 1,
+ GetOutputBuffer());
+ }
+ }
+ }
+
+ int NumEncodedSymbols() const { return num_symbols_; }
+
+ private:
+ const CornerTable *corner_table_;
+ // Explicit map between corners and vertices. We cannot use the one stored
+ // in the |corner_table_| because we may need to add additional vertices to
+ // handle split symbols.
+ IndexTypeVector corner_to_vertex_map_;
+ IndexTypeVector vertex_valences_;
+ // Previously encoded symbol.
+ int32_t prev_symbol_;
+ // The total number of encoded split symbols.
+ int32_t num_split_symbols_;
+ CornerIndex last_corner_;
+ // Explicitly count the number of encoded symbols.
+ int num_symbols_;
+
+ int min_valence_;
+ int max_valence_;
+ EdgeBreakerValenceCodingMode mode_;
+
+ std::vector<std::vector<uint32_t>> context_symbols_;
+};
+
+} // namespace draco
+
+#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
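Taken together, Done() above appends the valence-coder payload after the regular traversal data, and MeshEdgeBreakerTraversalValenceDecoder::Start() consumes it in the same order. The layout, as far as it is visible in this patch, is:

    // 1. int32_t num_split_symbols
    // 2. mode (read back as an int8_t; EDGEBREAKER_VALENCE_MODE_2_7)
    // 3. For each of the (max_valence - min_valence + 1) contexts:
    //      varint: number of symbols in the context
    //      symbols written with EncodeSymbols() (present only when non-empty)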
diff --git a/compression/mesh/mesh_encoder.h b/compression/mesh/mesh_encoder.h
index 42a5b18..90f4636 100644
--- a/compression/mesh/mesh_encoder.h
+++ b/compression/mesh/mesh_encoder.h
@@ -61,17 +61,6 @@ class MeshEncoder : public PointCloudEncoder {
// Needs to be implemented by the derived classes.
virtual bool EncodeConnectivity() = 0;
- // TODO(ostava): Prediction schemes need refactoring.
- /*
- // This method should be overriden by derived class to perform custom
- // initialization of various prediction schemes.
- virtual bool InitPredictionSchemeInternal(
- const MeshAttributeEncoder *att_encoder,
- PredictionSchemeInterface *scheme) {
- return true;
- }
- */
-
void set_mesh(const Mesh *mesh) { mesh_ = mesh; }
private:
diff --git a/compression/mesh/mesh_encoder_test.cc b/compression/mesh/mesh_encoder_test.cc
index a171ff4..7921948 100644
--- a/compression/mesh/mesh_encoder_test.cc
+++ b/compression/mesh/mesh_encoder_test.cc
@@ -15,6 +15,7 @@
#include "compression/mesh/mesh_encoder.h"
#include "compression/encode.h"
+#include "core/decoder_buffer.h"
#include "core/draco_test_base.h"
#include "core/draco_test_utils.h"
#include "io/obj_decoder.h"
@@ -62,16 +63,23 @@ TEST_P(MeshEncoderTest, EncodeGoldenMesh) {
std::string golden_file_name = file_name;
golden_file_name += '.';
golden_file_name += GetParam();
- golden_file_name += ".out";
+ golden_file_name += ".0.10.0.drc";
const std::unique_ptr mesh(DecodeObj(file_name));
ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
EncoderOptions options = CreateDefaultEncoderOptions();
+ SetEncodingMethod(&options, method);
EncoderBuffer buffer;
ASSERT_TRUE(EncodeMeshToBuffer(*mesh.get(), options, &buffer))
<< "Failed encoding test mesh " << file_name << " with method "
<< GetParam();
-
+ // Check that the encoded mesh was really encoded with the selected method.
+ DecoderBuffer decoder_buffer;
+ decoder_buffer.Init(buffer.data(), buffer.size());
+ decoder_buffer.Advance(8); // Skip the header to the encoding method id.
+ uint8_t encoded_method;
+ decoder_buffer.Decode(&encoded_method);
+ ASSERT_EQ(encoded_method, method);
if (!FLAGS_update_golden_files) {
EXPECT_TRUE(
CompareGoldenFile(golden_file_name, buffer.data(), buffer.size()))
diff --git a/compression/mesh/mesh_sequential_decoder.cc b/compression/mesh/mesh_sequential_decoder.cc
index 8bea551..79911ba 100644
--- a/compression/mesh/mesh_sequential_decoder.cc
+++ b/compression/mesh/mesh_sequential_decoder.cc
@@ -24,7 +24,7 @@ MeshSequentialDecoder::MeshSequentialDecoder() {}
bool MeshSequentialDecoder::DecodeConnectivity() {
int32_t num_faces;
- if (!buffer()->Decode(&num_faces))
+ if (!buffer()->Decode(&num_faces) || num_faces < 0)
return false;
int32_t num_points;
if (!buffer()->Decode(&num_points))
diff --git a/compression/point_cloud/algorithms/float_points_tree_decoder.cc b/compression/point_cloud/algorithms/float_points_tree_decoder.cc
index 8797201..58cabb5 100644
--- a/compression/point_cloud/algorithms/float_points_tree_decoder.cc
+++ b/compression/point_cloud/algorithms/float_points_tree_decoder.cc
@@ -111,7 +111,8 @@ bool FloatPointsTreeDecoder::DecodePointCloudKdTreeInternal(
}
}
- DCHECK_EQ(true, qpoints->size() == num_points_);
+ if (qpoints->size() != num_points_)
+ return false;
return true;
}
diff --git a/compression/point_cloud/algorithms/float_points_tree_decoder.h b/compression/point_cloud/algorithms/float_points_tree_decoder.h
index 44a22dd..1c500b4 100644
--- a/compression/point_cloud/algorithms/float_points_tree_decoder.h
+++ b/compression/point_cloud/algorithms/float_points_tree_decoder.h
@@ -49,7 +49,13 @@ class FloatPointsTreeDecoder {
float range() const { return qinfo_.range; }
uint32_t num_points() const { return num_points_; }
uint32_t version() const { return version_; }
- std::string identification_string() const { return "FloatPointsTreeDecoder"; }
+ std::string identification_string() const {
+ if (method_ == KDTREE) {
+ return "FloatPointsTreeDecoder: IntegerPointsKDTreeDecoder";
+ } else {
+ return "FloatPointsTreeDecoder: Unsupported Method";
+ }
+ }
private:
bool DecodePointCloudKdTreeInternal(DecoderBuffer *buffer,
@@ -57,6 +63,7 @@ class FloatPointsTreeDecoder {
static const uint32_t version_ = 3;
QuantizationInfo qinfo_;
+ PointCloudCompressionMethod method_;
uint32_t num_points_;
uint32_t compression_level_;
};
@@ -75,10 +82,9 @@ bool FloatPointsTreeDecoder::DecodePointCloud(DecoderBuffer *buffer,
if (!buffer->Decode(&method_number))
return false;
- const PointCloudCompressionMethod method =
- static_cast(method_number);
+ method_ = static_cast<PointCloudCompressionMethod>(method_number);
- if (method == KDTREE) {
+ if (method_ == KDTREE) {
if (!DecodePointCloudKdTreeInternal(buffer, &qpoints))
return false;
} else { // Unsupported method.
diff --git a/compression/point_cloud/algorithms/float_points_tree_encoder.h b/compression/point_cloud/algorithms/float_points_tree_encoder.h
index feb6f6b..3767b9c 100644
--- a/compression/point_cloud/algorithms/float_points_tree_encoder.h
+++ b/compression/point_cloud/algorithms/float_points_tree_encoder.h
@@ -63,7 +63,13 @@ class FloatPointsTreeEncoder {
uint32_t &compression_level() { return compression_level_; }
float range() const { return qinfo_.range; }
uint32_t num_points() const { return num_points_; }
- std::string identification_string() const { return "FloatPointsTreeEncoder"; }
+ std::string identification_string() const {
+ if (method_ == KDTREE) {
+ return "FloatPointsTreeEncoder: IntegerPointsKDTreeEncoder";
+ } else {
+ return "FloatPointsTreeEncoder: Unsupported Method";
+ }
+ }
private:
void Clear() { buffer_.Clear(); }
diff --git a/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h b/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h
index 15a4e2f..41c4541 100644
--- a/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h
+++ b/compression/point_cloud/algorithms/integer_points_kd_tree_decoder.h
@@ -22,13 +22,13 @@
#include "compression/point_cloud/algorithms/point_cloud_types.h"
#include "compression/point_cloud/algorithms/queuing_policy.h"
-#include "core/adaptive_rans_coding.h"
+#include "core/adaptive_rans_bit_decoder.h"
#include "core/bit_utils.h"
#include "core/decoder_buffer.h"
-#include "core/direct_bit_coding.h"
-#include "core/folded_bit32_coding.h"
+#include "core/direct_bit_decoder.h"
+#include "core/folded_integer_bit_decoder.h"
#include "core/math_utils.h"
-#include "core/rans_coding.h"
+#include "core/rans_bit_decoder.h"
namespace draco {
diff --git a/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h b/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h
index d89637d..b28eec7 100644
--- a/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h
+++ b/compression/point_cloud/algorithms/integer_points_kd_tree_encoder.h
@@ -22,13 +22,13 @@
#include "compression/point_cloud/algorithms/point_cloud_types.h"
#include "compression/point_cloud/algorithms/queuing_policy.h"
-#include "core/adaptive_rans_coding.h"
+#include "core/adaptive_rans_bit_encoder.h"
#include "core/bit_utils.h"
-#include "core/direct_bit_coding.h"
+#include "core/direct_bit_encoder.h"
#include "core/encoder_buffer.h"
-#include "core/folded_bit32_coding.h"
+#include "core/folded_integer_bit_encoder.h"
#include "core/math_utils.h"
-#include "core/rans_coding.h"
+#include "core/rans_bit_encoder.h"
namespace draco {
diff --git a/compression/point_cloud/algorithms/point_cloud_compression_method.h b/compression/point_cloud/algorithms/point_cloud_compression_method.h
index 9ee9597..18307b5 100644
--- a/compression/point_cloud/algorithms/point_cloud_compression_method.h
+++ b/compression/point_cloud/algorithms/point_cloud_compression_method.h
@@ -24,8 +24,9 @@ enum PointCloudCompressionMethod {
// Devillers to d dimensions.
// "Progressive lossless compression of arbitrary simplicial complexes"
// http://dx.doi.org/10.1145/566570.566591
- KDTREE,
- RESERVED_POINT_CLOUD_METHOD_1, // Reserved for internal use.
+ KDTREE = 1,
+ RESERVED_POINT_CLOUD_METHOD_2 = 2, // Reserved for internal use.
+ RESERVED_POINT_CLOUD_METHOD_3 = 0, // Reserved for internal use.
};
} // namespace draco
diff --git a/compression/point_cloud/point_cloud_decoder.cc b/compression/point_cloud/point_cloud_decoder.cc
index 2df62d2..f34f7b6 100644
--- a/compression/point_cloud/point_cloud_decoder.cc
+++ b/compression/point_cloud/point_cloud_decoder.cc
@@ -17,12 +17,54 @@
namespace draco {
PointCloudDecoder::PointCloudDecoder()
- : point_cloud_(nullptr), buffer_(nullptr) {}
+ : point_cloud_(nullptr),
+ buffer_(nullptr),
+ version_major_(0),
+ version_minor_(0) {}
+
+bool PointCloudDecoder::DecodeHeader(DecoderBuffer *buffer,
+ DracoHeader *out_header) {
+ // TODO(ostava): Add error codes for better error reporting.
+ if (!buffer->Decode(out_header->draco_string, 5))
+ return false;
+ if (memcmp(out_header->draco_string, "DRACO", 5) != 0)
+ return false; // Wrong file format?
+ if (!buffer->Decode(&(out_header->version_major)))
+ return false;
+ if (!buffer->Decode(&(out_header->version_minor)))
+ return false;
+ if (!buffer->Decode(&(out_header->encoder_type)))
+ return false;
+ if (!buffer->Decode(&(out_header->encoder_method)))
+ return false;
+ if (!buffer->Decode(&(out_header->flags)))
+ return false;
+ return true;
+}
bool PointCloudDecoder::Decode(DecoderBuffer *in_buffer,
PointCloud *out_point_cloud) {
buffer_ = in_buffer;
point_cloud_ = out_point_cloud;
+ DracoHeader header;
+ if (!DecodeHeader(buffer_, &header))
+ return false;
+ // Sanity check that we are really using the right decoder (mostly for cases
+ // where the Decode method was called manually outside of our main API).
+ if (header.encoder_type != GetGeometryType())
+ return false;
+ // TODO(ostava): We should check the method as well, but currently decoders
+ // don't expose the decoding method id.
+ version_major_ = header.version_major;
+ version_minor_ = header.version_minor;
+
+ // Check for version compatibility.
+ if (version_major_ < 1 || version_major_ > kDracoBitstreamVersionMajor)
+ return false;
+ if (version_major_ == kDracoBitstreamVersionMajor &&
+ version_minor_ > kDracoBitstreamVersionMinor)
+ return false;
+
if (!InitializeDecoder())
return false;
if (!DecodeGeometryData())
diff --git a/compression/point_cloud/point_cloud_decoder.h b/compression/point_cloud/point_cloud_decoder.h
index f214244..5fe50e4 100644
--- a/compression/point_cloud/point_cloud_decoder.h
+++ b/compression/point_cloud/point_cloud_decoder.h
@@ -30,6 +30,10 @@ class PointCloudDecoder {
virtual EncodedGeometryType GetGeometryType() const { return POINT_CLOUD; }
+ // Decodes a Draco header into the provided |out_header|.
+ // Returns false on error.
+ static bool DecodeHeader(DecoderBuffer *buffer, DracoHeader *out_header);
+
// The main entry point for point cloud decoding.
bool Decode(DecoderBuffer *in_buffer, PointCloud *out_point_cloud);
@@ -39,6 +43,11 @@ class PointCloudDecoder {
attributes_decoders_.resize(att_decoder_id + 1);
attributes_decoders_[att_decoder_id] = std::move(decoder);
}
+
+ uint16_t bitstream_version() const {
+ return DRACO_BITSTREAM_VERSION(version_major_, version_minor_);
+ }
+
const AttributesDecoder *attributes_decoder(int dec_id) {
return attributes_decoders_[dec_id].get();
}
@@ -74,6 +83,10 @@ class PointCloudDecoder {
// Input buffer holding the encoded data.
DecoderBuffer *buffer_;
+
+ // Bit-stream version of the encoder that encoded the input data.
+ uint8_t version_major_;
+ uint8_t version_minor_;
};
} // namespace draco
diff --git a/compression/point_cloud/point_cloud_encoder.cc b/compression/point_cloud/point_cloud_encoder.cc
index cd382a3..ed1c84c 100644
--- a/compression/point_cloud/point_cloud_encoder.cc
+++ b/compression/point_cloud/point_cloud_encoder.cc
@@ -35,6 +35,8 @@ bool PointCloudEncoder::Encode(const EncoderOptions &options,
if (!point_cloud_)
return false;
+ if (!EncodeHeader())
+ return false;
if (!InitializeEncoder())
return false;
if (!EncodeEncoderData())
@@ -46,6 +48,25 @@ bool PointCloudEncoder::Encode(const EncoderOptions &options,
return true;
}
+bool PointCloudEncoder::EncodeHeader() {
+ // Encode the header according to our v1 specification.
+ // Five bytes for Draco format.
+ buffer_->Encode("DRACO", 5);
+ // Version (major, minor).
+ const uint8_t version_major = kDracoBitstreamVersionMajor;
+ const uint8_t version_minor = kDracoBitstreamVersionMinor;
+ buffer_->Encode(version_major);
+ buffer_->Encode(version_minor);
+ // Type of the encoder (point cloud, mesh, ...).
+ const uint8_t encoder_type = GetGeometryType();
+ buffer_->Encode(encoder_type);
+ // Unique identifier for the selected encoding method (edgebreaker, etc.).
+ buffer_->Encode(GetEncodingMethod());
+ // Reserved for flags.
+ buffer_->Encode(static_cast(0));
+ return true;
+}
+
bool PointCloudEncoder::EncodePointAttributes() {
if (!GenerateAttributesEncoders())
return false;
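The header written by EncodeHeader() above is what PointCloudDecoder::DecodeHeader() parses, and the mesh encoder test relies on it when skipping 8 bytes to reach the method id. Byte layout, as far as it is visible in this patch:

    // 0..4  "DRACO" magic string
    // 5     version_major (uint8_t)
    // 6     version_minor (uint8_t)
    // 7     encoder_type  (uint8_t; point cloud or mesh)
    // 8     encoder_method (read as a uint8_t by the test)
    // 9..   flags (reserved; width not shown in this patch)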
diff --git a/compression/point_cloud/point_cloud_encoder.h b/compression/point_cloud/point_cloud_encoder.h
index be293b2..a36975f 100644
--- a/compression/point_cloud/point_cloud_encoder.h
+++ b/compression/point_cloud/point_cloud_encoder.h
@@ -109,6 +109,9 @@ class PointCloudEncoder {
virtual bool EncodeAllAttributes();
private:
+ // Encodes Draco header that is the same for all encoders.
+ bool EncodeHeader();
+
// Rearranges attribute encoders and their attributes to reflect the
// underlying attribute dependencies. This ensures that the attributes are
// encoded in the correct order (parent attributes before their children).
diff --git a/core/adaptive_rans_bit_coding_shared.h b/core/adaptive_rans_bit_coding_shared.h
new file mode 100644
index 0000000..5a94b15
--- /dev/null
+++ b/core/adaptive_rans_bit_coding_shared.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides shared functions for adaptive rANS bit coding.
+#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+#define DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
+
+#include "core/macros.h"
+
+namespace draco {
+
+// Clamp the probability p to a uint8_t in the range [1,255].
+inline uint8_t clamp_probability(double p) {
+ DCHECK_LE(p, 1.0);
+ DCHECK_LE(0.0, p);
+ uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
+ p_int -= (p_int == 256);
+ p_int += (p_int == 0);
+ return static_cast<uint8_t>(p_int);
+}
+
+// Update the probability according to the new incoming bit.
+inline double update_probability(double old_p, bool bit) {
+ static constexpr double w = 128.0;
+ static constexpr double w0 = (w - 1.0) / w;
+ static constexpr double w1 = 1.0 / w;
+ return old_p * w0 + (!bit) * w1;
+}
+
+} // namespace draco
+
+#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
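A quick worked example of the two helpers above (the values follow directly from the definitions):

    // clamp_probability maps a probability in [0, 1] to a byte in [1, 255]:
    //   clamp_probability(0.5) -> uint32_t(0.5 * 256 + 0.5) = 128
    //   clamp_probability(0.0) -> 0, bumped to 1      (p_int += (p_int == 0))
    //   clamp_probability(1.0) -> 256, reduced to 255 (p_int -= (p_int == 256))
    // update_probability is an exponential moving average (window w = 128) of
    // the probability of a zero bit:
    //   update_probability(0.5, /*bit=*/true)  = 0.5 * 127/128          ~ 0.4961
    //   update_probability(0.5, /*bit=*/false) = 0.5 * 127/128 + 1/128  ~ 0.5039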
diff --git a/core/adaptive_rans_coding.cc b/core/adaptive_rans_bit_decoder.cc
similarity index 54%
rename from core/adaptive_rans_coding.cc
rename to core/adaptive_rans_bit_decoder.cc
index e1f24a5..d06aa2b 100644
--- a/core/adaptive_rans_coding.cc
+++ b/core/adaptive_rans_bit_decoder.cc
@@ -12,66 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-#include "core/adaptive_rans_coding.h"
+#include "core/adaptive_rans_bit_decoder.h"
-#include
+#include "core/adaptive_rans_bit_coding_shared.h"
namespace draco {
-uint8_t clamp_probability(double p) {
- DCHECK_LE(p, 1.0);
- DCHECK_LE(0.0, p);
- uint32_t p_int = static_cast((p * 256) + 0.5);
- p_int -= (p_int == 256);
- p_int += (p_int == 0);
- return static_cast(p_int);
-}
-
-double update_probability(double old_p, bool bit) {
- static constexpr double w = 128.0;
- static constexpr double w0 = (w - 1.0) / w;
- static constexpr double w1 = 1.0 / w;
- return old_p * w0 + (!bit) * w1;
-}
-
-AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {}
-
-AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); }
-
-void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); }
-
-void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
- // Buffer for ans to write.
- std::vector buffer(bits_.size() + 16);
- AnsCoder ans_coder;
- ans_write_init(&ans_coder, buffer.data());
-
- // Unfortunaetly we have to encode the bits in reversed order, while the
- // probabilities that should be given are those of the forward sequence.
- double p0_f = 0.5;
- std::vector p0s;
- p0s.reserve(bits_.size());
- for (bool b : bits_) {
- p0s.push_back(clamp_probability(p0_f));
- p0_f = update_probability(p0_f, b);
- }
- auto bit = bits_.rbegin();
- auto pit = p0s.rbegin();
- while (bit != bits_.rend()) {
- rabs_write(&ans_coder, *bit, *pit);
- ++bit;
- ++pit;
- }
-
- const uint32_t size_in_bytes = ans_write_end(&ans_coder);
- target_buffer->Encode(size_in_bytes);
- target_buffer->Encode(buffer.data(), size_in_bytes);
-
- Clear();
-}
-
-void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); }
-
AdaptiveRAnsBitDecoder::AdaptiveRAnsBitDecoder() : p0_f_(0.5) {}
AdaptiveRAnsBitDecoder::~AdaptiveRAnsBitDecoder() { Clear(); }
diff --git a/core/adaptive_rans_bit_decoder.h b/core/adaptive_rans_bit_decoder.h
new file mode 100644
index 0000000..27fb42c
--- /dev/null
+++ b/core/adaptive_rans_bit_decoder.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS bit decoding.
+#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
+#define DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
+
+#include
+
+#include "core/ans.h"
+#include "core/decoder_buffer.h"
+
+namespace draco {
+
+// Class for decoding a sequence of bits that were encoded with
+// AdaptiveRAnsBitEncoder.
+class AdaptiveRAnsBitDecoder {
+ public:
+ AdaptiveRAnsBitDecoder();
+ ~AdaptiveRAnsBitDecoder();
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit();
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
+
+ void EndDecoding() {}
+
+ private:
+ void Clear();
+
+ AnsDecoder ans_decoder_;
+ double p0_f_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
diff --git a/core/adaptive_rans_bit_encoder.cc b/core/adaptive_rans_bit_encoder.cc
new file mode 100644
index 0000000..a6350d5
--- /dev/null
+++ b/core/adaptive_rans_bit_encoder.cc
@@ -0,0 +1,59 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "core/adaptive_rans_bit_encoder.h"
+
+#include "core/adaptive_rans_bit_coding_shared.h"
+
+namespace draco {
+
+AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {}
+
+AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); }
+
+void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); }
+
+void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ // Buffer for ans to write.
+ std::vector<uint8_t> buffer(bits_.size() + 16);
+ AnsCoder ans_coder;
+ ans_write_init(&ans_coder, buffer.data());
+
+ // Unfortunately we have to encode the bits in reversed order, while the
+ // probabilities that should be given are those of the forward sequence.
+ double p0_f = 0.5;
+ std::vector<uint8_t> p0s;
+ p0s.reserve(bits_.size());
+ for (bool b : bits_) {
+ p0s.push_back(clamp_probability(p0_f));
+ p0_f = update_probability(p0_f, b);
+ }
+ auto bit = bits_.rbegin();
+ auto pit = p0s.rbegin();
+ while (bit != bits_.rend()) {
+ rabs_write(&ans_coder, *bit, *pit);
+ ++bit;
+ ++pit;
+ }
+
+ const uint32_t size_in_bytes = ans_write_end(&ans_coder);
+ target_buffer->Encode(size_in_bytes);
+ target_buffer->Encode(buffer.data(), size_in_bytes);
+
+ Clear();
+}
+
+void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); }
+
+} // namespace draco
diff --git a/core/adaptive_rans_coding.h b/core/adaptive_rans_bit_encoder.h
similarity index 64%
rename from core/adaptive_rans_coding.h
rename to core/adaptive_rans_bit_encoder.h
index 0e125c9..da0cc5a 100644
--- a/core/adaptive_rans_coding.h
+++ b/core/adaptive_rans_bit_encoder.h
@@ -12,14 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-// File provides basic classes and functions for rANS coding.
-#ifndef DRACO_CORE_ADAPTIVE_RANS_CODING_H_
-#define DRACO_CORE_ADAPTIVE_RANS_CODING_H_
+// File provides basic classes and functions for rANS bit encoding.
+#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
+#define DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
#include
#include "core/ans.h"
-#include "core/decoder_buffer.h"
#include "core/encoder_buffer.h"
namespace draco {
@@ -57,32 +56,6 @@ class AdaptiveRAnsBitEncoder {
std::vector bits_;
};
-// Class for decoding a sequence of bits that were encoded with
-// AdaptiveRAnsBitEncoder.
-class AdaptiveRAnsBitDecoder {
- public:
- AdaptiveRAnsBitDecoder();
- ~AdaptiveRAnsBitDecoder();
-
- // Sets |source_buffer| as the buffer to decode bits from.
- bool StartDecoding(DecoderBuffer *source_buffer);
-
- // Decode one bit. Returns true if the bit is a 1, otherwsie false.
- bool DecodeNextBit();
-
- // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
- // > 0 and <= 32.
- void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
-
- void EndDecoding() {}
-
- private:
- void Clear();
-
- AnsDecoder ans_decoder_;
- double p0_f_;
-};
-
} // namespace draco
-#endif // DRACO_CORE_ADAPTIVE_RANS_CODING_H_
+#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
diff --git a/core/ans.h b/core/ans.h
index 019db57..c02fb48 100644
--- a/core/ans.h
+++ b/core/ans.h
@@ -18,14 +18,13 @@
// See http://arxiv.org/abs/1311.2540v2 for more informaiton on rANS.
// This file is based off libvpx's ans.h.
-#include
-
#include
#define ANS_DIVIDE_BY_MULTIPLY 1
#if ANS_DIVIDE_BY_MULTIPLY
#include "core/divide.h"
#endif
+#include "core/macros.h"
namespace draco {
@@ -98,14 +97,14 @@ static inline uint32_t mem_get_le32(const void *vmem) {
}
static inline void mem_put_le16(void *vmem, uint32_t val) {
- uint8_t *mem = (uint8_t *)vmem;
+ uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
mem[0] = (val >> 0) & 0xff;
mem[1] = (val >> 8) & 0xff;
}
static inline void mem_put_le24(void *vmem, uint32_t val) {
- uint8_t *mem = (uint8_t *)vmem;
+ uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
mem[0] = (val >> 0) & 0xff;
mem[1] = (val >> 8) & 0xff;
@@ -113,7 +112,7 @@ static inline void mem_put_le24(void *vmem, uint32_t val) {
}
static inline void mem_put_le32(void *vmem, uint32_t val) {
- uint8_t *mem = (uint8_t *)vmem;
+ uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
mem[0] = (val >> 0) & 0xff;
mem[1] = (val >> 8) & 0xff;
@@ -130,8 +129,8 @@ static inline void ans_write_init(struct AnsCoder *const ans,
static inline int ans_write_end(struct AnsCoder *const ans) {
uint32_t state;
- assert(ans->state >= l_base);
- assert(ans->state < l_base * io_base);
+ DCHECK_GE(ans->state, l_base);
+ DCHECK_LT(ans->state, l_base * io_base);
state = ans->state - l_base;
if (state < (1 << 6)) {
ans->buf[ans->buf_offset] = (0x00 << 6) + state;
@@ -143,7 +142,7 @@ static inline int ans_write_end(struct AnsCoder *const ans) {
mem_put_le24(ans->buf + ans->buf_offset, (0x02 << 22) + state);
return ans->buf_offset + 3;
} else {
- assert(0 && "State is too large to be serialized");
+ DCHECK(0 && "State is too large to be serialized");
return ans->buf_offset;
}
}
@@ -288,7 +287,7 @@ static inline int uabs_read_bit(struct AnsDecoder *ans) {
while (state < l_base && ans->buf_offset > 0) {
state = state * io_base + ans->buf[--ans->buf_offset];
}
- s = (int)(state & 1);
+ s = static_cast<int>(state & 1);
ans->state = state >> 1;
return s;
}
@@ -355,8 +354,8 @@ class RAnsEncoder {
// Needs to be called after all symbols are encoded.
inline int write_end() {
uint32_t state;
- assert(ans_.state >= l_rans_base);
- assert(ans_.state < l_rans_base * io_base);
+ DCHECK_GE(ans_.state, l_rans_base);
+ DCHECK_LT(ans_.state, l_rans_base * io_base);
state = ans_.state - l_rans_base;
if (state < (1 << 6)) {
ans_.buf[ans_.buf_offset] = (0x00 << 6) + state;
@@ -371,7 +370,7 @@ class RAnsEncoder {
mem_put_le32(ans_.buf + ans_.buf_offset, (0x03 << 30) + state);
return ans_.buf_offset + 4;
} else {
- assert(0 && "State is too large to be serialized");
+ DCHECK(0 && "State is too large to be serialized");
return ans_.buf_offset;
}
}
diff --git a/core/direct_bit_coding.cc b/core/direct_bit_decoder.cc
similarity index 68%
rename from core/direct_bit_coding.cc
rename to core/direct_bit_decoder.cc
index f7e9417..9d20bbc 100644
--- a/core/direct_bit_coding.cc
+++ b/core/direct_bit_decoder.cc
@@ -12,31 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-#include "core/direct_bit_coding.h"
-#include
+#include "core/direct_bit_decoder.h"
namespace draco {
-DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}
-
-DirectBitEncoder::~DirectBitEncoder() { Clear(); }
-
-void DirectBitEncoder::StartEncoding() { Clear(); }
-
-void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
- bits_.push_back(local_bits_);
- const uint32_t size_in_byte = bits_.size() * 4;
- target_buffer->Encode(size_in_byte);
- target_buffer->Encode(bits_.data(), size_in_byte);
- Clear();
-}
-
-void DirectBitEncoder::Clear() {
- bits_.clear();
- local_bits_ = 0;
- num_local_bits_ = 0;
-}
-
DirectBitDecoder::DirectBitDecoder() : pos_(bits_.end()), num_used_bits_(0) {}
DirectBitDecoder::~DirectBitDecoder() { Clear(); }
diff --git a/core/direct_bit_decoder.h b/core/direct_bit_decoder.h
new file mode 100644
index 0000000..44cc158
--- /dev/null
+++ b/core/direct_bit_decoder.h
@@ -0,0 +1,79 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
+#ifndef DRACO_CORE_DIRECT_BIT_DECODER_H_
+#define DRACO_CORE_DIRECT_BIT_DECODER_H_
+
+#include <vector>
+
+#include "core/decoder_buffer.h"
+
+namespace draco {
+
+class DirectBitDecoder {
+ public:
+ DirectBitDecoder();
+ ~DirectBitDecoder();
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit() {
+ const uint32_t selector = 1 << (31 - num_used_bits_);
+ const bool bit = *pos_ & selector;
+ ++num_used_bits_;
+ if (num_used_bits_ == 32) {
+ ++pos_;
+ num_used_bits_ = 0;
+ }
+ return bit;
+ }
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ DCHECK_EQ(true, nbits <= 32);
+ DCHECK_EQ(true, nbits > 0);
+ const int remaining = 32 - num_used_bits_;
+ if (nbits <= remaining) {
+ *value = (*pos_ << num_used_bits_) >> (32 - nbits);
+ num_used_bits_ += nbits;
+ if (num_used_bits_ == 32) {
+ ++pos_;
+ num_used_bits_ = 0;
+ }
+ } else {
+ const uint32_t value_l = ((*pos_) << num_used_bits_);
+ num_used_bits_ = nbits - remaining;
+ ++pos_;
+ const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
+ *value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
+ }
+ }
+
+ void EndDecoding() {}
+
+ private:
+ void Clear();
+
+ std::vector<uint32_t> bits_;
+ std::vector<uint32_t>::const_iterator pos_;
+ uint32_t num_used_bits_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_DIRECT_BIT_DECODER_H_
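A concrete example of the word-boundary branch in DecodeLeastSignificantBits32() above, assuming 28 bits of the current word have already been consumed (bits are read from the most significant bit down):

    // num_used_bits_ == 28, nbits == 8, remaining == 4:
    //   *pos_       == 0x1234567A  ->  value_l = 0x1234567A << 28 = 0xA0000000
    //   *(pos_ + 1) == 0xBCDE1234  ->  value_r = 0xBCDE1234 >> 28 = 0x0000000B
    //   *value = (value_l >> (32 - 4 - 4)) | value_r = 0xA0 | 0x0B = 0xAB
    // The 4 unread low bits of the first word become the high nibble and the
    // top 4 bits of the next word become the low nibble of the result.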
diff --git a/core/direct_bit_encoder.cc b/core/direct_bit_encoder.cc
new file mode 100644
index 0000000..10bb6fc
--- /dev/null
+++ b/core/direct_bit_encoder.cc
@@ -0,0 +1,39 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "core/direct_bit_encoder.h"
+
+namespace draco {
+
+DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}
+
+DirectBitEncoder::~DirectBitEncoder() { Clear(); }
+
+void DirectBitEncoder::StartEncoding() { Clear(); }
+
+void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
+ bits_.push_back(local_bits_);
+ const uint32_t size_in_byte = bits_.size() * 4;
+ target_buffer->Encode(size_in_byte);
+ target_buffer->Encode(bits_.data(), size_in_byte);
+ Clear();
+}
+
+void DirectBitEncoder::Clear() {
+ bits_.clear();
+ local_bits_ = 0;
+ num_local_bits_ = 0;
+}
+
+} // namespace draco
diff --git a/core/direct_bit_coding.h b/core/direct_bit_encoder.h
similarity index 61%
rename from core/direct_bit_coding.h
rename to core/direct_bit_encoder.h
index 36b6c4f..41086fe 100644
--- a/core/direct_bit_coding.h
+++ b/core/direct_bit_encoder.h
@@ -13,12 +13,11 @@
// limitations under the License.
//
// File provides direct encoding of bits with arthmetic encoder interface.
-#ifndef DRACO_CORE_DIRECT_BIT_CODING_H_
-#define DRACO_CORE_DIRECT_BIT_CODING_H_
+#ifndef DRACO_CORE_DIRECT_BIT_ENCODER_H_
+#define DRACO_CORE_DIRECT_BIT_ENCODER_H_
#include
-#include "core/decoder_buffer.h"
#include "core/encoder_buffer.h"
namespace draco {
@@ -85,58 +84,6 @@ class DirectBitEncoder {
uint32_t num_local_bits_;
};
-class DirectBitDecoder {
- public:
- DirectBitDecoder();
- ~DirectBitDecoder();
-
- // Sets |source_buffer| as the buffer to decode bits from.
- bool StartDecoding(DecoderBuffer *source_buffer);
-
- // Decode one bit. Returns true if the bit is a 1, otherwsie false.
- bool DecodeNextBit() {
- const uint32_t selector = 1 << (31 - num_used_bits_);
- const bool bit = *pos_ & selector;
- ++num_used_bits_;
- if (num_used_bits_ == 32) {
- ++pos_;
- num_used_bits_ = 0;
- }
- return bit;
- }
-
- // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
- // > 0 and <= 32.
- void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
- DCHECK_EQ(true, nbits <= 32);
- DCHECK_EQ(true, nbits > 0);
- const int remaining = 32 - num_used_bits_;
- if (nbits <= remaining) {
- *value = (*pos_ << num_used_bits_) >> (32 - nbits);
- num_used_bits_ += nbits;
- if (num_used_bits_ == 32) {
- ++pos_;
- num_used_bits_ = 0;
- }
- } else {
- const uint32_t value_l = ((*pos_) << num_used_bits_);
- num_used_bits_ = nbits - remaining;
- ++pos_;
- const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
- *value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
- }
- }
-
- void EndDecoding() {}
-
- private:
- void Clear();
-
- std::vector bits_;
- std::vector::const_iterator pos_;
- uint32_t num_used_bits_;
-};
-
} // namespace draco
-#endif // DRACO_CORE_DIRECT_BIT_CODING_H_
+#endif // DRACO_CORE_DIRECT_BIT_ENCODER_H_
diff --git a/core/draco_version.h b/core/draco_version.h
index 48b9403..492a074 100644
--- a/core/draco_version.h
+++ b/core/draco_version.h
@@ -18,7 +18,7 @@
namespace draco {
// Draco version is comprised of ...
-static const char kDracoVersion[] = "0.9.1";
+static const char kDracoVersion[] = "0.10.0";
const char *Version() { return kDracoVersion; }
diff --git a/core/folded_integer_bit_decoder.h b/core/folded_integer_bit_decoder.h
new file mode 100644
index 0000000..6064b49
--- /dev/null
+++ b/core/folded_integer_bit_decoder.h
@@ -0,0 +1,76 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides direct encoding of bits with arithmetic encoder interface.
+#ifndef DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
+#define DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
+
+#include <array>
+
+#include "core/decoder_buffer.h"
+
+namespace draco {
+
+// See FoldedBit32Encoder for more details.
+template <class BitDecoderT>
+class FoldedBit32Decoder {
+ public:
+ FoldedBit32Decoder() {}
+ ~FoldedBit32Decoder() {}
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ bool StartDecoding(DecoderBuffer *source_buffer) {
+ for (int i = 0; i < 32; i++) {
+ if (!folded_number_decoders_[i].StartDecoding(source_buffer))
+ return false;
+ }
+ return bit_decoder_.StartDecoding(source_buffer);
+ }
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); }
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ uint32_t result = 0;
+ for (int i = 0; i < nbits; ++i) {
+ const bool bit = folded_number_decoders_[i].DecodeNextBit();
+ result = (result << 1) + bit;
+ }
+ *value = result;
+ }
+
+ void EndDecoding() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_decoders_[i].EndDecoding();
+ }
+ bit_decoder_.EndDecoding();
+ }
+
+ private:
+ void Clear() {
+ for (int i = 0; i < 32; i++) {
+ folded_number_decoders_[i].Clear();
+ }
+ bit_decoder_.Clear();
+ }
+
+ std::array<BitDecoderT, 32> folded_number_decoders_;
+ BitDecoderT bit_decoder_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
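
A standalone sketch of the bit "folding" this decoder undoes: each of the 32 bit positions gets its own stream (plain std::vector<bool> queues stand in for the BitDecoderT instances), and DecodeLeastSignificantBits32() rebuilds a value MSB-first from those streams. The BitStreams alias and the Fold/Unfold helpers are illustrative names, not part of Draco.

#include <array>
#include <cstddef>
#include <cstdint>
#include <vector>

using BitStreams = std::array<std::vector<bool>, 32>;

// Splits |value| across the per-position streams (encoder-side view).
void FoldValue(uint32_t value, int nbits, BitStreams *streams) {
  for (int i = 0; i < nbits; ++i) {
    // Stream i receives bit (nbits - 1 - i) of |value|, matching the
    // MSB-first order used by DecodeLeastSignificantBits32().
    (*streams)[i].push_back(((value >> (nbits - 1 - i)) & 1) != 0);
  }
}

// Rebuilds the |index|-th value that was folded with the same |nbits|.
uint32_t UnfoldValue(int nbits, const BitStreams &streams, size_t index) {
  uint32_t result = 0;
  for (int i = 0; i < nbits; ++i)
    result = (result << 1) + static_cast<uint32_t>(streams[i][index]);
  return result;
}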
diff --git a/core/folded_bit32_coding.h b/core/folded_integer_bit_encoder.h
similarity index 63%
rename from core/folded_bit32_coding.h
rename to core/folded_integer_bit_encoder.h
index 93d00a3..fff36f2 100644
--- a/core/folded_bit32_coding.h
+++ b/core/folded_integer_bit_encoder.h
@@ -13,12 +13,11 @@
// limitations under the License.
//
// File provides direct encoding of bits with arithmetic encoder interface.
-#ifndef DRACO_CORE_FOLDED_BIT32_CODING_H_
-#define DRACO_CORE_FOLDED_BIT32_CODING_H_
+#ifndef DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
+#define DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
#include <array>
-#include "core/decoder_buffer.h"
#include "core/encoder_buffer.h"
namespace draco {
@@ -29,6 +28,7 @@ namespace draco {
// The behavior is essentially the same as other arithmetic encoding schemes,
// the only difference is that encoding and decoding of bits must be absolutely
// symmetric, bits handed in by EncodeBit32 must be also decoded in this way.
+// This is the FoldedBit32Encoder, see also FoldedBit32Decoder.
template <class BitEncoderT>
class FoldedBit32Encoder {
public:
@@ -77,54 +77,6 @@ class FoldedBit32Encoder {
BitEncoderT bit_encoder_;
};
-template <class BitDecoderT>
-class FoldedBit32Decoder {
- public:
- FoldedBit32Decoder() {}
- ~FoldedBit32Decoder() {}
-
- // Sets |source_buffer| as the buffer to decode bits from.
- bool StartDecoding(DecoderBuffer *source_buffer) {
- for (int i = 0; i < 32; i++) {
- if (!folded_number_decoders_[i].StartDecoding(source_buffer))
- return false;
- }
- return bit_decoder_.StartDecoding(source_buffer);
- }
-
- // Decode one bit. Returns true if the bit is a 1, otherwise false.
- bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); }
-
- // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
- // > 0 and <= 32.
- void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
- uint32_t result = 0;
- for (int i = 0; i < nbits; ++i) {
- const bool bit = folded_number_decoders_[i].DecodeNextBit();
- result = (result << 1) + bit;
- }
- *value = result;
- }
-
- void EndDecoding() {
- for (int i = 0; i < 32; i++) {
- folded_number_decoders_[i].EndDecoding();
- }
- bit_decoder_.EndDecoding();
- }
-
- private:
- void Clear() {
- for (int i = 0; i < 32; i++) {
- folded_number_decoders_[i].Clear();
- }
- bit_decoder_.Clear();
- }
-
- std::array<BitDecoderT, 32> folded_number_decoders_;
- BitDecoderT bit_decoder_;
-};
-
} // namespace draco
-#endif // DRACO_CORE_FOLDED_BIT32_CODING_H_
+#endif // DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
diff --git a/core/macros.h b/core/macros.h
index c7c1de5..6d6c31f 100644
--- a/core/macros.h
+++ b/core/macros.h
@@ -32,17 +32,21 @@ namespace draco {
#define CHECK(x) (assert(x));
#define CHECK_EQ(a, b) assert((a) == (b));
+#define CHECK_NE(a, b) assert((a) != (b));
#define CHECK_GE(a, b) assert((a) >= (b));
#define CHECK_GT(a, b) assert((a) > (b));
-#define CHECK_NE(a, b) assert((a) != (b));
+#define CHECK_LE(a, b) assert((a) <= (b));
+#define CHECK_LT(a, b) assert((a) < (b));
#define CHECK_NOTNULL(x) assert((x) != NULL);
#define DCHECK(x) (assert(x));
#define DCHECK_EQ(a, b) assert((a) == (b));
+#define DCHECK_NE(a, b) assert((a) != (b));
#define DCHECK_GE(a, b) assert((a) >= (b));
#define DCHECK_GT(a, b) assert((a) > (b));
#define DCHECK_LE(a, b) assert((a) <= (b));
#define DCHECK_LT(a, b) assert((a) < (b));
+#define DCHECK_NOTNULL(x) assert((x) != NULL);
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName &) = delete; \
diff --git a/core/options.cc b/core/options.cc
index 2609b20..e06a12b 100644
--- a/core/options.cc
+++ b/core/options.cc
@@ -23,7 +23,7 @@ std::string ValToString(int val) {
sprintf(temp, "%d", val);
return temp;
}
-} // namespace anonymous
+} // namespace
namespace draco {
diff --git a/core/rans_bit_decoder.cc b/core/rans_bit_decoder.cc
new file mode 100644
index 0000000..5ccf3d9
--- /dev/null
+++ b/core/rans_bit_decoder.cc
@@ -0,0 +1,66 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+#include "core/rans_bit_decoder.h"
+
+#include "core/bit_utils.h"
+
+namespace draco {
+
+RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {}
+
+RAnsBitDecoder::~RAnsBitDecoder() { Clear(); }
+
+bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
+ Clear();
+
+ if (!source_buffer->Decode(&prob_zero_))
+ return false;
+
+ uint32_t size_in_bytes;
+ if (!source_buffer->Decode(&size_in_bytes))
+ return false;
+
+ if (size_in_bytes > source_buffer->remaining_size())
+ return false;
+
+ if (ans_read_init(&ans_decoder_,
+ reinterpret_cast<uint8_t *>(
+ const_cast<char *>(source_buffer->data_head())),
+ size_in_bytes) != 0)
+ return false;
+ source_buffer->Advance(size_in_bytes);
+ return true;
+}
+
+bool RAnsBitDecoder::DecodeNextBit() {
+ const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_);
+ return bit > 0;
+}
+
+void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
+ DCHECK_EQ(true, nbits <= 32);
+ DCHECK_EQ(true, nbits > 0);
+
+ uint32_t result = 0;
+ while (nbits) {
+ result = (result << 1) + DecodeNextBit();
+ --nbits;
+ }
+ *value = result;
+}
+
+void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); }
+
+} // namespace draco
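
A hedged usage sketch for the decoder split out above. Only StartDecoding(), DecodeLeastSignificantBits32(), and EndDecoding() come from the class as declared in rans_bit_decoder.h; the function name and the 10-bit field width are made up for illustration.

#include <cstdint>
#include <vector>

#include "core/decoder_buffer.h"
#include "core/rans_bit_decoder.h"

// Reads |num_values| 10-bit values from a stream written by RAnsBitEncoder.
bool DecodeTenBitValues(draco::DecoderBuffer *buffer, int num_values,
                        std::vector<uint32_t> *out) {
  draco::RAnsBitDecoder decoder;
  if (!decoder.StartDecoding(buffer))
    return false;  // Invalid or truncated data.
  for (int i = 0; i < num_values; ++i) {
    uint32_t value;
    decoder.DecodeLeastSignificantBits32(10, &value);
    out->push_back(value);
  }
  decoder.EndDecoding();
  return true;
}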
diff --git a/core/rans_bit_decoder.h b/core/rans_bit_decoder.h
new file mode 100644
index 0000000..474435b
--- /dev/null
+++ b/core/rans_bit_decoder.h
@@ -0,0 +1,54 @@
+// Copyright 2016 The Draco Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// File provides basic classes and functions for rANS coding.
+#ifndef DRACO_CORE_RANS_BIT_DECODER_H_
+#define DRACO_CORE_RANS_BIT_DECODER_H_
+
+#include
+
+#include "core/ans.h"
+#include "core/decoder_buffer.h"
+
+namespace draco {
+
+// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder.
+class RAnsBitDecoder {
+ public:
+ RAnsBitDecoder();
+ ~RAnsBitDecoder();
+
+ // Sets |source_buffer| as the buffer to decode bits from.
+ // Returns false when the data is invalid.
+ bool StartDecoding(DecoderBuffer *source_buffer);
+
+ // Decode one bit. Returns true if the bit is a 1, otherwise false.
+ bool DecodeNextBit();
+
+ // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
+ // > 0 and <= 32.
+ void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
+
+ void EndDecoding() {}
+
+ private:
+ void Clear();
+
+ AnsDecoder ans_decoder_;
+ uint8_t prob_zero_;
+};
+
+} // namespace draco
+
+#endif // DRACO_CORE_RANS_BIT_DECODER_H_
diff --git a/core/rans_coding.cc b/core/rans_bit_encoder.cc
similarity index 75%
rename from core/rans_coding.cc
rename to core/rans_bit_encoder.cc
index dfb17f3..323343d 100644
--- a/core/rans_coding.cc
+++ b/core/rans_bit_encoder.cc
@@ -12,8 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//
-#include "core/rans_coding.h"
+#include "core/rans_bit_encoder.h"
+#include "core/ans.h"
#include "core/bit_utils.h"
namespace draco {
@@ -118,49 +119,4 @@ void RAnsBitEncoder::Clear() {
num_local_bits_ = 0;
}
-RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {}
-
-RAnsBitDecoder::~RAnsBitDecoder() { Clear(); }
-
-bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
- Clear();
-
- if (!source_buffer->Decode(&prob_zero_))
- return false;
-
- uint32_t size_in_bytes;
- if (!source_buffer->Decode(&size_in_bytes))
- return false;
-
- if (size_in_bytes > source_buffer->remaining_size())
- return false;
-
- if (ans_read_init(&ans_decoder_,
- reinterpret_cast<uint8_t *>(
- const_cast<char *>(source_buffer->data_head())),
- size_in_bytes) != 0)
- return false;
- source_buffer->Advance(size_in_bytes);
- return true;
-}
-
-bool RAnsBitDecoder::DecodeNextBit() {
- const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_);
- return bit > 0;
-}
-
-void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
- DCHECK_EQ(true, nbits <= 32);
- DCHECK_EQ(true, nbits > 0);
-
- uint32_t result = 0;
- while (nbits) {
- result = (result << 1) + DecodeNextBit();
- --nbits;
- }
- *value = result;
-}
-
-void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); }
-
} // namespace draco
diff --git a/core/rans_coding.h b/core/rans_bit_encoder.h
similarity index 66%
rename from core/rans_coding.h
rename to core/rans_bit_encoder.h
index a59102a..b6752e1 100644
--- a/core/rans_coding.h
+++ b/core/rans_bit_encoder.h
@@ -13,13 +13,11 @@
// limitations under the License.
//
// File provides basic classes and functions for rANS coding.
-#ifndef DRACO_CORE_RANS_CODING_H_
-#define DRACO_CORE_RANS_CODING_H_
+#ifndef DRACO_CORE_RANS_BIT_ENCODER_H_
+#define DRACO_CORE_RANS_BIT_ENCODER_H_
#include
-#include "core/ans.h"
-#include "core/decoder_buffer.h"
#include "core/encoder_buffer.h"
namespace draco {
@@ -54,32 +52,6 @@ class RAnsBitEncoder {
uint32_t num_local_bits_;
};
-// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder.
-class RAnsBitDecoder {
- public:
- RAnsBitDecoder();
- ~RAnsBitDecoder();
-
- // Sets |source_buffer| as the buffer to decode bits from.
- // Returns false when the data is invalid.
- bool StartDecoding(DecoderBuffer *source_buffer);
-
- // Decode one bit. Returns true if the bit is a 1, otherwsie false.
- bool DecodeNextBit();
-
- // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
- // > 0 and <= 32.
- void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
-
- void EndDecoding() {}
-
- private:
- void Clear();
-
- AnsDecoder ans_decoder_;
- uint8_t prob_zero_;
-};
-
} // namespace draco
-#endif // DRACO_CORE_RANS_CODING_H_
+#endif // DRACO_CORE_RANS_BIT_ENCODER_H_
diff --git a/core/rans_coding_test.cc b/core/rans_coding_test.cc
index 12b2a42..817fa76 100644
--- a/core/rans_coding_test.cc
+++ b/core/rans_coding_test.cc
@@ -1,6 +1,8 @@
-#include "core/rans_coding.h"
-#include "core/adaptive_rans_coding.h"
+#include "core/adaptive_rans_bit_decoder.h"
+#include "core/adaptive_rans_bit_encoder.h"
#include "core/draco_test_base.h"
+#include "core/rans_bit_decoder.h"
+#include "core/rans_bit_encoder.h"
// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error
// when compiling (blaze test :rans_coding_test --config=asan)
diff --git a/core/rans_symbol_coding.h b/core/rans_symbol_coding.h
index 1e295b8..fe64041 100644
--- a/core/rans_symbol_coding.h
+++ b/core/rans_symbol_coding.h
@@ -37,6 +37,17 @@ constexpr int ComputeRAnsPrecisionFromMaxSymbolBitLength(int max_bit_length) {
: ComputeRAnsUnclampedPrecision(max_bit_length);
}
+// Compute approximate frequency table size needed for storing the provided
+// symbols.
+static int64_t ApproximateRAnsFrequencyTableBits(int32_t max_value,
+ int num_unique_symbols) {
+ // Approximate number of bits for storing zero frequency entries using the
+ // run length encoding (with max length of 64).
+ const int64_t table_zero_frequency_bits =
+ 8 * (num_unique_symbols + (max_value - num_unique_symbols) / 64);
+ return 8 * num_unique_symbols + table_zero_frequency_bits;
+}
+
} // namespace draco
#endif // DRACO_CORE_RANS_SYMBOL_CODING_H_
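
A short worked example for ApproximateRAnsFrequencyTableBits() above; the 255/40 inputs are arbitrary and only make the arithmetic concrete.

#include <cstdint>

#include "core/rans_symbol_coding.h"

// max_value = 255, num_unique_symbols = 40:
//   zero-frequency entries: 255 - 40 = 215, stored as runs of up to 64
//     -> 215 / 64 = 3 extra run-length bytes
//   table_zero_frequency_bits = 8 * (40 + 3) = 344
//   estimate                  = 8 * 40 + 344 = 664 bits (roughly 83 bytes)
const int64_t kEstimatedTableBits =
    draco::ApproximateRAnsFrequencyTableBits(/* max_value= */ 255,
                                             /* num_unique_symbols= */ 40);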
diff --git a/core/rans_symbol_decoder.h b/core/rans_symbol_decoder.h
index 566cd1b..f713371 100644
--- a/core/rans_symbol_decoder.h
+++ b/core/rans_symbol_decoder.h
@@ -61,22 +61,38 @@ bool RAnsSymbolDecoder::Create(DecoderBuffer *buffer) {
return true;
// Decode the table.
for (uint32_t i = 0; i < num_symbols_; ++i) {
- uint32_t prob = 0;
- uint8_t byte_prob = 0;
+ uint8_t prob_data = 0;
// Decode the first byte and extract the number of extra bytes we need to
- // get.
- if (!buffer->Decode(&byte_prob))
+ // get, or the offset to the next symbol with non-zero probability.
+ if (!buffer->Decode(&prob_data))
return false;
- const int extra_bytes = byte_prob & 3;
- prob = byte_prob >> 2;
- for (int b = 0; b < extra_bytes; ++b) {
- uint8_t eb;
- if (!buffer->Decode(&eb))
+ // Token is stored in the first two bits of the first byte. Values 0-2 are
+ // used to indicate the number of extra bytes, and value 3 is a special
+ // symbol used to denote run-length coding of zero probability entries.
+ // See rans_symbol_encoder.h for more details.
+ const int token = prob_data & 3;
+ if (token == 3) {
+ const uint32_t offset = prob_data >> 2;
+ if (i + offset >= num_symbols_)
return false;
- // Shift 8 bits for each extra byte and subtract 2 for the two first bits.
- prob |= static_cast<uint32_t>(eb) << (8 * (b + 1) - 2);
+ // Set zero probability for all symbols in the specified range.
+ for (uint32_t j = 0; j < offset + 1; ++j) {
+ probability_table_[i + j] = 0;
+ }
+ i += offset;
+ } else {
+ const int extra_bytes = token;
+ uint32_t prob = prob_data >> 2;
+ for (int b = 0; b < extra_bytes; ++b) {
+ uint8_t eb;
+ if (!buffer->Decode(&eb))
+ return false;
+ // Shift 8 bits for each extra byte and subtract 2 for the two first
+ // bits.
+ prob |= static_cast<uint32_t>(eb) << (8 * (b + 1) - 2);
+ }
+ probability_table_[i] = prob;
}
- probability_table_[i] = prob;
}
if (!ans_.rans_build_look_up_table(&probability_table_[0], num_symbols_))
return false;
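
A standalone sketch of the per-entry byte format the new decoding path above parses: the two low bits of the first byte are a token (0-2 is the number of extra probability bytes, 3 marks a run of zero-probability symbols) and the remaining six bits carry either the low probability bits or the run offset. DecodeEntry and its parameters are illustrative names, not the Draco classes.

#include <cstddef>
#include <cstdint>
#include <vector>

// Decodes one table entry starting at bytes[*pos] and fills |probs| from
// symbol index |i|. Returns the next symbol index, or -1 on malformed input.
int DecodeEntry(const std::vector<uint8_t> &bytes, size_t *pos,
                std::vector<uint32_t> *probs, int i) {
  const uint8_t prob_data = bytes[(*pos)++];
  const int token = prob_data & 3;
  if (token == 3) {
    const uint32_t offset = prob_data >> 2;
    if (static_cast<size_t>(i) + offset >= probs->size())
      return -1;
    for (uint32_t j = 0; j <= offset; ++j)
      (*probs)[i + j] = 0;           // Zero run covers offset + 1 symbols.
    return i + static_cast<int>(offset) + 1;
  }
  uint32_t prob = prob_data >> 2;    // Six payload bits of the first byte.
  for (int b = 0; b < token; ++b)    // token == number of extra bytes (0-2).
    prob |= static_cast<uint32_t>(bytes[(*pos)++]) << (8 * (b + 1) - 2);
  (*probs)[i] = prob;
  return i + 1;
}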
diff --git a/core/rans_symbol_encoder.h b/core/rans_symbol_encoder.h
index a319f95..fdc6265 100644
--- a/core/rans_symbol_encoder.h
+++ b/core/rans_symbol_encoder.h
@@ -64,7 +64,7 @@ class RAnsSymbolEncoder {
};
// Encodes the probability table into the output buffer.
- void EncodeTable(EncoderBuffer *buffer);
+ bool EncodeTable(EncoderBuffer *buffer);
static constexpr int max_symbols_ = 1 << max_symbol_bit_length_t;
static constexpr int rans_precision_bits_ =
@@ -188,12 +188,13 @@ bool RAnsSymbolEncoder::Create(
num_bits += static_cast<double>(frequencies[i]) * log2(norm_prob);
}
num_expected_bits_ = static_cast<uint64_t>(ceil(-num_bits));
- EncodeTable(buffer);
+ if (!EncodeTable(buffer))
+ return false;
return true;
}
template <int max_symbol_bit_length_t>
-void RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
+bool RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
EncoderBuffer *buffer) {
buffer->Encode(num_symbols_);
// Use varint encoding for the probabilities (first two bits represent the
@@ -206,17 +207,38 @@ void RAnsSymbolEncoder::EncodeTable(
if (prob >= (1 << 14)) {
num_extra_bytes++;
if (prob >= (1 << 22)) {
- num_extra_bytes++;
+ // The maximum number of precision bits is 20 so we should not really
+ // get to this point.
+ return false;
}
}
}
- // Encode the first byte (including the number of extra bytes).
- buffer->Encode(static_cast<uint8_t>((prob << 2) | (num_extra_bytes & 3)));
- // Encode the extra bytes.
- for (int b = 0; b < num_extra_bytes; ++b) {
- buffer->Encode(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
+ if (prob == 0) {
+ // When the probability of the symbol is 0, set the first two bits to 1
+ // (unique identifier) and use the remaining 6 bits to store the offset
+ // to the next symbol with non-zero probability.
+ uint32_t offset = 0;
+ for (; offset < (1 << 6) - 1; ++offset) {
+ // Note: we don't have to check whether the next symbol id is larger
+ // than num_symbols_ because we know that the last symbol always has
+ // non-zero probability.
+ const uint32_t next_prob = probability_table_[i + offset + 1].prob;
+ if (next_prob > 0) {
+ break;
+ }
+ }
+ buffer->Encode(static_cast<uint8_t>((offset << 2) | 3));
+ i += offset;
+ } else {
+ // Encode the first byte (including the number of extra bytes).
+ buffer->Encode(static_cast<uint8_t>((prob << 2) | (num_extra_bytes & 3)));
+ // Encode the extra bytes.
+ for (int b = 0; b < num_extra_bytes; ++b) {
+ buffer->Encode(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
+ }
}
}
+ return true;
}
template <int max_symbol_bit_length_t>
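
The encoder-side counterpart, again as a standalone illustrative sketch rather than the Draco class: a run of zero probabilities collapses into one byte holding token 3 plus a 6-bit offset to the next non-zero entry (so one byte skips at most 64 zero symbols), while non-zero probabilities use tokens 0-2 as the extra-byte count. EncodeEntries and its parameters are invented names; as in EncodeTable() above, the last symbol is assumed to have non-zero probability and each probability is assumed to fit in 22 bits.

#include <cstdint>
#include <vector>

void EncodeEntries(const std::vector<uint32_t> &probs,
                   std::vector<uint8_t> *out) {
  for (uint32_t i = 0; i < probs.size(); ++i) {
    const uint32_t prob = probs[i];
    if (prob == 0) {
      // Count additional zero entries that follow, capped by the 6-bit field.
      uint32_t offset = 0;
      while (offset < (1 << 6) - 1 && probs[i + offset + 1] == 0)
        ++offset;
      out->push_back(static_cast<uint8_t>((offset << 2) | 3));
      i += offset;
      continue;
    }
    // Varint branch: token = number of extra probability bytes.
    const int num_extra_bytes =
        prob >= (1 << 6) ? (prob >= (1 << 14) ? 2 : 1) : 0;
    out->push_back(static_cast<uint8_t>((prob << 2) | num_extra_bytes));
    for (int b = 0; b < num_extra_bytes; ++b)
      out->push_back(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
  }
}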
diff --git a/core/shannon_entropy.cc b/core/shannon_entropy.cc
new file mode 100644
index 0000000..4645777
--- /dev/null
+++ b/core/shannon_entropy.cc
@@ -0,0 +1,33 @@
+#include "core/shannon_entropy.h"
+
+#include <cmath>
+#include <vector>
+
+namespace draco {
+
+int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols,
+ int max_value, int *out_num_unique_symbols) {
+ // First find frequency of all unique symbols in the input array.
+ int num_unique_symbols = 0;
+ std::vector