mirror of
https://git.mirrors.martin98.com/https://github.com/google/draco
synced 2025-08-11 15:39:11 +08:00
Version 0.10.0 snapshot
- Improved compression for triangular meshes (~10%) - Added WebAssembly decoder - Code cleanup + robustness fixes
This commit is contained in:
parent
0d42cbb586
commit
73bb3c8530
@ -13,23 +13,26 @@ option(ENABLE_POINT_CLOUD_COMPRESSION "" ON)
|
||||
option(ENABLE_MESH_COMPRESSION "" ON)
|
||||
option(ENABLE_STANDARD_EDGEBREAKER "" ON)
|
||||
option(ENABLE_PREDICTIVE_EDGEBREAKER "" ON)
|
||||
option(ENABLE_EXTRA_SPEED "" OFF)
|
||||
option(ENABLE_EXTRA_WARNINGS "" OFF)
|
||||
option(ENABLE_TESTS "Enables tests." OFF)
|
||||
option(ENABLE_WERROR "" OFF)
|
||||
option(ENABLE_WEXTRA "" OFF)
|
||||
option(IGNORE_EMPTY_BUILD_TYPE "" OFF)
|
||||
option(ENABLE_WASM "" OFF)
|
||||
|
||||
if (ENABLE_POINT_CLOUD_COMPRESSION)
|
||||
add_cxx_preproc_definition("DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED")
|
||||
endif ()
|
||||
if (ENABLE_MESH_COMPRESSION)
|
||||
add_cxx_preproc_definition("DRACO_MESH_COMPRESSION_SUPPORTED")
|
||||
endif ()
|
||||
if (ENABLE_STANDARD_EDGEBREAKER)
|
||||
add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
|
||||
endif ()
|
||||
if (ENABLE_PREDICTIVE_EDGEBREAKER)
|
||||
add_cxx_preproc_definition("DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
|
||||
|
||||
if (ENABLE_STANDARD_EDGEBREAKER)
|
||||
add_cxx_preproc_definition("DRACO_STANDARD_EDGEBREAKER_SUPPORTED")
|
||||
endif ()
|
||||
if (ENABLE_PREDICTIVE_EDGEBREAKER)
|
||||
add_cxx_preproc_definition("DRACO_PREDICTIVE_EDGEBREAKER_SUPPORTED")
|
||||
endif ()
|
||||
endif ()
|
||||
|
||||
# Turn on more compiler warnings.
|
||||
@ -187,6 +190,7 @@ set(draco_compression_attributes_pred_schemes_sources
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
|
||||
@ -197,6 +201,7 @@ set(draco_compression_attributes_pred_schemes_sources
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_transform.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform.h")
|
||||
@ -227,6 +232,7 @@ set(draco_compression_mesh_decoder_sources
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_shared.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_decoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_sequential_decoder.cc"
|
||||
"${draco_root}/compression/mesh/mesh_sequential_decoder.h")
|
||||
|
||||
@ -239,6 +245,7 @@ set(draco_compression_mesh_encoder_sources
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_shared.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_encoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_encoder.cc"
|
||||
"${draco_root}/compression/mesh/mesh_encoder.h"
|
||||
"${draco_root}/compression/mesh/mesh_encoder_helpers.h"
|
||||
@ -262,8 +269,11 @@ set(draco_compression_point_cloud_encoder_sources
|
||||
"${draco_root}/compression/point_cloud/point_cloud_sequential_encoder.h")
|
||||
|
||||
set(draco_core_sources
|
||||
"${draco_root}/core/adaptive_rans_coding.cc"
|
||||
"${draco_root}/core/adaptive_rans_coding.h"
|
||||
"${draco_root}/core/adaptive_rans_bit_coding_shared.h"
|
||||
"${draco_root}/core/adaptive_rans_bit_decoder.h"
|
||||
"${draco_root}/core/adaptive_rans_bit_decoder.cc"
|
||||
"${draco_root}/core/adaptive_rans_bit_encoder.h"
|
||||
"${draco_root}/core/adaptive_rans_bit_encoder.cc"
|
||||
"${draco_root}/core/ans.h"
|
||||
"${draco_root}/core/bit_coder.cc"
|
||||
"${draco_root}/core/bit_coder.h"
|
||||
@ -274,8 +284,10 @@ set(draco_core_sources
|
||||
"${draco_root}/core/data_buffer.h"
|
||||
"${draco_root}/core/decoder_buffer.cc"
|
||||
"${draco_root}/core/decoder_buffer.h"
|
||||
"${draco_root}/core/direct_bit_coding.cc"
|
||||
"${draco_root}/core/direct_bit_coding.h"
|
||||
"${draco_root}/core/direct_bit_decoder.h"
|
||||
"${draco_root}/core/direct_bit_decoder.cc"
|
||||
"${draco_root}/core/direct_bit_encoder.h"
|
||||
"${draco_root}/core/direct_bit_encoder.cc"
|
||||
"${draco_root}/core/divide.cc"
|
||||
"${draco_root}/core/divide.h"
|
||||
"${draco_root}/core/draco_index_type.h"
|
||||
@ -284,7 +296,8 @@ set(draco_core_sources
|
||||
"${draco_root}/core/draco_types.h"
|
||||
"${draco_root}/core/encoder_buffer.cc"
|
||||
"${draco_root}/core/encoder_buffer.h"
|
||||
"${draco_root}/core/folded_bit32_coding.h"
|
||||
"${draco_root}/core/folded_integer_bit_decoder.h"
|
||||
"${draco_root}/core/folded_integer_bit_encoder.h"
|
||||
"${draco_root}/core/hash_utils.cc"
|
||||
"${draco_root}/core/hash_utils.h"
|
||||
"${draco_root}/core/macros.h"
|
||||
@ -293,15 +306,25 @@ set(draco_core_sources
|
||||
"${draco_root}/core/options.h"
|
||||
"${draco_root}/core/quantization_utils.cc"
|
||||
"${draco_root}/core/quantization_utils.h"
|
||||
"${draco_root}/core/rans_coding.cc"
|
||||
"${draco_root}/core/rans_coding.h"
|
||||
"${draco_root}/core/rans_bit_decoder.h"
|
||||
"${draco_root}/core/rans_bit_decoder.cc"
|
||||
"${draco_root}/core/rans_bit_encoder.h"
|
||||
"${draco_root}/core/rans_bit_encoder.cc"
|
||||
"${draco_root}/core/rans_symbol_coding.h"
|
||||
"${draco_root}/core/rans_symbol_decoder.h"
|
||||
"${draco_root}/core/rans_symbol_encoder.h"
|
||||
"${draco_root}/core/shannon_entropy.h"
|
||||
"${draco_root}/core/shannon_entropy.cc"
|
||||
"${draco_root}/core/symbol_bit_decoder.h"
|
||||
"${draco_root}/core/symbol_bit_decoder.cc"
|
||||
"${draco_root}/core/symbol_bit_encoder.h"
|
||||
"${draco_root}/core/symbol_bit_encoder.cc"
|
||||
"${draco_root}/core/symbol_decoding.cc"
|
||||
"${draco_root}/core/symbol_decoding.h"
|
||||
"${draco_root}/core/symbol_encoding.cc"
|
||||
"${draco_root}/core/symbol_encoding.h"
|
||||
"${draco_root}/core/varint_decoding.h"
|
||||
"${draco_root}/core/varint_encoding.h"
|
||||
"${draco_root}/core/vector_d.h")
|
||||
|
||||
set(draco_io_sources
|
||||
@ -384,6 +407,7 @@ set(draco_test_sources
|
||||
"${draco_root}/core/draco_test_base.h"
|
||||
"${draco_root}/core/draco_test_utils.cc"
|
||||
"${draco_root}/core/draco_test_utils.h"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_test.cc"
|
||||
"${draco_root}/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_test.cc"
|
||||
"${draco_root}/compression/attributes/sequential_integer_attribute_encoding_test.cc"
|
||||
"${draco_root}/compression/mesh/mesh_encoder_test.cc"
|
||||
@ -402,7 +426,6 @@ set(draco_test_sources
|
||||
"${draco_root}/io/point_cloud_io_test.cc"
|
||||
"${draco_root}/mesh/mesh_are_equivalent_test.cc"
|
||||
"${draco_root}/mesh/mesh_cleanup_test.cc"
|
||||
"${draco_root}/mesh/mesh_test.cc"
|
||||
"${draco_root}/mesh/triangle_soup_mesh_builder_test.cc"
|
||||
"${draco_root}/point_cloud/point_cloud_builder_test.cc")
|
||||
|
||||
@ -420,11 +443,19 @@ if (EMSCRIPTEN)
|
||||
add_compiler_flag_if_supported("-s ALLOW_MEMORY_GROWTH=1")
|
||||
add_compiler_flag_if_supported("--memory-init-file 0")
|
||||
add_compiler_flag_if_supported("-fno-omit-frame-pointer")
|
||||
add_compiler_flag_if_supported(-s MODULARIZE=1)
|
||||
add_compiler_flag_if_supported(-s EXPORT_NAME="'DracoModule'")
|
||||
add_compiler_flag_if_supported(--llvm-lto 1)
|
||||
add_compiler_flag_if_supported(-s NO_FILESYSTEM=1)
|
||||
add_compiler_flag_if_supported(-s ELIMINATE_DUPLICATE_FUNCTIONS=1)
|
||||
add_compiler_flag_if_supported("-s MODULARIZE=1")
|
||||
add_compiler_flag_if_supported("-s EXPORT_NAME=\"'DracoModule'\"")
|
||||
if (ENABLE_EXTRA_SPEED)
|
||||
add_compiler_flag_if_supported("--llvm-lto 1")
|
||||
endif ()
|
||||
add_compiler_flag_if_supported("-s NO_FILESYSTEM=1")
|
||||
add_compiler_flag_if_supported("-s ELIMINATE_DUPLICATE_FUNCTIONS=1")
|
||||
add_compiler_flag_if_supported("-s EXPORTED_RUNTIME_METHODS=[]")
|
||||
add_compiler_flag_if_supported("-s PRECISE_F32=1")
|
||||
if (ENABLE_WASM)
|
||||
add_compiler_flag_if_supported("-s WASM=1")
|
||||
add_compiler_flag_if_supported("-s BINARYEN_IMPRECISE=1")
|
||||
endif ()
|
||||
|
||||
if (CMAKE_BUILD_TYPE STREQUAL "")
|
||||
# Force -O3 when no build type is specified.
|
||||
@ -470,7 +501,12 @@ if (EMSCRIPTEN)
|
||||
# Make $draco_js_sources source files depend on glue.cpp.
|
||||
set_property(SOURCE ${draco_js_sources} APPEND PROPERTY OBJECT_DEPENDS
|
||||
${draco_build_dir}/glue.cpp)
|
||||
em_link_post_js(draco_decoder "${draco_build_dir}/glue.js")
|
||||
em_link_pre_js(draco_decoder
|
||||
"${draco_root}/javascript/emscripten/prepareCallbacks.js"
|
||||
"${draco_root}/javascript/emscripten/version.js")
|
||||
em_link_post_js(draco_decoder
|
||||
"${draco_build_dir}/glue.js"
|
||||
"${draco_root}/javascript/emscripten/finalize.js")
|
||||
else ()
|
||||
# Standard Draco libs, encoder and decoder.
|
||||
# Object collections that mirror the Draco directory structure.
|
||||
|
102
Makefile.emcc
102
Makefile.emcc
@ -59,8 +59,15 @@ ALL_C_OPTS := -std=c++11
|
||||
|
||||
# Options for speed.
|
||||
ALL_C_OPTS += -O3
|
||||
ALL_C_OPTS += --llvm-lto 1 -s NO_FILESYSTEM=1 -s ELIMINATE_DUPLICATE_FUNCTIONS=1
|
||||
ALL_C_OPTS += -s NO_FILESYSTEM=1 -s ELIMINATE_DUPLICATE_FUNCTIONS=1
|
||||
ALL_C_OPTS += -s EXPORTED_RUNTIME_METHODS=[]
|
||||
ALL_C_OPTS += -s PRECISE_F32=1
|
||||
|
||||
# Option to get about a 10% speed increase at the cost of about 10% in size.
|
||||
# Use "make target DRACO_PERFORMANCE_TYPE=extra_speed"
|
||||
ifeq ($(DRACO_PERFORMANCE_TYPE), extra_speed)
|
||||
ALL_C_OPTS += --llvm-lto 1
|
||||
endif
|
||||
|
||||
# Options for debug
|
||||
#ALL_C_OPTS += -g -s DEMANGLE_SUPPORT=1
|
||||
@ -79,6 +86,12 @@ ALL_C_OPTS += --memory-init-file 0
|
||||
# Options to separate asm.js and mem file.
|
||||
#ALL_C_OPTS += --separate-asm --memory-init-file 1
|
||||
|
||||
# Options to output WebAssembly code.
|
||||
# Use "make target DRACO_BUILD_TYPE=wasm"
|
||||
ifeq ($(DRACO_BUILD_TYPE), wasm)
|
||||
ALL_C_OPTS += -s WASM=1 -s BINARYEN_IMPRECISE=1
|
||||
endif
|
||||
|
||||
CFLAGS := $(ALL_C_OPTS)
|
||||
CXXFLAGS := $(ALL_C_OPTS)
|
||||
CFLAGS += -Wno-sign-compare -fno-omit-frame-pointer
|
||||
@ -100,20 +113,32 @@ ENCODER_BUFFER_OBJS := core/encoder_buffer.o
|
||||
DECODER_BUFFER_A := libdecoder_buffer.a
|
||||
DECODER_BUFFER_OBJS := core/bit_coder.o core/decoder_buffer.o
|
||||
|
||||
RANS_CODING_A := librans_coding.a
|
||||
RANS_CODING_OBJS := core/divide.o core/rans_coding.o
|
||||
RANS_BIT_DECODER_A := librans_bit_decoder.a
|
||||
RANS_BIT_DECODER_OBJS := core/divide.o core/rans_bit_decoder.o
|
||||
|
||||
ADAPTIVE_RANS_CODING_A := libadaptive_rans_coding.a
|
||||
ADAPTIVE_RANS_CODING_OBJS := core/adaptive_rans_coding.o
|
||||
RANS_BIT_ENCODER_A := librans_bit_encoder.a
|
||||
RANS_BIT_ENCODER_OBJS := core/divide.o core/rans_bit_encoder.o
|
||||
|
||||
ADAPTIVE_RANS_BIT_DECODER_A := libadaptive_rans_bit_decoder.a
|
||||
ADAPTIVE_RANS_BIT_DECODER_OBJS := core/adaptive_rans_bit_decoder.o
|
||||
|
||||
ADAPTIVE_RANS_BIT_ENCODER_A := libadaptive_rans_bit_encoder.a
|
||||
ADAPTIVE_RANS_BIT_ENCODER_OBJS := core/adaptive_rans_bit_encoder.o
|
||||
|
||||
CORNER_TABLE_A := libcorner_table.a
|
||||
CORNER_TABLE_OBJS := mesh/corner_table.o mesh/corner_table.o
|
||||
CORNER_TABLE_OBJS := mesh/corner_table.o
|
||||
|
||||
SHANNON_ENTROPY_A := libshannon_entropy.a
|
||||
SHANNON_ENTROPY_OBJS := core/shannon_entropy.o
|
||||
|
||||
SYMBOL_CODING_A := libsymbol_coding.a
|
||||
SYMBOL_CODING_OBJS := core/symbol_decoding.o core/symbol_encoding.o
|
||||
|
||||
DIRECT_BIT_CODING_A := libdirect_bit_coding.a
|
||||
DIRECT_BIT_CODING_OBJS := core/direct_bit_coding.o
|
||||
DIRECT_BIT_DECODER_A := libdirect_bit_decoder.a
|
||||
DIRECT_BIT_DECODER_OBJS := core/direct_bit_decoder.o
|
||||
|
||||
DIRECT_BIT_ENCODER_A := libdirect_bit_encoder.a
|
||||
DIRECT_BIT_ENCODER_OBJS := core/direct_bit_encoder.o
|
||||
|
||||
DRACO_TYPES_A := libdraco_types.a
|
||||
DRACO_TYPES_OBJS := core/draco_types.o
|
||||
@ -256,9 +281,12 @@ INTEGER_POINTS_KD_TREE_ENCODER_OBJS := \
|
||||
compression/point_cloud/algorithms/integer_points_kd_tree_encoder.o
|
||||
|
||||
CORNER_TABLE_OBJSA := $(addprefix $(OBJDIR)/,$(CORNER_TABLE_OBJS:.o=_a.o))
|
||||
SHANNON_ENTROPY_OBJSA := $(addprefix $(OBJDIR)/,$(SHANNON_ENTROPY_OBJS:.o=_a.o))
|
||||
SYMBOL_CODING_OBJSA := $(addprefix $(OBJDIR)/,$(SYMBOL_CODING_OBJS:.o=_a.o))
|
||||
DIRECT_BIT_CODING_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(DIRECT_BIT_CODING_OBJS:.o=_a.o))
|
||||
DIRECT_BIT_DECODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(DIRECT_BIT_DECODER_OBJS:.o=_a.o))
|
||||
DIRECT_BIT_ENCODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(DIRECT_BIT_ENCODER_OBJS:.o=_a.o))
|
||||
DECODER_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(DECODER_BUFFER_OBJS:.o=_a.o))
|
||||
DATA_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(DATA_BUFFER_OBJS:.o=_a.o))
|
||||
DRACO_TYPES_OBJSA := $(addprefix $(OBJDIR)/,$(DRACO_TYPES_OBJS:.o=_a.o))
|
||||
@ -339,9 +367,14 @@ QUANTIZATION_UTILS_OBJSA := \
|
||||
CYCLE_TIMER_OBJSA := $(addprefix $(OBJDIR)/,$(CYCLE_TIMER_OBJS:.o=_a.o))
|
||||
|
||||
ENCODER_BUFFER_OBJSA := $(addprefix $(OBJDIR)/,$(ENCODER_BUFFER_OBJS:.o=_a.o))
|
||||
RANS_CODING_OBJSA := $(addprefix $(OBJDIR)/,$(RANS_CODING_OBJS:.o=_a.o))
|
||||
ADAPTIVE_RANS_CODING_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_CODING_OBJS:.o=_a.o))
|
||||
RANS_BIT_DECODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(RANS_BIT_DECODER_OBJS:.o=_a.o))
|
||||
RANS_BIT_ENCODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(RANS_BIT_ENCODER_OBJS:.o=_a.o))
|
||||
ADAPTIVE_RANS_BIT_DECODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_BIT_DECODER_OBJS:.o=_a.o))
|
||||
ADAPTIVE_RANS_BIT_ENCODER_OBJSA := \
|
||||
$(addprefix $(OBJDIR)/,$(ADAPTIVE_RANS_BIT_ENCODER_OBJS:.o=_a.o))
|
||||
OBJ_DECODER_OBJSA := $(addprefix $(OBJDIR)/,$(OBJ_DECODER_OBJS:.o=_a.o))
|
||||
MESH_IO_OBJSA := $(addprefix $(OBJDIR)/,$(MESH_IO_OBJS:.o=_a.o))
|
||||
PLY_ENCODER_OBJSA := $(addprefix $(OBJDIR)/,$(PLY_ENCODER_OBJS:.o=_a.o))
|
||||
@ -358,12 +391,16 @@ INTEGER_POINTS_KD_TREE_ENCODER_OBJSA := \
|
||||
|
||||
# Core objs
|
||||
DRACO_CORE_OBJSA := $(DRACO_TYPES_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(DIRECT_BIT_CODING_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(RANS_CODING_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_CODING_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(DIRECT_BIT_DECODER_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(DIRECT_BIT_ENCODER_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(RANS_BIT_DECODER_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(RANS_BIT_ENCODER_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_BIT_DECODER_OBJSA)
|
||||
DRACO_CORE_OBJSA += $(ADAPTIVE_RANS_BIT_ENCODER_OBJSA)
|
||||
|
||||
# Shared objs needed for both encoder and decoder
|
||||
DRACO_SHARED_OBJSA := $(CORNER_TABLE_OBJSA) $(SYMBOL_CODING_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(SHANNON_ENTROPY_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(DATA_BUFFER_OBJSA) $(DRACO_CORE_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(GEOMETRY_ATTRIBUTE_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(POINT_ATTRIBUTE_OBJSA)
|
||||
@ -371,7 +408,8 @@ DRACO_SHARED_OBJSA += $(POINT_CLOUD_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(MESH_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(MESH_MISC_OBJSA) $(MESH_ATTRIBUTE_CORNER_TABLE_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(CYCLE_TIMER_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(RANS_CODING_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(RANS_BIT_DECODER_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(RANS_BIT_ENCODER_OBJSA)
|
||||
DRACO_SHARED_OBJSA += $(QUANTIZATION_UTILS_OBJSA)
|
||||
|
||||
# Encoder specific objs
|
||||
@ -464,8 +502,10 @@ LIBS += $(LIBDIR)/libsequential_normal_attribute_encoder.a
|
||||
LIBS += $(LIBDIR)/libcorner_table.a
|
||||
LIBS += $(LIBDIR)/libmesh_attribute_corner_table.a
|
||||
LIBS += $(LIBDIR)/libmesh_misc.a
|
||||
LIBS += $(LIBDIR)/libshannon_entropy.a
|
||||
LIBS += $(LIBDIR)/libsymbol_coding.a
|
||||
LIBS += $(LIBDIR)/librans_coding.a
|
||||
LIBS += $(LIBDIR)/librans_bit_decoder.a
|
||||
LIBS += $(LIBDIR)/librans_bit_encoder.a
|
||||
LIBS += $(LIBDIR)/libdata_buffer.a
|
||||
LIBS += $(LIBDIR)/libdraco_types.a
|
||||
LIBS += $(LIBDIR)/libdecoder_buffer.a
|
||||
@ -476,8 +516,10 @@ POINTS_LIBS := $(LIBDIR)/libfloat_points_tree_decoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libfloat_points_tree_encoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libinteger_points_kd_tree_decoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libinteger_points_kd_tree_encoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libdirect_bit_coding.a
|
||||
POINTS_LIBS += $(LIBDIR)/libadaptive_rans_coding.a
|
||||
POINTS_LIBS += $(LIBDIR)/libdirect_bit_decoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libdirect_bit_encoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libadaptive_rans_bit_decoder.a
|
||||
POINTS_LIBS += $(LIBDIR)/libadaptive_rans_bit_encoder.a
|
||||
|
||||
DEPS := $(DRACO_OBJSA:_a.o=.d)
|
||||
CLEAN := $(DEPS) $(OBJSA) $(LIBS) $(POINTS_LIBS)
|
||||
@ -493,15 +535,21 @@ build_glue:
|
||||
python $(BINDER) $(IDL) glue
|
||||
|
||||
draco_decoder: $(OBJDIR)/javascript/emscripten/draco_glue_wrapper.o $(OBJDIR)/javascript/emscripten/webidl_wrapper.o $(DRACO_CORE_OBJSA) $(DRACO_SHARED_OBJSA) $(DRACO_DECODER_OBJSA)
|
||||
$(CXX) $(ALL_C_OPTS) $^ --post-js glue.js -o $@.js
|
||||
$(CXX) $(ALL_C_OPTS) $^ --pre-js javascript/emscripten/prepareCallbacks.js --pre-js javascript/emscripten/version.js --post-js glue.js --post-js javascript/emscripten/finalize.js -o $@.js
|
||||
|
||||
$(LIBDIR)/libcorner_table.a: $(CORNER_TABLE_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libshannon_entropy.a: $(SHANNON_ENTROPY_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libsymbol_coding.a: $(SYMBOL_CODING_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libdirect_bit_coding.a: $(DIRECT_BIT_CODING_OBJSA)
|
||||
$(LIBDIR)/libdirect_bit_decoder.a: $(DIRECT_BIT_DECODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libdirect_bit_encoder.a: $(DIRECT_BIT_ENCODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libdecoder_buffer.a: $(DECODER_BUFFER_OBJSA)
|
||||
@ -608,10 +656,16 @@ $(LIBDIR)/libquantization_utils.a: $(QUANTIZATION_UTILS_OBJSA)
|
||||
$(LIBDIR)/libcycle_timer.a: $(CYCLE_TIMER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/librans_coding.a: $(RANS_CODING_OBJSA)
|
||||
$(LIBDIR)/librans_bit_decoder.a: $(RANS_BIT_DECODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libadaptive_rans_coding.a: $(ADAPTIVE_RANS_CODING_OBJSA)
|
||||
$(LIBDIR)/librans_bit_encoder.a: $(RANS_BIT_ENCODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libadaptive_rans_bit_decoder.a: $(ADAPTIVE_RANS_BIT_DECODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libadaptive_rans_bit_encoder.a: $(ADAPTIVE_RANS_BIT_ENCODER_OBJSA)
|
||||
$(AR) rcs $@ $^
|
||||
|
||||
$(LIBDIR)/libobj_decoder.a: $(OBJ_DECODER_OBJSA)
|
||||
|
66
README.md
66
README.md
@ -3,6 +3,21 @@
|
||||
<img src="docs/DracoLogo.jpeg" />
|
||||
</p>
|
||||
|
||||
News
|
||||
=======
|
||||
### Version 0.10.0 released
|
||||
This release brings improved mesh compression and faster decoding in browser:
|
||||
* On average 10% better compression of triangular meshes (up to 20% for purely
|
||||
spatial meshes without any extra attributes).
|
||||
* Up to 2X faster decoding in browsers with our newly provided WebAssembly
|
||||
decoder.
|
||||
* Supported in most modern browsers including Chrome, Firefox, and Edge.
|
||||
* Decoder size is about 50% smaller compared to the javascript version.
|
||||
* New version is backward compatible with 0.9.x encoders.
|
||||
* Note that 0.10.0 is not forward compatible. I.e., files encoded with 0.10.0
|
||||
cannot be decoded with 0.9.x decoders.
|
||||
|
||||
|
||||
Description
|
||||
===========
|
||||
|
||||
@ -174,6 +189,57 @@ $ export EMSCRIPTEN=/path/to/emscripten/tools/parent
|
||||
# Emscripten.cmake can be found within your Emscripten installation directory,
|
||||
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
|
||||
$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake
|
||||
|
||||
# Build the Javascript decoder.
|
||||
$ make
|
||||
~~~~~
|
||||
|
||||
WebAssembly Decoder
|
||||
-------------------
|
||||
|
||||
The WebAssembly decoder can be built using the existing cmake build file by
|
||||
passing the path to Emscripten's cmake toolchain file at cmake generation time
|
||||
in the CMAKE_TOOLCHAIN_FILE variable and enabling the WASM build option.
|
||||
In addition, the EMSCRIPTEN environment variable must be set to the local path
|
||||
of the parent directory of the Emscripten tools directory.
|
||||
|
||||
Make sure to have the correct version of Emscripten installed for WebAssembly
|
||||
builds. See https://developer.mozilla.org/en-US/docs/WebAssembly.
|
||||
|
||||
~~~~~ bash
|
||||
# Make the path to emscripten available to cmake.
|
||||
$ export EMSCRIPTEN=/path/to/emscripten/tools/parent
|
||||
|
||||
# Emscripten.cmake can be found within your Emscripten installation directory,
|
||||
# it should be the subdir: cmake/Modules/Platform/Emscripten.cmake
|
||||
$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON
|
||||
|
||||
# Build the WebAssembly decoder.
|
||||
$ make
|
||||
|
||||
# Run the Javascript wrapper through Closure.
|
||||
$ java -jar closure.jar --compilation_level SIMPLE --js draco_decoder.js --js_output_file draco_wasm_wrapper.js
|
||||
|
||||
~~~~~
|
||||
|
||||
WebAssembly Mesh Only Decoder
|
||||
-----------------------------
|
||||
|
||||
~~~~~ bash
|
||||
|
||||
# cmake command line for mesh only WebAssembly decoder.
|
||||
$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON -DENABLE_POINT_CLOUD_COMPRESSION=OFF
|
||||
|
||||
~~~~~
|
||||
|
||||
WebAssembly Point Cloud Only Decoder
|
||||
-----------------------------
|
||||
|
||||
~~~~~ bash
|
||||
|
||||
# cmake command line for point cloud only WebAssembly decoder.
|
||||
$ cmake path/to/draco -DCMAKE_TOOLCHAIN_FILE=/path/to/Emscripten.cmake -DENABLE_WASM=ON -DENABLE_MESH_COMPRESSION=OFF
|
||||
|
||||
~~~~~
|
||||
|
||||
|
||||
|
@ -70,6 +70,7 @@ class MeshTraversalSequencer : public PointsSequencer {
|
||||
|
||||
protected:
|
||||
bool GenerateSequenceInternal() override {
|
||||
traverser_.OnTraversalStart();
|
||||
if (corner_order_) {
|
||||
for (uint32_t i = 0; i < corner_order_->size(); ++i) {
|
||||
ProcessCorner(corner_order_->at(i));
|
||||
@ -80,6 +81,7 @@ class MeshTraversalSequencer : public PointsSequencer {
|
||||
ProcessCorner(CornerIndex(3 * i));
|
||||
}
|
||||
}
|
||||
traverser_.OnTraversalEnd();
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -0,0 +1,432 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
|
||||
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <cmath>
|
||||
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
|
||||
#include "core/rans_bit_decoder.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
#include "core/varint_decoding.h"
|
||||
#include "core/varint_encoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Compared to standard multi parallelogram, constrained multi parallelogram can
|
||||
// explicitly select which of the available parallelograms are going to be used
|
||||
// for the prediction by marking crease edges between two triangles. This
|
||||
// requires storing extra data, but it allows the predictor to avoid using
|
||||
// parallelograms that would lead to poor predictions. For improved efficiency,
|
||||
// our current implementation limits the maximum number of used parallelograms
|
||||
// to four, which covers >95% of the cases (on average, there are only two
|
||||
// parallelograms available for any given vertex).
|
||||
// TODO(ostava): Split this into two classes (encoder x decoder).
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
class MeshPredictionSchemeConstrainedMultiParallelogram
|
||||
: public MeshPredictionScheme<DataTypeT, TransformT, MeshDataT> {
|
||||
public:
|
||||
using CorrType = typename PredictionScheme<DataTypeT, TransformT>::CorrType;
|
||||
using CornerTable = typename MeshDataT::CornerTable;
|
||||
|
||||
explicit MeshPredictionSchemeConstrainedMultiParallelogram(
|
||||
const PointAttribute *attribute)
|
||||
: MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(attribute),
|
||||
selected_mode_(OPTIMAL_MULTI_PARALLELOGRAM) {}
|
||||
MeshPredictionSchemeConstrainedMultiParallelogram(
|
||||
const PointAttribute *attribute, const TransformT &transform,
|
||||
const MeshDataT &mesh_data)
|
||||
: MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(
|
||||
attribute, transform, mesh_data),
|
||||
selected_mode_(OPTIMAL_MULTI_PARALLELOGRAM) {}
|
||||
|
||||
bool Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
|
||||
int num_components,
|
||||
const PointIndex *entry_to_point_id_map) override;
|
||||
bool Decode(const CorrType *in_corr, DataTypeT *out_data, int size,
|
||||
int num_components,
|
||||
const PointIndex *entry_to_point_id_map) override;
|
||||
|
||||
bool EncodePredictionData(EncoderBuffer *buffer) override;
|
||||
bool DecodePredictionData(DecoderBuffer *buffer) override;
|
||||
|
||||
PredictionSchemeMethod GetPredictionMethod() const override {
|
||||
return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
|
||||
}
|
||||
|
||||
bool IsInitialized() const override {
|
||||
return this->mesh_data().IsInitialized();
|
||||
}
|
||||
|
||||
private:
|
||||
enum Mode {
|
||||
// Selects the optimal multi-parallelogram from up to 4 available
|
||||
// parallelograms.
|
||||
OPTIMAL_MULTI_PARALLELOGRAM = 0,
|
||||
};
|
||||
|
||||
static constexpr int kMaxNumParallelograms = 4;
|
||||
// Crease edges are used to store whether any given edge should be used for
|
||||
// parallelogram prediction or not. New values are added in the order in which
|
||||
// the edges are processed. For better compression, the flags are stored in
|
||||
// in separate contexts based on the number of available parallelograms at a
|
||||
// given vertex.
|
||||
std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
|
||||
Mode selected_mode_;
|
||||
};
|
||||
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
bool MeshPredictionSchemeConstrainedMultiParallelogram<
|
||||
DataTypeT, TransformT,
|
||||
MeshDataT>::Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
|
||||
int num_components,
|
||||
const PointIndex * /* entry_to_point_id_map */) {
|
||||
this->transform().InitializeEncoding(in_data, size, num_components);
|
||||
const CornerTable *const table = this->mesh_data().corner_table();
|
||||
const std::vector<int32_t> *const vertex_to_data_map =
|
||||
this->mesh_data().vertex_to_data_map();
|
||||
|
||||
// Predicted values for all simple parallelograms encountered at any given
|
||||
// vertex.
|
||||
std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
|
||||
for (int i = 0; i < kMaxNumParallelograms; ++i) {
|
||||
pred_vals[i].resize(num_components);
|
||||
}
|
||||
// Used to store predicted value for various multi-parallelogram predictions
|
||||
// (combinations of simple parallelogram predictions).
|
||||
std::vector<DataTypeT> multi_pred_vals(num_components);
|
||||
|
||||
// Struct for holding data about prediction configuration for different sets
|
||||
// of used parallelograms.
|
||||
struct PredictionConfiguration {
|
||||
PredictionConfiguration()
|
||||
: error(std::numeric_limits<int>::max()),
|
||||
configuration(0),
|
||||
num_used_parallelograms(0) {}
|
||||
int error;
|
||||
uint8_t configuration; // Bitfield, 1 use parallelogram, 0 don't use it.
|
||||
int num_used_parallelograms;
|
||||
std::vector<DataTypeT> predicted_value;
|
||||
};
|
||||
|
||||
  // Bit-field used for computing permutations of excluded edges
|
||||
// (parallelograms).
|
||||
bool exluded_parallelograms[kMaxNumParallelograms];
|
||||
|
||||
// We start processing the vertices from the end because this prediction uses
|
||||
// data from previous entries that could be overwritten when an entry is
|
||||
// processed.
|
||||
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
|
||||
const CornerIndex start_corner_id =
|
||||
this->mesh_data().data_to_corner_map()->at(p);
|
||||
|
||||
// Go over all corners attached to the vertex and compute the predicted
|
||||
// value from the parallelograms defined by their opposite faces.
|
||||
CornerIndex corner_id(start_corner_id);
|
||||
int num_parallelograms = 0;
|
||||
bool first_pass = true;
|
||||
while (corner_id >= 0) {
|
||||
if (ComputeParallelogramPrediction(
|
||||
p, corner_id, table, *vertex_to_data_map, in_data, num_components,
|
||||
&(pred_vals[num_parallelograms][0]))) {
|
||||
// Parallelogram prediction applied and stored in
|
||||
// |pred_vals[num_parallelograms]|
|
||||
++num_parallelograms;
|
||||
// Stop processing when we reach the maximum number of allowed
|
||||
// parallelograms.
|
||||
if (num_parallelograms == kMaxNumParallelograms)
|
||||
break;
|
||||
}
|
||||
|
||||
// Proceed to the next corner attached to the vertex. First swing left
|
||||
// and if we reach a boundary, swing right from the start corner.
|
||||
if (first_pass) {
|
||||
corner_id = table->SwingLeft(corner_id);
|
||||
} else {
|
||||
corner_id = table->SwingRight(corner_id);
|
||||
}
|
||||
if (corner_id == start_corner_id) {
|
||||
break;
|
||||
}
|
||||
if (corner_id < 0 && first_pass) {
|
||||
first_pass = false;
|
||||
corner_id = table->SwingRight(start_corner_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Offset to the target (destination) vertex.
|
||||
const int dst_offset = p * num_components;
|
||||
int error = 0;
|
||||
|
||||
// Compute all prediction errors for all possible configurations of
|
||||
// available parallelograms.
|
||||
|
||||
// Variable for holding the best configuration that has been found so far.
|
||||
PredictionConfiguration best_prediction;
|
||||
|
||||
// Compute delta coding error (configuration when no parallelogram is
|
||||
// selected).
|
||||
const int src_offset = (p - 1) * num_components;
|
||||
for (int i = 0; i < num_components; ++i) {
|
||||
error += (std::abs(in_data[dst_offset + i] - in_data[src_offset + i]));
|
||||
}
|
||||
|
||||
best_prediction.error = error;
|
||||
best_prediction.configuration = 0;
|
||||
best_prediction.num_used_parallelograms = 0;
|
||||
best_prediction.predicted_value.assign(
|
||||
in_data + src_offset, in_data + src_offset + num_components);
|
||||
|
||||
// Compute prediction error for different cases of used parallelograms.
|
||||
for (int num_used_parallelograms = 1;
|
||||
num_used_parallelograms <= num_parallelograms;
|
||||
++num_used_parallelograms) {
|
||||
// Mark all parallelograms as excluded.
|
||||
std::fill(exluded_parallelograms,
|
||||
exluded_parallelograms + num_parallelograms, true);
|
||||
// Mark the first |num_used_parallelograms| as not excluded.
|
||||
for (int j = 0; j < num_used_parallelograms; ++j) {
|
||||
exluded_parallelograms[j] = false;
|
||||
}
|
||||
// Permute over the excluded edges and compute error for each
|
||||
// configuration (permutation of excluded parallelograms).
|
||||
do {
|
||||
// Reset the multi-parallelogram predicted values.
|
||||
for (int j = 0; j < num_components; ++j) {
|
||||
multi_pred_vals[j] = 0;
|
||||
}
|
||||
uint8_t configuration = 0;
|
||||
for (int j = 0; j < num_parallelograms; ++j) {
|
||||
if (exluded_parallelograms[j])
|
||||
continue;
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
multi_pred_vals[c] += pred_vals[j][c];
|
||||
}
|
||||
// Set j'th bit of the configuration.
|
||||
configuration |= (1 << j);
|
||||
}
|
||||
error = 0;
|
||||
for (int j = 0; j < num_components; ++j) {
|
||||
multi_pred_vals[j] /= num_used_parallelograms;
|
||||
error += std::abs(multi_pred_vals[j] - in_data[dst_offset + j]);
|
||||
}
|
||||
if (error < best_prediction.error) {
|
||||
best_prediction.error = error;
|
||||
best_prediction.configuration = configuration;
|
||||
best_prediction.num_used_parallelograms = num_used_parallelograms;
|
||||
best_prediction.predicted_value.assign(multi_pred_vals.begin(),
|
||||
multi_pred_vals.end());
|
||||
}
|
||||
} while (std::next_permutation(
|
||||
exluded_parallelograms, exluded_parallelograms + num_parallelograms));
|
||||
}
|
||||
|
||||
for (int i = 0; i < num_parallelograms; ++i) {
|
||||
if ((best_prediction.configuration & (1 << i)) == 0) {
|
||||
// Parallelogram not used, mark the edge as crease.
|
||||
is_crease_edge_[num_parallelograms - 1].push_back(true);
|
||||
} else {
|
||||
// Parallelogram used. Add it to the predicted value and mark the
|
||||
// edge as not a crease.
|
||||
is_crease_edge_[num_parallelograms - 1].push_back(false);
|
||||
}
|
||||
}
|
||||
this->transform().ComputeCorrection(in_data + dst_offset,
|
||||
best_prediction.predicted_value.data(),
|
||||
out_corr, dst_offset);
|
||||
}
|
||||
// First element is always fixed because it cannot be predicted.
|
||||
for (int i = 0; i < num_components; ++i) {
|
||||
pred_vals[0][i] = static_cast<DataTypeT>(0);
|
||||
}
|
||||
this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr,
|
||||
0);
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
bool MeshPredictionSchemeConstrainedMultiParallelogram<
|
||||
DataTypeT, TransformT,
|
||||
MeshDataT>::Decode(const CorrType *in_corr, DataTypeT *out_data,
|
||||
int /* size */, int num_components,
|
||||
const PointIndex * /* entry_to_point_id_map */) {
|
||||
this->transform().InitializeDecoding(num_components);
|
||||
|
||||
// Predicted values for all simple parallelograms encountered at any given
|
||||
// vertex.
|
||||
std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
|
||||
for (int i = 0; i < kMaxNumParallelograms; ++i) {
|
||||
pred_vals[i].resize(num_components, 0);
|
||||
}
|
||||
this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr, out_data,
|
||||
0);
|
||||
|
||||
const CornerTable *const table = this->mesh_data().corner_table();
|
||||
const std::vector<int32_t> *const vertex_to_data_map =
|
||||
this->mesh_data().vertex_to_data_map();
|
||||
|
||||
// Current position in the |is_crease_edge_| array for each context.
|
||||
std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);
|
||||
|
||||
// Used to store predicted value for multi-parallelogram prediction.
|
||||
std::vector<DataTypeT> multi_pred_vals(num_components);
|
||||
|
||||
const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
|
||||
for (int p = 1; p < corner_map_size; ++p) {
|
||||
const CornerIndex start_corner_id =
|
||||
this->mesh_data().data_to_corner_map()->at(p);
|
||||
|
||||
CornerIndex corner_id(start_corner_id);
|
||||
int num_parallelograms = 0;
|
||||
bool first_pass = true;
|
||||
while (corner_id >= 0) {
|
||||
if (ComputeParallelogramPrediction(
|
||||
p, corner_id, table, *vertex_to_data_map, out_data,
|
||||
num_components, &(pred_vals[num_parallelograms][0]))) {
|
||||
// Parallelogram prediction applied and stored in
|
||||
// |pred_vals[num_parallelograms]|
|
||||
++num_parallelograms;
|
||||
// Stop processing when we reach the maximum number of allowed
|
||||
// parallelograms.
|
||||
if (num_parallelograms == kMaxNumParallelograms)
|
||||
break;
|
||||
}
|
||||
|
||||
// Proceed to the next corner attached to the vertex. First swing left
|
||||
// and if we reach a boundary, swing right from the start corner.
|
||||
if (first_pass) {
|
||||
corner_id = table->SwingLeft(corner_id);
|
||||
} else {
|
||||
corner_id = table->SwingRight(corner_id);
|
||||
}
|
||||
if (corner_id == start_corner_id) {
|
||||
break;
|
||||
}
|
||||
if (corner_id < 0 && first_pass) {
|
||||
first_pass = false;
|
||||
corner_id = table->SwingRight(start_corner_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Check which of the available parallelograms are actually used and compute
|
||||
// the final predicted value.
|
||||
int num_used_parallelograms = 0;
|
||||
if (num_parallelograms > 0) {
|
||||
for (int i = 0; i < num_components; ++i) {
|
||||
multi_pred_vals[i] = 0;
|
||||
}
|
||||
// Check which parallelograms are actually used.
|
||||
for (int i = 0; i < num_parallelograms; ++i) {
|
||||
const int context = num_parallelograms - 1;
|
||||
const bool is_crease =
|
||||
is_crease_edge_[context][is_crease_edge_pos[context]++];
|
||||
if (!is_crease) {
|
||||
++num_used_parallelograms;
|
||||
for (int j = 0; j < num_components; ++j) {
|
||||
multi_pred_vals[j] += pred_vals[i][j];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
const int dst_offset = p * num_components;
|
||||
if (num_used_parallelograms == 0) {
|
||||
// No parallelogram was valid.
|
||||
// We use the last decoded point as a reference.
|
||||
const int src_offset = (p - 1) * num_components;
|
||||
this->transform().ComputeOriginalValue(out_data + src_offset, in_corr,
|
||||
out_data + dst_offset, dst_offset);
|
||||
} else {
|
||||
// Compute the correction from the predicted value.
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
multi_pred_vals[c] /= num_used_parallelograms;
|
||||
}
|
||||
this->transform().ComputeOriginalValue(multi_pred_vals.data(), in_corr,
|
||||
out_data + dst_offset, dst_offset);
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
bool MeshPredictionSchemeConstrainedMultiParallelogram<
|
||||
DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
|
||||
*buffer) {
|
||||
// Encode prediction mode.
|
||||
buffer->Encode(static_cast<uint8_t>(selected_mode_));
|
||||
|
||||
// Encode selected edges using separate rans bit coder for each context.
|
||||
for (int i = 0; i < kMaxNumParallelograms; ++i) {
|
||||
// |i| is the context based on the number of available parallelograms, which
|
||||
// is always equal to |i + 1|.
|
||||
const int num_used_parallelograms = i + 1;
|
||||
EncodeVarint<uint32_t>(is_crease_edge_[i].size(), buffer);
|
||||
if (is_crease_edge_[i].size()) {
|
||||
RAnsBitEncoder encoder;
|
||||
encoder.StartEncoding();
|
||||
// Encode the crease edge flags in the reverse vertex order that is needed
|
||||
// be the decoder. Note that for the currently supported mode, each vertex
|
||||
// has exactly |num_used_parallelograms| edges that need to be encoded.
|
||||
for (int j = is_crease_edge_[i].size() - num_used_parallelograms; j >= 0;
|
||||
j -= num_used_parallelograms) {
|
||||
// Go over all edges of the current vertex.
|
||||
for (int k = 0; k < num_used_parallelograms; ++k) {
|
||||
encoder.EncodeBit(is_crease_edge_[i][j + k]);
|
||||
}
|
||||
}
|
||||
encoder.EndEncoding(buffer);
|
||||
}
|
||||
}
|
||||
return MeshPredictionScheme<DataTypeT, TransformT,
|
||||
MeshDataT>::EncodePredictionData(buffer);
|
||||
}
|
||||
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
bool MeshPredictionSchemeConstrainedMultiParallelogram<
|
||||
DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
|
||||
*buffer) {
|
||||
// Decode prediction mode.
|
||||
uint8_t mode;
|
||||
if (!buffer->Decode(&mode)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (mode != OPTIMAL_MULTI_PARALLELOGRAM) {
|
||||
// Unsupported mode.
|
||||
return false;
|
||||
}
|
||||
|
||||
// Encode selected edges using separate rans bit coder for each context.
|
||||
for (int i = 0; i < kMaxNumParallelograms; ++i) {
|
||||
uint32_t num_flags;
|
||||
DecodeVarint<uint32_t>(&num_flags, buffer);
|
||||
if (num_flags > 0) {
|
||||
is_crease_edge_[i].resize(num_flags);
|
||||
RAnsBitDecoder decoder;
|
||||
decoder.StartDecoding(buffer);
|
||||
for (int j = 0; j < num_flags; ++j) {
|
||||
is_crease_edge_[i][j] = decoder.DecodeNextBit();
|
||||
}
|
||||
decoder.EndDecoding();
|
||||
}
|
||||
}
|
||||
return MeshPredictionScheme<DataTypeT, TransformT,
|
||||
MeshDataT>::DecodePredictionData(buffer);
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_H_
|
@ -70,6 +70,8 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
this->mesh_data().vertex_to_data_map();
|
||||
|
||||
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
|
||||
std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
|
||||
new DataTypeT[num_components]());
|
||||
|
||||
// We start processing from the end because this prediction uses data from
|
||||
// previous entries that could be overwritten when an entry is processed.
|
||||
@ -85,30 +87,16 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
pred_vals[i] = static_cast<DataTypeT>(0);
|
||||
}
|
||||
while (corner_id >= 0) {
|
||||
// TODO(ostava): Move code shared between multiple predictors into a new
|
||||
// file.
|
||||
int vert_opp = p, vert_next = p, vert_prev = p;
|
||||
const CornerIndex opp_corner = table->Opposite(corner_id);
|
||||
if (opp_corner >= 0) {
|
||||
GetParallelogramEntries(opp_corner, table, *vertex_to_data_map,
|
||||
&vert_opp, &vert_next, &vert_prev);
|
||||
}
|
||||
if (vert_opp < p && vert_next < p && vert_prev < p) {
|
||||
// Apply the parallelogram prediction.
|
||||
const int v_opp_off = vert_opp * num_components;
|
||||
const int v_next_off = vert_next * num_components;
|
||||
const int v_prev_off = vert_prev * num_components;
|
||||
if (ComputeParallelogramPrediction(
|
||||
p, corner_id, table, *vertex_to_data_map, in_data, num_components,
|
||||
parallelogram_pred_vals.get())) {
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
pred_vals[c] += (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
|
||||
in_data[v_opp_off + c];
|
||||
pred_vals[c] += parallelogram_pred_vals[c];
|
||||
}
|
||||
++num_parallelograms;
|
||||
}
|
||||
|
||||
// Proceed to the next corner attached to the vertex.
|
||||
// TODO(ostava): This will not go around the whole neighborhood on
|
||||
// vertices on a mesh boundary. We need to SwingLeft from the start vertex
|
||||
// again to get the full coverage.
|
||||
corner_id = table->SwingRight(corner_id);
|
||||
if (corner_id == start_corner_id) {
|
||||
corner_id = kInvalidCornerIndex;
|
||||
@ -145,6 +133,8 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
this->transform().InitializeDecoding(num_components);
|
||||
|
||||
std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
|
||||
std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
|
||||
new DataTypeT[num_components]());
|
||||
|
||||
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data, 0);
|
||||
|
||||
@ -163,21 +153,11 @@ bool MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
pred_vals[i] = static_cast<DataTypeT>(0);
|
||||
}
|
||||
while (corner_id >= 0) {
|
||||
int vert_opp = p, vert_next = p, vert_prev = p;
|
||||
const CornerIndex opp_corner = table->Opposite(corner_id);
|
||||
if (opp_corner >= 0) {
|
||||
GetParallelogramEntries(opp_corner, table, *vertex_to_data_map,
|
||||
&vert_opp, &vert_next, &vert_prev);
|
||||
}
|
||||
if (vert_opp < p && vert_next < p && vert_prev < p) {
|
||||
// Apply the parallelogram prediction.
|
||||
const int v_opp_off = vert_opp * num_components;
|
||||
const int v_next_off = vert_next * num_components;
|
||||
const int v_prev_off = vert_prev * num_components;
|
||||
if (ComputeParallelogramPrediction(
|
||||
p, corner_id, table, *vertex_to_data_map, out_data,
|
||||
num_components, parallelogram_pred_vals.get())) {
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
pred_vals[c] +=
|
||||
(out_data[v_next_off + c] + out_data[v_prev_off + c]) -
|
||||
out_data[v_opp_off + c];
|
||||
pred_vals[c] += parallelogram_pred_vals[c];
|
||||
}
|
||||
++num_parallelograms;
|
||||
}
|
||||
|
@ -77,32 +77,18 @@ bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
this->mesh_data().vertex_to_data_map();
|
||||
for (int p = this->mesh_data().data_to_corner_map()->size() - 1; p > 0; --p) {
|
||||
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
|
||||
// Initialize the vertex ids to "p" which ensures that if the opposite
|
||||
// corner does not exist we will not use the vertices to predict the
|
||||
// encoded value.
|
||||
int vert_opp = p, vert_next = p, vert_prev = p;
|
||||
const CornerIndex opp_corner = table->Opposite(corner_id);
|
||||
if (opp_corner >= 0) {
|
||||
// Get vertices on the opposite face.
|
||||
GetParallelogramEntries(opp_corner, table, *vertex_to_data_map, &vert_opp,
|
||||
&vert_next, &vert_prev);
|
||||
}
|
||||
const int dst_offset = p * num_components;
|
||||
if (vert_opp >= p || vert_next >= p || vert_prev >= p) {
|
||||
// Some of the vertices are not valid (not encoded yet).
|
||||
// We use the last encoded point as a reference.
|
||||
if (!ComputeParallelogramPrediction(p, corner_id, table,
|
||||
*vertex_to_data_map, in_data,
|
||||
num_components, pred_vals.get())) {
|
||||
// Parallelogram could not be computed, Possible because some of the
|
||||
// vertices are not valid (not encoded yet).
|
||||
// We use the last encoded point as a reference (delta coding).
|
||||
const int src_offset = (p - 1) * num_components;
|
||||
this->transform().ComputeCorrection(
|
||||
in_data + dst_offset, in_data + src_offset, out_corr, dst_offset);
|
||||
} else {
|
||||
// Apply the parallelogram prediction.
|
||||
const int v_opp_off = vert_opp * num_components;
|
||||
const int v_next_off = vert_next * num_components;
|
||||
const int v_prev_off = vert_prev * num_components;
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
pred_vals[c] = (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
|
||||
in_data[v_opp_off + c];
|
||||
}
|
||||
this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
|
||||
out_corr, dst_offset);
|
||||
}
|
||||
@ -133,29 +119,18 @@ bool MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>::
|
||||
const int corner_map_size = this->mesh_data().data_to_corner_map()->size();
|
||||
for (int p = 1; p < corner_map_size; ++p) {
|
||||
const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
|
||||
int vert_opp = p, vert_next = p, vert_prev = p;
|
||||
const CornerIndex opp_corner = table->Opposite(corner_id);
|
||||
if (opp_corner >= 0) {
|
||||
// Get vertices on the opposite face.
|
||||
GetParallelogramEntries(opp_corner, table, *vertex_to_data_map, &vert_opp,
|
||||
&vert_next, &vert_prev);
|
||||
}
|
||||
const int dst_offset = p * num_components;
|
||||
if (vert_opp >= p || vert_next >= p || vert_prev >= p) {
|
||||
// Some of the vertices are not valid (not decoded yet).
|
||||
// We use the last decoded point as a reference.
|
||||
if (!ComputeParallelogramPrediction(p, corner_id, table,
|
||||
*vertex_to_data_map, out_data,
|
||||
num_components, pred_vals.get())) {
|
||||
// Parallelogram could not be computed, Possible because some of the
|
||||
// vertices are not valid (not encoded yet).
|
||||
// We use the last encoded point as a reference (delta coding).
|
||||
const int src_offset = (p - 1) * num_components;
|
||||
this->transform().ComputeOriginalValue(out_data + src_offset, in_corr,
|
||||
out_data + dst_offset, dst_offset);
|
||||
} else {
|
||||
// Apply the parallelogram prediction.
|
||||
const int v_opp_off = vert_opp * num_components;
|
||||
const int v_next_off = vert_next * num_components;
|
||||
const int v_prev_off = vert_prev * num_components;
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
pred_vals[c] = (out_data[v_next_off + c] + out_data[v_prev_off + c]) -
|
||||
out_data[v_opp_off + c];
|
||||
}
|
||||
this->transform().ComputeOriginalValue(pred_vals.get(), in_corr,
|
||||
out_data + dst_offset, dst_offset);
|
||||
}
|
||||
|
@ -35,6 +35,36 @@ inline void GetParallelogramEntries(
|
||||
*prev_entry = vertex_to_data_map[table->Vertex(table->Previous(ci)).value()];
|
||||
}
|
||||
|
||||
// Computes parallelogram prediction for a given corner and data entry id.
|
||||
// The prediction is stored in |out_prediction|.
|
||||
// Function returns false when the prediction couldn't be computed, e.g. because
|
||||
// not all entry points were available.
|
||||
template <class CornerTableT, typename DataTypeT>
|
||||
inline bool ComputeParallelogramPrediction(
|
||||
int data_entry_id, const CornerIndex ci, const CornerTableT *table,
|
||||
const std::vector<int32_t> &vertex_to_data_map, const DataTypeT *in_data,
|
||||
int num_components, DataTypeT *out_prediction) {
|
||||
const CornerIndex oci = table->Opposite(ci);
|
||||
if (oci < 0)
|
||||
return false;
|
||||
int vert_opp, vert_next, vert_prev;
|
||||
GetParallelogramEntries<CornerTableT>(oci, table, vertex_to_data_map,
|
||||
&vert_opp, &vert_next, &vert_prev);
|
||||
if (vert_opp < data_entry_id && vert_next < data_entry_id &&
|
||||
vert_prev < data_entry_id) {
|
||||
// Apply the parallelogram prediction.
|
||||
const int v_opp_off = vert_opp * num_components;
|
||||
const int v_next_off = vert_next * num_components;
|
||||
const int v_prev_off = vert_prev * num_components;
|
||||
for (int c = 0; c < num_components; ++c) {
|
||||
out_prediction[c] = (in_data[v_next_off + c] + in_data[v_prev_off + c]) -
|
||||
in_data[v_opp_off + c];
|
||||
}
|
||||
return true;
|
||||
}
|
||||
return false; // Not all data is available for prediction
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
|
||||
|
@ -18,7 +18,8 @@
|
||||
#include <math.h>
|
||||
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme.h"
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_decoder.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
#include "core/vector_d.h"
|
||||
#include "mesh/corner_table.h"
|
||||
|
||||
@ -40,12 +41,13 @@ class MeshPredictionSchemeTexCoords
|
||||
typename MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>::CorrType;
|
||||
MeshPredictionSchemeTexCoords(const PointAttribute *attribute,
|
||||
const TransformT &transform,
|
||||
const MeshDataT &mesh_data)
|
||||
const MeshDataT &mesh_data, int version)
|
||||
: MeshPredictionScheme<DataTypeT, TransformT, MeshDataT>(
|
||||
attribute, transform, mesh_data),
|
||||
pos_attribute_(nullptr),
|
||||
entry_to_point_id_map_(nullptr),
|
||||
num_components_(0) {}
|
||||
num_components_(0),
|
||||
version_(version) {}
|
||||
|
||||
bool Encode(const DataTypeT *in_data, CorrType *out_corr, int size,
|
||||
int num_components,
|
||||
@ -111,6 +113,7 @@ class MeshPredictionSchemeTexCoords
|
||||
int num_components_;
|
||||
// Encoded / decoded array of UV flips.
|
||||
std::vector<bool> orientations_;
|
||||
int version_;
|
||||
};
|
||||
|
||||
template <typename DataTypeT, class TransformT, class MeshDataT>
|
||||
@ -181,7 +184,7 @@ bool MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::
|
||||
DecodePredictionData(DecoderBuffer *buffer) {
|
||||
// Decode the delta coded orientations.
|
||||
int32_t num_orientations = 0;
|
||||
if (!buffer->Decode(&num_orientations))
|
||||
if (!buffer->Decode(&num_orientations) || num_orientations < 0)
|
||||
return false;
|
||||
orientations_.resize(num_orientations);
|
||||
bool last_orientation = true;
|
||||
@ -270,10 +273,19 @@ void MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>::
|
||||
// normalization explicitly and instead we can just use the squared norm
|
||||
// of |pn| as a denominator of the resulting dot product of non normalized
|
||||
// vectors.
|
||||
const float s = pn.Dot(cn) / pn_norm2_squared;
|
||||
// To get the coordinate t, we can use formula:
|
||||
// t = |C-N - (P-N) * s| / |P-N|
|
||||
const float t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
|
||||
float s, t;
|
||||
// |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
|
||||
// the same positions (e.g. because they were quantized to the same
|
||||
// location).
|
||||
if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) {
|
||||
s = pn.Dot(cn) / pn_norm2_squared;
|
||||
// To get the coordinate t, we can use formula:
|
||||
// t = |C-N - (P-N) * s| / |P-N|
|
||||
t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
|
||||
} else {
|
||||
s = 0;
|
||||
t = 0;
|
||||
}
|
||||
|
||||
// Now we need to transform the point (s, t) to the texture coordinate space
|
||||
// UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
|
||||
|
@ -42,7 +42,7 @@ CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
|
||||
const MeshDecoder *const mesh_decoder =
|
||||
static_cast<const MeshDecoder *>(decoder);
|
||||
auto ret = CreateMeshPredictionScheme<MeshDecoder, DataTypeT, TransformT>(
|
||||
mesh_decoder, method, att_id, transform);
|
||||
mesh_decoder, method, att_id, transform, decoder->bitstream_version());
|
||||
if (ret)
|
||||
return ret;
|
||||
// Otherwise try to create another prediction scheme.
|
||||
|
@ -35,12 +35,14 @@ PredictionSchemeMethod SelectPredictionMethod(
|
||||
if (encoder->options()->GetSpeed() >= 8) {
|
||||
return PREDICTION_DIFFERENCE;
|
||||
}
|
||||
if (encoder->options()->GetSpeed() >= 2) {
|
||||
// Parallelogram prediction is used for speeds 2 - 7.
|
||||
if (encoder->options()->GetSpeed() >= 2 ||
|
||||
encoder->point_cloud()->num_points() < 40) {
|
||||
// Parallelogram prediction is used for speeds 2 - 7 or when the overhead
|
||||
// of using constrained multi parallelogram would be too high.
|
||||
return MESH_PREDICTION_PARALLELOGRAM;
|
||||
}
|
||||
// Multi-parallelogram is used for speeds 0, 1.
|
||||
return MESH_PREDICTION_MULTI_PARALLELOGRAM;
|
||||
return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
|
||||
}
|
||||
// Default option is delta coding.
|
||||
return PREDICTION_DIFFERENCE;
|
||||
|
@ -52,7 +52,7 @@ CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
|
||||
const MeshEncoder *const mesh_encoder =
|
||||
static_cast<const MeshEncoder *>(encoder);
|
||||
auto ret = CreateMeshPredictionScheme<MeshEncoder, DataTypeT, TransformT>(
|
||||
mesh_encoder, method, att_id, transform);
|
||||
mesh_encoder, method, att_id, transform, kDracoBitstreamVersion);
|
||||
if (ret)
|
||||
return ret;
|
||||
// Otherwise try to create another prediction scheme.
|
||||
|
@ -23,6 +23,7 @@
|
||||
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
|
||||
|
||||
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram.h"
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram.h"
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram.h"
|
||||
#include "compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords.h"
|
||||
@ -50,7 +51,8 @@ std::unique_ptr<PredictionScheme<DataTypeT, TransformT>>
|
||||
CreateMeshPredictionSchemeInternal(PredictionSchemeMethod method,
|
||||
const PointAttribute *attribute,
|
||||
const TransformT &transform,
|
||||
const MeshDataT &mesh_data) {
|
||||
const MeshDataT &mesh_data,
|
||||
uint16_t bitstream_version) {
|
||||
if (method == MESH_PREDICTION_PARALLELOGRAM) {
|
||||
return std::unique_ptr<PredictionScheme<DataTypeT, TransformT>>(
|
||||
new MeshPredictionSchemeParallelogram<DataTypeT, TransformT, MeshDataT>(
|
||||
@ -60,10 +62,14 @@ CreateMeshPredictionSchemeInternal(PredictionSchemeMethod method,
|
||||
new MeshPredictionSchemeMultiParallelogram<DataTypeT, TransformT,
|
||||
MeshDataT>(
|
||||
attribute, transform, mesh_data));
|
||||
} else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) {
|
||||
return std::unique_ptr<PredictionScheme<DataTypeT, TransformT>>(
|
||||
new MeshPredictionSchemeConstrainedMultiParallelogram<
|
||||
DataTypeT, TransformT, MeshDataT>(attribute, transform, mesh_data));
|
||||
} else if (method == MESH_PREDICTION_TEX_COORDS) {
|
||||
return std::unique_ptr<PredictionScheme<DataTypeT, TransformT>>(
|
||||
new MeshPredictionSchemeTexCoords<DataTypeT, TransformT, MeshDataT>(
|
||||
attribute, transform, mesh_data));
|
||||
attribute, transform, mesh_data, bitstream_version));
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
@ -72,11 +78,13 @@ template <class EncodingDataSourceT, typename DataTypeT, class TransformT>
|
||||
std::unique_ptr<PredictionScheme<DataTypeT, TransformT>>
|
||||
CreateMeshPredictionScheme(const EncodingDataSourceT *source,
|
||||
PredictionSchemeMethod method, int att_id,
|
||||
const TransformT &transform) {
|
||||
const TransformT &transform,
|
||||
uint16_t bitstream_version) {
|
||||
const PointAttribute *const att = source->point_cloud()->attribute(att_id);
|
||||
if (source->GetGeometryType() == TRIANGULAR_MESH &&
|
||||
(method == MESH_PREDICTION_PARALLELOGRAM ||
|
||||
method == MESH_PREDICTION_MULTI_PARALLELOGRAM ||
|
||||
method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM ||
|
||||
method == MESH_PREDICTION_TEX_COORDS)) {
|
||||
const CornerTable *const ct = source->GetCornerTable();
|
||||
const MeshAttributeIndicesEncodingData *const encoding_data =
|
||||
@ -96,7 +104,7 @@ CreateMeshPredictionScheme(const EncodingDataSourceT *source,
|
||||
&encoding_data->vertex_to_encoded_attribute_value_index_map);
|
||||
auto ret =
|
||||
CreateMeshPredictionSchemeInternal<DataTypeT, TransformT, MeshData>(
|
||||
method, att, transform, md);
|
||||
method, att, transform, md, bitstream_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
@ -107,7 +115,7 @@ CreateMeshPredictionScheme(const EncodingDataSourceT *source,
|
||||
&encoding_data->vertex_to_encoded_attribute_value_index_map);
|
||||
auto ret =
|
||||
CreateMeshPredictionSchemeInternal<DataTypeT, TransformT, MeshData>(
|
||||
method, att, transform, md);
|
||||
method, att, transform, md, bitstream_version);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
@ -0,0 +1,262 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
|
||||
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
|
||||
|
||||
#include <cmath>
|
||||
|
||||
#include "compression/attributes/normal_compression_utils.h"
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme.h"
|
||||
#include "core/macros.h"
|
||||
#include "core/vector_d.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// The transform works on octahedral coordinates for normals. The square is
// subdivided into four inner triangles (diamond) and four outer triangles. The
// inner triangles are associated with the upper part of the octahedron and the
// outer triangles are associated with the lower part.
// Given a prediction value P and the actual value Q that should be encoded,
// this transform first checks if P is outside the diamond. If so, the outer
// triangles are flipped towards the inside and vice versa. Then it checks if P
// is in the bottom left quadrant. If it is not, it rotates P and Q accordingly.
// The actual correction value is then based on the mapped and rotated P and Q
// values. The inversion tends to result in shorter correction vectors and the
// rotation makes it so that all long correction values are positive, reducing
// the possible value range of the correction values and increasing the
// occurrence of positive large correction values, which helps the entropy
// encoder. This is possible since P is also known by the decoder, see also
// ComputeCorrection and ComputeOriginalValue functions.
// Note that the tile is not periodic, which implies that the outer edges can
// not be identified, which requires us to use an odd number of values on each
// axis.
// DataTypeT is expected to be some integral type.
//
template <typename DataTypeT>
class PredictionSchemeNormalOctahedronCanonicalizedTransform
    : public PredictionSchemeTransform<DataTypeT, DataTypeT> {
 public:
  typedef VectorD<DataTypeT, 2> Point2;
  typedef DataTypeT CorrType;
  typedef DataTypeT DataType;

  PredictionSchemeNormalOctahedronCanonicalizedTransform()
      : mod_value_(0), max_value_(0) {}
  // We expect the mod value to be of the form 2^b-1.
  // NOTE(review): this single-argument constructor is not explicit, so a
  // DataType converts implicitly to a transform — confirm that is intended.
  PredictionSchemeNormalOctahedronCanonicalizedTransform(DataType mod_value)
      : mod_value_(mod_value), max_value_((mod_value - 1) / 2) {}

  PredictionSchemeTransformType GetType() const {
    return PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED;
  }

  // We can return true as we keep correction values positive (see
  // MakePositive below).
  bool AreCorrectionsPositive() const { return true; }

  // Stores mod_value_ and max_value_ in the buffer so that
  // DecodeTransformData can restore them on the decoder side.
  bool EncodeTransformData(EncoderBuffer *buffer) {
    buffer->Encode(mod_value_);
    buffer->Encode(max_value_);
    return true;
  }

  // Restores mod_value_ and max_value_ written by EncodeTransformData.
  bool DecodeTransformData(DecoderBuffer *buffer) {
    if (!buffer->Decode(&mod_value_))
      return false;
    if (!buffer->Decode(&max_value_))
      return false;
    return true;
  }

  // Computes the correction for one two-component value. All inputs are
  // expected in the unsigned range [0, 2 * max_value_]. The two correction
  // components are written to out_corr_vals[val_id] and
  // out_corr_vals[val_id + 1].
  inline void ComputeCorrection(const DataType *orig_vals,
                                const DataType *pred_vals,
                                CorrType *out_corr_vals, int val_id) const {
    DCHECK_LE(pred_vals[0], max_value_ * 2);
    DCHECK_LE(pred_vals[1], max_value_ * 2);
    DCHECK_LE(orig_vals[0], max_value_ * 2);
    DCHECK_LE(orig_vals[1], max_value_ * 2);
    DCHECK_LE(0, pred_vals[0]);
    DCHECK_LE(0, pred_vals[1]);
    DCHECK_LE(0, orig_vals[0]);
    DCHECK_LE(0, orig_vals[1]);

    const Point2 orig = Point2(orig_vals[0], orig_vals[1]);
    const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
    const Point2 corr = ComputeCorrection(orig, pred);
    // Debug-only round trip: decoding the correction must reproduce orig.
    DCHECK_EQ(true, Verify(orig, pred, corr));

    out_corr_vals[val_id] = corr[0];
    out_corr_vals[val_id + 1] = corr[1];
  }

  // Inverse of ComputeCorrection: reconstructs the original value from the
  // prediction and the (positive) correction.
  // NOTE(review): corrections are read at offset val_id, but the result is
  // written to out_orig_vals[0]/[1] — asymmetric with ComputeCorrection
  // above; confirm callers pass a pointer to the target element here.
  inline void ComputeOriginalValue(const DataType *pred_vals,
                                   const CorrType *corr_vals,
                                   DataType *out_orig_vals, int val_id) const {
    DCHECK_LE(pred_vals[0], 2 * max_value_);
    DCHECK_LE(pred_vals[1], 2 * max_value_);
    DCHECK_LE(corr_vals[val_id], 2 * max_value_);
    DCHECK_LE(corr_vals[val_id + 1], 2 * max_value_);

    DCHECK_LE(0, pred_vals[0]);
    DCHECK_LE(0, pred_vals[1]);
    DCHECK_LE(0, corr_vals[val_id]);
    DCHECK_LE(0, corr_vals[val_id + 1]);

    const Point2 pred = Point2(pred_vals[0], pred_vals[1]);
    const Point2 corr = Point2(corr_vals[val_id], corr_vals[val_id + 1]);
    const Point2 orig = ComputeOriginalValue(pred, corr);

    out_orig_vals[0] = orig[0];
    out_orig_vals[1] = orig[1];
  }

  // Returns the number of 90-degree rotation steps (as applied by
  // RotatePoint) that move the prediction pred into the bottom-left
  // quadrant. The origin (0, 0) yields rotation count 0.
  int32_t GetRotationCount(Point2 pred) const {
    const DataType sign_x = pred[0];
    const DataType sign_y = pred[1];

    int32_t rotation_count = 0;
    if (sign_x == 0) {
      if (sign_y == 0) {
        rotation_count = 0;
      } else if (sign_y > 0) {
        rotation_count = 3;
      } else {
        rotation_count = 1;
      }
    } else if (sign_x > 0) {
      if (sign_y >= 0) {
        rotation_count = 2;
      } else {
        rotation_count = 1;
      }
    } else {
      if (sign_y <= 0) {
        rotation_count = 0;
      } else {
        rotation_count = 3;
      }
    }
    return rotation_count;
  }

  // Rotates p by rotation_count 90-degree steps; one step maps
  // (x, y) -> (y, -x). Counts outside {1, 2, 3} leave p unchanged.
  Point2 RotatePoint(Point2 p, int32_t rotation_count) const {
    switch (rotation_count) {
      case 1:
        return Point2(p[1], -p[0]);
      case 2:
        return Point2(-p[0], -p[1]);
      case 3:
        return Point2(-p[1], p[0]);
      default:
        return p;
    }
  }

  // Returns true for the origin and for points with x < 0 and y <= 0.
  bool IsInBottomLeft(const Point2 &p) const {
    if (p[0] == 0 && p[1] == 0)
      return true;
    return (p[0] < 0 && p[1] <= 0);
  }

 private:
  // Canonicalizes orig and pred (centering around the tile midpoint,
  // diamond inversion, rotation into the bottom-left quadrant) and returns
  // the resulting correction mapped to positive values.
  // NOTE(review): IsInDiamond/InvertRepresentation are presumably declared in
  // a shared octahedron utility header included above — confirm.
  Point2 ComputeCorrection(Point2 orig, Point2 pred) const {
    const Point2 t(max_value_, max_value_);
    orig = orig - t;
    pred = pred - t;
    if (!IsInDiamond(max_value_, pred[0], pred[1])) {
      InvertRepresentation(max_value_, &orig[0], &orig[1]);
      InvertRepresentation(max_value_, &pred[0], &pred[1]);
    }
    if (!IsInBottomLeft(pred)) {
      int32_t rotation_count = GetRotationCount(pred);
      orig = RotatePoint(orig, rotation_count);
      pred = RotatePoint(pred, rotation_count);
    }
    Point2 corr = orig - pred;
    corr[0] = MakePositive(corr[0]);
    corr[1] = MakePositive(corr[1]);
    return corr;
  }

  // Inverse of the private ComputeCorrection: applies the correction in the
  // canonicalized frame, wraps with ModMax, then undoes rotation, inversion
  // and centering in reverse order.
  Point2 ComputeOriginalValue(Point2 pred, Point2 corr) const {
    const Point2 t(max_value_, max_value_);
    pred = pred - t;
    const bool pred_is_in_diamond = IsInDiamond(max_value_, pred[0], pred[1]);
    if (!pred_is_in_diamond) {
      InvertRepresentation(max_value_, &pred[0], &pred[1]);
    }
    const bool pred_is_in_bottom_left = IsInBottomLeft(pred);
    const int32_t rotation_count = GetRotationCount(pred);
    if (!pred_is_in_bottom_left) {
      pred = RotatePoint(pred, rotation_count);
    }
    Point2 orig = pred + corr;
    orig[0] = ModMax(orig[0]);
    orig[1] = ModMax(orig[1]);
    if (!pred_is_in_bottom_left) {
      // Undo the canonicalizing rotation.
      const int32_t reverse_rotation_count = (4 - rotation_count) % 4;
      orig = RotatePoint(orig, reverse_rotation_count);
    }
    if (!pred_is_in_diamond) {
      InvertRepresentation(max_value_, &orig[0], &orig[1]);
    }
    orig = orig + t;
    return orig;
  }

  // For correction values: maps a negative component into the positive range
  // by adding mod_value_; non-negative values pass through unchanged.
  DataType MakePositive(DataType x) const {
    DCHECK_LE(x, max_value_ * 2);
    if (x < 0)
      return x + mod_value_;
    return x;
  }

  // Wraps x into the centered range [-max_value_, max_value_] (single wrap).
  DataType ModMax(DataType x) const {
    if (x > max_value_)
      return x - mod_value_;
    if (x < -max_value_)
      return x + mod_value_;
    return x;
  }

  // Only called in debug mode. Checks that decoding corr against pred yields
  // a value equivalent to orig.
  bool Verify(const Point2 &orig, const Point2 pred, const Point2 corr) const {
    const Point2 veri = ComputeOriginalValue(pred, corr);
    return AreEquivalent(orig, veri);
  }

  // Only called in debug mode. Compares p and q in centered coordinates,
  // treating tile-border points that differ only by the sign flips below as
  // equal (the border of the non-periodic tile has aliased representations).
  bool AreEquivalent(Point2 p, Point2 q) const {
    const Point2 t(max_value_, max_value_);
    p = p - t;
    q = q - t;
    if (std::abs(p[0]) == max_value_ && p[1] < 0)
      p[1] = -p[1];
    if (std::abs(p[1]) == max_value_ && p[0] < 0)
      p[0] = -p[0];
    if (std::abs(q[0]) == max_value_ && q[1] < 0)
      q[1] = -q[1];
    if (std::abs(q[1]) == max_value_ && q[0] < 0)
      q[0] = -q[0];
    return (p[0] == q[0] && p[1] == q[1]);
  }

  DataType mod_value_;  // Wrap-around modulus; expected to be 2^b - 1.
  DataType max_value_;  // (mod_value_ - 1) / 2: half-range of centered values.
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_NORMAL_OCTAHEDRON_CANONICALIZED_TRANSFORM_H_
|
@ -0,0 +1,184 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
|
||||
#include "core/draco_test_base.h"
|
||||
|
||||
namespace {
|
||||
|
||||
class PredictionSchemeNormalOctahedronCanonicalizedTransformTest
|
||||
: public ::testing::Test {
|
||||
protected:
|
||||
typedef draco::PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
|
||||
Transform;
|
||||
typedef Transform::Point2 Point2;
|
||||
|
||||
void TestComputeCorrection(const Transform &transform, const int32_t &ox,
|
||||
const int32_t &oy, const int32_t &px,
|
||||
const int32_t &py, const int32_t &cx,
|
||||
const int32_t &cy) {
|
||||
const int32_t o[2] = {ox + 7, oy + 7};
|
||||
const int32_t p[2] = {px + 7, py + 7};
|
||||
int32_t corr[2] = {500, 500};
|
||||
transform.ComputeCorrection(o, p, corr, 0);
|
||||
ASSERT_EQ(corr[0], (cx + 15) % 15);
|
||||
ASSERT_EQ(corr[1], (cy + 15) % 15);
|
||||
}
|
||||
|
||||
void TestGetRotationCount(const Transform &transform, const Point2 &pred,
|
||||
const int32_t &rot_dir) {
|
||||
const int32_t rotation_count = transform.GetRotationCount(pred);
|
||||
ASSERT_EQ(rot_dir, rotation_count);
|
||||
}
|
||||
|
||||
void TestRotateRepresentation(const Transform &transform, const Point2 &org,
|
||||
const Point2 &pred, const Point2 &rot_org,
|
||||
const Point2 &rot_pred) {
|
||||
const int32_t rotation_count = transform.GetRotationCount(pred);
|
||||
const Point2 res_org = transform.RotatePoint(org, rotation_count);
|
||||
const Point2 res_pred = transform.RotatePoint(pred, rotation_count);
|
||||
ASSERT_EQ(rot_org[0], res_org[0]);
|
||||
ASSERT_EQ(rot_org[1], res_org[1]);
|
||||
ASSERT_EQ(rot_pred[0], res_pred[0]);
|
||||
ASSERT_EQ(rot_pred[1], res_pred[1]);
|
||||
}
|
||||
};
|
||||
|
||||
// Verifies that a freshly constructed transform reports its corrections as
// positive (the contract asserted by AreCorrectionsPositive()).
TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest, Init) {
  const Transform transform(15);
  ASSERT_TRUE(transform.AreCorrectionsPositive());
}
|
||||
|
||||
// Exercises IsInBottomLeft(): the origin and strictly bottom-left points
// (x < 0, y <= 0) are inside; points in the other quadrants are not.
TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
       IsInBottomLeft) {
  const Transform transform(15);
  ASSERT_TRUE(transform.IsInBottomLeft(Point2(0, 0)));
  ASSERT_TRUE(transform.IsInBottomLeft(Point2(-1, -1)));
  ASSERT_TRUE(transform.IsInBottomLeft(Point2(-7, -7)));

  ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, 1)));
  ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, 7)));
  ASSERT_FALSE(transform.IsInBottomLeft(Point2(-1, 1)));
  ASSERT_FALSE(transform.IsInBottomLeft(Point2(-7, 7)));
  ASSERT_FALSE(transform.IsInBottomLeft(Point2(1, -1)));
  ASSERT_FALSE(transform.IsInBottomLeft(Point2(7, -7)));
}
|
||||
|
||||
// Checks the rotation count chosen for predictions in each quadrant and on
// the axes; the origin (0, 0) is treated as bottom left (count 0).
TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
       GetRotationCount) {
  const Transform transform(15);
  TestGetRotationCount(transform, Point2(1, 2), 2);    // top right
  TestGetRotationCount(transform, Point2(-1, 2), 3);   // top left
  TestGetRotationCount(transform, Point2(1, -2), 1);   // bottom right
  TestGetRotationCount(transform, Point2(-1, -2), 0);  // bottom left
  TestGetRotationCount(transform, Point2(0, 2), 3);    // top left
  TestGetRotationCount(transform, Point2(0, -2), 1);   // bottom right
  TestGetRotationCount(transform, Point2(2, 0), 2);    // top right
  TestGetRotationCount(transform, Point2(-2, 0), 0);   // bottom left
  TestGetRotationCount(transform, Point2(0, 0), 0);    // bottom left
}
|
||||
|
||||
// Checks that rotating by the count derived from the prediction maps points
// from every quadrant consistently; the prediction itself always lands in
// the bottom-left quadrant at (-1, -3).
TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
       RotateRepresentation) {
  const Transform transform(15);
  // p top left; shift clockwise by 3
  TestRotateRepresentation(transform, Point2(1, 2), Point2(-3, 1),
                           Point2(-2, 1), Point2(-1, -3));  // q top right
  TestRotateRepresentation(transform, Point2(-1, -2), Point2(-3, 1),
                           Point2(2, -1), Point2(-1, -3));  // q bottom left
  TestRotateRepresentation(transform, Point2(1, -2), Point2(-3, 1),
                           Point2(2, 1), Point2(-1, -3));  // q bottom right
  TestRotateRepresentation(transform, Point2(-1, 2), Point2(-3, 1),
                           Point2(-2, -1), Point2(-1, -3));  // q top left
  // p top right; shift clockwise by 2 (flip)
  TestRotateRepresentation(transform, Point2(1, 1), Point2(1, 3),
                           Point2(-1, -1), Point2(-1, -3));  // q top right
  TestRotateRepresentation(transform, Point2(-1, -2), Point2(1, 3),
                           Point2(1, 2), Point2(-1, -3));  // q bottom left
  TestRotateRepresentation(transform, Point2(-1, 2), Point2(1, 3),
                           Point2(1, -2), Point2(-1, -3));  // q top left
  TestRotateRepresentation(transform, Point2(1, -2), Point2(1, 3),
                           Point2(-1, 2), Point2(-1, -3));  // q bottom right
  // p bottom right; shift clockwise by 1
  TestRotateRepresentation(transform, Point2(1, 2), Point2(3, -1),
                           Point2(2, -1), Point2(-1, -3));  // q top right
  TestRotateRepresentation(transform, Point2(1, -2), Point2(3, -1),
                           Point2(-2, -1), Point2(-1, -3));  // q bottom right
  TestRotateRepresentation(transform, Point2(-1, -2), Point2(3, -1),
                           Point2(-2, 1), Point2(-1, -3));  // q bottom left
  TestRotateRepresentation(transform, Point2(-1, 2), Point2(3, -1),
                           Point2(2, 1), Point2(-1, -3));  // q top left
  // p bottom left; no change
  TestRotateRepresentation(transform, Point2(1, 2), Point2(-1, -3),
                           Point2(1, 2), Point2(-1, -3));  // q top right
  TestRotateRepresentation(transform, Point2(-1, 2), Point2(-1, -3),
                           Point2(-1, 2), Point2(-1, -3));  // q top left
  TestRotateRepresentation(transform, Point2(1, -2), Point2(-1, -3),
                           Point2(1, -2), Point2(-1, -3));  // q bottom right
  TestRotateRepresentation(transform, Point2(-1, -2), Point2(-1, -3),
                           Point2(-1, -2), Point2(-1, -3));  // q bottom left
}
|
||||
|
||||
// End-to-end checks of ComputeCorrection() for predictions inside and
// outside the diamond, covering all four quadrants for both the prediction
// and the original value. Inputs are centered coordinates; the helper shifts
// them into the unsigned range and reduces expectations modulo 15.
TEST_F(PredictionSchemeNormalOctahedronCanonicalizedTransformTest,
       ComputeCorrection) {
  const Transform transform(15);
  TestComputeCorrection(transform, 0, 0, 0, 0, 0, 0);
  TestComputeCorrection(transform, 1, 1, 1, 1, 0, 0);
  // inside diamond; p top right
  TestComputeCorrection(transform, 3, 4, 1, 2, -2, -2);    // q top right
  TestComputeCorrection(transform, -3, 4, 1, 2, 4, -2);    // q top left
  TestComputeCorrection(transform, 3, -4, 1, 2, -2, 6);    // q bottom right
  TestComputeCorrection(transform, -3, -4, 1, 2, 4, 6);    // q bottom left
  // inside diamond; p top left
  TestComputeCorrection(transform, 3, 4, -1, 2, -2, 4);    // q top right
  TestComputeCorrection(transform, -3, 4, -1, 2, -2, -2);  // q top left
  TestComputeCorrection(transform, 3, -4, -1, 2, 6, 4);    // q bottom right
  TestComputeCorrection(transform, -3, -4, -1, 2, 6, -2);  // q bottom left
  // inside diamond; p bottom right
  TestComputeCorrection(transform, 3, 4, 1, -2, 6, -2);    // q top right
  TestComputeCorrection(transform, -3, 4, 1, -2, 6, 4);    // q top left
  TestComputeCorrection(transform, 3, -4, 1, -2, -2, -2);  // q bottom right
  TestComputeCorrection(transform, -3, -4, 1, -2, -2, 4);  // q bottom left
  // inside diamond; p bottom left
  TestComputeCorrection(transform, 3, 4, -1, -2, 4, 6);      // q top right
  TestComputeCorrection(transform, -3, 4, -1, -2, -2, 6);    // q top left
  TestComputeCorrection(transform, 3, -4, -1, -2, 4, -2);    // q bottom right
  TestComputeCorrection(transform, -3, -4, -1, -2, -2, -2);  // q bottom left
  // outside diamond; p top right
  TestComputeCorrection(transform, 1, 2, 5, 4, -2, -4);    // q top right
  TestComputeCorrection(transform, -1, 2, 5, 4, -7, -4);   // q top left
  TestComputeCorrection(transform, 1, -2, 5, 4, -2, -7);   // q bottom right
  TestComputeCorrection(transform, -1, -2, 5, 4, -7, -7);  // q bottom left
  // outside diamond; p top left
  TestComputeCorrection(transform, 1, 2, -5, 4, -4, -7);    // q top right
  TestComputeCorrection(transform, -1, 2, -5, 4, -4, -2);   // q top left
  TestComputeCorrection(transform, 1, -2, -5, 4, -7, -7);   // q bottom right
  TestComputeCorrection(transform, -1, -2, -5, 4, -7, -2);  // q bottom left
  // outside diamond; p bottom right
  TestComputeCorrection(transform, 1, 2, 5, -4, -7, -2);    // q top right
  TestComputeCorrection(transform, -1, 2, 5, -4, -7, -7);   // q top left
  TestComputeCorrection(transform, 1, -2, 5, -4, -4, -2);   // q bottom right
  TestComputeCorrection(transform, -1, -2, 5, -4, -4, -7);  // q bottom left
  // outside diamond; p bottom left
  TestComputeCorrection(transform, 1, 2, -5, -4, -7, -7);    // q top right
  TestComputeCorrection(transform, -1, 2, -5, -4, -2, -7);   // q top left
  TestComputeCorrection(transform, 1, -2, -5, -4, -7, -4);   // q bottom right
  TestComputeCorrection(transform, -1, -2, -5, -4, -2, -4);  // q bottom left

  // Extreme predictions at the tile corner and on the axis.
  TestComputeCorrection(transform, -1, -2, 7, 7, -5, -6);
  TestComputeCorrection(transform, 0, 0, 7, 7, 7, 7);
  TestComputeCorrection(transform, -1, -2, 0, -2, 0, 1);
}
|
||||
|
||||
} // namespace
|
@ -40,9 +40,6 @@ namespace draco {
|
||||
// axis.
|
||||
// DataTypeT is expected to be some integral type.
|
||||
//
|
||||
// This relates
|
||||
// * IDF# 44535
|
||||
// * Patent Application: GP-200957-00-US
|
||||
template <typename DataTypeT>
|
||||
class PredictionSchemeNormalOctahedronTransform
|
||||
: public PredictionSchemeTransform<DataTypeT, DataTypeT> {
|
||||
|
@ -70,6 +70,8 @@ SequentialIntegerAttributeDecoder::CreateIntPredictionScheme(
|
||||
bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
|
||||
const std::vector<PointIndex> &point_ids, DecoderBuffer *in_buffer) {
|
||||
const int num_components = GetNumValueComponents();
|
||||
if (num_components <= 0)
|
||||
return false;
|
||||
const int32_t num_values = point_ids.size();
|
||||
values_.resize(num_values * num_components);
|
||||
uint8_t compressed;
|
||||
@ -96,8 +98,8 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
|
||||
}
|
||||
}
|
||||
|
||||
if (prediction_scheme_ == nullptr ||
|
||||
!prediction_scheme_->AreCorrectionsPositive()) {
|
||||
if (!values_.empty() && (prediction_scheme_ == nullptr ||
|
||||
!prediction_scheme_->AreCorrectionsPositive())) {
|
||||
// Convert the values back to the original signed format.
|
||||
ConvertSymbolsToSignedInts(
|
||||
reinterpret_cast<const uint32_t *>(values_.data()), values_.size(),
|
||||
@ -109,9 +111,12 @@ bool SequentialIntegerAttributeDecoder::DecodeIntegerValues(
|
||||
if (!prediction_scheme_->DecodePredictionData(in_buffer))
|
||||
return false;
|
||||
|
||||
if (!prediction_scheme_->Decode(values_.data(), &values_[0], values_.size(),
|
||||
num_components, point_ids.data())) {
|
||||
return false;
|
||||
if (!values_.empty()) {
|
||||
if (!prediction_scheme_->Decode(values_.data(), &values_[0],
|
||||
values_.size(), num_components,
|
||||
point_ids.data())) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
|
@ -16,6 +16,7 @@
|
||||
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_DECODER_H_
|
||||
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_decoder_factory.h"
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
|
||||
#include "compression/attributes/sequential_integer_attribute_decoder.h"
|
||||
|
||||
@ -52,6 +53,15 @@ class SequentialNormalAttributeDecoder
|
||||
return CreatePredictionSchemeForDecoder<int32_t, Transform>(
|
||||
method, attribute_id(), decoder());
|
||||
}
|
||||
case PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED: {
|
||||
typedef PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
|
||||
Transform;
|
||||
// At this point the decoder has not read the quantization bits,
|
||||
// which is why we must construct the transform by default.
|
||||
// See Transform.DecodeTransformData for more details.
|
||||
return CreatePredictionSchemeForDecoder<int32_t, Transform>(
|
||||
method, attribute_id(), decoder());
|
||||
}
|
||||
default:
|
||||
return nullptr; // Currently, we support only octahedron transform and
|
||||
// octahedron transform canonicalized.
|
||||
|
@ -16,7 +16,7 @@
|
||||
#define DRACO_COMPRESSION_ATTRIBUTES_SEQUENTIAL_NORMAL_ATTRIBUTE_ENCODER_H_
|
||||
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform.h"
|
||||
#include "compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform.h"
|
||||
#include "compression/attributes/sequential_integer_attribute_encoder.h"
|
||||
|
||||
namespace draco {
|
||||
@ -41,7 +41,8 @@ class SequentialNormalAttributeEncoder
|
||||
|
||||
std::unique_ptr<PredictionSchemeTypedInterface<int32_t>>
|
||||
CreateIntPredictionScheme(PredictionSchemeMethod /* method */) override {
|
||||
typedef PredictionSchemeNormalOctahedronTransform<int32_t> Transform;
|
||||
typedef PredictionSchemeNormalOctahedronCanonicalizedTransform<int32_t>
|
||||
Transform;
|
||||
const int32_t quantization_bits = encoder()->options()->GetAttributeInt(
|
||||
attribute_id(), "quantization_bits", -1);
|
||||
const int32_t max_value = (1 << quantization_bits) - 1;
|
||||
|
@ -15,8 +15,23 @@
|
||||
#ifndef DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
|
||||
#define DRACO_COMPRESSION_CONFIG_COMPRESSION_SHARED_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Latest Draco bit-stream version.
|
||||
static constexpr uint8_t kDracoBitstreamVersionMajor = 1;
|
||||
static constexpr uint8_t kDracoBitstreamVersionMinor = 2;
|
||||
|
||||
// Macro that converts the Draco bit-stream into one uint16_t number. Useful
|
||||
// mostly when checking version numbers.
|
||||
#define DRACO_BITSTREAM_VERSION(MAJOR, MINOR) \
|
||||
((static_cast<uint16_t>(MAJOR) << 8) | MINOR)
|
||||
|
||||
// Concatenated latest bit-stream version.
|
||||
static constexpr uint16_t kDracoBitstreamVersion = DRACO_BITSTREAM_VERSION(
|
||||
kDracoBitstreamVersionMajor, kDracoBitstreamVersionMinor);
|
||||
|
||||
// Currently, we support point cloud and triangular mesh encoding.
|
||||
enum EncodedGeometryType {
|
||||
INVALID_GEOMETRY_TYPE = -1,
|
||||
@ -62,9 +77,10 @@ enum PredictionSchemeMethod {
|
||||
// Used when no specific prediction scheme is required.
|
||||
PREDICTION_UNDEFINED = -1,
|
||||
PREDICTION_DIFFERENCE = 0,
|
||||
MESH_PREDICTION_PARALLELOGRAM,
|
||||
MESH_PREDICTION_MULTI_PARALLELOGRAM,
|
||||
MESH_PREDICTION_TEX_COORDS,
|
||||
MESH_PREDICTION_PARALLELOGRAM = 1,
|
||||
MESH_PREDICTION_MULTI_PARALLELOGRAM = 2,
|
||||
MESH_PREDICTION_TEX_COORDS = 3,
|
||||
MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM = 4,
|
||||
NUM_PREDICTION_SCHEMES
|
||||
};
|
||||
|
||||
@ -79,8 +95,27 @@ enum PredictionSchemeTransformType {
|
||||
PREDICTION_TRANSFORM_WRAP = 1,
|
||||
// Specialized transform for normal coordinates using inverted tiles.
|
||||
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON = 2,
|
||||
// Reserved for internal use.
|
||||
PREDICTION_TRANSFORM_RESERVED_0 = 3,
|
||||
// Specialized transform for normal coordinates using canonicalized inverted
|
||||
// tiles.
|
||||
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED = 3,
|
||||
};
|
||||
|
||||
// List of all mesh traversal methods supported by Draco framework.
|
||||
enum MeshTraversalMethod {
|
||||
MESH_TRAVERSAL_DEPTH_FIRST = 0,
|
||||
MESH_TRAVERSAL_PREDICTION_DEGREE = 1,
|
||||
MESH_TRAVERSAL_RESERVED_1 = 2,
|
||||
MESH_TRAVERSAL_RESERVED_2 = 3,
|
||||
};
|
||||
|
||||
// Draco header V1
|
||||
struct DracoHeader {
|
||||
int8_t draco_string[5];
|
||||
uint8_t version_major;
|
||||
uint8_t version_minor;
|
||||
uint8_t encoder_type;
|
||||
uint8_t encoder_method;
|
||||
uint16_t flags;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
@ -28,38 +28,12 @@
|
||||
|
||||
namespace draco {
|
||||
|
||||
bool ParseHeader(DecoderBuffer *in_buffer, EncodedGeometryType *out_type,
|
||||
int8_t *out_method) {
|
||||
char draco_str[6] = {0};
|
||||
if (!in_buffer->Decode(draco_str, 5))
|
||||
return false;
|
||||
if (strcmp(draco_str, "DRACO") != 0)
|
||||
return false; // Wrong file format?
|
||||
uint8_t major_version, minor_version;
|
||||
if (!in_buffer->Decode(&major_version))
|
||||
return false;
|
||||
if (!in_buffer->Decode(&minor_version))
|
||||
return false;
|
||||
uint8_t encoder_type, encoder_method;
|
||||
if (!in_buffer->Decode(&encoder_type))
|
||||
return false;
|
||||
if (!in_buffer->Decode(&encoder_method))
|
||||
return false;
|
||||
uint16_t flags;
|
||||
if (!in_buffer->Decode(&flags))
|
||||
return false;
|
||||
*out_type = static_cast<EncodedGeometryType>(encoder_type);
|
||||
*out_method = encoder_method;
|
||||
return true;
|
||||
}
|
||||
|
||||
EncodedGeometryType GetEncodedGeometryType(DecoderBuffer *in_buffer) {
|
||||
DecoderBuffer temp_buffer(*in_buffer);
|
||||
EncodedGeometryType geom_type;
|
||||
int8_t method;
|
||||
if (!ParseHeader(&temp_buffer, &geom_type, &method))
|
||||
DracoHeader header;
|
||||
if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
|
||||
return INVALID_GEOMETRY_TYPE;
|
||||
return geom_type;
|
||||
return static_cast<EncodedGeometryType>(header.encoder_type);
|
||||
}
|
||||
|
||||
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
|
||||
@ -87,14 +61,14 @@ std::unique_ptr<MeshDecoder> CreateMeshDecoder(uint8_t method) {
|
||||
|
||||
std::unique_ptr<PointCloud> DecodePointCloudFromBuffer(
|
||||
DecoderBuffer *in_buffer) {
|
||||
EncodedGeometryType encoder_type;
|
||||
int8_t method;
|
||||
if (!ParseHeader(in_buffer, &encoder_type, &method))
|
||||
DecoderBuffer temp_buffer(*in_buffer);
|
||||
DracoHeader header;
|
||||
if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
|
||||
return nullptr;
|
||||
if (encoder_type == POINT_CLOUD) {
|
||||
if (header.encoder_type == POINT_CLOUD) {
|
||||
#ifdef DRACO_POINT_CLOUD_COMPRESSION_SUPPORTED
|
||||
std::unique_ptr<PointCloudDecoder> decoder =
|
||||
CreatePointCloudDecoder(method);
|
||||
CreatePointCloudDecoder(header.encoder_method);
|
||||
if (!decoder)
|
||||
return nullptr;
|
||||
std::unique_ptr<PointCloud> point_cloud(new PointCloud());
|
||||
@ -102,9 +76,10 @@ std::unique_ptr<PointCloud> DecodePointCloudFromBuffer(
|
||||
return nullptr;
|
||||
return point_cloud;
|
||||
#endif
|
||||
} else if (encoder_type == TRIANGULAR_MESH) {
|
||||
} else if (header.encoder_type == TRIANGULAR_MESH) {
|
||||
#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
|
||||
std::unique_ptr<MeshDecoder> decoder = CreateMeshDecoder(method);
|
||||
std::unique_ptr<MeshDecoder> decoder =
|
||||
CreateMeshDecoder(header.encoder_method);
|
||||
if (!decoder)
|
||||
return nullptr;
|
||||
std::unique_ptr<Mesh> mesh(new Mesh());
|
||||
@ -118,13 +93,13 @@ std::unique_ptr<PointCloud> DecodePointCloudFromBuffer(
|
||||
|
||||
std::unique_ptr<Mesh> DecodeMeshFromBuffer(DecoderBuffer *in_buffer) {
|
||||
#ifdef DRACO_MESH_COMPRESSION_SUPPORTED
|
||||
EncodedGeometryType encoder_type;
|
||||
int8_t method;
|
||||
if (!ParseHeader(in_buffer, &encoder_type, &method))
|
||||
DecoderBuffer temp_buffer(*in_buffer);
|
||||
DracoHeader header;
|
||||
if (!PointCloudDecoder::DecodeHeader(&temp_buffer, &header))
|
||||
return nullptr;
|
||||
std::unique_ptr<MeshDecoder> decoder;
|
||||
if (encoder_type == TRIANGULAR_MESH) {
|
||||
decoder = CreateMeshDecoder(method);
|
||||
if (header.encoder_type == TRIANGULAR_MESH) {
|
||||
decoder = CreateMeshDecoder(header.encoder_method);
|
||||
}
|
||||
if (!decoder)
|
||||
return nullptr;
|
||||
|
@ -21,33 +21,11 @@
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Encodes header common to all methods.
|
||||
bool EncodeHeader(const PointCloudEncoder &encoder, EncoderBuffer *out_buffer) {
|
||||
// Encode the header according to our v1 specification.
|
||||
// Five bytes for Draco format.
|
||||
out_buffer->Encode("DRACO", 5);
|
||||
// Version (major, minor).
|
||||
const uint8_t major_version = 1;
|
||||
const uint8_t minor_version = 1;
|
||||
out_buffer->Encode(major_version);
|
||||
out_buffer->Encode(minor_version);
|
||||
// Type of the encoder (point cloud, mesh, ...).
|
||||
const uint8_t encoder_type = encoder.GetGeometryType();
|
||||
out_buffer->Encode(encoder_type);
|
||||
// Unique identifier for the selected encoding method (edgebreaker, etc...).
|
||||
out_buffer->Encode(encoder.GetEncodingMethod());
|
||||
// Reserved for flags.
|
||||
out_buffer->Encode(static_cast<uint16_t>(0));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool EncodeGeometryToBuffer(PointCloudEncoder *encoder,
|
||||
const EncoderOptions &options,
|
||||
EncoderBuffer *out_buffer) {
|
||||
if (!encoder)
|
||||
return false;
|
||||
if (!EncodeHeader(*encoder, out_buffer))
|
||||
return false;
|
||||
if (!encoder->Encode(options, out_buffer))
|
||||
return false;
|
||||
return true;
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder_impl.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -40,6 +41,10 @@ bool MeshEdgeBreakerDecoder::InitializeDecoder() {
|
||||
new MeshEdgeBreakerDecoderImpl<
|
||||
MeshEdgeBreakerTraversalPredictiveDecoder>());
|
||||
#endif
|
||||
} else if (traversal_decoder_type == 2) {
|
||||
impl_ = std::unique_ptr<MeshEdgeBreakerDecoderImplInterface>(
|
||||
new MeshEdgeBreakerDecoderImpl<
|
||||
MeshEdgeBreakerTraversalValenceDecoder>());
|
||||
}
|
||||
if (!impl_) {
|
||||
return false;
|
||||
|
@ -17,12 +17,13 @@
|
||||
#include <algorithm>
|
||||
|
||||
#include "compression/attributes/mesh_attribute_indices_encoding_observer.h"
|
||||
#include "compression/attributes/mesh_traversal_sequencer.h"
|
||||
#include "compression/attributes/sequential_attribute_decoders_controller.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_decoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h"
|
||||
#include "mesh/corner_table_traversal_processor.h"
|
||||
#include "mesh/edgebreaker_traverser.h"
|
||||
#include "mesh/prediction_degree_traverser.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -61,8 +62,11 @@ const MeshAttributeCornerTable *
|
||||
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeCornerTable(
|
||||
int att_id) const {
|
||||
for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
|
||||
const int decoder_id = attribute_data_[i].decoder_id;
|
||||
if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders())
|
||||
continue;
|
||||
const AttributesDecoder *const dec =
|
||||
decoder_->attributes_decoder(attribute_data_[i].decoder_id);
|
||||
decoder_->attributes_decoder(decoder_id);
|
||||
for (int j = 0; j < dec->num_attributes(); ++j) {
|
||||
if (dec->GetAttributeId(j) == att_id) {
|
||||
if (attribute_data_[i].is_connectivity_used)
|
||||
@ -79,8 +83,11 @@ const MeshAttributeIndicesEncodingData *
|
||||
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeEncodingData(
|
||||
int att_id) const {
|
||||
for (uint32_t i = 0; i < attribute_data_.size(); ++i) {
|
||||
const int decoder_id = attribute_data_[i].decoder_id;
|
||||
if (decoder_id < 0 || decoder_id >= decoder_->num_attributes_decoders())
|
||||
continue;
|
||||
const AttributesDecoder *const dec =
|
||||
decoder_->attributes_decoder(attribute_data_[i].decoder_id);
|
||||
decoder_->attributes_decoder(decoder_id);
|
||||
for (int j = 0; j < dec->num_attributes(); ++j) {
|
||||
if (dec->GetAttributeId(j) == att_id)
|
||||
return &attribute_data_[i].encoding_data;
|
||||
@ -89,6 +96,30 @@ MeshEdgeBreakerDecoderImpl<TraversalDecoder>::GetAttributeEncodingData(
|
||||
return &pos_encoding_data_;
|
||||
}
|
||||
|
||||
template <class TraversalDecoder>
|
||||
template <class TraverserT>
|
||||
std::unique_ptr<PointsSequencer>
|
||||
MeshEdgeBreakerDecoderImpl<TraversalDecoder>::CreateVertexTraversalSequencer(
|
||||
MeshAttributeIndicesEncodingData *encoding_data) {
|
||||
typedef typename TraverserT::TraversalObserver AttObserver;
|
||||
typedef typename TraverserT::TraversalProcessor AttProcessor;
|
||||
|
||||
const Mesh *mesh = decoder_->mesh();
|
||||
std::unique_ptr<MeshTraversalSequencer<TraverserT>> traversal_sequencer(
|
||||
new MeshTraversalSequencer<TraverserT>(mesh, encoding_data));
|
||||
|
||||
TraverserT att_traverser;
|
||||
AttObserver att_observer(corner_table_.get(), mesh, traversal_sequencer.get(),
|
||||
encoding_data);
|
||||
AttProcessor att_processor;
|
||||
|
||||
att_processor.ResetProcessor(corner_table_.get());
|
||||
att_traverser.Init(std::move(att_processor), att_observer);
|
||||
|
||||
traversal_sequencer->SetTraverser(att_traverser);
|
||||
return std::move(traversal_sequencer);
|
||||
}
|
||||
|
||||
template <class TraversalDecoder>
|
||||
bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::CreateAttributesDecoder(
|
||||
int32_t att_decoder_id) {
|
||||
@ -106,6 +137,15 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::CreateAttributesDecoder(
|
||||
attribute_data_[att_data_id].decoder_id = att_decoder_id;
|
||||
}
|
||||
|
||||
MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
|
||||
if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
|
||||
uint8_t traversal_method_encoded;
|
||||
if (!decoder_->buffer()->Decode(&traversal_method_encoded))
|
||||
return false;
|
||||
traversal_method =
|
||||
static_cast<MeshTraversalMethod>(traversal_method_encoded);
|
||||
}
|
||||
|
||||
const Mesh *mesh = decoder_->mesh();
|
||||
std::unique_ptr<PointsSequencer> sequencer;
|
||||
|
||||
@ -113,9 +153,6 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::CreateAttributesDecoder(
|
||||
// Per-vertex attribute decoder.
|
||||
typedef CornerTableTraversalProcessor<CornerTable> AttProcessor;
|
||||
typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
|
||||
// Traverser that is used to generate the encoding order of each attribute.
|
||||
typedef EdgeBreakerTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
|
||||
MeshAttributeIndicesEncodingData *encoding_data = nullptr;
|
||||
if (att_data_id < 0) {
|
||||
encoding_data = &pos_encoding_data_;
|
||||
@ -125,22 +162,20 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::CreateAttributesDecoder(
|
||||
// later on.
|
||||
attribute_data_[att_data_id].is_connectivity_used = false;
|
||||
}
|
||||
|
||||
std::unique_ptr<MeshTraversalSequencer<AttTraverser>> traversal_sequencer(
|
||||
new MeshTraversalSequencer<AttTraverser>(mesh, encoding_data));
|
||||
|
||||
AttTraverser att_traverser;
|
||||
AttObserver att_observer(corner_table_.get(), mesh,
|
||||
traversal_sequencer.get(), encoding_data);
|
||||
AttProcessor att_processor;
|
||||
|
||||
att_processor.ResetProcessor(corner_table_.get());
|
||||
att_traverser.Init(att_processor, att_observer);
|
||||
|
||||
traversal_sequencer->SetTraverser(att_traverser);
|
||||
sequencer = std::move(traversal_sequencer);
|
||||
|
||||
if (traversal_method == MESH_TRAVERSAL_DEPTH_FIRST) {
|
||||
// Traverser that is used to generate the encoding order of each
|
||||
// attribute.
|
||||
typedef EdgeBreakerTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
|
||||
} else if (traversal_method == MESH_TRAVERSAL_PREDICTION_DEGREE) {
|
||||
typedef PredictionDegreeTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
|
||||
} else {
|
||||
return false; // Unsupported method
|
||||
}
|
||||
} else {
|
||||
if (traversal_method != MESH_TRAVERSAL_DEPTH_FIRST)
|
||||
return false; // Unsupported method.
|
||||
if (att_data_id < 0)
|
||||
return false; // Attribute data must be specified.
|
||||
|
||||
@ -203,7 +238,8 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeConnectivity() {
|
||||
corner_table_ = std::unique_ptr<CornerTable>(new CornerTable());
|
||||
if (corner_table_ == nullptr)
|
||||
return false;
|
||||
corner_table_->Reset(num_faces);
|
||||
if (!corner_table_->Reset(num_faces))
|
||||
return false;
|
||||
processed_corner_ids_.clear();
|
||||
processed_corner_ids_.reserve(num_faces);
|
||||
processed_connectivity_corners_.clear();
|
||||
@ -637,27 +673,71 @@ MeshEdgeBreakerDecoderImpl<TraversalDecoder>::DecodeHoleAndTopologySplitEvents(
|
||||
uint32_t num_topology_splits;
|
||||
if (!decoder_buffer->Decode(&num_topology_splits))
|
||||
return -1;
|
||||
for (uint32_t i = 0; i < num_topology_splits; ++i) {
|
||||
TopologySplitEventData event_data;
|
||||
if (!decoder_buffer->Decode(&event_data.split_symbol_id))
|
||||
return -1;
|
||||
if (!decoder_buffer->Decode(&event_data.source_symbol_id))
|
||||
return -1;
|
||||
uint8_t edge_data;
|
||||
if (!decoder_buffer->Decode(&edge_data))
|
||||
return -1;
|
||||
event_data.source_edge = edge_data & 1;
|
||||
event_data.split_edge = (edge_data >> 1) & 1;
|
||||
topology_split_data_.push_back(event_data);
|
||||
if (num_topology_splits > 0) {
|
||||
if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
|
||||
// Decode source and split symbol ids using delta and varint coding. See
|
||||
// description in mesh_edgebreaker_encoder_impl.cc for more details.
|
||||
int last_source_symbol_id = 0;
|
||||
for (uint32_t i = 0; i < num_topology_splits; ++i) {
|
||||
TopologySplitEventData event_data;
|
||||
uint32_t delta;
|
||||
DecodeVarint<uint32_t>(&delta, decoder_buffer);
|
||||
event_data.source_symbol_id = delta + last_source_symbol_id;
|
||||
DecodeVarint<uint32_t>(&delta, decoder_buffer);
|
||||
event_data.split_symbol_id =
|
||||
event_data.source_symbol_id - static_cast<int32_t>(delta);
|
||||
last_source_symbol_id = event_data.source_symbol_id;
|
||||
topology_split_data_.push_back(event_data);
|
||||
}
|
||||
// Split edges are decoded from a direct bit decoder.
|
||||
decoder_buffer->StartBitDecoding(false, nullptr);
|
||||
for (uint32_t i = 0; i < num_topology_splits; ++i) {
|
||||
uint32_t edge_data;
|
||||
decoder_buffer->DecodeLeastSignificantBits32(2, &edge_data);
|
||||
TopologySplitEventData &event_data = topology_split_data_[i];
|
||||
event_data.source_edge = edge_data & 1;
|
||||
event_data.split_edge = (edge_data >> 1) & 1;
|
||||
}
|
||||
decoder_buffer->EndBitDecoding();
|
||||
} else {
|
||||
for (uint32_t i = 0; i < num_topology_splits; ++i) {
|
||||
TopologySplitEventData event_data;
|
||||
if (!decoder_buffer->Decode(&event_data.split_symbol_id))
|
||||
return -1;
|
||||
if (!decoder_buffer->Decode(&event_data.source_symbol_id))
|
||||
return -1;
|
||||
uint8_t edge_data;
|
||||
if (!decoder_buffer->Decode(&edge_data))
|
||||
return -1;
|
||||
event_data.source_edge = edge_data & 1;
|
||||
event_data.split_edge = (edge_data >> 1) & 1;
|
||||
topology_split_data_.push_back(event_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
uint32_t num_hole_events;
|
||||
if (!decoder_buffer->Decode(&num_hole_events))
|
||||
return -1;
|
||||
for (uint32_t i = 0; i < num_hole_events; ++i) {
|
||||
HoleEventData event_data;
|
||||
if (!decoder_buffer->Decode(&event_data))
|
||||
return -1;
|
||||
hole_event_data_.push_back(event_data);
|
||||
if (num_hole_events > 0) {
|
||||
if (decoder_->bitstream_version() >= DRACO_BITSTREAM_VERSION(1, 2)) {
|
||||
// Decode hole symbol ids using delta and varint coding.
|
||||
int last_symbol_id = 0;
|
||||
for (uint32_t i = 0; i < num_hole_events; ++i) {
|
||||
HoleEventData event_data;
|
||||
uint32_t delta;
|
||||
DecodeVarint<uint32_t>(&delta, decoder_buffer);
|
||||
event_data.symbol_id = delta + last_symbol_id;
|
||||
last_symbol_id = event_data.symbol_id;
|
||||
hole_event_data_.push_back(event_data);
|
||||
}
|
||||
} else {
|
||||
for (uint32_t i = 0; i < num_hole_events; ++i) {
|
||||
HoleEventData event_data;
|
||||
if (!decoder_buffer->Decode(&event_data))
|
||||
return -1;
|
||||
hole_event_data_.push_back(event_data);
|
||||
}
|
||||
}
|
||||
}
|
||||
return decoder_buffer->decoded_size();
|
||||
}
|
||||
@ -813,5 +893,6 @@ bool MeshEdgeBreakerDecoderImpl<TraversalDecoder>::AssignPointsToCorners() {
|
||||
template class MeshEdgeBreakerDecoderImpl<MeshEdgeBreakerTraversalDecoder>;
|
||||
template class MeshEdgeBreakerDecoderImpl<
|
||||
MeshEdgeBreakerTraversalPredictiveDecoder>;
|
||||
|
||||
template class MeshEdgeBreakerDecoderImpl<
|
||||
MeshEdgeBreakerTraversalValenceDecoder>;
|
||||
} // namespace draco
|
||||
|
@ -19,6 +19,7 @@
|
||||
#include <unordered_set>
|
||||
|
||||
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
|
||||
#include "compression/attributes/mesh_traversal_sequencer.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_shared.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
@ -66,6 +67,11 @@ class MeshEdgeBreakerDecoderImpl : public MeshEdgeBreakerDecoderImplInterface {
|
||||
}
|
||||
|
||||
private:
|
||||
// Creates a vertex traversal sequencer for the specified |TraverserT| type.
|
||||
template <class TraverserT>
|
||||
std::unique_ptr<PointsSequencer> CreateVertexTraversalSequencer(
|
||||
MeshAttributeIndicesEncodingData *encoding_data);
|
||||
|
||||
// Decodes connectivty between vertices (vertex indices).
|
||||
// Returns the number of vertices created by the decoder or -1 on error.
|
||||
int DecodeConnectivity(int num_symbols);
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
#include "compression/mesh/mesh_edgebreaker_encoder_impl.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -28,16 +29,24 @@ bool MeshEdgeBreakerEncoder::InitializeEncoder() {
|
||||
options()->IsFeatureSupported(features::kPredictiveEdgebreaker);
|
||||
|
||||
impl_ = nullptr;
|
||||
// For tiny meshes it's usually better to use the basic edgebreaker as the
|
||||
// overhead of the predictive one may turn out to be too big.
|
||||
// TODO(ostava): For now we have a set limit for forcing the basic edgebreaker
|
||||
// based on the number of faces, but a more complex heuristic may be used if
|
||||
// needed.
|
||||
const bool is_tiny_mesh = mesh()->num_faces() < 1000;
|
||||
|
||||
if (is_standard_edgebreaker_avaialable &&
|
||||
(options()->GetSpeed() >= 5 || !is_predictive_edgebreaker_avaialable)) {
|
||||
(options()->GetSpeed() >= 5 || !is_predictive_edgebreaker_avaialable ||
|
||||
is_tiny_mesh)) {
|
||||
buffer()->Encode(static_cast<uint8_t>(0));
|
||||
impl_ = std::unique_ptr<MeshEdgeBreakerEncoderImplInterface>(
|
||||
new MeshEdgeBreakerEncoderImpl<MeshEdgeBreakerTraversalEncoder>());
|
||||
} else if (is_predictive_edgebreaker_avaialable) {
|
||||
buffer()->Encode(static_cast<uint8_t>(1));
|
||||
buffer()->Encode(static_cast<uint8_t>(2));
|
||||
impl_ = std::unique_ptr<MeshEdgeBreakerEncoderImplInterface>(
|
||||
new MeshEdgeBreakerEncoderImpl<
|
||||
MeshEdgeBreakerTraversalPredictiveEncoder>());
|
||||
MeshEdgeBreakerTraversalValenceEncoder>());
|
||||
}
|
||||
if (!impl_)
|
||||
return false;
|
||||
|
@ -17,14 +17,15 @@
|
||||
#include <algorithm>
|
||||
|
||||
#include "compression/attributes/mesh_attribute_indices_encoding_observer.h"
|
||||
#include "compression/attributes/mesh_traversal_sequencer.h"
|
||||
#include "compression/attributes/sequential_attribute_encoders_controller.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_encoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h"
|
||||
#include "mesh/corner_table_iterators.h"
|
||||
#include "mesh/corner_table_traversal_processor.h"
|
||||
#include "mesh/edgebreaker_traverser.h"
|
||||
#include "mesh/mesh_misc_functions.h"
|
||||
#include "mesh/prediction_degree_traverser.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -73,6 +74,30 @@ MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GetAttributeEncodingData(
|
||||
return &pos_encoding_data_;
|
||||
}
|
||||
|
||||
template <class TraversalEncoder>
|
||||
template <class TraverserT>
|
||||
std::unique_ptr<PointsSequencer>
|
||||
MeshEdgeBreakerEncoderImpl<TraversalEncoder>::CreateVertexTraversalSequencer(
|
||||
MeshAttributeIndicesEncodingData *encoding_data) {
|
||||
typedef typename TraverserT::TraversalObserver AttObserver;
|
||||
typedef typename TraverserT::TraversalProcessor AttProcessor;
|
||||
|
||||
std::unique_ptr<MeshTraversalSequencer<TraverserT>> traversal_sequencer(
|
||||
new MeshTraversalSequencer<TraverserT>(mesh_, encoding_data));
|
||||
|
||||
AttProcessor att_processor;
|
||||
AttObserver att_observer(corner_table_.get(), mesh_,
|
||||
traversal_sequencer.get(), encoding_data);
|
||||
TraverserT att_traverser;
|
||||
|
||||
att_processor.ResetProcessor(corner_table_.get());
|
||||
att_traverser.Init(std::move(att_processor), att_observer);
|
||||
|
||||
traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
|
||||
traversal_sequencer->SetTraverser(att_traverser);
|
||||
return std::move(traversal_sequencer);
|
||||
}
|
||||
|
||||
template <class TraversalEncoder>
|
||||
bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
|
||||
int32_t att_id) {
|
||||
@ -90,6 +115,7 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
|
||||
break;
|
||||
}
|
||||
}
|
||||
MeshTraversalMethod traversal_method = MESH_TRAVERSAL_DEPTH_FIRST;
|
||||
std::unique_ptr<PointsSequencer> sequencer;
|
||||
if (att->attribute_type() == GeometryAttribute::POSITION ||
|
||||
element_type == MESH_VERTEX_ATTRIBUTE ||
|
||||
@ -99,8 +125,6 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
|
||||
// mesh.
|
||||
typedef CornerTableTraversalProcessor<CornerTable> AttProcessor;
|
||||
typedef MeshAttributeIndicesEncodingObserver<CornerTable> AttObserver;
|
||||
// Traverser that is used to generate the encoding order of each attribute.
|
||||
typedef EdgeBreakerTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
|
||||
MeshAttributeIndicesEncodingData *encoding_data;
|
||||
if (att->attribute_type() == GeometryAttribute::POSITION) {
|
||||
@ -110,20 +134,19 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
|
||||
attribute_data_[att_data_id].is_connectivity_used = false;
|
||||
}
|
||||
|
||||
std::unique_ptr<MeshTraversalSequencer<AttTraverser>> traversal_sequencer(
|
||||
new MeshTraversalSequencer<AttTraverser>(mesh_, encoding_data));
|
||||
|
||||
AttProcessor att_processor;
|
||||
AttObserver att_observer(corner_table_.get(), mesh_,
|
||||
traversal_sequencer.get(), encoding_data);
|
||||
AttTraverser att_traverser;
|
||||
|
||||
att_processor.ResetProcessor(corner_table_.get());
|
||||
att_traverser.Init(att_processor, att_observer);
|
||||
|
||||
traversal_sequencer->SetCornerOrder(processed_connectivity_corners_);
|
||||
traversal_sequencer->SetTraverser(att_traverser);
|
||||
sequencer = std::move(traversal_sequencer);
|
||||
if (att->attribute_type() == GeometryAttribute::POSITION &&
|
||||
GetEncoder()->options()->GetSpeed() == 0) {
|
||||
// Traverser that is used to generate the encoding order of each
|
||||
// attribute.
|
||||
typedef PredictionDegreeTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
|
||||
traversal_method = MESH_TRAVERSAL_PREDICTION_DEGREE;
|
||||
} else {
|
||||
// Traverser that is used to generate the encoding order of each
|
||||
// attribute.
|
||||
typedef EdgeBreakerTraverser<AttProcessor, AttObserver> AttTraverser;
|
||||
sequencer = CreateVertexTraversalSequencer<AttTraverser>(encoding_data);
|
||||
}
|
||||
} else {
|
||||
// Else use a general per-corner encoder.
|
||||
typedef CornerTableTraversalProcessor<MeshAttributeCornerTable>
|
||||
@ -155,6 +178,12 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::GenerateAttributesEncoder(
|
||||
if (!sequencer)
|
||||
return false;
|
||||
|
||||
if (att_data_id == -1) {
|
||||
pos_traversal_method_ = traversal_method;
|
||||
} else {
|
||||
attribute_data_[att_data_id].traversal_method = traversal_method;
|
||||
}
|
||||
|
||||
std::unique_ptr<SequentialAttributeEncodersController> att_controller(
|
||||
new SequentialAttributeEncodersController(std::move(sequencer), att_id));
|
||||
|
||||
@ -174,9 +203,13 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::
|
||||
|
||||
// Also encode the type of the encoder that we used.
|
||||
int32_t element_type = MESH_VERTEX_ATTRIBUTE;
|
||||
MeshTraversalMethod traversal_method;
|
||||
if (att_data_id >= 0) {
|
||||
const int32_t att_id = attribute_data_[att_data_id].attribute_index;
|
||||
element_type = GetEncoder()->mesh()->GetAttributeElementType(att_id);
|
||||
traversal_method = attribute_data_[att_data_id].traversal_method;
|
||||
} else {
|
||||
traversal_method = pos_traversal_method_;
|
||||
}
|
||||
if (element_type == MESH_VERTEX_ATTRIBUTE ||
|
||||
(element_type == MESH_CORNER_ATTRIBUTE &&
|
||||
@ -187,6 +220,8 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::
|
||||
// Per-corner encoder.
|
||||
encoder_->buffer()->Encode(static_cast<uint8_t>(MESH_CORNER_ATTRIBUTE));
|
||||
}
|
||||
// Encode the mesh traversal method.
|
||||
encoder_->buffer()->Encode(static_cast<uint8_t>(traversal_method));
|
||||
return true;
|
||||
}
|
||||
|
||||
@ -348,24 +383,46 @@ bool MeshEdgeBreakerEncoderImpl<TraversalEncoder>::EncodeConnectivity() {
|
||||
// Encode topology split events.
|
||||
uint32_t num_events = topology_split_event_data_.size();
|
||||
encoder_->buffer()->Encode(num_events);
|
||||
for (uint32_t i = 0; i < num_events; ++i) {
|
||||
// TODO(ostava): We can do a better encoding of the event data but it's not
|
||||
// really needed for now.
|
||||
const TopologySplitEventData &event_data = topology_split_event_data_[i];
|
||||
encoder_->buffer()->Encode(event_data.split_symbol_id);
|
||||
encoder_->buffer()->Encode(event_data.source_symbol_id);
|
||||
const uint8_t edge_data =
|
||||
(event_data.source_edge | (event_data.split_edge << 1));
|
||||
encoder_->buffer()->Encode(edge_data);
|
||||
if (num_events > 0) {
|
||||
// Encode split symbols using delta and varint coding. Split edges are
|
||||
// encoded using direct bit coding.
|
||||
int last_source_symbol_id = 0; // Used for delta coding.
|
||||
for (uint32_t i = 0; i < num_events; ++i) {
|
||||
const TopologySplitEventData &event_data = topology_split_event_data_[i];
|
||||
// Encode source symbol id as delta from the previous source symbol id.
|
||||
// Source symbol ids are always stored in increasing order so the delta is
|
||||
// going to be positive.
|
||||
EncodeVarint<uint32_t>(
|
||||
event_data.source_symbol_id - last_source_symbol_id,
|
||||
encoder_->buffer());
|
||||
// Encode split symbol id as delta from the current source symbol id.
|
||||
// Split symbol id is always smaller than source symbol id so the below
|
||||
// delta is going to be positive.
|
||||
EncodeVarint<uint32_t>(
|
||||
event_data.source_symbol_id - event_data.split_symbol_id,
|
||||
encoder_->buffer());
|
||||
last_source_symbol_id = event_data.source_symbol_id;
|
||||
}
|
||||
encoder_->buffer()->StartBitEncoding(num_events * 2, false);
|
||||
for (uint32_t i = 0; i < num_events; ++i) {
|
||||
const TopologySplitEventData &event_data = topology_split_event_data_[i];
|
||||
encoder_->buffer()->EncodeLeastSignificantBits32(
|
||||
2, event_data.source_edge | (event_data.split_edge << 1));
|
||||
}
|
||||
encoder_->buffer()->EndBitEncoding();
|
||||
}
|
||||
// Encode hole events data.
|
||||
num_events = hole_event_data_.size();
|
||||
encoder_->buffer()->Encode(num_events);
|
||||
for (uint32_t i = 0; i < num_events; ++i) {
|
||||
// TODO(ostava): We can do a better encoding of the event data but it's not
|
||||
// really needed for now.
|
||||
// This should be also made platform independent.
|
||||
encoder_->buffer()->Encode((hole_event_data_[i]));
|
||||
if (num_events > 0) {
|
||||
// Encode hole symbol ids using delta and varint coding. The symbol ids are
|
||||
// always stored in increasing order so the deltas are going to be positive.
|
||||
int last_symbol_id = 0;
|
||||
for (uint32_t i = 0; i < num_events; ++i) {
|
||||
EncodeVarint<uint32_t>(hole_event_data_[i].symbol_id - last_symbol_id,
|
||||
encoder_->buffer());
|
||||
last_symbol_id = hole_event_data_[i].symbol_id;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
@ -734,5 +791,7 @@ bool MeshEdgeBreakerEncoderImpl<
|
||||
template class MeshEdgeBreakerEncoderImpl<MeshEdgeBreakerTraversalEncoder>;
|
||||
template class MeshEdgeBreakerEncoderImpl<
|
||||
MeshEdgeBreakerTraversalPredictiveEncoder>;
|
||||
template class MeshEdgeBreakerEncoderImpl<
|
||||
MeshEdgeBreakerTraversalValenceEncoder>;
|
||||
|
||||
} // namespace draco
|
||||
|
@ -18,6 +18,8 @@
|
||||
#include <unordered_map>
|
||||
|
||||
#include "compression/attributes/mesh_attribute_indices_encoding_data.h"
|
||||
#include "compression/attributes/mesh_traversal_sequencer.h"
|
||||
#include "compression/config/compression_shared.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_shared.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
@ -32,6 +34,7 @@ template <class TraversalEncoderT>
|
||||
class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
|
||||
public:
|
||||
MeshEdgeBreakerEncoderImpl();
|
||||
MeshEdgeBreakerEncoderImpl(const TraversalEncoderT &traversal_encoder);
|
||||
bool Init(MeshEdgeBreakerEncoder *encoder) override;
|
||||
|
||||
const MeshAttributeCornerTable *GetAttributeCornerTable(
|
||||
@ -46,6 +49,7 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
|
||||
const CornerTable *GetCornerTable() const override {
|
||||
return corner_table_.get();
|
||||
}
|
||||
bool IsFaceEncoded(FaceIndex fi) const { return visited_faces_[fi.value()]; }
|
||||
MeshEdgeBreakerEncoder *GetEncoder() const override { return encoder_; }
|
||||
|
||||
private:
|
||||
@ -53,6 +57,11 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
|
||||
// Returns false on error.
|
||||
bool InitAttributeData();
|
||||
|
||||
// Creates a vertex traversal sequencer for the specified |TraverserT| type.
|
||||
template <class TraverserT>
|
||||
std::unique_ptr<PointsSequencer> CreateVertexTraversalSequencer(
|
||||
MeshAttributeIndicesEncodingData *encoding_data);
|
||||
|
||||
// Finds the configuration of the initial face that starts the traversal.
|
||||
// Configurations are determined by location of holes around the init face
|
||||
// and they are described in mesh_edgebreaker_shared.h.
|
||||
@ -128,6 +137,9 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
|
||||
// Attribute data for position encoding.
|
||||
MeshAttributeIndicesEncodingData pos_encoding_data_;
|
||||
|
||||
// Traversal method used for the position attribute.
|
||||
MeshTraversalMethod pos_traversal_method_;
|
||||
|
||||
// Array storing corners in the order they were visited during the
|
||||
// connectivity encoding (always storing the tip corner of each newly visited
|
||||
// face).
|
||||
@ -171,6 +183,8 @@ class MeshEdgeBreakerEncoderImpl : public MeshEdgeBreakerEncoderImplInterface {
|
||||
bool is_connectivity_used;
|
||||
// Data about attribute encoding order.
|
||||
MeshAttributeIndicesEncodingData encoding_data;
|
||||
// Traversal method used to generate the encoding data for this attribute.
|
||||
MeshTraversalMethod traversal_method;
|
||||
};
|
||||
std::vector<AttributeData> attribute_data_;
|
||||
|
||||
|
@ -45,6 +45,10 @@ class MeshEdgeBreakerEncoderImplInterface {
|
||||
|
||||
// Returns corner table of the encoded mesh.
|
||||
virtual const CornerTable *GetCornerTable() const = 0;
|
||||
|
||||
// Returns true if a given face has been already encoded.
|
||||
virtual bool IsFaceEncoded(FaceIndex fi) const = 0;
|
||||
|
||||
virtual MeshEdgeBreakerEncoder *GetEncoder() const = 0;
|
||||
};
|
||||
|
||||
|
@ -147,4 +147,5 @@ TEST_F(MeshEdgebreakerEncodingTest, TestDecoderReuse) {
|
||||
<< "Decoded meshes are not the same";
|
||||
}
|
||||
|
||||
|
||||
} // namespace draco
|
||||
|
@ -50,6 +50,8 @@ namespace draco {
|
||||
// \ / S \ / / E \
|
||||
// *-------* *-------*
|
||||
//
|
||||
// TODO(osava): Get rid of the topology bit pattern. It's important only for
|
||||
// encoding but the algorithms should use EdgeBreakerSymbol instead.
|
||||
enum EdgeBreakerTopologyBitPattern {
|
||||
TOPOLOGY_C = 0x0, // 0
|
||||
TOPOLOGY_S = 0x1, // 1 0 0
|
||||
@ -64,11 +66,31 @@ enum EdgeBreakerTopologyBitPattern {
|
||||
TOPOLOGY_INVALID
|
||||
};
|
||||
|
||||
enum EdgeBreakerSymbol {
|
||||
EDGEBREAKER_SYMBOL_C = 0,
|
||||
EDGEBREAKER_SYMBOL_S,
|
||||
EDGEBREAKER_SYMBOL_L,
|
||||
EDGEBREAKER_SYMBOL_R,
|
||||
EDGEBREAKER_SYMBOL_E,
|
||||
EDGEBREAKER_SYMBOL_INVALID
|
||||
};
|
||||
|
||||
// Bit-length of symbols in the EdgeBreakerTopologyBitPattern stored as a
|
||||
// look up table for faster indexing.
|
||||
constexpr int32_t edge_breaker_topology_bit_pattern_length[] = {1, 3, 0, 3,
|
||||
0, 3, 0, 3};
|
||||
|
||||
// Zero-indexed symbol id for each of topology pattern.
|
||||
constexpr EdgeBreakerSymbol edge_breaker_topology_to_symbol_id[] = {
|
||||
EDGEBREAKER_SYMBOL_C, EDGEBREAKER_SYMBOL_S,
|
||||
EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_L,
|
||||
EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_R,
|
||||
EDGEBREAKER_SYMBOL_INVALID, EDGEBREAKER_SYMBOL_E};
|
||||
|
||||
// Reverse mapping between symbol id and topology pattern symbol.
|
||||
constexpr EdgeBreakerTopologyBitPattern edge_breaker_symbol_to_topology_id[] = {
|
||||
TOPOLOGY_C, TOPOLOGY_S, TOPOLOGY_L, TOPOLOGY_R, TOPOLOGY_E};
|
||||
|
||||
// Types of edges used during mesh traversal relative to the tip vertex of a
|
||||
// visited triangle.
|
||||
enum EdgeFaceName : uint8_t { LEFT_FACE_EDGE = 0, RIGHT_FACE_EDGE = 1 };
|
||||
@ -100,6 +122,11 @@ struct HoleEventData {
|
||||
explicit HoleEventData(int32_t sym_id) : symbol_id(sym_id) {}
|
||||
};
|
||||
|
||||
// List of supported modes for valence based edgebreaker coding.
|
||||
enum EdgeBreakerValenceCodingMode {
|
||||
EDGEBREAKER_VALENCE_MODE_2_7 = 0, // Use contexts for valences in range 2-7.
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_SHARED_H_
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_decoder_impl_interface.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_shared.h"
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_decoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
|
@ -18,7 +18,7 @@
|
||||
#include "compression/mesh/mesh_edgebreaker_encoder.h"
|
||||
#include "compression/mesh/mesh_edgebreaker_encoder_impl_interface.h"
|
||||
#include "core/macros.h"
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -32,8 +32,9 @@ class MeshEdgeBreakerTraversalEncoder {
|
||||
public:
|
||||
MeshEdgeBreakerTraversalEncoder()
|
||||
: encoder_impl_(nullptr), attribute_connectivity_encoders_(nullptr) {}
|
||||
void Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
|
||||
bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
|
||||
encoder_impl_ = encoder;
|
||||
return true;
|
||||
}
|
||||
|
||||
// Called before the traversal encoding is started.
|
||||
@ -105,6 +106,9 @@ class MeshEdgeBreakerTraversalEncoder {
|
||||
|
||||
protected:
|
||||
EncoderBuffer *GetOutputBuffer() { return &traversal_buffer_; }
|
||||
const MeshEdgeBreakerEncoderImplInterface *encoder_impl() const {
|
||||
return encoder_impl_;
|
||||
}
|
||||
|
||||
private:
|
||||
// Buffers for storing encoded data.
|
||||
|
@ -41,7 +41,7 @@ class MeshEdgeBreakerTraversalPredictiveDecoder
|
||||
if (!MeshEdgeBreakerTraversalDecoder::Start(out_buffer))
|
||||
return false;
|
||||
int32_t num_split_symbols;
|
||||
if (!out_buffer->Decode(&num_split_symbols))
|
||||
if (!out_buffer->Decode(&num_split_symbols) || num_split_symbols < 0)
|
||||
return false;
|
||||
// Add one vertex for each split symbol.
|
||||
num_vertices_ += num_split_symbols;
|
||||
|
@ -35,14 +35,16 @@ class MeshEdgeBreakerTraversalPredictiveEncoder
|
||||
last_corner_(kInvalidCornerIndex),
|
||||
num_symbols_(0) {}
|
||||
|
||||
void Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
|
||||
MeshEdgeBreakerTraversalEncoder::Init(encoder);
|
||||
bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
|
||||
if (!MeshEdgeBreakerTraversalEncoder::Init(encoder))
|
||||
return false;
|
||||
corner_table_ = encoder->GetCornerTable();
|
||||
// Initialize valences of all vertices.
|
||||
vertex_valences_.resize(corner_table_->num_vertices());
|
||||
for (uint32_t i = 0; i < vertex_valences_.size(); ++i) {
|
||||
vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }
|
||||
|
163
compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
Normal file
163
compression/mesh/mesh_edgebreaker_traversal_valence_decoder.h
Normal file
@ -0,0 +1,163 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
|
||||
#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
|
||||
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_decoder.h"
|
||||
#include "core/symbol_decoding.h"
|
||||
#include "core/varint_decoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Decoder for traversal encoded with MeshEdgeBreakerTraversalValenceEncoder.
|
||||
// The decoder maintains valences of the decoded portion of the traversed mesh
|
||||
// and it uses them to select entropy context used for decoding of the actual
|
||||
// symbols.
|
||||
class MeshEdgeBreakerTraversalValenceDecoder
|
||||
: public MeshEdgeBreakerTraversalDecoder {
|
||||
public:
|
||||
MeshEdgeBreakerTraversalValenceDecoder()
|
||||
: corner_table_(nullptr),
|
||||
num_vertices_(0),
|
||||
last_symbol_(-1),
|
||||
active_context_(-1),
|
||||
min_valence_(2),
|
||||
max_valence_(7) {}
|
||||
void Init(MeshEdgeBreakerDecoderImplInterface *decoder) {
|
||||
MeshEdgeBreakerTraversalDecoder::Init(decoder);
|
||||
corner_table_ = decoder->GetCornerTable();
|
||||
}
|
||||
void SetNumEncodedVertices(int num_vertices) { num_vertices_ = num_vertices; }
|
||||
|
||||
bool Start(DecoderBuffer *out_buffer) {
|
||||
if (!MeshEdgeBreakerTraversalDecoder::Start(out_buffer))
|
||||
return false;
|
||||
int32_t num_split_symbols;
|
||||
if (!out_buffer->Decode(&num_split_symbols))
|
||||
return false;
|
||||
// Add one extra vertex for each split symbol.
|
||||
num_vertices_ += num_split_symbols;
|
||||
// Set the valences of all initial vertices to 0.
|
||||
vertex_valences_.resize(num_vertices_, 0);
|
||||
|
||||
int8_t mode;
|
||||
if (!out_buffer->Decode(&mode))
|
||||
return false;
|
||||
if (mode == EDGEBREAKER_VALENCE_MODE_2_7) {
|
||||
min_valence_ = 2;
|
||||
max_valence_ = 7;
|
||||
} else {
|
||||
// Unsupported mode.
|
||||
return false;
|
||||
}
|
||||
|
||||
const int num_unique_valences = max_valence_ - min_valence_ + 1;
|
||||
|
||||
// Decode all symbols for all contexts.
|
||||
context_symbols_.resize(num_unique_valences);
|
||||
context_counters_.resize(context_symbols_.size());
|
||||
for (int i = 0; i < context_symbols_.size(); ++i) {
|
||||
uint32_t num_symbols;
|
||||
DecodeVarint<uint32_t>(&num_symbols, out_buffer);
|
||||
if (num_symbols > 0) {
|
||||
context_symbols_[i].resize(num_symbols);
|
||||
DecodeSymbols(num_symbols, 1, out_buffer, context_symbols_[i].data());
|
||||
// All symbols are going to be processed from the back.
|
||||
context_counters_[i] = num_symbols;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
inline uint32_t DecodeSymbol() {
|
||||
// First check if we have a valid context.
|
||||
if (active_context_ != -1) {
|
||||
const int symbol_id =
|
||||
context_symbols_[active_context_]
|
||||
[--context_counters_[active_context_]];
|
||||
last_symbol_ = edge_breaker_symbol_to_topology_id[symbol_id];
|
||||
} else {
|
||||
// We don't have a predicted symbol or the symbol was mis-predicted.
|
||||
// Decode it directly.
|
||||
last_symbol_ = MeshEdgeBreakerTraversalDecoder::DecodeSymbol();
|
||||
}
|
||||
return last_symbol_;
|
||||
}
|
||||
|
||||
inline void NewActiveCornerReached(CornerIndex corner) {
|
||||
const CornerIndex next = corner_table_->Next(corner);
|
||||
const CornerIndex prev = corner_table_->Previous(corner);
|
||||
// Update valences.
|
||||
switch (last_symbol_) {
|
||||
case TOPOLOGY_C:
|
||||
case TOPOLOGY_S:
|
||||
vertex_valences_[corner_table_->Vertex(next)] += 1;
|
||||
vertex_valences_[corner_table_->Vertex(prev)] += 1;
|
||||
break;
|
||||
case TOPOLOGY_R:
|
||||
vertex_valences_[corner_table_->Vertex(corner)] += 1;
|
||||
vertex_valences_[corner_table_->Vertex(next)] += 1;
|
||||
vertex_valences_[corner_table_->Vertex(prev)] += 2;
|
||||
break;
|
||||
case TOPOLOGY_L:
|
||||
vertex_valences_[corner_table_->Vertex(corner)] += 1;
|
||||
vertex_valences_[corner_table_->Vertex(next)] += 2;
|
||||
vertex_valences_[corner_table_->Vertex(prev)] += 1;
|
||||
break;
|
||||
case TOPOLOGY_E:
|
||||
vertex_valences_[corner_table_->Vertex(corner)] += 2;
|
||||
vertex_valences_[corner_table_->Vertex(next)] += 2;
|
||||
vertex_valences_[corner_table_->Vertex(prev)] += 2;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
// Compute the new context that is going to be used to decode the next
|
||||
// symbol.
|
||||
const int active_valence = vertex_valences_[corner_table_->Vertex(next)];
|
||||
int clamped_valence;
|
||||
if (active_valence < min_valence_) {
|
||||
clamped_valence = min_valence_;
|
||||
} else if (active_valence > max_valence_) {
|
||||
clamped_valence = max_valence_;
|
||||
} else {
|
||||
clamped_valence = active_valence;
|
||||
}
|
||||
|
||||
active_context_ = (clamped_valence - min_valence_);
|
||||
}
|
||||
|
||||
inline void MergeVertices(VertexIndex dest, VertexIndex source) {
|
||||
// Update valences on the merged vertices.
|
||||
vertex_valences_[dest] += vertex_valences_[source];
|
||||
}
|
||||
|
||||
private:
|
||||
const CornerTable *corner_table_;
|
||||
int num_vertices_;
|
||||
IndexTypeVector<VertexIndex, int> vertex_valences_;
|
||||
int last_symbol_;
|
||||
int active_context_;
|
||||
|
||||
int min_valence_;
|
||||
int max_valence_;
|
||||
std::vector<std::vector<uint32_t>> context_symbols_;
|
||||
// Points to the active symbol in each context.
|
||||
std::vector<int> context_counters_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_DECODER_H_
|
240
compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
Normal file
240
compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
Normal file
@ -0,0 +1,240 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
|
||||
#define DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
|
||||
|
||||
#include "compression/mesh/mesh_edgebreaker_traversal_encoder.h"
|
||||
#include "core/symbol_encoding.h"
|
||||
#include "core/varint_encoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Predictive encoder for the Edgebreaker symbols based on valences of the
// previously encoded vertices, following the method described in: Szymczak'02,
// "Optimized Edgebreaker Encoding for Large and Regular Triangle Meshes". Each
// valence is used to specify a different entropy context for encoding of the
// symbols.
// Encoder can operate in various predefined modes that can be used to select
// the way in which the entropy contexts are computed (e.g. using different
// clamping for valences, or even using different inputs to compute the
// contexts), see EdgeBreakerValenceCodingMode in mesh_edgebreaker_shared.h for
// a list of supported modes.
class MeshEdgeBreakerTraversalValenceEncoder
    : public MeshEdgeBreakerTraversalEncoder {
 public:
  MeshEdgeBreakerTraversalValenceEncoder()
      : corner_table_(nullptr),
        prev_symbol_(-1),
        num_split_symbols_(0),
        last_corner_(kInvalidCornerIndex),
        num_symbols_(0),
        min_valence_(2),
        max_valence_(7),
        mode_(EDGEBREAKER_VALENCE_MODE_2_7) {}

  // Prepares the encoder: validates the coding mode, snapshots the initial
  // valence of every vertex, and copies the corner-to-vertex mapping (the
  // copy is required because split symbols add extra vertices while encoding).
  // Returns false if the base encoder fails or the mode is unsupported.
  bool Init(MeshEdgeBreakerEncoderImplInterface *encoder) {
    if (!MeshEdgeBreakerTraversalEncoder::Init(encoder))
      return false;
    if (mode_ == EDGEBREAKER_VALENCE_MODE_2_7) {
      min_valence_ = 2;
      max_valence_ = 7;
    } else {
      return false;  // Unsupported mode.
    }
    corner_table_ = encoder->GetCornerTable();

    // Initialize valences of all vertices.
    vertex_valences_.resize(corner_table_->num_vertices());
    for (VertexIndex i(0); i < vertex_valences_.size(); ++i) {
      vertex_valences_[i] = corner_table_->Valence(VertexIndex(i));
    }

    // Replicate the corner to vertex map from the corner table. We need to do
    // this because the map may get updated during encoding because we add new
    // vertices when we encounter split symbols.
    corner_to_vertex_map_.resize(corner_table_->num_corners());
    for (CornerIndex i(0); i < corner_table_->num_corners(); ++i) {
      corner_to_vertex_map_[i] = corner_table_->Vertex(i);
    }
    const int32_t num_unique_valences = max_valence_ - min_valence_ + 1;

    context_symbols_.resize(num_unique_valences);
    return true;
  }

  // Remembers the corner whose face is about to be encoded next.
  inline void NewCornerReached(CornerIndex corner) { last_corner_ = corner; }

  inline void EncodeSymbol(EdgeBreakerTopologyBitPattern symbol) {
    ++num_symbols_;
    // Update valences on the mesh and compute the context that is going to be
    // used to encode the processed symbol.
    // Note that the valences are computed for the so far unencoded part of the
    // mesh (i.e. the decoding is reverse). Adding a new symbol either reduces
    // valences on the vertices or leaves the valence unchanged.

    const CornerIndex next = corner_table_->Next(last_corner_);
    const CornerIndex prev = corner_table_->Previous(last_corner_);

    // Get valence on the tip corner of the active edge (outgoing edge that is
    // going to be used in reverse decoding of the connectivity to predict the
    // next symbol).
    const int active_valence = vertex_valences_[corner_to_vertex_map_[next]];
    switch (symbol) {
      case TOPOLOGY_C:
        // Compute prediction.
        FALLTHROUGH_INTENDED;
      case TOPOLOGY_S:
        // Update valences.
        vertex_valences_[corner_to_vertex_map_[next]] -= 1;
        vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
        if (symbol == TOPOLOGY_S) {
          // Whenever we reach a split symbol, we need to split the vertex into
          // two and attach all corners on the left and right sides of the split
          // vertex to the respective vertices (see image below). This is
          // necessary since the decoder works in the reverse order and it
          // merges the two vertices only after the split symbol is processed.
          //
          //     *-----
          //    / \--------
          //   /   \--------
          //  /     \-------
          // *-------v-------*
          //  \     /c\     /
          //   \   /   \   /
          //    \ /n S p\ /
          //     *.......*
          //

          // Count the number of faces on the left side of the split vertex and
          // update the valence on the "left vertex".
          int num_left_faces = 0;
          CornerIndex act_c = corner_table_->Opposite(prev);
          while (act_c >= 0) {
            if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c)))
              break;  // Stop when we reach the first visited face.
            ++num_left_faces;
            act_c = corner_table_->Opposite(corner_table_->Next(act_c));
          }
          vertex_valences_[corner_to_vertex_map_[last_corner_]] =
              num_left_faces + 1;

          // Create a new vertex for the right side and count the number of
          // faces that should be attached to this vertex.
          const int new_vert_id = vertex_valences_.size();
          int num_right_faces = 0;

          act_c = corner_table_->Opposite(next);
          while (act_c >= 0) {
            if (encoder_impl()->IsFaceEncoded(corner_table_->Face(act_c)))
              break;  // Stop when we reach the first visited face.
            ++num_right_faces;
            // Map corners on the right side to the newly created vertex.
            corner_to_vertex_map_[corner_table_->Next(act_c)] = new_vert_id;
            act_c = corner_table_->Opposite(corner_table_->Previous(act_c));
          }
          vertex_valences_.push_back(num_right_faces + 1);

          ++num_split_symbols_;
        }
        break;
      case TOPOLOGY_R:
        // Update valences.
        vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
        vertex_valences_[corner_to_vertex_map_[next]] -= 1;
        vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
        break;
      case TOPOLOGY_L:
        // Update valences.
        vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 1;
        vertex_valences_[corner_to_vertex_map_[next]] -= 2;
        vertex_valences_[corner_to_vertex_map_[prev]] -= 1;
        break;
      case TOPOLOGY_E:
        // Update valences.
        vertex_valences_[corner_to_vertex_map_[last_corner_]] -= 2;
        vertex_valences_[corner_to_vertex_map_[next]] -= 2;
        vertex_valences_[corner_to_vertex_map_[prev]] -= 2;
        break;
      default:
        break;
    }

    // Symbols are accumulated one step behind: the previous symbol is stored
    // into the context selected by the valence that is active now.
    if (prev_symbol_ != -1) {
      int clamped_valence;
      if (active_valence < min_valence_) {
        clamped_valence = min_valence_;
      } else if (active_valence > max_valence_) {
        clamped_valence = max_valence_;
      } else {
        clamped_valence = active_valence;
      }

      const int context = clamped_valence - min_valence_;
      context_symbols_[context].push_back(
          edge_breaker_topology_to_symbol_id[prev_symbol_]);
    }

    prev_symbol_ = symbol;
  }

  // Flushes the last pending symbol and writes the valence header
  // (split-symbol count and mode) followed by all per-context symbol streams.
  void Done() {
    // We still need to store the last encoded symbol.
    if (prev_symbol_ != -1) {
      MeshEdgeBreakerTraversalEncoder::EncodeSymbol(
          static_cast<EdgeBreakerTopologyBitPattern>(prev_symbol_));
    }
    // Store the init face configurations and the explicitly encoded symbols.
    MeshEdgeBreakerTraversalEncoder::Done();
    // Encode the number of split symbols (needed to set the correct number of
    // vertices on the decoder side).
    GetOutputBuffer()->Encode(num_split_symbols_);
    // Encode the valence encoder mode.
    GetOutputBuffer()->Encode(static_cast<int8_t>(mode_));
    // Store the contexts.
    for (int i = 0; i < context_symbols_.size(); ++i) {
      EncodeVarint<uint32_t>(context_symbols_[i].size(), GetOutputBuffer());
      if (context_symbols_[i].size() > 0) {
        EncodeSymbols(context_symbols_[i].data(), context_symbols_[i].size(), 1,
                      GetOutputBuffer());
      }
    }
  }

  // Total number of symbols passed to EncodeSymbol() so far.
  int NumEncodedSymbols() const { return num_symbols_; }

 private:
  const CornerTable *corner_table_;  // Not owned.
  // Explicit map between corners and vertices. We cannot use the one stored
  // in the |corner_table_| because we may need to add additional vertices to
  // handle split symbols.
  IndexTypeVector<CornerIndex, VertexIndex> corner_to_vertex_map_;
  IndexTypeVector<VertexIndex, int> vertex_valences_;
  // Previously encoded symbol (-1 before the first symbol is seen).
  int32_t prev_symbol_;
  // The total number of encoded split symbols.
  int32_t num_split_symbols_;
  CornerIndex last_corner_;
  // Explicitly count the number of encoded symbols.
  int num_symbols_;

  int min_valence_;
  int max_valence_;
  EdgeBreakerValenceCodingMode mode_;

  // One symbol stream per entropy context (i.e. per clamped valence).
  std::vector<std::vector<uint32_t>> context_symbols_;
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_COMPRESSION_MESH_MESH_EDGEBREAKER_TRAVERSAL_VALENCE_ENCODER_H_
|
@ -61,17 +61,6 @@ class MeshEncoder : public PointCloudEncoder {
|
||||
// Needs to be implemented by the derived classes.
|
||||
virtual bool EncodeConnectivity() = 0;
|
||||
|
||||
// TODO(ostava): Prediction schemes need refactoring.
|
||||
/*
|
||||
// This method should be overriden by derived class to perform custom
|
||||
// initialization of various prediction schemes.
|
||||
virtual bool InitPredictionSchemeInternal(
|
||||
const MeshAttributeEncoder *att_encoder,
|
||||
PredictionSchemeInterface *scheme) {
|
||||
return true;
|
||||
}
|
||||
*/
|
||||
|
||||
void set_mesh(const Mesh *mesh) { mesh_ = mesh; }
|
||||
|
||||
private:
|
||||
|
@ -15,6 +15,7 @@
|
||||
#include "compression/mesh/mesh_encoder.h"
|
||||
|
||||
#include "compression/encode.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/draco_test_base.h"
|
||||
#include "core/draco_test_utils.h"
|
||||
#include "io/obj_decoder.h"
|
||||
@ -62,16 +63,23 @@ TEST_P(MeshEncoderTest, EncodeGoldenMesh) {
|
||||
std::string golden_file_name = file_name;
|
||||
golden_file_name += '.';
|
||||
golden_file_name += GetParam();
|
||||
golden_file_name += ".out";
|
||||
golden_file_name += ".0.10.0.drc";
|
||||
const std::unique_ptr<Mesh> mesh(DecodeObj(file_name));
|
||||
ASSERT_NE(mesh, nullptr) << "Failed to load test model " << file_name;
|
||||
|
||||
EncoderOptions options = CreateDefaultEncoderOptions();
|
||||
SetEncodingMethod(&options, method);
|
||||
EncoderBuffer buffer;
|
||||
ASSERT_TRUE(EncodeMeshToBuffer(*mesh.get(), options, &buffer))
|
||||
<< "Failed encoding test mesh " << file_name << " with method "
|
||||
<< GetParam();
|
||||
|
||||
// Check that the encoded mesh was really encoded with the selected method.
|
||||
DecoderBuffer decoder_buffer;
|
||||
decoder_buffer.Init(buffer.data(), buffer.size());
|
||||
decoder_buffer.Advance(8); // Skip the header to the encoding method id.
|
||||
uint8_t encoded_method;
|
||||
decoder_buffer.Decode(&encoded_method);
|
||||
ASSERT_EQ(encoded_method, method);
|
||||
if (!FLAGS_update_golden_files) {
|
||||
EXPECT_TRUE(
|
||||
CompareGoldenFile(golden_file_name, buffer.data(), buffer.size()))
|
||||
|
@ -24,7 +24,7 @@ MeshSequentialDecoder::MeshSequentialDecoder() {}
|
||||
|
||||
bool MeshSequentialDecoder::DecodeConnectivity() {
|
||||
int32_t num_faces;
|
||||
if (!buffer()->Decode(&num_faces))
|
||||
if (!buffer()->Decode(&num_faces) || num_faces < 0)
|
||||
return false;
|
||||
int32_t num_points;
|
||||
if (!buffer()->Decode(&num_points))
|
||||
|
@ -111,7 +111,8 @@ bool FloatPointsTreeDecoder::DecodePointCloudKdTreeInternal(
|
||||
}
|
||||
}
|
||||
|
||||
DCHECK_EQ(true, qpoints->size() == num_points_);
|
||||
if (qpoints->size() != num_points_)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -49,7 +49,13 @@ class FloatPointsTreeDecoder {
|
||||
float range() const { return qinfo_.range; }
|
||||
uint32_t num_points() const { return num_points_; }
|
||||
uint32_t version() const { return version_; }
|
||||
std::string identification_string() const { return "FloatPointsTreeDecoder"; }
|
||||
std::string identification_string() const {
|
||||
if (method_ == KDTREE) {
|
||||
return "FloatPointsTreeDecoder: IntegerPointsKDTreeDecoder";
|
||||
} else {
|
||||
return "FloatPointsTreeDecoder: Unsupported Method";
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
bool DecodePointCloudKdTreeInternal(DecoderBuffer *buffer,
|
||||
@ -57,6 +63,7 @@ class FloatPointsTreeDecoder {
|
||||
|
||||
static const uint32_t version_ = 3;
|
||||
QuantizationInfo qinfo_;
|
||||
PointCloudCompressionMethod method_;
|
||||
uint32_t num_points_;
|
||||
uint32_t compression_level_;
|
||||
};
|
||||
@ -75,10 +82,9 @@ bool FloatPointsTreeDecoder::DecodePointCloud(DecoderBuffer *buffer,
|
||||
if (!buffer->Decode(&method_number))
|
||||
return false;
|
||||
|
||||
const PointCloudCompressionMethod method =
|
||||
static_cast<PointCloudCompressionMethod>(method_number);
|
||||
method_ = static_cast<PointCloudCompressionMethod>(method_number);
|
||||
|
||||
if (method == KDTREE) {
|
||||
if (method_ == KDTREE) {
|
||||
if (!DecodePointCloudKdTreeInternal(buffer, &qpoints))
|
||||
return false;
|
||||
} else { // Unsupported method.
|
||||
|
@ -63,7 +63,13 @@ class FloatPointsTreeEncoder {
|
||||
uint32_t &compression_level() { return compression_level_; }
|
||||
float range() const { return qinfo_.range; }
|
||||
uint32_t num_points() const { return num_points_; }
|
||||
std::string identification_string() const { return "FloatPointsTreeEncoder"; }
|
||||
std::string identification_string() const {
|
||||
if (method_ == KDTREE) {
|
||||
return "FloatPointsTreeEncoder: IntegerPointsKDTreeEncoder";
|
||||
} else {
|
||||
return "FloatPointsTreeEncoder: Unsupported Method";
|
||||
}
|
||||
}
|
||||
|
||||
private:
|
||||
void Clear() { buffer_.Clear(); }
|
||||
|
@ -22,13 +22,13 @@
|
||||
|
||||
#include "compression/point_cloud/algorithms/point_cloud_types.h"
|
||||
#include "compression/point_cloud/algorithms/queuing_policy.h"
|
||||
#include "core/adaptive_rans_coding.h"
|
||||
#include "core/adaptive_rans_bit_decoder.h"
|
||||
#include "core/bit_utils.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/direct_bit_coding.h"
|
||||
#include "core/folded_bit32_coding.h"
|
||||
#include "core/direct_bit_decoder.h"
|
||||
#include "core/folded_integer_bit_decoder.h"
|
||||
#include "core/math_utils.h"
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_decoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
|
@ -22,13 +22,13 @@
|
||||
|
||||
#include "compression/point_cloud/algorithms/point_cloud_types.h"
|
||||
#include "compression/point_cloud/algorithms/queuing_policy.h"
|
||||
#include "core/adaptive_rans_coding.h"
|
||||
#include "core/adaptive_rans_bit_encoder.h"
|
||||
#include "core/bit_utils.h"
|
||||
#include "core/direct_bit_coding.h"
|
||||
#include "core/direct_bit_encoder.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
#include "core/folded_bit32_coding.h"
|
||||
#include "core/folded_integer_bit_encoder.h"
|
||||
#include "core/math_utils.h"
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
|
@ -24,8 +24,9 @@ enum PointCloudCompressionMethod {
|
||||
// Devillers to d dimensions.
|
||||
// "Progressive lossless compression of arbitrary simplicial complexes"
|
||||
// http://dx.doi.org/10.1145/566570.566591
|
||||
KDTREE,
|
||||
RESERVED_POINT_CLOUD_METHOD_1, // Reserved for internal use.
|
||||
KDTREE = 1,
|
||||
RESERVED_POINT_CLOUD_METHOD_2 = 2, // Reserved for internal use.
|
||||
RESERVED_POINT_CLOUD_METHOD_3 = 0, // Reserved for internal use.
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
@ -17,12 +17,54 @@
|
||||
namespace draco {
|
||||
|
||||
PointCloudDecoder::PointCloudDecoder()
|
||||
: point_cloud_(nullptr), buffer_(nullptr) {}
|
||||
: point_cloud_(nullptr),
|
||||
buffer_(nullptr),
|
||||
version_major_(0),
|
||||
version_minor_(0) {}
|
||||
|
||||
bool PointCloudDecoder::DecodeHeader(DecoderBuffer *buffer,
|
||||
DracoHeader *out_header) {
|
||||
// TODO(ostava): Add error codes for better error reporting.
|
||||
if (!buffer->Decode(out_header->draco_string, 5))
|
||||
return false;
|
||||
if (memcmp(out_header->draco_string, "DRACO", 5) != 0)
|
||||
return false; // Wrong file format?
|
||||
if (!buffer->Decode(&(out_header->version_major)))
|
||||
return false;
|
||||
if (!buffer->Decode(&(out_header->version_minor)))
|
||||
return false;
|
||||
if (!buffer->Decode(&(out_header->encoder_type)))
|
||||
return false;
|
||||
if (!buffer->Decode(&(out_header->encoder_method)))
|
||||
return false;
|
||||
if (!buffer->Decode(&(out_header->flags)))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PointCloudDecoder::Decode(DecoderBuffer *in_buffer,
|
||||
PointCloud *out_point_cloud) {
|
||||
buffer_ = in_buffer;
|
||||
point_cloud_ = out_point_cloud;
|
||||
DracoHeader header;
|
||||
if (!DecodeHeader(buffer_, &header))
|
||||
return false;
|
||||
// Sanity check that we are really using the right decoder (mostly for cases
|
||||
// where the Decode method was called manually outside of our main API).
|
||||
if (header.encoder_type != GetGeometryType())
|
||||
return false;
|
||||
// TODO(ostava): We should check the method as well, but currently decoders
|
||||
// don't expose the decoding method id.
|
||||
version_major_ = header.version_major;
|
||||
version_minor_ = header.version_minor;
|
||||
|
||||
// Check for version compatibility.
|
||||
if (version_major_ < 1 || version_major_ > kDracoBitstreamVersionMajor)
|
||||
return false;
|
||||
if (version_major_ == kDracoBitstreamVersionMajor &&
|
||||
version_minor_ > kDracoBitstreamVersionMinor)
|
||||
return false;
|
||||
|
||||
if (!InitializeDecoder())
|
||||
return false;
|
||||
if (!DecodeGeometryData())
|
||||
|
@ -30,6 +30,10 @@ class PointCloudDecoder {
|
||||
|
||||
virtual EncodedGeometryType GetGeometryType() const { return POINT_CLOUD; }
|
||||
|
||||
// Decodes a Draco header into the provided |out_header|.
|
||||
// Returns false on error.
|
||||
static bool DecodeHeader(DecoderBuffer *buffer, DracoHeader *out_header);
|
||||
|
||||
// The main entry point for point cloud decoding.
|
||||
bool Decode(DecoderBuffer *in_buffer, PointCloud *out_point_cloud);
|
||||
|
||||
@ -39,6 +43,11 @@ class PointCloudDecoder {
|
||||
attributes_decoders_.resize(att_decoder_id + 1);
|
||||
attributes_decoders_[att_decoder_id] = std::move(decoder);
|
||||
}
|
||||
|
||||
uint16_t bitstream_version() const {
|
||||
return DRACO_BITSTREAM_VERSION(version_major_, version_minor_);
|
||||
}
|
||||
|
||||
const AttributesDecoder *attributes_decoder(int dec_id) {
|
||||
return attributes_decoders_[dec_id].get();
|
||||
}
|
||||
@ -74,6 +83,10 @@ class PointCloudDecoder {
|
||||
|
||||
// Input buffer holding the encoded data.
|
||||
DecoderBuffer *buffer_;
|
||||
|
||||
// Bit-stream version of the encoder that encoded the input data.
|
||||
uint8_t version_major_;
|
||||
uint8_t version_minor_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
@ -35,6 +35,8 @@ bool PointCloudEncoder::Encode(const EncoderOptions &options,
|
||||
|
||||
if (!point_cloud_)
|
||||
return false;
|
||||
if (!EncodeHeader())
|
||||
return false;
|
||||
if (!InitializeEncoder())
|
||||
return false;
|
||||
if (!EncodeEncoderData())
|
||||
@ -46,6 +48,25 @@ bool PointCloudEncoder::Encode(const EncoderOptions &options,
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PointCloudEncoder::EncodeHeader() {
|
||||
// Encode the header according to our v1 specification.
|
||||
// Five bytes for Draco format.
|
||||
buffer_->Encode("DRACO", 5);
|
||||
// Version (major, minor).
|
||||
const uint8_t version_major = kDracoBitstreamVersionMajor;
|
||||
const uint8_t version_minor = kDracoBitstreamVersionMinor;
|
||||
buffer_->Encode(version_major);
|
||||
buffer_->Encode(version_minor);
|
||||
// Type of the encoder (point cloud, mesh, ...).
|
||||
const uint8_t encoder_type = GetGeometryType();
|
||||
buffer_->Encode(encoder_type);
|
||||
// Unique identifier for the selected encoding method (edgebreaker, etc...).
|
||||
buffer_->Encode(GetEncodingMethod());
|
||||
// Reserved for flags.
|
||||
buffer_->Encode(static_cast<uint16_t>(0));
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PointCloudEncoder::EncodePointAttributes() {
|
||||
if (!GenerateAttributesEncoders())
|
||||
return false;
|
||||
|
@ -109,6 +109,9 @@ class PointCloudEncoder {
|
||||
virtual bool EncodeAllAttributes();
|
||||
|
||||
private:
|
||||
// Encodes Draco header that is the same for all encoders.
|
||||
bool EncodeHeader();
|
||||
|
||||
// Rearranges attribute encoders and their attributes to reflect the
|
||||
// underlying attribute dependencies. This ensures that the attributes are
|
||||
// encoded in the correct order (parent attributes before their children).
|
||||
|
43
core/adaptive_rans_bit_coding_shared.h
Normal file
43
core/adaptive_rans_bit_coding_shared.h
Normal file
@ -0,0 +1,43 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides shared functions for adaptive rANS bit coding.
|
||||
#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
|
||||
#define DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
|
||||
|
||||
#include "core/macros.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Clamp the probability p to a uint8_t in the range [1,255].
|
||||
inline uint8_t clamp_probability(double p) {
|
||||
DCHECK_LE(p, 1.0);
|
||||
DCHECK_LE(0.0, p);
|
||||
uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
|
||||
p_int -= (p_int == 256);
|
||||
p_int += (p_int == 0);
|
||||
return static_cast<uint8_t>(p_int);
|
||||
}
|
||||
|
||||
// Update the probability according to the new incoming bit.
inline double update_probability(double old_p, bool bit) {
  // Exponential moving average over a window of w = 128 bits: the old
  // estimate decays by (w - 1) / w, and an observed zero bit contributes
  // an additional 1 / w.
  constexpr double kWindow = 128.0;
  const double decayed = old_p * ((kWindow - 1.0) / kWindow);
  const double zero_observed = bit ? 0.0 : 1.0;
  return decayed + zero_observed / kWindow;
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_CODING_SHARED_H_
|
@ -12,66 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/adaptive_rans_coding.h"
|
||||
#include "core/adaptive_rans_bit_decoder.h"
|
||||
|
||||
#include <iostream>
|
||||
#include "core/adaptive_rans_bit_coding_shared.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
uint8_t clamp_probability(double p) {
|
||||
DCHECK_LE(p, 1.0);
|
||||
DCHECK_LE(0.0, p);
|
||||
uint32_t p_int = static_cast<uint32_t>((p * 256) + 0.5);
|
||||
p_int -= (p_int == 256);
|
||||
p_int += (p_int == 0);
|
||||
return static_cast<uint8_t>(p_int);
|
||||
}
|
||||
|
||||
double update_probability(double old_p, bool bit) {
|
||||
static constexpr double w = 128.0;
|
||||
static constexpr double w0 = (w - 1.0) / w;
|
||||
static constexpr double w1 = 1.0 / w;
|
||||
return old_p * w0 + (!bit) * w1;
|
||||
}
|
||||
|
||||
AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {}
|
||||
|
||||
AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); }
|
||||
|
||||
void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); }
|
||||
|
||||
void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
|
||||
// Buffer for ans to write.
|
||||
std::vector<uint8_t> buffer(bits_.size() + 16);
|
||||
AnsCoder ans_coder;
|
||||
ans_write_init(&ans_coder, buffer.data());
|
||||
|
||||
// Unfortunaetly we have to encode the bits in reversed order, while the
|
||||
// probabilities that should be given are those of the forward sequence.
|
||||
double p0_f = 0.5;
|
||||
std::vector<uint8_t> p0s;
|
||||
p0s.reserve(bits_.size());
|
||||
for (bool b : bits_) {
|
||||
p0s.push_back(clamp_probability(p0_f));
|
||||
p0_f = update_probability(p0_f, b);
|
||||
}
|
||||
auto bit = bits_.rbegin();
|
||||
auto pit = p0s.rbegin();
|
||||
while (bit != bits_.rend()) {
|
||||
rabs_write(&ans_coder, *bit, *pit);
|
||||
++bit;
|
||||
++pit;
|
||||
}
|
||||
|
||||
const uint32_t size_in_bytes = ans_write_end(&ans_coder);
|
||||
target_buffer->Encode(size_in_bytes);
|
||||
target_buffer->Encode(buffer.data(), size_in_bytes);
|
||||
|
||||
Clear();
|
||||
}
|
||||
|
||||
void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); }
|
||||
|
||||
AdaptiveRAnsBitDecoder::AdaptiveRAnsBitDecoder() : p0_f_(0.5) {}
|
||||
|
||||
AdaptiveRAnsBitDecoder::~AdaptiveRAnsBitDecoder() { Clear(); }
|
54
core/adaptive_rans_bit_decoder.h
Normal file
54
core/adaptive_rans_bit_decoder.h
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides basic classes and functions for rANS bit decoding.
|
||||
#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
|
||||
#define DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/ans.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Class for decoding a sequence of bits that were encoded with
// AdaptiveRAnsBitEncoder.
class AdaptiveRAnsBitDecoder {
 public:
  AdaptiveRAnsBitDecoder();
  ~AdaptiveRAnsBitDecoder();

  // Sets |source_buffer| as the buffer to decode bits from.
  // Returns false if the buffer does not contain a valid bit sequence.
  bool StartDecoding(DecoderBuffer *source_buffer);

  // Decode one bit. Returns true if the bit is a 1, otherwise false.
  bool DecodeNextBit();

  // Decode the next |nbits| and return the sequence in |value|. |nbits| must be
  // > 0 and <= 32.
  void DecodeLeastSignificantBits32(int nbits, uint32_t *value);

  // No-op; decoding requires no explicit finalization.
  void EndDecoding() {}

 private:
  void Clear();

  // Underlying rANS decoder state.
  AnsDecoder ans_decoder_;
  // Adaptive probability estimate updated after each decoded bit
  // (presumably the probability of a zero bit, mirroring the encoder's
  // update rule — confirm against adaptive_rans_bit_coding_shared.h).
  double p0_f_;
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_DECODER_H_
|
59
core/adaptive_rans_bit_encoder.cc
Normal file
59
core/adaptive_rans_bit_encoder.cc
Normal file
@ -0,0 +1,59 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/adaptive_rans_bit_encoder.h"
|
||||
|
||||
#include "core/adaptive_rans_bit_coding_shared.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
AdaptiveRAnsBitEncoder::AdaptiveRAnsBitEncoder() {}
|
||||
|
||||
AdaptiveRAnsBitEncoder::~AdaptiveRAnsBitEncoder() { Clear(); }
|
||||
|
||||
void AdaptiveRAnsBitEncoder::StartEncoding() { Clear(); }
|
||||
|
||||
void AdaptiveRAnsBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
|
||||
// Buffer for ans to write.
|
||||
std::vector<uint8_t> buffer(bits_.size() + 16);
|
||||
AnsCoder ans_coder;
|
||||
ans_write_init(&ans_coder, buffer.data());
|
||||
|
||||
// Unfortunaetly we have to encode the bits in reversed order, while the
|
||||
// probabilities that should be given are those of the forward sequence.
|
||||
double p0_f = 0.5;
|
||||
std::vector<uint8_t> p0s;
|
||||
p0s.reserve(bits_.size());
|
||||
for (bool b : bits_) {
|
||||
p0s.push_back(clamp_probability(p0_f));
|
||||
p0_f = update_probability(p0_f, b);
|
||||
}
|
||||
auto bit = bits_.rbegin();
|
||||
auto pit = p0s.rbegin();
|
||||
while (bit != bits_.rend()) {
|
||||
rabs_write(&ans_coder, *bit, *pit);
|
||||
++bit;
|
||||
++pit;
|
||||
}
|
||||
|
||||
const uint32_t size_in_bytes = ans_write_end(&ans_coder);
|
||||
target_buffer->Encode(size_in_bytes);
|
||||
target_buffer->Encode(buffer.data(), size_in_bytes);
|
||||
|
||||
Clear();
|
||||
}
|
||||
|
||||
void AdaptiveRAnsBitEncoder::Clear() { bits_.clear(); }
|
||||
|
||||
} // namespace draco
|
@ -12,14 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides basic classes and functions for rANS coding.
|
||||
#ifndef DRACO_CORE_ADAPTIVE_RANS_CODING_H_
|
||||
#define DRACO_CORE_ADAPTIVE_RANS_CODING_H_
|
||||
// File provides basic classes and functions for rANS bit encoding.
|
||||
#ifndef DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
|
||||
#define DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/ans.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
@ -57,32 +56,6 @@ class AdaptiveRAnsBitEncoder {
|
||||
std::vector<bool> bits_;
|
||||
};
|
||||
|
||||
// Class for decoding a sequence of bits that were encoded with
|
||||
// AdaptiveRAnsBitEncoder.
|
||||
class AdaptiveRAnsBitDecoder {
|
||||
public:
|
||||
AdaptiveRAnsBitDecoder();
|
||||
~AdaptiveRAnsBitDecoder();
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit();
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
|
||||
|
||||
void EndDecoding() {}
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
AnsDecoder ans_decoder_;
|
||||
double p0_f_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_ADAPTIVE_RANS_CODING_H_
|
||||
#endif // DRACO_CORE_ADAPTIVE_RANS_BIT_ENCODER_H_
|
23
core/ans.h
23
core/ans.h
@ -18,14 +18,13 @@
|
||||
// See http://arxiv.org/abs/1311.2540v2 for more informaiton on rANS.
|
||||
// This file is based off libvpx's ans.h.
|
||||
|
||||
#include <assert.h>
|
||||
|
||||
#include <vector>
|
||||
|
||||
#define ANS_DIVIDE_BY_MULTIPLY 1
|
||||
#if ANS_DIVIDE_BY_MULTIPLY
|
||||
#include "core/divide.h"
|
||||
#endif
|
||||
#include "core/macros.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -98,14 +97,14 @@ static inline uint32_t mem_get_le32(const void *vmem) {
|
||||
}
|
||||
|
||||
static inline void mem_put_le16(void *vmem, uint32_t val) {
|
||||
uint8_t *mem = (uint8_t *)vmem;
|
||||
uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
|
||||
|
||||
mem[0] = (val >> 0) & 0xff;
|
||||
mem[1] = (val >> 8) & 0xff;
|
||||
}
|
||||
|
||||
static inline void mem_put_le24(void *vmem, uint32_t val) {
|
||||
uint8_t *mem = (uint8_t *)vmem;
|
||||
uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
|
||||
|
||||
mem[0] = (val >> 0) & 0xff;
|
||||
mem[1] = (val >> 8) & 0xff;
|
||||
@ -113,7 +112,7 @@ static inline void mem_put_le24(void *vmem, uint32_t val) {
|
||||
}
|
||||
|
||||
static inline void mem_put_le32(void *vmem, uint32_t val) {
|
||||
uint8_t *mem = (uint8_t *)vmem;
|
||||
uint8_t *mem = reinterpret_cast<uint8_t *>(vmem);
|
||||
|
||||
mem[0] = (val >> 0) & 0xff;
|
||||
mem[1] = (val >> 8) & 0xff;
|
||||
@ -130,8 +129,8 @@ static inline void ans_write_init(struct AnsCoder *const ans,
|
||||
|
||||
static inline int ans_write_end(struct AnsCoder *const ans) {
|
||||
uint32_t state;
|
||||
assert(ans->state >= l_base);
|
||||
assert(ans->state < l_base * io_base);
|
||||
DCHECK_GE(ans->state, l_base);
|
||||
DCHECK_LT(ans->state, l_base * io_base);
|
||||
state = ans->state - l_base;
|
||||
if (state < (1 << 6)) {
|
||||
ans->buf[ans->buf_offset] = (0x00 << 6) + state;
|
||||
@ -143,7 +142,7 @@ static inline int ans_write_end(struct AnsCoder *const ans) {
|
||||
mem_put_le24(ans->buf + ans->buf_offset, (0x02 << 22) + state);
|
||||
return ans->buf_offset + 3;
|
||||
} else {
|
||||
assert(0 && "State is too large to be serialized");
|
||||
DCHECK(0 && "State is too large to be serialized");
|
||||
return ans->buf_offset;
|
||||
}
|
||||
}
|
||||
@ -288,7 +287,7 @@ static inline int uabs_read_bit(struct AnsDecoder *ans) {
|
||||
while (state < l_base && ans->buf_offset > 0) {
|
||||
state = state * io_base + ans->buf[--ans->buf_offset];
|
||||
}
|
||||
s = (int)(state & 1);
|
||||
s = static_cast<int>(state & 1);
|
||||
ans->state = state >> 1;
|
||||
return s;
|
||||
}
|
||||
@ -355,8 +354,8 @@ class RAnsEncoder {
|
||||
// Needs to be called after all symbols are encoded.
|
||||
inline int write_end() {
|
||||
uint32_t state;
|
||||
assert(ans_.state >= l_rans_base);
|
||||
assert(ans_.state < l_rans_base * io_base);
|
||||
DCHECK_GE(ans_.state, l_rans_base);
|
||||
DCHECK_LT(ans_.state, l_rans_base * io_base);
|
||||
state = ans_.state - l_rans_base;
|
||||
if (state < (1 << 6)) {
|
||||
ans_.buf[ans_.buf_offset] = (0x00 << 6) + state;
|
||||
@ -371,7 +370,7 @@ class RAnsEncoder {
|
||||
mem_put_le32(ans_.buf + ans_.buf_offset, (0x03 << 30) + state);
|
||||
return ans_.buf_offset + 4;
|
||||
} else {
|
||||
assert(0 && "State is too large to be serialized");
|
||||
DCHECK(0 && "State is too large to be serialized");
|
||||
return ans_.buf_offset;
|
||||
}
|
||||
}
|
||||
|
@ -12,31 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/direct_bit_coding.h"
|
||||
#include <iostream>
|
||||
#include "core/direct_bit_decoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}
|
||||
|
||||
DirectBitEncoder::~DirectBitEncoder() { Clear(); }
|
||||
|
||||
void DirectBitEncoder::StartEncoding() { Clear(); }
|
||||
|
||||
void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
|
||||
bits_.push_back(local_bits_);
|
||||
const uint32_t size_in_byte = bits_.size() * 4;
|
||||
target_buffer->Encode(size_in_byte);
|
||||
target_buffer->Encode(bits_.data(), size_in_byte);
|
||||
Clear();
|
||||
}
|
||||
|
||||
void DirectBitEncoder::Clear() {
|
||||
bits_.clear();
|
||||
local_bits_ = 0;
|
||||
num_local_bits_ = 0;
|
||||
}
|
||||
|
||||
DirectBitDecoder::DirectBitDecoder() : pos_(bits_.end()), num_used_bits_(0) {}
|
||||
|
||||
DirectBitDecoder::~DirectBitDecoder() { Clear(); }
|
79
core/direct_bit_decoder.h
Normal file
79
core/direct_bit_decoder.h
Normal file
@ -0,0 +1,79 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides direct encoding of bits with arthmetic encoder interface.
|
||||
#ifndef DRACO_CORE_DIRECT_BIT_DECODER_H_
|
||||
#define DRACO_CORE_DIRECT_BIT_DECODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
class DirectBitDecoder {
|
||||
public:
|
||||
DirectBitDecoder();
|
||||
~DirectBitDecoder();
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit() {
|
||||
const uint32_t selector = 1 << (31 - num_used_bits_);
|
||||
const bool bit = *pos_ & selector;
|
||||
++num_used_bits_;
|
||||
if (num_used_bits_ == 32) {
|
||||
++pos_;
|
||||
num_used_bits_ = 0;
|
||||
}
|
||||
return bit;
|
||||
}
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
DCHECK_EQ(true, nbits <= 32);
|
||||
DCHECK_EQ(true, nbits > 0);
|
||||
const int remaining = 32 - num_used_bits_;
|
||||
if (nbits <= remaining) {
|
||||
*value = (*pos_ << num_used_bits_) >> (32 - nbits);
|
||||
num_used_bits_ += nbits;
|
||||
if (num_used_bits_ == 32) {
|
||||
++pos_;
|
||||
num_used_bits_ = 0;
|
||||
}
|
||||
} else {
|
||||
const uint32_t value_l = ((*pos_) << num_used_bits_);
|
||||
num_used_bits_ = nbits - remaining;
|
||||
++pos_;
|
||||
const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
|
||||
*value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
|
||||
}
|
||||
}
|
||||
|
||||
void EndDecoding() {}
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
std::vector<uint32_t> bits_;
|
||||
std::vector<uint32_t>::const_iterator pos_;
|
||||
uint32_t num_used_bits_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_DIRECT_BIT_DECODER_H_
|
39
core/direct_bit_encoder.cc
Normal file
39
core/direct_bit_encoder.cc
Normal file
@ -0,0 +1,39 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/direct_bit_encoder.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
DirectBitEncoder::DirectBitEncoder() : local_bits_(0), num_local_bits_(0) {}
|
||||
|
||||
DirectBitEncoder::~DirectBitEncoder() { Clear(); }
|
||||
|
||||
void DirectBitEncoder::StartEncoding() { Clear(); }
|
||||
|
||||
void DirectBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
|
||||
bits_.push_back(local_bits_);
|
||||
const uint32_t size_in_byte = bits_.size() * 4;
|
||||
target_buffer->Encode(size_in_byte);
|
||||
target_buffer->Encode(bits_.data(), size_in_byte);
|
||||
Clear();
|
||||
}
|
||||
|
||||
void DirectBitEncoder::Clear() {
|
||||
bits_.clear();
|
||||
local_bits_ = 0;
|
||||
num_local_bits_ = 0;
|
||||
}
|
||||
|
||||
} // namespace draco
|
@ -13,12 +13,11 @@
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides direct encoding of bits with arthmetic encoder interface.
|
||||
#ifndef DRACO_CORE_DIRECT_BIT_CODING_H_
|
||||
#define DRACO_CORE_DIRECT_BIT_CODING_H_
|
||||
#ifndef DRACO_CORE_DIRECT_BIT_ENCODER_H_
|
||||
#define DRACO_CORE_DIRECT_BIT_ENCODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
@ -85,58 +84,6 @@ class DirectBitEncoder {
|
||||
uint32_t num_local_bits_;
|
||||
};
|
||||
|
||||
class DirectBitDecoder {
|
||||
public:
|
||||
DirectBitDecoder();
|
||||
~DirectBitDecoder();
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit() {
|
||||
const uint32_t selector = 1 << (31 - num_used_bits_);
|
||||
const bool bit = *pos_ & selector;
|
||||
++num_used_bits_;
|
||||
if (num_used_bits_ == 32) {
|
||||
++pos_;
|
||||
num_used_bits_ = 0;
|
||||
}
|
||||
return bit;
|
||||
}
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
DCHECK_EQ(true, nbits <= 32);
|
||||
DCHECK_EQ(true, nbits > 0);
|
||||
const int remaining = 32 - num_used_bits_;
|
||||
if (nbits <= remaining) {
|
||||
*value = (*pos_ << num_used_bits_) >> (32 - nbits);
|
||||
num_used_bits_ += nbits;
|
||||
if (num_used_bits_ == 32) {
|
||||
++pos_;
|
||||
num_used_bits_ = 0;
|
||||
}
|
||||
} else {
|
||||
const uint32_t value_l = ((*pos_) << num_used_bits_);
|
||||
num_used_bits_ = nbits - remaining;
|
||||
++pos_;
|
||||
const uint32_t value_r = (*pos_) >> (32 - num_used_bits_);
|
||||
*value = (value_l >> (32 - num_used_bits_ - remaining)) | value_r;
|
||||
}
|
||||
}
|
||||
|
||||
void EndDecoding() {}
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
std::vector<uint32_t> bits_;
|
||||
std::vector<uint32_t>::const_iterator pos_;
|
||||
uint32_t num_used_bits_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_DIRECT_BIT_CODING_H_
|
||||
#endif // DRACO_CORE_DIRECT_BIT_ENCODER_H_
|
@ -18,7 +18,7 @@
|
||||
namespace draco {
|
||||
|
||||
// Draco version is comprised of <major>.<minor>.<revision>.
|
||||
static const char kDracoVersion[] = "0.9.1";
|
||||
static const char kDracoVersion[] = "0.10.0";
|
||||
|
||||
const char *Version() { return kDracoVersion; }
|
||||
|
||||
|
76
core/folded_integer_bit_decoder.h
Normal file
76
core/folded_integer_bit_decoder.h
Normal file
@ -0,0 +1,76 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides direct encoding of bits with arithmetic encoder interface.
|
||||
#ifndef DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
|
||||
#define DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// See FoldedBit32Encoder for more details.
|
||||
template <class BitDecoderT>
|
||||
class FoldedBit32Decoder {
|
||||
public:
|
||||
FoldedBit32Decoder() {}
|
||||
~FoldedBit32Decoder() {}
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer) {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
if (!folded_number_decoders_[i].StartDecoding(source_buffer))
|
||||
return false;
|
||||
}
|
||||
return bit_decoder_.StartDecoding(source_buffer);
|
||||
}
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwise false.
|
||||
bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); }
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
uint32_t result = 0;
|
||||
for (int i = 0; i < nbits; ++i) {
|
||||
const bool bit = folded_number_decoders_[i].DecodeNextBit();
|
||||
result = (result << 1) + bit;
|
||||
}
|
||||
*value = result;
|
||||
}
|
||||
|
||||
void EndDecoding() {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
folded_number_decoders_[i].EndDecoding();
|
||||
}
|
||||
bit_decoder_.EndDecoding();
|
||||
}
|
||||
|
||||
private:
|
||||
void Clear() {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
folded_number_decoders_[i].Clear();
|
||||
}
|
||||
bit_decoder_.Clear();
|
||||
}
|
||||
|
||||
std::array<BitDecoderT, 32> folded_number_decoders_;
|
||||
BitDecoderT bit_decoder_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_FOLDED_INTEGER_BIT_DECODER_H_
|
@ -13,12 +13,11 @@
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides direct encoding of bits with arithmetic encoder interface.
|
||||
#ifndef DRACO_CORE_FOLDED_BIT32_CODING_H_
|
||||
#define DRACO_CORE_FOLDED_BIT32_CODING_H_
|
||||
#ifndef DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
|
||||
#define DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
@ -29,6 +28,7 @@ namespace draco {
|
||||
// The behavior is essentially the same as other arithmetic encoding schemes,
|
||||
// the only difference is that encoding and decoding of bits must be absolutely
|
||||
// symmetric, bits handed in by EncodeBit32 must be also decoded in this way.
|
||||
// This is the FoldedBit32Encoder, see also FoldedBit32Decoder.
|
||||
template <class BitEncoderT>
|
||||
class FoldedBit32Encoder {
|
||||
public:
|
||||
@ -77,54 +77,6 @@ class FoldedBit32Encoder {
|
||||
BitEncoderT bit_encoder_;
|
||||
};
|
||||
|
||||
template <class BitDecoderT>
|
||||
class FoldedBit32Decoder {
|
||||
public:
|
||||
FoldedBit32Decoder() {}
|
||||
~FoldedBit32Decoder() {}
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer) {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
if (!folded_number_decoders_[i].StartDecoding(source_buffer))
|
||||
return false;
|
||||
}
|
||||
return bit_decoder_.StartDecoding(source_buffer);
|
||||
}
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwise false.
|
||||
bool DecodeNextBit() { return bit_decoder_.DecodeNextBit(); }
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
uint32_t result = 0;
|
||||
for (int i = 0; i < nbits; ++i) {
|
||||
const bool bit = folded_number_decoders_[i].DecodeNextBit();
|
||||
result = (result << 1) + bit;
|
||||
}
|
||||
*value = result;
|
||||
}
|
||||
|
||||
void EndDecoding() {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
folded_number_decoders_[i].EndDecoding();
|
||||
}
|
||||
bit_decoder_.EndDecoding();
|
||||
}
|
||||
|
||||
private:
|
||||
void Clear() {
|
||||
for (int i = 0; i < 32; i++) {
|
||||
folded_number_decoders_[i].Clear();
|
||||
}
|
||||
bit_decoder_.Clear();
|
||||
}
|
||||
|
||||
std::array<BitDecoderT, 32> folded_number_decoders_;
|
||||
BitDecoderT bit_decoder_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_FOLDED_BIT32_CODING_H_
|
||||
#endif // DRACO_CORE_FOLDED_INTEGER_BIT_ENCODER_H_
|
@ -32,17 +32,21 @@ namespace draco {
|
||||
|
||||
#define CHECK(x) (assert(x));
|
||||
#define CHECK_EQ(a, b) assert((a) == (b));
|
||||
#define CHECK_NE(a, b) assert((a) != (b));
|
||||
#define CHECK_GE(a, b) assert((a) >= (b));
|
||||
#define CHECK_GT(a, b) assert((a) > (b));
|
||||
#define CHECK_NE(a, b) assert((a) != (b));
|
||||
#define CHECK_LE(a, b) assert((a) <= (b));
|
||||
#define CHECK_LT(a, b) assert((a) < (b));
|
||||
#define CHECK_NOTNULL(x) assert((x) != NULL);
|
||||
|
||||
#define DCHECK(x) (assert(x));
|
||||
#define DCHECK_EQ(a, b) assert((a) == (b));
|
||||
#define DCHECK_NE(a, b) assert((a) != (b));
|
||||
#define DCHECK_GE(a, b) assert((a) >= (b));
|
||||
#define DCHECK_GT(a, b) assert((a) > (b));
|
||||
#define DCHECK_LE(a, b) assert((a) <= (b));
|
||||
#define DCHECK_LT(a, b) assert((a) < (b));
|
||||
#define DCHECK_NOTNULL(x) assert((x) != NULL);
|
||||
|
||||
#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
|
||||
TypeName(const TypeName &) = delete; \
|
||||
|
@ -23,7 +23,7 @@ std::string ValToString(int val) {
|
||||
sprintf(temp, "%d", val);
|
||||
return temp;
|
||||
}
|
||||
} // namespace anonymous
|
||||
} // namespace
|
||||
|
||||
namespace draco {
|
||||
|
||||
|
66
core/rans_bit_decoder.cc
Normal file
66
core/rans_bit_decoder.cc
Normal file
@ -0,0 +1,66 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/rans_bit_decoder.h"
|
||||
|
||||
#include "core/bit_utils.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {}
|
||||
|
||||
RAnsBitDecoder::~RAnsBitDecoder() { Clear(); }
|
||||
|
||||
bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
|
||||
Clear();
|
||||
|
||||
if (!source_buffer->Decode(&prob_zero_))
|
||||
return false;
|
||||
|
||||
uint32_t size_in_bytes;
|
||||
if (!source_buffer->Decode(&size_in_bytes))
|
||||
return false;
|
||||
|
||||
if (size_in_bytes > source_buffer->remaining_size())
|
||||
return false;
|
||||
|
||||
if (ans_read_init(&ans_decoder_,
|
||||
reinterpret_cast<uint8_t *>(
|
||||
const_cast<char *>(source_buffer->data_head())),
|
||||
size_in_bytes) != 0)
|
||||
return false;
|
||||
source_buffer->Advance(size_in_bytes);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool RAnsBitDecoder::DecodeNextBit() {
|
||||
const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_);
|
||||
return bit > 0;
|
||||
}
|
||||
|
||||
void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
DCHECK_EQ(true, nbits <= 32);
|
||||
DCHECK_EQ(true, nbits > 0);
|
||||
|
||||
uint32_t result = 0;
|
||||
while (nbits) {
|
||||
result = (result << 1) + DecodeNextBit();
|
||||
--nbits;
|
||||
}
|
||||
*value = result;
|
||||
}
|
||||
|
||||
void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); }
|
||||
|
||||
} // namespace draco
|
54
core/rans_bit_decoder.h
Normal file
54
core/rans_bit_decoder.h
Normal file
@ -0,0 +1,54 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides basic classes and functions for rANS coding.
|
||||
#ifndef DRACO_CORE_RANS_BIT_DECODER_H_
|
||||
#define DRACO_CORE_RANS_BIT_DECODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/ans.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder.
|
||||
class RAnsBitDecoder {
|
||||
public:
|
||||
RAnsBitDecoder();
|
||||
~RAnsBitDecoder();
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
// Returns false when the data is invalid.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit();
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
|
||||
|
||||
void EndDecoding() {}
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
AnsDecoder ans_decoder_;
|
||||
uint8_t prob_zero_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_RANS_BIT_DECODER_H_
|
@ -12,8 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
|
||||
#include "core/ans.h"
|
||||
#include "core/bit_utils.h"
|
||||
|
||||
namespace draco {
|
||||
@ -118,49 +119,4 @@ void RAnsBitEncoder::Clear() {
|
||||
num_local_bits_ = 0;
|
||||
}
|
||||
|
||||
RAnsBitDecoder::RAnsBitDecoder() : prob_zero_(0) {}
|
||||
|
||||
RAnsBitDecoder::~RAnsBitDecoder() { Clear(); }
|
||||
|
||||
bool RAnsBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
|
||||
Clear();
|
||||
|
||||
if (!source_buffer->Decode(&prob_zero_))
|
||||
return false;
|
||||
|
||||
uint32_t size_in_bytes;
|
||||
if (!source_buffer->Decode(&size_in_bytes))
|
||||
return false;
|
||||
|
||||
if (size_in_bytes > source_buffer->remaining_size())
|
||||
return false;
|
||||
|
||||
if (ans_read_init(&ans_decoder_,
|
||||
reinterpret_cast<uint8_t *>(
|
||||
const_cast<char *>(source_buffer->data_head())),
|
||||
size_in_bytes) != 0)
|
||||
return false;
|
||||
source_buffer->Advance(size_in_bytes);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool RAnsBitDecoder::DecodeNextBit() {
|
||||
const uint8_t bit = rabs_read(&ans_decoder_, prob_zero_);
|
||||
return bit > 0;
|
||||
}
|
||||
|
||||
void RAnsBitDecoder::DecodeLeastSignificantBits32(int nbits, uint32_t *value) {
|
||||
DCHECK_EQ(true, nbits <= 32);
|
||||
DCHECK_EQ(true, nbits > 0);
|
||||
|
||||
uint32_t result = 0;
|
||||
while (nbits) {
|
||||
result = (result << 1) + DecodeNextBit();
|
||||
--nbits;
|
||||
}
|
||||
*value = result;
|
||||
}
|
||||
|
||||
void RAnsBitDecoder::Clear() { ans_read_end(&ans_decoder_); }
|
||||
|
||||
} // namespace draco
|
@ -13,13 +13,11 @@
|
||||
// limitations under the License.
|
||||
//
|
||||
// File provides basic classes and functions for rANS coding.
|
||||
#ifndef DRACO_CORE_RANS_CODING_H_
|
||||
#define DRACO_CORE_RANS_CODING_H_
|
||||
#ifndef DRACO_CORE_RANS_BIT_ENCODER_H_
|
||||
#define DRACO_CORE_RANS_BIT_ENCODER_H_
|
||||
|
||||
#include <vector>
|
||||
|
||||
#include "core/ans.h"
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
@ -54,32 +52,6 @@ class RAnsBitEncoder {
|
||||
uint32_t num_local_bits_;
|
||||
};
|
||||
|
||||
// Class for decoding a sequence of bits that were encoded with RAnsBitEncoder.
|
||||
class RAnsBitDecoder {
|
||||
public:
|
||||
RAnsBitDecoder();
|
||||
~RAnsBitDecoder();
|
||||
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
// Returns false when the data is invalid.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit();
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
|
||||
|
||||
void EndDecoding() {}
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
AnsDecoder ans_decoder_;
|
||||
uint8_t prob_zero_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_RANS_CODING_H_
|
||||
#endif // DRACO_CORE_RANS_BIT_ENCODER_H_
|
@ -1,6 +1,8 @@
|
||||
#include "core/rans_coding.h"
|
||||
#include "core/adaptive_rans_coding.h"
|
||||
#include "core/adaptive_rans_bit_decoder.h"
|
||||
#include "core/adaptive_rans_bit_encoder.h"
|
||||
#include "core/draco_test_base.h"
|
||||
#include "core/rans_bit_decoder.h"
|
||||
#include "core/rans_bit_encoder.h"
|
||||
|
||||
// Just including rans_coding.h and adaptive_rans_coding.h gets an asan error
|
||||
// when compiling (blaze test :rans_coding_test --config=asan)
|
||||
|
@ -37,6 +37,17 @@ constexpr int ComputeRAnsPrecisionFromMaxSymbolBitLength(int max_bit_length) {
|
||||
: ComputeRAnsUnclampedPrecision(max_bit_length);
|
||||
}
|
||||
|
||||
// Compute approximate frequency table size needed for storing the provided
|
||||
// symbols.
|
||||
static int64_t ApproximateRAnsFrequencyTableBits(int32_t max_value,
|
||||
int num_unique_symbols) {
|
||||
// Approximate number of bits for storing zero frequency entries using the
|
||||
// run length encoding (with max length of 64).
|
||||
const int64_t table_zero_frequency_bits =
|
||||
8 * (num_unique_symbols + (max_value - num_unique_symbols) / 64);
|
||||
return 8 * num_unique_symbols + table_zero_frequency_bits;
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_RANS_SYMBOL_CODING_H_
|
||||
|
@ -61,22 +61,38 @@ bool RAnsSymbolDecoder<max_symbol_bit_length_t>::Create(DecoderBuffer *buffer) {
|
||||
return true;
|
||||
// Decode the table.
|
||||
for (uint32_t i = 0; i < num_symbols_; ++i) {
|
||||
uint32_t prob = 0;
|
||||
uint8_t byte_prob = 0;
|
||||
uint8_t prob_data = 0;
|
||||
// Decode the first byte and extract the number of extra bytes we need to
|
||||
// get.
|
||||
if (!buffer->Decode(&byte_prob))
|
||||
// get, or the offset to the next symbol with non-zero probability.
|
||||
if (!buffer->Decode(&prob_data))
|
||||
return false;
|
||||
const int extra_bytes = byte_prob & 3;
|
||||
prob = byte_prob >> 2;
|
||||
for (int b = 0; b < extra_bytes; ++b) {
|
||||
uint8_t eb;
|
||||
if (!buffer->Decode(&eb))
|
||||
// Token is stored in the first two bits of the first byte. Values 0-2 are
|
||||
// used to indicate the number of extra bytes, and value 3 is a special
|
||||
// symbol used to denote run-length coding of zero probability entries.
|
||||
// See rans_symbol_encoder.h for more details.
|
||||
const int token = prob_data & 3;
|
||||
if (token == 3) {
|
||||
const uint32_t offset = prob_data >> 2;
|
||||
if (i + offset >= num_symbols_)
|
||||
return false;
|
||||
// Shift 8 bits for each extra byte and subtract 2 for the two first bits.
|
||||
prob |= static_cast<uint32_t>(eb) << (8 * (b + 1) - 2);
|
||||
// Set zero probability for all symbols in the specified range.
|
||||
for (uint32_t j = 0; j < offset + 1; ++j) {
|
||||
probability_table_[i + j] = 0;
|
||||
}
|
||||
i += offset;
|
||||
} else {
|
||||
const int extra_bytes = token;
|
||||
uint32_t prob = prob_data >> 2;
|
||||
for (int b = 0; b < extra_bytes; ++b) {
|
||||
uint8_t eb;
|
||||
if (!buffer->Decode(&eb))
|
||||
return false;
|
||||
// Shift 8 bits for each extra byte and subtract 2 for the two first
|
||||
// bits.
|
||||
prob |= static_cast<uint32_t>(eb) << (8 * (b + 1) - 2);
|
||||
}
|
||||
probability_table_[i] = prob;
|
||||
}
|
||||
probability_table_[i] = prob;
|
||||
}
|
||||
if (!ans_.rans_build_look_up_table(&probability_table_[0], num_symbols_))
|
||||
return false;
|
||||
|
@ -64,7 +64,7 @@ class RAnsSymbolEncoder {
|
||||
};
|
||||
|
||||
// Encodes the probability table into the output buffer.
|
||||
void EncodeTable(EncoderBuffer *buffer);
|
||||
bool EncodeTable(EncoderBuffer *buffer);
|
||||
|
||||
static constexpr int max_symbols_ = 1 << max_symbol_bit_length_t;
|
||||
static constexpr int rans_precision_bits_ =
|
||||
@ -188,12 +188,13 @@ bool RAnsSymbolEncoder<max_symbol_bit_length_t>::Create(
|
||||
num_bits += static_cast<double>(frequencies[i]) * log2(norm_prob);
|
||||
}
|
||||
num_expected_bits_ = static_cast<uint64_t>(ceil(-num_bits));
|
||||
EncodeTable(buffer);
|
||||
if (!EncodeTable(buffer))
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
template <int max_symbol_bit_length_t>
|
||||
void RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
|
||||
bool RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
|
||||
EncoderBuffer *buffer) {
|
||||
buffer->Encode(num_symbols_);
|
||||
// Use varint encoding for the probabilities (first two bits represent the
|
||||
@ -206,17 +207,38 @@ void RAnsSymbolEncoder<max_symbol_bit_length_t>::EncodeTable(
|
||||
if (prob >= (1 << 14)) {
|
||||
num_extra_bytes++;
|
||||
if (prob >= (1 << 22)) {
|
||||
num_extra_bytes++;
|
||||
// The maximum number of precision bits is 20 so we should not really
|
||||
// get to this point.
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Encode the first byte (including the number of extra bytes).
|
||||
buffer->Encode(static_cast<uint8_t>((prob << 2) | (num_extra_bytes & 3)));
|
||||
// Encode the extra bytes.
|
||||
for (int b = 0; b < num_extra_bytes; ++b) {
|
||||
buffer->Encode(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
|
||||
if (prob == 0) {
|
||||
// When the probability of the symbol is 0, set the first two bits to 1
|
||||
// (unique identifier) and use the remaining 6 bits to store the offset
|
||||
// to the next symbol with non-zero probability.
|
||||
uint32_t offset = 0;
|
||||
for (; offset < (1 << 6) - 1; ++offset) {
|
||||
// Note: we don't have to check whether the next symbol id is larger
|
||||
// than num_symbols_ because we know that the last symbol always has
|
||||
// non-zero probability.
|
||||
const uint32_t next_prob = probability_table_[i + offset + 1].prob;
|
||||
if (next_prob > 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
buffer->Encode(static_cast<uint8_t>((offset << 2) | 3));
|
||||
i += offset;
|
||||
} else {
|
||||
// Encode the first byte (including the number of extra bytes).
|
||||
buffer->Encode(static_cast<uint8_t>((prob << 2) | (num_extra_bytes & 3)));
|
||||
// Encode the extra bytes.
|
||||
for (int b = 0; b < num_extra_bytes; ++b) {
|
||||
buffer->Encode(static_cast<uint8_t>(prob >> (8 * (b + 1) - 2)));
|
||||
}
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
template <int max_symbol_bit_length_t>
|
||||
|
33
core/shannon_entropy.cc
Normal file
33
core/shannon_entropy.cc
Normal file
@ -0,0 +1,33 @@
|
||||
#include "core/shannon_entropy.h"
|
||||
|
||||
#include <cmath>
|
||||
#include <vector>
|
||||
|
||||
namespace draco {
|
||||
|
||||
int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols,
|
||||
int max_value, int *out_num_unique_symbols) {
|
||||
// First find frequency of all unique symbols in the input array.
|
||||
int num_unique_symbols = 0;
|
||||
std::vector<int> symbol_frequencies(max_value + 1, 0);
|
||||
for (int i = 0; i < num_symbols; ++i) {
|
||||
++symbol_frequencies[symbols[i]];
|
||||
}
|
||||
double total_bits = 0;
|
||||
double num_symbols_d = num_symbols;
|
||||
for (int i = 0; i < max_value + 1; ++i) {
|
||||
if (symbol_frequencies[i] > 0) {
|
||||
++num_unique_symbols;
|
||||
// Compute Shannon entropy for the symbol.
|
||||
total_bits +=
|
||||
symbol_frequencies[i] *
|
||||
std::log2(static_cast<double>(symbol_frequencies[i]) / num_symbols_d);
|
||||
}
|
||||
}
|
||||
if (out_num_unique_symbols)
|
||||
*out_num_unique_symbols = num_unique_symbols;
|
||||
// Entropy is always negative.
|
||||
return -total_bits;
|
||||
}
|
||||
|
||||
} // namespace draco
|
38
core/shannon_entropy.h
Normal file
38
core/shannon_entropy.h
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_CORE_SHANNON_ENTROPY_H_
|
||||
#define DRACO_CORE_SHANNON_ENTROPY_H_
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Computes an approximate Shannon entropy of symbols stored in the provided
|
||||
// input array |symbols|. The entropy corresponds to the number of bits that is
|
||||
// required to represent/store all the symbols using an optimal entropy coding
|
||||
// algorithm. See for example "A mathematical theory of communication" by
|
||||
// Shannon'48 (http://ieeexplore.ieee.org/document/6773024/).
|
||||
//
|
||||
// |max_value| is a required input that define the maximum value in the input
|
||||
// |symbols| array.
|
||||
//
|
||||
// |out_num_unique_symbols| is an optional output argument that stores the
|
||||
// number of unique symbols contained within the |symbols| array.
|
||||
int64_t ComputeShannonEntropy(const uint32_t *symbols, int num_symbols,
|
||||
int max_value, int *out_num_unique_symbols);
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_SHANNON_ENTROPY_H_
|
47
core/symbol_bit_decoder.cc
Normal file
47
core/symbol_bit_decoder.cc
Normal file
@ -0,0 +1,47 @@
|
||||
#include "core/symbol_bit_decoder.h"
|
||||
|
||||
#include "core/symbol_decoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
bool SymbolBitDecoder::StartDecoding(DecoderBuffer *source_buffer) {
|
||||
uint32_t size;
|
||||
if (!source_buffer->Decode(&size))
|
||||
return false;
|
||||
|
||||
symbols_.resize(size);
|
||||
if (!DecodeSymbols(size, 1, source_buffer, symbols_.data()))
|
||||
return false;
|
||||
std::reverse(symbols_.begin(), symbols_.end());
|
||||
return true;
|
||||
}
|
||||
|
||||
bool SymbolBitDecoder::DecodeNextBit() {
|
||||
uint32_t symbol;
|
||||
DecodeLeastSignificantBits32(1, &symbol);
|
||||
DCHECK(symbol == 0 || symbol == 1);
|
||||
return symbol == 1;
|
||||
}
|
||||
|
||||
void SymbolBitDecoder::DecodeLeastSignificantBits32(int nbits,
|
||||
uint32_t *value) {
|
||||
DCHECK_LE(1, nbits);
|
||||
DCHECK_LE(nbits, 32);
|
||||
DCHECK_NE(value, nullptr);
|
||||
// Testing: check to make sure there is something to decode.
|
||||
DCHECK_GT(symbols_.size(), 0);
|
||||
|
||||
(*value) = symbols_.back();
|
||||
symbols_.pop_back();
|
||||
|
||||
const int discarded_bits = 32 - nbits;
|
||||
(*value) <<= discarded_bits;
|
||||
(*value) >>= discarded_bits;
|
||||
}
|
||||
|
||||
void SymbolBitDecoder::Clear() {
|
||||
symbols_.clear();
|
||||
symbols_.shrink_to_fit();
|
||||
}
|
||||
|
||||
} // namespace draco
|
36
core/symbol_bit_decoder.h
Normal file
36
core/symbol_bit_decoder.h
Normal file
@ -0,0 +1,36 @@
|
||||
#ifndef DRACO_CORE_SYMBOL_BIT_DECODER_H_
|
||||
#define DRACO_CORE_SYMBOL_BIT_DECODER_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Class for decoding bits using the symbol entropy encoding. Wraps
|
||||
// |DecodeSymbols|. Note that this uses a symbol-based encoding scheme for
|
||||
// encoding bits.
|
||||
class SymbolBitDecoder {
|
||||
public:
|
||||
// Sets |source_buffer| as the buffer to decode bits from.
|
||||
bool StartDecoding(DecoderBuffer *source_buffer);
|
||||
|
||||
// Decode one bit. Returns true if the bit is a 1, otherwsie false.
|
||||
bool DecodeNextBit();
|
||||
|
||||
// Decode the next |nbits| and return the sequence in |value|. |nbits| must be
|
||||
// > 0 and <= 32.
|
||||
void DecodeLeastSignificantBits32(int nbits, uint32_t *value);
|
||||
|
||||
void EndDecoding() { Clear(); }
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
std::vector<uint32_t> symbols_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_SYMBOL_BIT_DECODER_H_
|
29
core/symbol_bit_encoder.cc
Normal file
29
core/symbol_bit_encoder.cc
Normal file
@ -0,0 +1,29 @@
|
||||
#include "core/symbol_bit_encoder.h"
|
||||
|
||||
#include "core/symbol_encoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
void SymbolBitEncoder::EncodeLeastSignificantBits32(int nbits, uint32_t value) {
|
||||
DCHECK_LE(1, nbits);
|
||||
DCHECK_LE(nbits, 32);
|
||||
|
||||
const int discarded_bits = 32 - nbits;
|
||||
value <<= discarded_bits;
|
||||
value >>= discarded_bits;
|
||||
|
||||
symbols_.push_back(value);
|
||||
}
|
||||
|
||||
void SymbolBitEncoder::EndEncoding(EncoderBuffer *target_buffer) {
|
||||
target_buffer->Encode(static_cast<uint32_t>(symbols_.size()));
|
||||
EncodeSymbols(symbols_.data(), symbols_.size(), 1, target_buffer);
|
||||
Clear();
|
||||
}
|
||||
|
||||
void SymbolBitEncoder::Clear() {
|
||||
symbols_.clear();
|
||||
symbols_.shrink_to_fit();
|
||||
}
|
||||
|
||||
} // namespace draco
|
36
core/symbol_bit_encoder.h
Normal file
36
core/symbol_bit_encoder.h
Normal file
@ -0,0 +1,36 @@
|
||||
#ifndef DRACO_CORE_SYMBOL_BIT_ENCODER_H_
|
||||
#define DRACO_CORE_SYMBOL_BIT_ENCODER_H_
|
||||
|
||||
#include <algorithm>
|
||||
#include <vector>
|
||||
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Class for encoding bits using the symbol entropy encoding. Wraps
|
||||
// |EncodeSymbols|. Note that this uses a symbol-based encoding scheme for
|
||||
// encoding bits.
|
||||
class SymbolBitEncoder {
|
||||
public:
|
||||
// Must be called before any Encode* function is called.
|
||||
void StartEncoding() { Clear(); }
|
||||
|
||||
// Encode one bit. If |bit| is true encode a 1, otherwise encode a 0.
|
||||
void EncodeBit(bool bit) { EncodeLeastSignificantBits32(1, bit ? 1 : 0); }
|
||||
|
||||
// Encode |nibts| LSBs of |value| as a symbol. |nbits| must be > 0 and <= 32.
|
||||
void EncodeLeastSignificantBits32(int nbits, uint32_t value);
|
||||
|
||||
// Ends the bit encoding and stores the result into the target_buffer.
|
||||
void EndEncoding(EncoderBuffer *target_buffer);
|
||||
|
||||
private:
|
||||
void Clear();
|
||||
|
||||
std::vector<uint32_t> symbols_;
|
||||
};
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_SYMBOL_BIT_ENCODER_H_
|
@ -23,6 +23,14 @@ namespace draco {
|
||||
class SymbolCodingTest : public ::testing::Test {
|
||||
protected:
|
||||
SymbolCodingTest() {}
|
||||
|
||||
template <class SignedIntTypeT>
|
||||
void TestConvertToSymbolAndBack(SignedIntTypeT x) {
|
||||
typedef typename std::make_unsigned<SignedIntTypeT>::type Symbol;
|
||||
Symbol symbol = ConvertSignedIntToSymbol(x);
|
||||
SignedIntTypeT y = ConvertSymbolToSignedInt(symbol);
|
||||
ASSERT_EQ(x, y);
|
||||
}
|
||||
};
|
||||
|
||||
TEST_F(SymbolCodingTest, TestLargeNumbers) {
|
||||
@ -133,4 +141,13 @@ TEST_F(SymbolCodingTest, TestLargeNumberCondition) {
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(SymbolCodingTest, TestConversionFullRange) {
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(-128));
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(-127));
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(-1));
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(0));
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(1));
|
||||
TestConvertToSymbolAndBack(static_cast<int8_t>(127));
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
@ -29,15 +29,6 @@ void ConvertSymbolsToSignedInts(const uint32_t *in, int in_values,
|
||||
}
|
||||
}
|
||||
|
||||
int32_t ConvertSymbolToSignedInt(uint32_t val) {
|
||||
const bool is_negative = (val & 1);
|
||||
val >>= 1;
|
||||
int32_t ret = static_cast<int32_t>(val);
|
||||
if (is_negative)
|
||||
ret = -ret - 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
template <template <int> class SymbolDecoderT>
|
||||
bool DecodeTaggedSymbols(int num_values, int num_components,
|
||||
DecoderBuffer *src_buffer, uint32_t *out_values);
|
||||
|
@ -26,7 +26,20 @@ void ConvertSymbolsToSignedInts(const uint32_t *in, int in_values,
|
||||
|
||||
// Converts a single unsigned integer symbol encoded with an entropy encoder
|
||||
// back to a signed value.
|
||||
int32_t ConvertSymbolToSignedInt(uint32_t val);
|
||||
template <class IntTypeT>
|
||||
typename std::make_signed<IntTypeT>::type ConvertSymbolToSignedInt(
|
||||
IntTypeT val) {
|
||||
static_assert(std::is_integral<IntTypeT>::value, "IntTypeT is not integral.");
|
||||
typedef typename std::make_signed<IntTypeT>::type SignedType;
|
||||
const bool is_positive = !static_cast<bool>(val & 1);
|
||||
val >>= 1;
|
||||
if (is_positive) {
|
||||
return static_cast<SignedType>(val);
|
||||
}
|
||||
SignedType ret = static_cast<SignedType>(val);
|
||||
ret = -ret - 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Decodes an array of symbols that was previously encoded with an entropy code.
|
||||
// Returns false on error.
|
||||
|
@ -19,6 +19,7 @@
|
||||
|
||||
#include "core/bit_utils.h"
|
||||
#include "core/rans_symbol_encoder.h"
|
||||
#include "core/shannon_entropy.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
@ -37,22 +38,12 @@ void ConvertSignedIntsToSymbols(const int32_t *in, int in_values,
|
||||
}
|
||||
}
|
||||
|
||||
uint32_t ConvertSignedIntToSymbol(int32_t val) {
|
||||
const bool is_negative = (val < 0);
|
||||
if (is_negative)
|
||||
val = -val - 1; // Map -1 to 0, -2 to -1, etc..
|
||||
val <<= 1;
|
||||
if (is_negative)
|
||||
val |= 1;
|
||||
return static_cast<uint32_t>(val);
|
||||
}
|
||||
|
||||
// Computes bit lengths of the input values. If num_components > 1, the values
|
||||
// are processed in "num_components" sized chunks and the bit length is always
|
||||
// computed for the largest value from the chunk.
|
||||
static void ComputeBitLengths(const uint32_t *symbols, int num_values,
|
||||
int num_components,
|
||||
std::vector<int> *out_bit_lengths,
|
||||
std::vector<uint32_t> *out_bit_lengths,
|
||||
uint32_t *out_max_value) {
|
||||
out_bit_lengths->reserve(num_values);
|
||||
*out_max_value = 0;
|
||||
@ -75,10 +66,37 @@ static void ComputeBitLengths(const uint32_t *symbols, int num_values,
|
||||
}
|
||||
}
|
||||
|
||||
static int64_t ApproximateTaggedSchemeBits(
|
||||
const std::vector<uint32_t> bit_lengths, int num_components) {
|
||||
// Compute the total bit length used by all values (the length of data encode
|
||||
// after tags).
|
||||
uint64_t total_bit_length = 0;
|
||||
for (size_t i = 0; i < bit_lengths.size(); ++i) {
|
||||
total_bit_length += bit_lengths[i];
|
||||
}
|
||||
// Compute the number of entropy bits for tags.
|
||||
int num_unique_symbols;
|
||||
const int64_t tag_bits = ComputeShannonEntropy(
|
||||
bit_lengths.data(), bit_lengths.size(), 32, &num_unique_symbols);
|
||||
const int64_t tag_table_bits =
|
||||
ApproximateRAnsFrequencyTableBits(num_unique_symbols, num_unique_symbols);
|
||||
return tag_bits + tag_table_bits + total_bit_length * num_components;
|
||||
}
|
||||
|
||||
static int64_t ApproximateRawSchemeBits(const uint32_t *symbols,
|
||||
int num_symbols, uint32_t max_value) {
|
||||
int num_unique_symbols;
|
||||
const int64_t data_bits = ComputeShannonEntropy(
|
||||
symbols, num_symbols, max_value, &num_unique_symbols);
|
||||
const int64_t table_bits =
|
||||
ApproximateRAnsFrequencyTableBits(max_value, num_unique_symbols);
|
||||
return table_bits + data_bits;
|
||||
}
|
||||
|
||||
template <template <int> class SymbolEncoderT>
|
||||
bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
|
||||
int num_components,
|
||||
const std::vector<int> &bit_lengths,
|
||||
const std::vector<uint32_t> &bit_lengths,
|
||||
EncoderBuffer *target_buffer);
|
||||
|
||||
template <template <int> class SymbolEncoderT>
|
||||
@ -93,43 +111,20 @@ bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
|
||||
return true;
|
||||
if (num_components <= 0)
|
||||
num_components = 1;
|
||||
std::vector<int> bit_lengths;
|
||||
std::vector<uint32_t> bit_lengths;
|
||||
uint32_t max_value;
|
||||
ComputeBitLengths(symbols, num_values, num_components, &bit_lengths,
|
||||
&max_value);
|
||||
|
||||
// Compute the total bit length used by all values. This will be used for
|
||||
// computing a heuristic that chooses the optimal entropy encoding scheme.
|
||||
uint64_t total_bit_length = 0;
|
||||
for (size_t i = 0; i < bit_lengths.size(); ++i) {
|
||||
total_bit_length += bit_lengths[i];
|
||||
}
|
||||
|
||||
const int64_t num_component_values = num_values / num_components;
|
||||
|
||||
// The average number of bits necessary for encoding a single entry value.
|
||||
const int64_t average_bit_length =
|
||||
static_cast<int64_t>(ceil(static_cast<double>(total_bit_length) /
|
||||
static_cast<double>(num_component_values)));
|
||||
// The estimated average number of bits necessary for encoding a single
|
||||
// bit-length tag.
|
||||
int64_t average_bits_per_tag = static_cast<int64_t>(
|
||||
ceil(static_cast<float>(bits::MostSignificantBit(average_bit_length)) /
|
||||
static_cast<float>(num_components)));
|
||||
if (average_bits_per_tag <= 0)
|
||||
average_bits_per_tag = 1;
|
||||
|
||||
// Estimate the number of bits needed for encoding the values using the tagged
|
||||
// scheme. 32 * 8 is the overhead for encoding the entropy encoding table.
|
||||
// Approximate number of bits needed for storing the symbols using the tagged
|
||||
// scheme.
|
||||
const int64_t tagged_scheme_total_bits =
|
||||
num_component_values *
|
||||
(num_components * average_bit_length + average_bits_per_tag) +
|
||||
32 * 8;
|
||||
ApproximateTaggedSchemeBits(bit_lengths, num_components);
|
||||
|
||||
// Estimate the number of bits needed by the "raw" scheme. In this case,
|
||||
// max_value * 8 is the overhead of the entropy table.
|
||||
// Approximate number of bits needed for storing the symbols using the raw
|
||||
// scheme.
|
||||
const int64_t raw_scheme_total_bits =
|
||||
num_values * average_bit_length + max_value * 8;
|
||||
ApproximateRawSchemeBits(symbols, num_values, max_value);
|
||||
|
||||
// The maximum bit length of a single entry value that we can encode using
|
||||
// the raw scheme.
|
||||
@ -151,7 +146,7 @@ bool EncodeSymbols(const uint32_t *symbols, int num_values, int num_components,
|
||||
template <template <int> class SymbolEncoderT>
|
||||
bool EncodeTaggedSymbols(const uint32_t *symbols, int num_values,
|
||||
int num_components,
|
||||
const std::vector<int> &bit_lengths,
|
||||
const std::vector<uint32_t> &bit_lengths,
|
||||
EncoderBuffer *target_buffer) {
|
||||
// Create entries for entropy coding. Each entry corresponds to a different
|
||||
// number of bits that are necessary to encode a given value. Every value
|
||||
|
@ -15,6 +15,8 @@
|
||||
#ifndef DRACO_CORE_SYMBOL_ENCODING_H_
|
||||
#define DRACO_CORE_SYMBOL_ENCODING_H_
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
#include "core/encoder_buffer.h"
|
||||
|
||||
namespace draco {
|
||||
@ -26,7 +28,21 @@ void ConvertSignedIntsToSymbols(const int32_t *in, int in_values,
|
||||
|
||||
// Helper function that converts a single signed integer value into an unsigned
|
||||
// integer symbol that can be encoded using an entropy encoder.
|
||||
uint32_t ConvertSignedIntToSymbol(int32_t val);
|
||||
template <class IntTypeT>
|
||||
typename std::make_unsigned<IntTypeT>::type ConvertSignedIntToSymbol(
|
||||
IntTypeT val) {
|
||||
typedef typename std::make_unsigned<IntTypeT>::type UnsignedType;
|
||||
static_assert(std::is_integral<IntTypeT>::value, "IntTypeT is not integral.");
|
||||
// Early exit if val is positive.
|
||||
if (val >= 0) {
|
||||
return static_cast<UnsignedType>(val) << 1;
|
||||
}
|
||||
val = -(val + 1); // Map -1 to 0, -2 to -1, etc..
|
||||
UnsignedType ret = static_cast<UnsignedType>(val);
|
||||
ret <<= 1;
|
||||
ret |= 1;
|
||||
return ret;
|
||||
}
|
||||
|
||||
// Encodes an array of symbols using an entropy coding. This function
|
||||
// automatically decides whether to encode the symbol values using using bit
|
||||
|
55
core/varint_decoding.h
Normal file
55
core/varint_decoding.h
Normal file
@ -0,0 +1,55 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_CORE_VARINT_DECODING_H_
|
||||
#define DRACO_CORE_VARINT_DECODING_H_
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
#include "core/decoder_buffer.h"
|
||||
#include "core/symbol_decoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Decodes a specified integer as varint. Note that the IntTypeT must be the
|
||||
// same as the one used in the corresponding EncodeVarint() call.
|
||||
template <typename IntTypeT>
|
||||
void DecodeVarint(IntTypeT *out_val, DecoderBuffer *buffer) {
|
||||
if (std::is_unsigned<IntTypeT>::value) {
|
||||
// Coding of unsigned values.
|
||||
// 0-6 bit - data
|
||||
// 7 bit - next byte?
|
||||
uint8_t in;
|
||||
buffer->Decode(&in);
|
||||
if (in & (1 << 7)) {
|
||||
// Next byte is available, decode it first.
|
||||
DecodeVarint<IntTypeT>(out_val, buffer);
|
||||
// Append decoded info from this byte.
|
||||
*out_val <<= 7;
|
||||
*out_val |= in & ((1 << 7) - 1);
|
||||
} else {
|
||||
// Last byte reached
|
||||
*out_val = in;
|
||||
}
|
||||
} else {
|
||||
// IntTypeT is a signed value. Decode the symbol and convert to signed.
|
||||
typename std::make_unsigned<IntTypeT>::type symbol;
|
||||
DecodeVarint(&symbol, buffer);
|
||||
*out_val = ConvertSymbolToSignedInt(symbol);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_VARINT_DECODING_H_
|
52
core/varint_encoding.h
Normal file
52
core/varint_encoding.h
Normal file
@ -0,0 +1,52 @@
|
||||
// Copyright 2016 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
#ifndef DRACO_CORE_VARINT_ENCODING_H_
|
||||
#define DRACO_CORE_VARINT_ENCODING_H_
|
||||
|
||||
#include <type_traits>
|
||||
|
||||
#include "core/encoder_buffer.h"
|
||||
#include "core/symbol_encoding.h"
|
||||
|
||||
namespace draco {
|
||||
|
||||
// Encodes a specified integer as varint. Note that different coding is used
|
||||
// when IntTypeT is an unsigned data type.
|
||||
template <typename IntTypeT>
|
||||
void EncodeVarint(IntTypeT val, EncoderBuffer *out_buffer) {
|
||||
if (std::is_unsigned<IntTypeT>::value) {
|
||||
// Coding of unsigned values.
|
||||
// 0-6 bit - data
|
||||
// 7 bit - next byte?
|
||||
uint8_t out = 0;
|
||||
out |= val & ((1 << 7) - 1);
|
||||
if (val >= (1 << 7)) {
|
||||
out |= (1 << 7);
|
||||
out_buffer->Encode(out);
|
||||
EncodeVarint<IntTypeT>(val >> 7, out_buffer);
|
||||
return;
|
||||
}
|
||||
out_buffer->Encode(out);
|
||||
} else {
|
||||
// IntTypeT is a signed value. Convert to unsigned symbol and encode.
|
||||
const typename std::make_unsigned<IntTypeT>::type symbol =
|
||||
ConvertSignedIntToSymbol(val);
|
||||
EncodeVarint(symbol, out_buffer);
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace draco
|
||||
|
||||
#endif // DRACO_CORE_VARINT_ENCODING_H_
|
File diff suppressed because one or more lines are too long
BIN
javascript/draco_decoder.wasm
Normal file
BIN
javascript/draco_decoder.wasm
Normal file
Binary file not shown.
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
85
javascript/draco_wasm_wrapper.js
Normal file
85
javascript/draco_wasm_wrapper.js
Normal file
@ -0,0 +1,85 @@
|
||||
var $jscomp=$jscomp||{};$jscomp.scope={};$jscomp.defineProperty="function"==typeof Object.defineProperties?Object.defineProperty:function(n,f,k){if(k.get||k.set)throw new TypeError("ES3 does not support getters and setters.");n!=Array.prototype&&n!=Object.prototype&&(n[f]=k.value)};$jscomp.getGlobal=function(n){return"undefined"!=typeof window&&window===n?n:"undefined"!=typeof global&&null!=global?global:n};$jscomp.global=$jscomp.getGlobal(this);
|
||||
$jscomp.polyfill=function(n,f,k,P){if(f){k=$jscomp.global;n=n.split(".");for(P=0;P<n.length-1;P++){var H=n[P];H in k||(k[H]={});k=k[H]}n=n[n.length-1];P=k[n];f=f(P);f!=P&&null!=f&&$jscomp.defineProperty(k,n,{configurable:!0,writable:!0,value:f})}};$jscomp.polyfill("Math.imul",function(n){return n?n:function(f,k){f=Number(f);k=Number(k);var n=f&65535,H=k&65535;return n*H+((f>>>16&65535)*H+n*(k>>>16&65535)<<16>>>0)|0}},"es6-impl","es3");
|
||||
$jscomp.polyfill("Math.clz32",function(n){return n?n:function(f){f=Number(f)>>>0;if(0===f)return 32;var k=0;0===(f&4294901760)&&(f<<=16,k+=16);0===(f&4278190080)&&(f<<=8,k+=8);0===(f&4026531840)&&(f<<=4,k+=4);0===(f&3221225472)&&(f<<=2,k+=2);0===(f&2147483648)&&k++;return k}},"es6-impl","es3");$jscomp.polyfill("Math.trunc",function(n){return n?n:function(f){f=Number(f);if(isNaN(f)||Infinity===f||-Infinity===f||0===f)return f;var k=Math.floor(Math.abs(f));return 0>f?-k:k}},"es6-impl","es3");
|
||||
var DracoModule=function(n){function f(a){eval.call(null,a)}function k(a,c){a||L("Assertion failed: "+c)}function P(a,c,d){c=c||"i8";"*"===c.charAt(c.length-1)&&(c="i32");switch(c){case "i1":return M[a>>0];case "i8":return M[a>>0];case "i16":return fa[a>>1];case "i32":return t[a>>2];case "i64":return t[a>>2];case "float":return ma[a>>2];case "double":return na[a>>3];default:L("invalid type for setValue: "+c)}return null}function H(a,c,d,e){var b,f;"number"===typeof a?(b=!0,f=a):(b=!1,f=a.length);
|
||||
var g="string"===typeof c?c:null;d=4==d?e:["function"===typeof S?S:h.staticAlloc,h.stackAlloc,h.staticAlloc,h.dynamicAlloc][void 0===d?2:d](Math.max(f,g?1:c.length));if(b){e=d;k(0==(d&3));for(a=d+(f&-4);e<a;e+=4)t[e>>2]=0;for(a=d+f;e<a;)M[e++>>0]=0;return d}if("i8"===g)return a.subarray||a.slice?D.set(a,d):D.set(new Uint8Array(a),d),d;e=0;for(var n,q;e<f;){var m=a[e];"function"===typeof m&&(m=h.getFunctionIndex(m));b=g||c[e];if(0===b)e++;else{"i64"==b&&(b="i32");var p=d+e,u=b,u=u||"i8";"*"===u.charAt(u.length-
|
||||
1)&&(u="i32");switch(u){case "i1":M[p>>0]=m;break;case "i8":M[p>>0]=m;break;case "i16":fa[p>>1]=m;break;case "i32":t[p>>2]=m;break;case "i64":tempI64=[m>>>0,(tempDouble=m,1<=+gb(tempDouble)?0<tempDouble?(hb(+ib(tempDouble/4294967296),4294967295)|0)>>>0:~~+jb((tempDouble-+(~~tempDouble>>>0))/4294967296)>>>0:0)];t[p>>2]=tempI64[0];t[p+4>>2]=tempI64[1];break;case "float":ma[p>>2]=m;break;case "double":na[p>>3]=m;break;default:L("invalid type for setValue: "+u)}q!==b&&(n=h.getNativeTypeSize(b),q=b);e+=
|
||||
n}}return d}function ga(b,c){if(0===c||!b)return"";for(var d=0,e,l=0;;){e=D[b+l>>0];d|=e;if(0==e&&!c)break;l++;if(c&&l==c)break}c||(c=l);e="";if(128>d){for(;0<c;)d=String.fromCharCode.apply(String,D.subarray(b,b+Math.min(c,1024))),e=e?e+d:d,b+=1024,c-=1024;return e}return a.UTF8ToString(b)}function Ia(a,c,d,e){if(!(0<e))return 0;var b=d;e=d+e-1;for(var k=0;k<a.length;++k){var g=a.charCodeAt(k);55296<=g&&57343>=g&&(g=65536+((g&1023)<<10)|a.charCodeAt(++k)&1023);if(127>=g){if(d>=e)break;c[d++]=g}else{if(2047>=
|
||||
g){if(d+1>=e)break;c[d++]=192|g>>6}else{if(65535>=g){if(d+2>=e)break;c[d++]=224|g>>12}else{if(2097151>=g){if(d+3>=e)break;c[d++]=240|g>>18}else{if(67108863>=g){if(d+4>=e)break;c[d++]=248|g>>24}else{if(d+5>=e)break;c[d++]=252|g>>30;c[d++]=128|g>>24&63}c[d++]=128|g>>18&63}c[d++]=128|g>>12&63}c[d++]=128|g>>6&63}c[d++]=128|g&63}}c[d]=0;return d-b}function Ja(a){for(var b=0,d=0;d<a.length;++d){var e=a.charCodeAt(d);55296<=e&&57343>=e&&(e=65536+((e&1023)<<10)|a.charCodeAt(++d)&1023);127>=e?++b:b=2047>=
|
||||
e?b+2:65535>=e?b+3:2097151>=e?b+4:67108863>=e?b+5:b+6}return b}function kb(b){return b.replace(/__Z[\w\d_]+/g,function(b){var c;a:{var e=a.___cxa_demangle||a.__cxa_demangle;if(e)try{var l=b.substr(1),k=Ja(l)+1,g=S(k);Ia(l,D,g,k);var f=S(4),m=e(g,0,0,f);if(0===P(f,"i32")&&m){c=ga(m);break a}}catch(tc){}finally{g&&va(g),f&&va(f),m&&va(m)}else h.warnOnce("warning: build with -s DEMANGLE_SUPPORT=1 to link in libcxxabi demangling");c=b}return b===c?b:b+" ["+c+"]"})}function Ka(){var b;a:{b=Error();if(!b.stack){try{throw Error(0);
|
||||
}catch(c){b=c}if(!b.stack){b="(no stack trace available)";break a}}b=b.stack.toString()}a.extraStackTrace&&(b+="\n"+a.extraStackTrace());return kb(b)}function wa(a,c){0<a%c&&(a+=c-a%c);return a}function xa(){a.HEAP8=M=new Int8Array(z);a.HEAP16=fa=new Int16Array(z);a.HEAP32=t=new Int32Array(z);a.HEAPU8=D=new Uint8Array(z);a.HEAPU16=La=new Uint16Array(z);a.HEAPU32=Ma=new Uint32Array(z);a.HEAPF32=ma=new Float32Array(z);a.HEAPF64=na=new Float64Array(z)}function Na(){var b=a.usingWasm?ya:Oa,c=2147483648-
|
||||
b;if(t[Q>>2]>c)return!1;for(A=Math.max(A,lb);A<t[Q>>2];)A=536870912>=A?wa(2*A,b):Math.min(wa((3*A+2147483648)/4,b),c);b=a.reallocBuffer(A);if(!b||b.byteLength!=A)return!1;a.buffer=z=b;xa();return!0}function Y(b){for(;0<b.length;){var c=b.shift();if("function"==typeof c)c();else{var d=c.func;"number"===typeof d?void 0===c.arg?a.dynCall_v(d):a.dynCall_vi(d,c.arg):d(void 0===c.arg?null:c.arg)}}}function Pa(a,c,d){d=0<d?d:Ja(a)+1;d=Array(d);a=Ia(a,d,0,d.length);c&&(d.length=a);return d}function Qa(b){V++;
|
||||
a.monitorRunDependencies&&a.monitorRunDependencies(V)}function Ra(b){V--;a.monitorRunDependencies&&a.monitorRunDependencies(V);0==V&&(null!==za&&(clearInterval(za),za=null),ha&&(b=ha,ha=null,b()))}function Z(){return!!Z.uncaught_exception}function ia(){var b=y.last;if(!b)return(h.setTempRet0(0),0)|0;var c=y.infos[b],d=c.type;if(!d)return(h.setTempRet0(0),b)|0;var e=Array.prototype.slice.call(arguments);a.___cxa_is_pointer_type(d);ia.buffer||(ia.buffer=S(4));t[ia.buffer>>2]=b;for(var b=ia.buffer,l=
|
||||
0;l<e.length;l++)if(e[l]&&a.___cxa_can_catch(e[l],d,b))return b=t[b>>2],c.adjusted=b,(h.setTempRet0(e[l]),b)|0;b=t[b>>2];return(h.setTempRet0(d),b)|0}function oa(b,c){W.push(function(){a.dynCall_vi(b,c)});oa.level=W.length}function ja(b,c){ja.seen||(ja.seen={});b in ja.seen||(a.dynCall_v(c),ja.seen[b]=1)}function T(b,c){u.varargs=c;try{var d=u.get(),e=u.get(),l=u.get(),f=0;T.buffer||(T.buffers=[null,[],[]],T.printChar=function(b,c){var d=T.buffers[b];k(d);if(0===c||10===c){var e=1===b?a.print:a.printErr,
|
||||
l;a:{for(var f=l=0;d[f];)++f;if(16<f-l&&d.subarray&&Sa)l=Sa.decode(d.subarray(l,f));else for(var g,m,h,n,p,q,f="";;){g=d[l++];if(!g){l=f;break a}g&128?(m=d[l++]&63,192==(g&224)?f+=String.fromCharCode((g&31)<<6|m):(h=d[l++]&63,224==(g&240)?g=(g&15)<<12|m<<6|h:(n=d[l++]&63,240==(g&248)?g=(g&7)<<18|m<<12|h<<6|n:(p=d[l++]&63,248==(g&252)?g=(g&3)<<24|m<<18|h<<12|n<<6|p:(q=d[l++]&63,g=(g&1)<<30|m<<24|h<<18|n<<12|p<<6|q))),65536>g?f+=String.fromCharCode(g):(g-=65536,f+=String.fromCharCode(55296|g>>10,56320|
|
||||
g&1023)))):f+=String.fromCharCode(g)}}e(l);d.length=0}else d.push(c)});for(var g=0;g<l;g++){for(var m=t[e+8*g>>2],h=t[e+(8*g+4)>>2],n=0;n<h;n++)T.printChar(d,D[m+n]);f+=h}return f}catch(ua){return"undefined"!==typeof FS&&ua instanceof FS.ErrnoError||L(ua),-ua.errno}}function S(a){return h.dynamicAlloc(a+8)+8&4294967288}function aa(a){this.name="ExitStatus";this.message="Program terminated with exit("+a+")";this.status=a}function Aa(b){function c(){if(!a.calledRun&&(a.calledRun=!0,!ba)){pa||(pa=!0,
|
||||
Y(Ba));Y(Ta);if(a.onRuntimeInitialized)a.onRuntimeInitialized();a._main&&Ua&&a.callMain(b);if(a.postRun)for("function"==typeof a.postRun&&(a.postRun=[a.postRun]);a.postRun.length;)Va.unshift(a.postRun.shift());Y(Va)}}b=b||a.arguments;null===Wa&&(Wa=Date.now());if(!(0<V)){if(a.preRun)for("function"==typeof a.preRun&&(a.preRun=[a.preRun]);a.preRun.length;)Xa.unshift(a.preRun.shift());Y(Xa);0<V||a.calledRun||(a.setStatus?(a.setStatus("Running..."),setTimeout(function(){setTimeout(function(){a.setStatus("")},
|
||||
1);c()},1)):c())}}function Ya(b,c){if(!c||!a.noExitRuntime){if(!a.noExitRuntime&&(ba=!0,I=mb,Y(W),a.onExit))a.onExit(b);ca?process.exit(b):ka&&"function"===typeof quit&&quit(b);throw new aa(b);}}function L(b){void 0!==b?(a.print(b),a.printErr(b),b=JSON.stringify(b)):b="";ba=!0;var c="abort("+b+") at "+Ka()+"\nIf this abort() is unexpected, build with -s ASSERTIONS=1 which can give more information.";Za&&Za.forEach(function(a){c=a(c,b)});throw c;}function r(){}function J(a){return(a||r).__cache__}
|
||||
function da(a,c){var b=J(c),e=b[a];if(e)return e;e=Object.create((c||r).prototype);e.ptr=a;return b[a]=e}function B(){this.ptr=nb();J(B)[this.ptr]=this}function p(){this.ptr=ob();J(p)[this.ptr]=this}function m(){this.ptr=pb();J(m)[this.ptr]=this}function E(){this.ptr=qb();J(E)[this.ptr]=this}function N(){this.ptr=rb();J(N)[this.ptr]=this}function F(){this.ptr=sb();J(F)[this.ptr]=this}function x(){this.ptr=tb();J(x)[this.ptr]=this}function R(){throw"cannot construct a VoidPtr, no constructor in IDL";
|
||||
}function G(){this.ptr=ub();J(G)[this.ptr]=this}var a=n=n||{},$a=!1,ab=!1;a.onRuntimeInitialized=function(){$a=!0;if(ab&&"function"===typeof a.onModuleLoaded)a.onModuleLoaded(a)};a.onModuleParsed=function(){ab=!0;if($a&&"function"===typeof a.onModuleLoaded)a.onModuleLoaded(a)};a.isVersionSupported=function(a){if("string"!==typeof a)return!1;a=a.split(".");return 2>a.length||3<a.length||0<a[0]||9<a[1]?!1:!0};a||(a=("undefined"!==typeof n?n:null)||{});var la={},U;for(U in a)a.hasOwnProperty(U)&&(la[U]=
|
||||
a[U]);var ea=!1,X=!1,ca=!1,ka=!1;if(a.ENVIRONMENT)if("WEB"===a.ENVIRONMENT)ea=!0;else if("WORKER"===a.ENVIRONMENT)X=!0;else if("NODE"===a.ENVIRONMENT)ca=!0;else if("SHELL"===a.ENVIRONMENT)ka=!0;else throw Error("The provided Module['ENVIRONMENT'] value is not valid. It must be one of: WEB|WORKER|NODE|SHELL.");else ea="object"===typeof window,X="function"===typeof importScripts,ca="object"===typeof process&&"function"===typeof require&&!ea&&!X,ka=!ea&&!ca&&!X;if(ca){a.print||(a.print=console.log);
|
||||
a.printErr||(a.printErr=console.warn);var Ca,Da;a.read=function(a,c){Ca||(Ca=require("fs"));Da||(Da=require("path"));a=Da.normalize(a);var b=Ca.readFileSync(a);return c?b:b.toString()};a.readBinary=function(b){b=a.read(b,!0);b.buffer||(b=new Uint8Array(b));k(b.buffer);return b};a.load=function(a){f(read(a))};a.thisProgram||(a.thisProgram=1<process.argv.length?process.argv[1].replace(/\\/g,"/"):"unknown-program");a.arguments=process.argv.slice(2);"undefined"!==typeof module&&(module.exports=a);process.on("uncaughtException",
|
||||
function(a){if(!(a instanceof aa))throw a;});a.inspect=function(){return"[Emscripten Module object]"}}else if(ka)a.print||(a.print=print),"undefined"!=typeof printErr&&(a.printErr=printErr),a.read="undefined"!=typeof read?read:function(){throw"no read() available";},a.readBinary=function(a){if("function"===typeof readbuffer)return new Uint8Array(readbuffer(a));a=read(a,"binary");k("object"===typeof a);return a},"undefined"!=typeof scriptArgs?a.arguments=scriptArgs:"undefined"!=typeof arguments&&(a.arguments=
|
||||
arguments);else if(ea||X)a.read=function(a){var b=new XMLHttpRequest;b.open("GET",a,!1);b.send(null);return b.responseText},a.readAsync=function(a,c,d){var b=new XMLHttpRequest;b.open("GET",a,!0);b.responseType="arraybuffer";b.onload=function(){200==b.status||0==b.status&&b.response?c(b.response):d()};b.onerror=d;b.send(null)},"undefined"!=typeof arguments&&(a.arguments=arguments),"undefined"!==typeof console?(a.print||(a.print=function(a){console.log(a)}),a.printErr||(a.printErr=function(a){console.warn(a)})):
|
||||
a.print||(a.print=function(a){}),X&&(a.load=importScripts),"undefined"===typeof a.setWindowTitle&&(a.setWindowTitle=function(a){document.title=a});else throw"Unknown runtime environment. Where are we?";!a.load&&a.read&&(a.load=function(b){f(a.read(b))});a.print||(a.print=function(){});a.printErr||(a.printErr=a.print);a.arguments||(a.arguments=[]);a.thisProgram||(a.thisProgram="./this.program");a.print=a.print;a.printErr=a.printErr;a.preRun=[];a.postRun=[];for(U in la)la.hasOwnProperty(U)&&(a[U]=la[U]);
|
||||
var la=void 0,h={setTempRet0:function(a){return tempRet0=a},getTempRet0:function(){return tempRet0},stackSave:function(){return I},stackRestore:function(a){I=a},getNativeTypeSize:function(a){switch(a){case "i1":case "i8":return 1;case "i16":return 2;case "i32":return 4;case "i64":return 8;case "float":return 4;case "double":return 8;default:return"*"===a[a.length-1]?h.QUANTUM_SIZE:"i"===a[0]?(a=parseInt(a.substr(1)),k(0===a%8),a/8):0}},getNativeFieldSize:function(a){return Math.max(h.getNativeTypeSize(a),
|
||||
h.QUANTUM_SIZE)},STACK_ALIGN:16,prepVararg:function(a,c){"double"===c||"i64"===c?a&7&&(k(4===(a&7)),a+=4):k(0===(a&3));return a},getAlignSize:function(a,c,d){return d||"i64"!=a&&"double"!=a?a?Math.min(c||(a?h.getNativeFieldSize(a):0),h.QUANTUM_SIZE):Math.min(c,8):8},dynCall:function(b,c,d){return d&&d.length?a["dynCall_"+b].apply(null,[c].concat(d)):a["dynCall_"+b].call(null,c)},functionPointers:[],addFunction:function(a){for(var b=0;b<h.functionPointers.length;b++)if(!h.functionPointers[b])return h.functionPointers[b]=
|
||||
a,2*(1+b);throw"Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.";},removeFunction:function(a){h.functionPointers[(a-2)/2]=null},warnOnce:function(b){h.warnOnce.shown||(h.warnOnce.shown={});h.warnOnce.shown[b]||(h.warnOnce.shown[b]=1,a.printErr(b))},funcWrappers:{},getFuncWrapper:function(a,c){k(c);h.funcWrappers[c]||(h.funcWrappers[c]={});var b=h.funcWrappers[c];b[a]||(b[a]=1===c.length?function(){return h.dynCall(c,a)}:2===c.length?function(b){return h.dynCall(c,
|
||||
a,[b])}:function(){return h.dynCall(c,a,Array.prototype.slice.call(arguments))});return b[a]},getCompilerSetting:function(a){throw"You must build with -s RETAIN_COMPILER_SETTINGS=1 for Runtime.getCompilerSetting or emscripten_get_compiler_setting to work";},stackAlloc:function(a){var b=I;I=I+a|0;I=I+15&-16;return b},staticAlloc:function(a){var b=O;O=O+a|0;O=O+15&-16;return b},dynamicAlloc:function(a){var b=t[Q>>2];a=(b+a+15|0)&-16;t[Q>>2]=a;return a>=A&&!Na()?(t[Q>>2]=b,0):b},alignMemory:function(a,
|
||||
c){return Math.ceil(a/(c?c:16))*(c?c:16)},makeBigInt:function(a,c,d){return d?+(a>>>0)+4294967296*+(c>>>0):+(a>>>0)+4294967296*+(c|0)},GLOBAL_BASE:1024,QUANTUM_SIZE:4,__dummy__:0},ba=0,Sa="undefined"!==typeof TextDecoder?new TextDecoder("utf8"):void 0;"undefined"!==typeof TextDecoder&&new TextDecoder("utf-16le");var ya=65536,Oa=16777216,lb=16777216,z,M,D,fa,La,t,Ma,ma,na,qa,O,Ea,I,ra,Fa,Q;qa=O=Ea=I=ra=Fa=Q=0;a.reallocBuffer||(a.reallocBuffer=function(a){var b;try{if(ArrayBuffer.transfer)b=ArrayBuffer.transfer(z,
|
||||
a);else{var d=M;b=new ArrayBuffer(a);(new Int8Array(b)).set(d)}}catch(e){return!1}return vb(b)?b:!1});var sa;try{sa=Function.prototype.call.bind(Object.getOwnPropertyDescriptor(ArrayBuffer.prototype,"byteLength").get),sa(new ArrayBuffer(4))}catch(b){sa=function(a){return a.byteLength}}var Ga=a.TOTAL_STACK||5242880,A=a.TOTAL_MEMORY||16777216;A<Ga&&a.printErr("TOTAL_MEMORY should be larger than TOTAL_STACK, was "+A+"! (TOTAL_STACK="+Ga+")");a.buffer?z=a.buffer:"object"===typeof WebAssembly&&"function"===
|
||||
typeof WebAssembly.Memory?(a.wasmMemory=new WebAssembly.Memory({initial:A/ya}),z=a.wasmMemory.buffer):z=new ArrayBuffer(A);xa();t[0]=1668509029;fa[1]=25459;if(115!==D[2]||99!==D[3])throw"Runtime error: expected the system to be little-endian!";a.HEAP=void 0;a.buffer=z;a.HEAP8=M;a.HEAP16=fa;a.HEAP32=t;a.HEAPU8=D;a.HEAPU16=La;a.HEAPU32=Ma;a.HEAPF32=ma;a.HEAPF64=na;var Xa=[],Ba=[],Ta=[],W=[],Va=[],pa=!1;Math.imul&&-5===Math.imul(4294967295,5)||(Math.imul=function(a,c){var b=a&65535,e=c&65535;return b*
|
||||
e+((a>>>16)*e+b*(c>>>16)<<16)|0});Math.imul=Math.imul;if(!Math.fround){var bb=new Float32Array(1);Math.fround=function(a){bb[0]=a;return bb[0]}}Math.fround=Math.fround;Math.clz32||(Math.clz32=function(a){a>>>=0;for(var b=0;32>b;b++)if(a&1<<31-b)return b;return 32});Math.clz32=Math.clz32;Math.trunc||(Math.trunc=function(a){return 0>a?Math.ceil(a):Math.floor(a)});Math.trunc=Math.trunc;var gb=Math.abs,jb=Math.ceil,ib=Math.floor,hb=Math.min,V=0,za=null,ha=null;a.preloadedImages={};a.preloadedAudios={};
|
||||
var K=null;(function(b){function c(a,b){var c=p;if(0>a.indexOf("."))c=(c||{})[a];else var d=a.split("."),c=(c||{})[d[0]],c=(c||{})[d[1]];b&&(c=(c||{})[b]);void 0===c&&L("bad lookupImport to ("+a+")."+b);return c}function d(c){var d=b.buffer;c.byteLength<d.byteLength&&b.printErr("the new buffer in mergeMemory is smaller than the previous one. in native wasm, we should grow memory here");var d=new Int8Array(d),e=new Int8Array(c);K||d.set(e.subarray(b.STATIC_BASE,b.STATIC_BASE+b.STATIC_BUMP),b.STATIC_BASE);
|
||||
e.set(d);a.buffer=z=c;xa()}function e(){var a;ea||X?(a=b.wasmBinary,k(a,"on the web, we need the wasm binary to be preloaded and set on Module['wasmBinary']. emcc.py will do that for you when generating HTML (but not JS)"),a=new Uint8Array(a)):a=b.readBinary(h);return a}function l(a,c,d){if("function"!==typeof b.asm||b.asm===u)b.asmPreload?b.asm=b.asmPreload:eval(b.read(n));return"function"!==typeof b.asm?(b.printErr("asm evalling did not set the module properly"),!1):b.asm(a,c,d)}function f(a,c,
|
||||
l){if("object"!==typeof WebAssembly)return b.printErr("no native wasm support detected"),!1;if(!(b.wasmMemory instanceof WebAssembly.Memory))return b.printErr("no native wasm Memory in use"),!1;c.memory=b.wasmMemory;p.global={NaN:NaN,Infinity:Infinity};p["global.Math"]=a.Math;p.env=c;b.printErr("asynchronously preparing wasm");Qa("wasm-instantiate");WebAssembly.instantiate(e(),p).then(function(a){q=a.instance.exports;q.memory&&d(q.memory);b.asm=q;b.usingWasm=!0;Ra("wasm-instantiate")})["catch"](function(a){b.printErr("failed to asynchronously prepare wasm:\n "+
|
||||
a)});return{}}var g=b.wasmJSMethod||"native-wasm";b.wasmJSMethod=g;var m=b.wasmTextFile||"draco_decoder.wast",h=b.wasmBinaryFile||"draco_decoder.wasm",n=b.asmjsCodeFile||"draco_decoder.temp.asm.js",p={global:null,env:null,asm2wasm:{"f64-rem":function(a,b){return a%b},"f64-to-int":function(a){return a|0},"i32s-div":function(a,b){return(a|0)/(b|0)|0},"i32u-div":function(a,b){return(a>>>0)/(b>>>0)>>>0},"i32s-rem":function(a,b){return(a|0)%(b|0)|0},"i32u-rem":function(a,b){return(a>>>0)%(b>>>0)>>>0},
|
||||
"debugger":function(){debugger}},parent:b},q=null;b.asmPreload=b.asm;b.reallocBuffer=function(a){a=wa(a,b.usingWasm?ya:Oa);var c=b.buffer,d=c.byteLength;if(b.usingWasm)try{return-1!==b.wasmMemory.grow((a-d)/65536)?b.buffer=b.wasmMemory.buffer:null}catch(uc){return null}else return q.__growWasmMemory((a-d)/65536),b.buffer!==c?b.buffer:null};b.asm=function(a,h,u){if(!h.table){var t=b.wasmTableSize;void 0===t&&(t=1024);var w=b.wasmMaxTableSize;h.table="object"===typeof WebAssembly&&"function"===typeof WebAssembly.Table?
|
||||
void 0!==w?new WebAssembly.Table({initial:t,maximum:w,element:"anyfunc"}):new WebAssembly.Table({initial:t,element:"anyfunc"}):Array(t);b.wasmTable=h.table}h.memoryBase||(h.memoryBase=b.STATIC_BASE);h.tableBase||(h.tableBase=0);for(var v,t=g.split(","),w=0;w<t.length;w++)if(v=t[w],b.printErr("trying binaryen method: "+v),"native-wasm"===v){if(v=f(a,h,u))break}else if("asmjs"===v){if(v=l(a,h,u))break}else if("interpret-asm2wasm"===v||"interpret-s-expr"===v||"interpret-binary"===v){var r,C;C=a;r=h;
|
||||
var y=u,x=v;if("function"!==typeof WasmJS)b.printErr("WasmJS not detected - polyfill not bundled?"),v=!1;else{v=WasmJS({});v.outside=b;v.info=p;v.lookupImport=c;k(y===b.buffer);p.global=C;p.env=r;k(y===b.buffer);r.memory=y;k(r.memory instanceof ArrayBuffer);v.providedTotalMemory=b.buffer.byteLength;C="interpret-binary"===x?e():b.read("interpret-asm2wasm"==x?n:m);if("interpret-asm2wasm"==x)r=v._malloc(C.length+1),v.writeAsciiToMemory(C,r),v._load_asm2wasm(r);else if("interpret-s-expr"===x)r=v._malloc(C.length+
|
||||
1),v.writeAsciiToMemory(C,r),v._load_s_expr2wasm(r);else if("interpret-binary"===x)r=v._malloc(C.length),v.HEAPU8.set(C,r),v._load_binary2wasm(r,C.length);else throw"what? "+x;v._free(r);v._instantiate(r);b.newBuffer&&(d(b.newBuffer),b.newBuffer=null);v=q=v.asmExports}if(v)break}else throw"bad method: "+v;if(!v)throw"no binaryen method succeeded. consider enabling more options, like interpreting, if you want that: https://github.com/kripken/emscripten/wiki/WebAssembly#binaryen-methods";b.printErr("binaryen method succeeded.");
|
||||
return v};var u=b.asm})(a);qa=1024;O=qa+20016;Ba.push();K=0<=a.wasmJSMethod.indexOf("asmjs")||0<=a.wasmJSMethod.indexOf("interpret-asm2wasm")?"draco_decoder.js.mem":null;a.STATIC_BASE=qa;a.STATIC_BUMP=20016;var wb=O;O+=16;a._llvm_bswap_i16=xb;var y={last:0,caught:[],infos:{},deAdjust:function(a){if(!a||y.infos[a])return a;for(var b in y.infos)if(y.infos[b].adjusted===a)return b;return a},addRef:function(a){a&&y.infos[a].refcount++},decRef:function(b){if(b){var c=y.infos[b];k(0<c.refcount);c.refcount--;
|
||||
0!==c.refcount||c.rethrown||(c.destructor&&a.dynCall_vi(c.destructor,b),delete y.infos[b],___cxa_free_exception(b))}},clearRef:function(a){a&&(y.infos[a].refcount=0)}};a._memset=yb;a._memcpy=zb;var u={varargs:0,get:function(a){u.varargs+=4;return t[u.varargs-4>>2]},getStr:function(){return ga(u.get())},get64:function(){var a=u.get(),c=u.get();0<=a?k(0===c):k(-1===c);return a},getZero:function(){k(0===u.get())}},ta={};a._sbrk=Ab;a._memmove=Bb;var Ha=1;a._pthread_self=Cb;a._malloc=S;W.push(function(){var b=
|
||||
a._fflush;b&&b(0);if(b=T.printChar){var c=T.buffers;c[1].length&&b(1,10);c[2].length&&b(2,10)}});Q=H(1,"i32",2);Ea=I=h.alignMemory(O);ra=Ea+Ga;Fa=h.alignMemory(ra);t[Q>>2]=Fa;a.wasmTableSize=504;a.wasmMaxTableSize=504;a.asmGlobalArg={Math:Math,Int8Array:Int8Array,Int16Array:Int16Array,Int32Array:Int32Array,Uint8Array:Uint8Array,Uint16Array:Uint16Array,Uint32Array:Uint32Array,Float32Array:Float32Array,Float64Array:Float64Array,NaN:NaN,Infinity:Infinity,byteLength:sa};a.asmLibraryArg={abort:L,assert:k,
|
||||
enlargeMemory:Na,getTotalMemory:function(){return A},abortOnCannotGrowMemory:function(){L("Cannot enlarge memory arrays. Either (1) compile with -s TOTAL_MEMORY=X with X higher than the current value "+A+", (2) compile with -s ALLOW_MEMORY_GROWTH=1 which adjusts the size at runtime but prevents some optimizations, (3) set Module.TOTAL_MEMORY to a higher value before the program runs, or if you want malloc to return NULL (0) instead of this abort, compile with -s ABORTING_MALLOC=0 ")},invoke_iiii:function(b,
|
||||
c,d,e){try{return a.dynCall_iiii(b,c,d,e)}catch(l){if("number"!==typeof l&&"longjmp"!==l)throw l;a.setThrew(1,0)}},invoke_viiiii:function(b,c,d,e,l,f){try{a.dynCall_viiiii(b,c,d,e,l,f)}catch(g){if("number"!==typeof g&&"longjmp"!==g)throw g;a.setThrew(1,0)}},invoke_vi:function(b,c){try{a.dynCall_vi(b,c)}catch(d){if("number"!==typeof d&&"longjmp"!==d)throw d;a.setThrew(1,0)}},invoke_iiiiiii:function(b,c,d,e,l,f,g){try{return a.dynCall_iiiiiii(b,c,d,e,l,f,g)}catch(w){if("number"!==typeof w&&"longjmp"!==
|
||||
w)throw w;a.setThrew(1,0)}},invoke_ii:function(b,c){try{return a.dynCall_ii(b,c)}catch(d){if("number"!==typeof d&&"longjmp"!==d)throw d;a.setThrew(1,0)}},invoke_viii:function(b,c,d,e){try{a.dynCall_viii(b,c,d,e)}catch(l){if("number"!==typeof l&&"longjmp"!==l)throw l;a.setThrew(1,0)}},invoke_v:function(b){try{a.dynCall_v(b)}catch(c){if("number"!==typeof c&&"longjmp"!==c)throw c;a.setThrew(1,0)}},invoke_viiiiii:function(b,c,d,e,l,f,g){try{a.dynCall_viiiiii(b,c,d,e,l,f,g)}catch(w){if("number"!==typeof w&&
|
||||
"longjmp"!==w)throw w;a.setThrew(1,0)}},invoke_iii:function(b,c,d){try{return a.dynCall_iii(b,c,d)}catch(e){if("number"!==typeof e&&"longjmp"!==e)throw e;a.setThrew(1,0)}},invoke_viiii:function(b,c,d,e,l){try{a.dynCall_viiii(b,c,d,e,l)}catch(C){if("number"!==typeof C&&"longjmp"!==C)throw C;a.setThrew(1,0)}},_pthread_cleanup_pop:function(){k(oa.level==W.length,"cannot pop if something else added meanwhile!");W.pop();oa.level=W.length},_pthread_getspecific:function(a){return ta[a]||0},_pthread_setspecific:function(a,
|
||||
c){if(!(a in ta))return 22;ta[a]=c;return 0},_pthread_cleanup_push:oa,___cxa_throw:function(a,c,d){y.infos[a]={ptr:a,adjusted:a,type:c,destructor:d,refcount:0,caught:!1,rethrown:!1};y.last=a;"uncaught_exception"in Z?Z.uncaught_exception++:Z.uncaught_exception=1;throw a+" - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";},_pthread_key_create:function(a,c){if(0==a)return 22;t[a>>2]=Ha;ta[Ha]=0;
|
||||
Ha++;return 0},_abort:function(){a.abort()},___setErrNo:function(b){a.___errno_location&&(t[a.___errno_location()>>2]=b);return b},___syscall6:function(a,c){u.varargs=c;try{var b=u.getStreamFromFD();FS.close(b);return 0}catch(e){return"undefined"!==typeof FS&&e instanceof FS.ErrnoError||L(e),-e.errno}},_pthread_once:ja,___syscall146:T,___cxa_begin_catch:function(a){var b=y.infos[a];b&&!b.caught&&(b.caught=!0,Z.uncaught_exception--);b&&(b.rethrown=!1);y.caught.push(a);y.addRef(y.deAdjust(a));return a},
|
||||
_emscripten_memcpy_big:function(a,c,d){D.set(D.subarray(c,c+d),a);return a},___gxx_personality_v0:function(){},___syscall140:function(a,c){u.varargs=c;try{var b=u.getStreamFromFD(),e=u.get(),l=u.get(),f=u.get(),g=u.get();k(0===e);FS.llseek(b,l,g);t[f>>2]=b.position;b.getdents&&0===l&&0===g&&(b.getdents=null);return 0}catch(w){return"undefined"!==typeof FS&&w instanceof FS.ErrnoError||L(w),-w.errno}},___resumeException:function(a){y.last||(y.last=a);throw a+" - Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0 or DISABLE_EXCEPTION_CATCHING=2 to catch.";
|
||||
},___cxa_find_matching_catch:ia,___assert_fail:function(a,c,d,e){ba=!0;throw"Assertion failed: "+ga(a)+", at: "+[c?ga(c):"unknown filename",d,e?ga(e):"unknown function"]+" at "+Ka();},___cxa_pure_virtual:function(){ba=!0;throw"Pure virtual function called!";},___cxa_allocate_exception:function(a){return S(a)},__ZSt18uncaught_exceptionv:Z,DYNAMICTOP_PTR:Q,tempDoublePtr:wb,ABORT:ba,STACKTOP:I,STACK_MAX:ra};var cb=a.asm(a.asmGlobalArg,a.asmLibraryArg,z);a.asm=cb;var Db=a._emscripten_bind_WebIDLWrapper_DecodeMeshFromBuffer_1=
|
||||
function(){return a.asm._emscripten_bind_WebIDLWrapper_DecodeMeshFromBuffer_1.apply(null,arguments)},sb=a._emscripten_bind_DecoderBuffer_DecoderBuffer_0=function(){return a.asm._emscripten_bind_DecoderBuffer_DecoderBuffer_0.apply(null,arguments)},Eb=a._emscripten_bind_Mesh___destroy___0=function(){return a.asm._emscripten_bind_Mesh___destroy___0.apply(null,arguments)};a.stackSave=function(){return a.asm.stackSave.apply(null,arguments)};var Fb=a._emscripten_bind_PointAttribute_components_count_0=function(){return a.asm._emscripten_bind_PointAttribute_components_count_0.apply(null,
|
||||
arguments)},Gb=a._emscripten_bind_WebIDLWrapper_GetEncodedGeometryType_1=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetEncodedGeometryType_1.apply(null,arguments)},Hb=a._emscripten_bind_PointAttribute_byte_offset_0=function(){return a.asm._emscripten_bind_PointAttribute_byte_offset_0.apply(null,arguments)},Ib=a._emscripten_bind_WebIDLWrapper_DecodePointCloudFromBuffer_1=function(){return a.asm._emscripten_bind_WebIDLWrapper_DecodePointCloudFromBuffer_1.apply(null,arguments)},Jb=a._emscripten_bind_PointAttribute_normalized_0=
|
||||
function(){return a.asm._emscripten_bind_PointAttribute_normalized_0.apply(null,arguments)},Kb=a._emscripten_bind_PointCloud___destroy___0=function(){return a.asm._emscripten_bind_PointCloud___destroy___0.apply(null,arguments)},Lb=a._emscripten_bind_WebIDLWrapper_GetAttributeFloat_3=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetAttributeFloat_3.apply(null,arguments)},Mb=a._emscripten_bind_WebIDLWrapper_GetAttributeId_2=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetAttributeId_2.apply(null,
|
||||
arguments)};a.setTempRet0=function(){return a.asm.setTempRet0.apply(null,arguments)};a.setThrew=function(){return a.asm.setThrew.apply(null,arguments)};a.___cxa_is_pointer_type=function(){return a.asm.___cxa_is_pointer_type.apply(null,arguments)};var Nb=a._emscripten_bind_PointAttribute_size_0=function(){return a.asm._emscripten_bind_PointAttribute_size_0.apply(null,arguments)},Ob=a._emscripten_enum_draco_GeometryAttribute_Type_COLOR=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_COLOR.apply(null,
|
||||
arguments)},Pb=a._emscripten_enum_draco_GeometryAttribute_Type_POSITION=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_POSITION.apply(null,arguments)},Qb=a._emscripten_bind_VoidPtr___destroy___0=function(){return a.asm._emscripten_bind_VoidPtr___destroy___0.apply(null,arguments)},yb=a._memset=function(){return a.asm._memset.apply(null,arguments)},Rb=a._emscripten_bind_PointAttribute_attribute_type_0=function(){return a.asm._emscripten_bind_PointAttribute_attribute_type_0.apply(null,
|
||||
arguments)},Ab=a._sbrk=function(){return a.asm._sbrk.apply(null,arguments)},Sb=a._emscripten_bind_DecoderBuffer_Init_2=function(){return a.asm._emscripten_bind_DecoderBuffer_Init_2.apply(null,arguments)},zb=a._memcpy=function(){return a.asm._memcpy.apply(null,arguments)},Tb=a._emscripten_bind_DecoderBuffer___destroy___0=function(){return a.asm._emscripten_bind_DecoderBuffer___destroy___0.apply(null,arguments)};a.stackAlloc=function(){return a.asm.stackAlloc.apply(null,arguments)};var Ub=a._emscripten_bind_PointAttribute_custom_id_0=
|
||||
function(){return a.asm._emscripten_bind_PointAttribute_custom_id_0.apply(null,arguments)},Vb=a._emscripten_enum_draco_GeometryAttribute_Type_INVALID=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_INVALID.apply(null,arguments)},Wb=a._emscripten_enum_draco_EncodedGeometryType_TRIANGULAR_MESH=function(){return a.asm._emscripten_enum_draco_EncodedGeometryType_TRIANGULAR_MESH.apply(null,arguments)},Xb=a._emscripten_bind_Mesh_num_points_0=function(){return a.asm._emscripten_bind_Mesh_num_points_0.apply(null,
|
||||
arguments)},tb=a._emscripten_bind_Mesh_Mesh_0=function(){return a.asm._emscripten_bind_Mesh_Mesh_0.apply(null,arguments)},Yb=a._emscripten_bind_WebIDLWrapper_GetAttribute_2=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetAttribute_2.apply(null,arguments)},qb=a._emscripten_bind_DracoFloat32Array_DracoFloat32Array_0=function(){return a.asm._emscripten_bind_DracoFloat32Array_DracoFloat32Array_0.apply(null,arguments)},Zb=a._emscripten_enum_draco_EncodedGeometryType_POINT_CLOUD=function(){return a.asm._emscripten_enum_draco_EncodedGeometryType_POINT_CLOUD.apply(null,
|
||||
arguments)},$b=a._emscripten_bind_WebIDLWrapper___destroy___0=function(){return a.asm._emscripten_bind_WebIDLWrapper___destroy___0.apply(null,arguments)},ac=a._emscripten_enum_draco_GeometryAttribute_Type_GENERIC=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_GENERIC.apply(null,arguments)},bc=a._emscripten_bind_WebIDLWrapper_GetFaceFromMesh_3=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetFaceFromMesh_3.apply(null,arguments)},ub=a._emscripten_bind_DracoInt32Array_DracoInt32Array_0=
|
||||
function(){return a.asm._emscripten_bind_DracoInt32Array_DracoInt32Array_0.apply(null,arguments)},Cb=a._pthread_self=function(){return a.asm._pthread_self.apply(null,arguments)},pb=a._emscripten_bind_PointAttribute_PointAttribute_0=function(){return a.asm._emscripten_bind_PointAttribute_PointAttribute_0.apply(null,arguments)},xb=a._llvm_bswap_i16=function(){return a.asm._llvm_bswap_i16.apply(null,arguments)},cc=a._emscripten_bind_Mesh_num_attributes_0=function(){return a.asm._emscripten_bind_Mesh_num_attributes_0.apply(null,
|
||||
arguments)},dc=a._emscripten_bind_DracoFloat32Array_GetValue_1=function(){return a.asm._emscripten_bind_DracoFloat32Array_GetValue_1.apply(null,arguments)},ec=a._emscripten_bind_DracoFloat32Array___destroy___0=function(){return a.asm._emscripten_bind_DracoFloat32Array___destroy___0.apply(null,arguments)},fc=a._emscripten_bind_PointCloud_num_points_0=function(){return a.asm._emscripten_bind_PointCloud_num_points_0.apply(null,arguments)},gc=a._emscripten_bind_DracoInt32Array___destroy___0=function(){return a.asm._emscripten_bind_DracoInt32Array___destroy___0.apply(null,
|
||||
arguments)},rb=a._emscripten_bind_GeometryAttribute_GeometryAttribute_0=function(){return a.asm._emscripten_bind_GeometryAttribute_GeometryAttribute_0.apply(null,arguments)};a.runPostSets=function(){return a.asm.runPostSets.apply(null,arguments)};var hc=a._emscripten_bind_PointAttribute_data_type_0=function(){return a.asm._emscripten_bind_PointAttribute_data_type_0.apply(null,arguments)};a.getTempRet0=function(){return a.asm.getTempRet0.apply(null,arguments)};var va=a._free=function(){return a.asm._free.apply(null,
|
||||
arguments)},ic=a._emscripten_bind_GeometryAttribute___destroy___0=function(){return a.asm._emscripten_bind_GeometryAttribute___destroy___0.apply(null,arguments)},jc=a._emscripten_enum_draco_EncodedGeometryType_INVALID_GEOMETRY_TYPE=function(){return a.asm._emscripten_enum_draco_EncodedGeometryType_INVALID_GEOMETRY_TYPE.apply(null,arguments)},kc=a._emscripten_bind_Mesh_num_faces_0=function(){return a.asm._emscripten_bind_Mesh_num_faces_0.apply(null,arguments)};a.establishStackSpace=function(){return a.asm.establishStackSpace.apply(null,
|
||||
arguments)};var lc=a._emscripten_enum_draco_GeometryAttribute_Type_NORMAL=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_NORMAL.apply(null,arguments)},mc=a._emscripten_bind_PointAttribute_byte_stride_0=function(){return a.asm._emscripten_bind_PointAttribute_byte_stride_0.apply(null,arguments)},S=a._malloc=function(){return a.asm._malloc.apply(null,arguments)},ob=a._emscripten_bind_WebIDLWrapper_WebIDLWrapper_0=function(){return a.asm._emscripten_bind_WebIDLWrapper_WebIDLWrapper_0.apply(null,
|
||||
arguments)},Bb=a._memmove=function(){return a.asm._memmove.apply(null,arguments)},vb=a._emscripten_replace_memory=function(){return a.asm._emscripten_replace_memory.apply(null,arguments)},nc=a._emscripten_bind_PointCloud_num_attributes_0=function(){return a.asm._emscripten_bind_PointCloud_num_attributes_0.apply(null,arguments)},oc=a._emscripten_enum_draco_GeometryAttribute_Type_TEX_COORD=function(){return a.asm._emscripten_enum_draco_GeometryAttribute_Type_TEX_COORD.apply(null,arguments)},pc=a._emscripten_bind_DracoInt32Array_GetValue_1=
|
||||
function(){return a.asm._emscripten_bind_DracoInt32Array_GetValue_1.apply(null,arguments)},qc=a._emscripten_bind_WebIDLWrapper_GetAttributeFloatForAllPoints_3=function(){return a.asm._emscripten_bind_WebIDLWrapper_GetAttributeFloatForAllPoints_3.apply(null,arguments)};a.stackRestore=function(){return a.asm.stackRestore.apply(null,arguments)};var nb=a._emscripten_bind_PointCloud_PointCloud_0=function(){return a.asm._emscripten_bind_PointCloud_PointCloud_0.apply(null,arguments)},rc=a._emscripten_bind_PointAttribute___destroy___0=
|
||||
function(){return a.asm._emscripten_bind_PointAttribute___destroy___0.apply(null,arguments)};a.___cxa_can_catch=function(){return a.asm.___cxa_can_catch.apply(null,arguments)};a.dynCall_iiii=function(){return a.asm.dynCall_iiii.apply(null,arguments)};a.dynCall_viiiii=function(){return a.asm.dynCall_viiiii.apply(null,arguments)};a.dynCall_vi=function(){return a.asm.dynCall_vi.apply(null,arguments)};a.dynCall_iiiiiii=function(){return a.asm.dynCall_iiiiiii.apply(null,arguments)};a.dynCall_ii=function(){return a.asm.dynCall_ii.apply(null,
|
||||
arguments)};a.dynCall_viii=function(){return a.asm.dynCall_viii.apply(null,arguments)};a.dynCall_v=function(){return a.asm.dynCall_v.apply(null,arguments)};a.dynCall_viiiiii=function(){return a.asm.dynCall_viiiiii.apply(null,arguments)};a.dynCall_iii=function(){return a.asm.dynCall_iii.apply(null,arguments)};a.dynCall_viiii=function(){return a.asm.dynCall_viiii.apply(null,arguments)};h.stackAlloc=a.stackAlloc;h.stackSave=a.stackSave;h.stackRestore=a.stackRestore;h.establishStackSpace=a.establishStackSpace;
|
||||
h.setTempRet0=a.setTempRet0;h.getTempRet0=a.getTempRet0;a.asm=cb;if(K)if("function"===typeof a.locateFile?K=a.locateFile(K):a.memoryInitializerPrefixURL&&(K=a.memoryInitializerPrefixURL+K),ca||ka){var sc=a.readBinary(K);D.set(sc,h.GLOBAL_BASE)}else{var eb=function(){a.readAsync(K,db,function(){throw"could not load memory initializer "+K;})};Qa("memory initializer");var db=function(b){b.byteLength&&(b=new Uint8Array(b));D.set(b,h.GLOBAL_BASE);a.memoryInitializerRequest&&delete a.memoryInitializerRequest.response;
|
||||
Ra("memory initializer")};if(a.memoryInitializerRequest){var fb=function(){var b=a.memoryInitializerRequest;200!==b.status&&0!==b.status?(console.warn("a problem seems to have happened with Module.memoryInitializerRequest, status: "+b.status+", retrying "+K),eb()):db(b.response)};a.memoryInitializerRequest.response?setTimeout(fb,0):a.memoryInitializerRequest.addEventListener("load",fb)}else eb()}aa.prototype=Error();aa.prototype.constructor=aa;var mb,Wa=null,ha=function c(){a.calledRun||Aa();a.calledRun||
|
||||
(ha=c)};a.callMain=a.callMain=function(c){function d(){for(var a=0;3>a;a++)l.push(0)}c=c||[];pa||(pa=!0,Y(Ba));var e=c.length+1,l=[H(Pa(a.thisProgram),"i8",0)];d();for(var f=0;f<e-1;f+=1)l.push(H(Pa(c[f]),"i8",0)),d();l.push(0);l=H(l,"i32",0);try{var g=a._main(e,l,0);Ya(g,!0)}catch(w){if(!(w instanceof aa))if("SimulateInfiniteLoop"==w)a.noExitRuntime=!0;else throw w&&"object"===typeof w&&w.stack&&a.printErr("exception thrown: "+[w,w.stack]),w;}finally{}};a.run=a.run=Aa;a.exit=a.exit=Ya;var Za=[];
|
||||
a.abort=a.abort=L;if(a.preInit)for("function"==typeof a.preInit&&(a.preInit=[a.preInit]);0<a.preInit.length;)a.preInit.pop()();var Ua=!0;a.noInitialRun&&(Ua=!1);Aa();r.prototype=Object.create(r.prototype);r.prototype.constructor=r;r.prototype.__class__=r;r.__cache__={};a.WrapperObject=r;a.getCache=J;a.wrapPointer=da;a.castObject=function(a,d){return da(a.ptr,d)};a.NULL=da(0);a.destroy=function(a){if(!a.__destroy__)throw"Error: Cannot destroy object. (Did you create it yourself?)";a.__destroy__();
|
||||
delete J(a.__class__)[a.ptr]};a.compare=function(a,d){return a.ptr===d.ptr};a.getPointer=function(a){return a.ptr};a.getClass=function(a){return a.__class__};var q={buffer:0,size:0,pos:0,temps:[],needed:0,prepare:function(){if(q.needed){for(var c=0;c<q.temps.length;c++)a._free(q.temps[c]);q.temps.length=0;a._free(q.buffer);q.buffer=0;q.size+=q.needed;q.needed=0}q.buffer||(q.size+=128,q.buffer=a._malloc(q.size),k(q.buffer));q.pos=0},alloc:function(c,d){k(q.buffer);var e=c.length*d.BYTES_PER_ELEMENT,
|
||||
e=e+7&-8,f;q.pos+e>=q.size?(k(0<e),q.needed+=e,f=a._malloc(e),q.temps.push(f)):(f=q.buffer+q.pos,q.pos+=e);return f},copy:function(a,d,e){switch(d.BYTES_PER_ELEMENT){case 2:e>>=1;break;case 4:e>>=2;break;case 8:e>>=3}for(var c=0;c<a.length;c++)d[e+c]=a[c]}};B.prototype=Object.create(r.prototype);B.prototype.constructor=B;B.prototype.__class__=B;B.__cache__={};a.PointCloud=B;B.prototype.num_attributes=B.prototype.num_attributes=function(){return nc(this.ptr)};B.prototype.num_points=B.prototype.num_points=
|
||||
function(){return fc(this.ptr)};B.prototype.__destroy__=B.prototype.__destroy__=function(){Kb(this.ptr)};p.prototype=Object.create(r.prototype);p.prototype.constructor=p;p.prototype.__class__=p;p.__cache__={};a.WebIDLWrapper=p;p.prototype.GetEncodedGeometryType=p.prototype.GetEncodedGeometryType=function(a){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);return Gb(c,a)};p.prototype.DecodePointCloudFromBuffer=p.prototype.DecodePointCloudFromBuffer=function(a){var c=this.ptr;a&&"object"===typeof a&&
|
||||
(a=a.ptr);return da(Ib(c,a),B)};p.prototype.DecodeMeshFromBuffer=p.prototype.DecodeMeshFromBuffer=function(a){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);return da(Db(c,a),x)};p.prototype.GetAttributeId=p.prototype.GetAttributeId=function(a,d){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);d&&"object"===typeof d&&(d=d.ptr);return Mb(c,a,d)};p.prototype.GetAttribute=p.prototype.GetAttribute=function(a,d){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);d&&"object"===typeof d&&(d=d.ptr);return da(Yb(c,
|
||||
a,d),m)};p.prototype.GetFaceFromMesh=p.prototype.GetFaceFromMesh=function(a,d,e){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);d&&"object"===typeof d&&(d=d.ptr);e&&"object"===typeof e&&(e=e.ptr);return!!bc(c,a,d,e)};p.prototype.GetAttributeFloat=p.prototype.GetAttributeFloat=function(a,d,e){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);d&&"object"===typeof d&&(d=d.ptr);e&&"object"===typeof e&&(e=e.ptr);return!!Lb(c,a,d,e)};p.prototype.GetAttributeFloatForAllPoints=p.prototype.GetAttributeFloatForAllPoints=
|
||||
function(a,d,e){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);d&&"object"===typeof d&&(d=d.ptr);e&&"object"===typeof e&&(e=e.ptr);return!!qc(c,a,d,e)};p.prototype.__destroy__=p.prototype.__destroy__=function(){$b(this.ptr)};m.prototype=Object.create(r.prototype);m.prototype.constructor=m;m.prototype.__class__=m;m.__cache__={};a.PointAttribute=m;m.prototype.size=m.prototype.size=function(){return Nb(this.ptr)};m.prototype.attribute_type=m.prototype.attribute_type=function(){return Rb(this.ptr)};
|
||||
m.prototype.data_type=m.prototype.data_type=function(){return hc(this.ptr)};m.prototype.components_count=m.prototype.components_count=function(){return Fb(this.ptr)};m.prototype.normalized=m.prototype.normalized=function(){return!!Jb(this.ptr)};m.prototype.byte_stride=m.prototype.byte_stride=function(){return mc(this.ptr)};m.prototype.byte_offset=m.prototype.byte_offset=function(){return Hb(this.ptr)};m.prototype.custom_id=m.prototype.custom_id=function(){return Ub(this.ptr)};m.prototype.__destroy__=
|
||||
m.prototype.__destroy__=function(){rc(this.ptr)};E.prototype=Object.create(r.prototype);E.prototype.constructor=E;E.prototype.__class__=E;E.__cache__={};a.DracoFloat32Array=E;E.prototype.GetValue=E.prototype.GetValue=function(a){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);return dc(c,a)};E.prototype.__destroy__=E.prototype.__destroy__=function(){ec(this.ptr)};N.prototype=Object.create(r.prototype);N.prototype.constructor=N;N.prototype.__class__=N;N.__cache__={};a.GeometryAttribute=N;N.prototype.__destroy__=
|
||||
N.prototype.__destroy__=function(){ic(this.ptr)};F.prototype=Object.create(r.prototype);F.prototype.constructor=F;F.prototype.__class__=F;F.__cache__={};a.DecoderBuffer=F;F.prototype.Init=F.prototype.Init=function(a,d){var c=this.ptr;q.prepare();if("object"==typeof a){var f=a;if("object"===typeof f){var h=q.alloc(f,M);q.copy(f,M,h);a=h}else a=f}d&&"object"===typeof d&&(d=d.ptr);Sb(c,a,d)};F.prototype.__destroy__=F.prototype.__destroy__=function(){Tb(this.ptr)};x.prototype=Object.create(r.prototype);
|
||||
x.prototype.constructor=x;x.prototype.__class__=x;x.__cache__={};a.Mesh=x;x.prototype.num_faces=x.prototype.num_faces=function(){return kc(this.ptr)};x.prototype.num_attributes=x.prototype.num_attributes=function(){return cc(this.ptr)};x.prototype.num_points=x.prototype.num_points=function(){return Xb(this.ptr)};x.prototype.__destroy__=x.prototype.__destroy__=function(){Eb(this.ptr)};R.prototype=Object.create(r.prototype);R.prototype.constructor=R;R.prototype.__class__=R;R.__cache__={};a.VoidPtr=
|
||||
R;R.prototype.__destroy__=R.prototype.__destroy__=function(){Qb(this.ptr)};G.prototype=Object.create(r.prototype);G.prototype.constructor=G;G.prototype.__class__=G;G.__cache__={};a.DracoInt32Array=G;G.prototype.GetValue=G.prototype.GetValue=function(a){var c=this.ptr;a&&"object"===typeof a&&(a=a.ptr);return pc(c,a)};G.prototype.__destroy__=G.prototype.__destroy__=function(){gc(this.ptr)};(function(){function c(){a.INVALID_GEOMETRY_TYPE=jc();a.POINT_CLOUD=Zb();a.TRIANGULAR_MESH=Wb();a.INVALID=Vb();
|
||||
a.POSITION=Pb();a.NORMAL=lc();a.COLOR=Ob();a.TEX_COORD=oc();a.GENERIC=ac()}a.calledRun?c():Ta.unshift(c)})();if("function"===typeof a.onModuleParsed)a.onModuleParsed();return n};
|
22
javascript/emscripten/finalize.js
Normal file
22
javascript/emscripten/finalize.js
Normal file
@ -0,0 +1,22 @@
|
||||
// Copyright 2017 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Calls the 'onModuleParsed' callback if provided. This file is included as the
|
||||
// last one in the generated javascript and it gives the caller a way to check
|
||||
// that all previous content was successfully processed.
|
||||
// Note: emscripten's |onRuntimeInitialized| is called before any --post-js
|
||||
// files are included which is not equivalent to this callback.
|
||||
if (typeof Module['onModuleParsed'] === 'function') {
|
||||
Module['onModuleParsed']();
|
||||
}
|
38
javascript/emscripten/prepareCallbacks.js
Normal file
38
javascript/emscripten/prepareCallbacks.js
Normal file
@ -0,0 +1,38 @@
|
||||
// Copyright 2017 The Draco Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Prepares callbacks that can be used to inform the caller that the module has
|
||||
// been fully loaded.
|
||||
var isRuntimeInitialized = false;
|
||||
var isModuleParsed = false;
|
||||
|
||||
// These two callbacks can be called in arbitrary order. We call the final
|
||||
// function |onModuleLoaded| after both of these callbacks have been called.
|
||||
Module['onRuntimeInitialized'] = function() {
|
||||
isRuntimeInitialized = true;
|
||||
if (isModuleParsed) {
|
||||
if (typeof Module['onModuleLoaded'] === 'function') {
|
||||
Module['onModuleLoaded'](Module);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Module['onModuleParsed'] = function() {
|
||||
isModuleParsed = true;
|
||||
if (isRuntimeInitialized) {
|
||||
if (typeof Module['onModuleLoaded'] === 'function') {
|
||||
Module['onModuleLoaded'](Module);
|
||||
}
|
||||
}
|
||||
};
|
@ -19,6 +19,7 @@ THREE.DRACOLoader = function(manager) {
|
||||
THREE.DefaultLoadingManager;
|
||||
this.materials = null;
|
||||
this.verbosity = 0;
|
||||
this.dracoDecoderType = {};
|
||||
};
|
||||
|
||||
|
||||
@ -32,7 +33,7 @@ THREE.DRACOLoader.prototype = {
|
||||
loader.setPath(this.path);
|
||||
loader.setResponseType('arraybuffer');
|
||||
loader.load(url, function(blob) {
|
||||
onLoad(scope.decodeDracoFile(blob));
|
||||
scope.decodeDracoFile(blob, onLoad);
|
||||
}, onProgress, onError);
|
||||
},
|
||||
|
||||
@ -44,8 +45,19 @@ THREE.DRACOLoader.prototype = {
|
||||
this.verbosity = level;
|
||||
},
|
||||
|
||||
decodeDracoFile: function(rawBuffer) {
|
||||
const dracoDecoder = THREE.DRACOLoader.getDecoder();
|
||||
setDracoDecoderType: function(dracoDecoderType) {
|
||||
this.dracoDecoderType = dracoDecoderType;
|
||||
},
|
||||
|
||||
decodeDracoFile: function(rawBuffer, callback) {
|
||||
const scope = this;
|
||||
THREE.DRACOLoader.getDecoder(this.dracoDecoderType,
|
||||
function(dracoDecoder) {
|
||||
scope.decodeDracoFileInternal(rawBuffer, dracoDecoder, callback);
|
||||
});
|
||||
},
|
||||
|
||||
decodeDracoFileInternal : function(rawBuffer, dracoDecoder, callback) {
|
||||
/*
|
||||
* Here is how to use Draco Javascript decoder and get the geometry.
|
||||
*/
|
||||
@ -70,11 +82,12 @@ THREE.DRACOLoader.prototype = {
|
||||
console.error(errorMsg);
|
||||
throw new Error(errorMsg);
|
||||
}
|
||||
return this.convertDracoGeometryTo3JS(wrapper, geometryType, buffer);
|
||||
callback(this.convertDracoGeometryTo3JS(dracoDecoder, wrapper,
|
||||
geometryType, buffer));
|
||||
},
|
||||
|
||||
convertDracoGeometryTo3JS: function(wrapper, geometryType, buffer) {
|
||||
const dracoDecoder = THREE.DRACOLoader.getDecoder();
|
||||
convertDracoGeometryTo3JS: function(dracoDecoder, wrapper, geometryType,
|
||||
buffer) {
|
||||
let dracoGeometry;
|
||||
const start_time = performance.now();
|
||||
if (geometryType == dracoDecoder.TRIANGULAR_MESH) {
|
||||
@ -263,26 +276,38 @@ THREE.DRACOLoader.prototype = {
|
||||
return geometry;
|
||||
},
|
||||
|
||||
isVersionSupported: function(version) {
|
||||
return THREE.DRACOLoader.getDecoder().isVersionSupported(version);
|
||||
isVersionSupported: function(version, callback) {
|
||||
return THREE.DRACOLoader.getDecoder(this.dracoDecoderType,
|
||||
function(decoder) { return decoder.isVersionSupported(version); });
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Returns a singleton instance of the DracoModule decoder. Creating multiple
|
||||
* copies of the decoder is expensive.
|
||||
* Creates and returns a singleton instance of the DracoModule decoder.
|
||||
* The module loading is done asynchronously for WebAssembly. Initialized module
|
||||
* can be accessed through the callback function |onDracoModuleLoadedCallback|.
|
||||
*/
|
||||
THREE.DRACOLoader.getDecoder = (function() {
|
||||
let decoder;
|
||||
|
||||
return function() {
|
||||
return function(dracoDecoderType, onDracoModuleLoadedCallback) {
|
||||
if (typeof DracoModule === 'undefined') {
|
||||
throw new Error('THREE.DRACOLoader: DracoModule not found.');
|
||||
}
|
||||
|
||||
decoder = decoder || DracoModule();
|
||||
|
||||
return decoder;
|
||||
if (typeof decoder !== 'undefined') {
|
||||
// Module already initialized.
|
||||
if (typeof onDracoModuleLoadedCallback !== 'undefined') {
|
||||
onDracoModuleLoadedCallback(decoder);
|
||||
}
|
||||
} else {
|
||||
dracoDecoderType['onModuleLoaded'] = function(module) {
|
||||
if (typeof onDracoModuleLoadedCallback === 'function') {
|
||||
decoder = module;
|
||||
onDracoModuleLoadedCallback(module);
|
||||
}
|
||||
};
|
||||
DracoModule(dracoDecoderType);
|
||||
}
|
||||
};
|
||||
|
||||
})();
|
||||
|
@ -1,6 +1,6 @@
|
||||
The code shows a simple example of integration of threejs and draco javascript
|
||||
decoder. From the example, you should be able to loading an encoded draco mesh
|
||||
file and visualize it through threejs's fancy 3D tools.
|
||||
or WebAssembly decoder. From the example, you should be able to load an
|
||||
encoded draco mesh file and visualize it through threejs's fancy 3D tools.
|
||||
|
||||
How to run the example code:
|
||||
|
||||
@ -14,4 +14,4 @@ an empty scene rendered by threejs.
|
||||
|
||||
(4) Click "Choose File" to select a draco encoded file (.drc) and you should be
|
||||
able to see the model. e.g. bunny.drc. This is model encoded using Draco
|
||||
default compression. The original ply is located here "testdata/bun_zipper.ply".
|
||||
default compression. The original ply is located here "testdata/bun_zipper.ply".
|
@ -42,7 +42,6 @@
|
||||
<pre id="fileDisplayArea"><pre>
|
||||
</div>
|
||||
<script src="https://cdn.rawgit.com/mrdoob/three.js/r84/build/three.min.js"></script>
|
||||
<script src="../draco_decoder.js"></script>
|
||||
<script src="DRACOLoader.js"></script>
|
||||
<script>
|
||||
'use strict';
|
||||
@ -54,8 +53,62 @@
|
||||
let windowHalfX = window.innerWidth / 2;
|
||||
let windowHalfY = window.innerHeight / 2;
|
||||
|
||||
init();
|
||||
animate();
|
||||
// Global Draco decoder type.
|
||||
let dracoDecoderType = {};
|
||||
let dracoLoader;
|
||||
|
||||
loadDracoDecoder();
|
||||
|
||||
// This function loads a JavaScript file and adds it to the page. "path"
|
||||
// is the path to the JavaScript file. "onLoadFunc" is the function to be
|
||||
// called when the JavaScript file has been loaded.
|
||||
function loadJavaScriptFile(path, onLoadFunc) {
|
||||
const head = document.getElementsByTagName('head')[0];
|
||||
const element = document.createElement('script');
|
||||
element.type = 'text/javascript';
|
||||
element.src = path;
|
||||
if (onLoadFunc !== null)
|
||||
element.onload = onLoadFunc;
|
||||
|
||||
head.appendChild(element);
|
||||
}
|
||||
|
||||
function loadWebAssemblyDecoder() {
|
||||
dracoDecoderType['wasmBinaryFile'] = '../draco_decoder.wasm';
|
||||
const xhr = new XMLHttpRequest();
|
||||
xhr.open('GET', '../draco_decoder.wasm', true);
|
||||
xhr.responseType = 'arraybuffer';
|
||||
xhr.onload = function() {
|
||||
// draco_wasm_wrapper.js must be loaded before DracoModule is
|
||||
// created. The object passed into DracoModule() must contain a
|
||||
// property with the name of wasmBinary and the value must be an
|
||||
// ArrayBuffer containing the contents of the .wasm file.
|
||||
dracoDecoderType['wasmBinary'] = xhr.response;
|
||||
createDracoDecoder();
|
||||
};
|
||||
xhr.send(null)
|
||||
}
|
||||
|
||||
// This function will test if the browser has support for WebAssembly. If
|
||||
// it does it will download the WebAssembly Draco decoder, if not it will
|
||||
// download the asmjs Draco decoder.
|
||||
// TODO: Investigate moving the Draco decoder loading code
|
||||
// over to DRACOLoader.js.
|
||||
function loadDracoDecoder() {
|
||||
if (typeof WebAssembly !== 'object') {
|
||||
// No WebAssembly support
|
||||
loadJavaScriptFile('../draco_decoder.js', createDracoDecoder);
|
||||
} else {
|
||||
loadJavaScriptFile('../draco_wasm_wrapper.js', loadWebAssemblyDecoder);
|
||||
}
|
||||
}
|
||||
|
||||
function createDracoDecoder() {
|
||||
dracoLoader = new THREE.DRACOLoader();
|
||||
dracoLoader.setDracoDecoderType(dracoDecoderType);
|
||||
init();
|
||||
animate();
|
||||
}
|
||||
|
||||
function init() {
|
||||
container = document.createElement('div');
|
||||
@ -140,46 +193,49 @@
|
||||
|
||||
const reader = new FileReader();
|
||||
reader.onload = function(e) {
|
||||
const dracoLoader = new THREE.DRACOLoader();
|
||||
// Enable logging to console output.
|
||||
dracoLoader.setVerbosity(1);
|
||||
const bufferGeometry = dracoLoader.decodeDracoFile(reader.result);
|
||||
if (dracoLoader.decode_time !== undefined) {
|
||||
fileDisplayArea.innerText = 'Decode time = ' + dracoLoader.decode_time + '\n' +
|
||||
'Import time = ' + dracoLoader.import_time;
|
||||
}
|
||||
const material = new THREE.MeshStandardMaterial({vertexColors: THREE.VertexColors});
|
||||
dracoLoader.decodeDracoFile(reader.result, function(bufferGeometry) {
|
||||
if (dracoLoader.decode_time !== undefined) {
|
||||
fileDisplayArea.innerText = 'Decode time = ' + dracoLoader.decode_time + '\n' +
|
||||
'Import time = ' + dracoLoader.import_time;
|
||||
}
|
||||
const material = new THREE.MeshStandardMaterial({vertexColors: THREE.VertexColors});
|
||||
|
||||
let geometry;
|
||||
// Point cloud does not have face indices.
|
||||
if (bufferGeometry.index == null) {
|
||||
geometry = new THREE.Points(bufferGeometry, material);
|
||||
} else {
|
||||
bufferGeometry.computeVertexNormals();
|
||||
geometry = new THREE.Mesh(bufferGeometry, material);
|
||||
}
|
||||
// Compute range of the geometry coordinates for proper rendering.
|
||||
bufferGeometry.computeBoundingBox();
|
||||
const sizeX = bufferGeometry.boundingBox.max.x - bufferGeometry.boundingBox.min.x;
|
||||
const sizeY = bufferGeometry.boundingBox.max.y - bufferGeometry.boundingBox.min.y;
|
||||
const sizeZ = bufferGeometry.boundingBox.max.z - bufferGeometry.boundingBox.min.z;
|
||||
const diagonalSize = Math.sqrt(sizeX * sizeX + sizeY * sizeY + sizeZ * sizeZ);
|
||||
const scale = 1.0 / diagonalSize;
|
||||
const midX = (bufferGeometry.boundingBox.min.x + bufferGeometry.boundingBox.max.x) / 2;
|
||||
const midY = (bufferGeometry.boundingBox.min.y + bufferGeometry.boundingBox.max.y) / 2;
|
||||
const midZ = (bufferGeometry.boundingBox.min.z + bufferGeometry.boundingBox.max.z) / 2;
|
||||
let geometry;
|
||||
// Point cloud does not have face indices.
|
||||
if (bufferGeometry.index == null) {
|
||||
geometry = new THREE.Points(bufferGeometry, material);
|
||||
} else {
|
||||
bufferGeometry.computeVertexNormals();
|
||||
geometry = new THREE.Mesh(bufferGeometry, material);
|
||||
}
|
||||
// Compute range of the geometry coordinates for proper rendering.
|
||||
bufferGeometry.computeBoundingBox();
|
||||
const sizeX = bufferGeometry.boundingBox.max.x - bufferGeometry.boundingBox.min.x;
|
||||
const sizeY = bufferGeometry.boundingBox.max.y - bufferGeometry.boundingBox.min.y;
|
||||
const sizeZ = bufferGeometry.boundingBox.max.z - bufferGeometry.boundingBox.min.z;
|
||||
const diagonalSize = Math.sqrt(sizeX * sizeX + sizeY * sizeY + sizeZ * sizeZ);
|
||||
const scale = 1.0 / diagonalSize;
|
||||
const midX =
|
||||
(bufferGeometry.boundingBox.min.x + bufferGeometry.boundingBox.max.x) / 2;
|
||||
const midY =
|
||||
(bufferGeometry.boundingBox.min.y + bufferGeometry.boundingBox.max.y) / 2;
|
||||
const midZ =
|
||||
(bufferGeometry.boundingBox.min.z + bufferGeometry.boundingBox.max.z) / 2;
|
||||
|
||||
geometry.scale.multiplyScalar(scale);
|
||||
geometry.position.x = -midX * scale;
|
||||
geometry.position.y = -midY * scale;
|
||||
geometry.position.z = -midZ * scale;
|
||||
geometry.castShadow = true;
|
||||
geometry.receiveShadow = true;
|
||||
geometry.scale.multiplyScalar(scale);
|
||||
geometry.position.x = -midX * scale;
|
||||
geometry.position.y = -midY * scale;
|
||||
geometry.position.z = -midZ * scale;
|
||||
geometry.castShadow = true;
|
||||
geometry.receiveShadow = true;
|
||||
|
||||
const selectedObject = scene.getObjectByName("my_mesh");
|
||||
scene.remove(selectedObject);
|
||||
geometry.name = "my_mesh";
|
||||
scene.add(geometry);
|
||||
const selectedObject = scene.getObjectByName("my_mesh");
|
||||
scene.remove(selectedObject);
|
||||
geometry.name = "my_mesh";
|
||||
scene.add(geometry);
|
||||
});
|
||||
}
|
||||
reader.readAsArrayBuffer(file);
|
||||
});
|
||||
|
@ -1,15 +1,78 @@
|
||||
<html>
|
||||
<head>
|
||||
<title>Draco Javascript Decode Timing</title>
|
||||
|
||||
<script type="text/javascript" src="draco_decoder.js"> </script>
|
||||
<title>Draco Decode Timing</title>
|
||||
|
||||
<script type="text/javascript">
|
||||
'use strict';
|
||||
|
||||
// Global Draco decoder.
|
||||
let dracoDecoder = {};
|
||||
let dracoDecoderType = {};
|
||||
|
||||
// This function loads a JavaScript file and adds it to the page. "path" is
|
||||
// the path to the JavaScript file. "onLoadFunc" is the function to be called
|
||||
// when the JavaScript file has been loaded.
|
||||
function loadJavaScriptFile(path, onLoadFunc) {
|
||||
const head = document.getElementsByTagName('head')[0];
|
||||
const element = document.createElement('script');
|
||||
element.type = 'text/javascript';
|
||||
element.src = path;
|
||||
if (onLoadFunc !== null)
|
||||
element.onload = onLoadFunc;
|
||||
|
||||
head.appendChild(element);
|
||||
}
|
||||
|
||||
function loadWebAssemblyDecoder() {
|
||||
dracoDecoderType['wasmBinaryFile'] = 'draco_decoder.wasm';
|
||||
|
||||
const xhr = new XMLHttpRequest();
|
||||
xhr.open('GET', 'draco_decoder.wasm', true);
|
||||
xhr.responseType = 'arraybuffer';
|
||||
|
||||
xhr.onload = function() {
|
||||
// For WebAssembly the object passed into DracoModule() must contain a
|
||||
// property with the name of wasmBinary and the value must be an
|
||||
// ArrayBuffer containing the contents of the .wasm file.
|
||||
dracoDecoderType['wasmBinary'] = xhr.response;
|
||||
createDracoDecoder();
|
||||
};
|
||||
|
||||
xhr.send(null)
|
||||
}
|
||||
|
||||
function createDracoDecoder() {
|
||||
// draco_decoder.js or draco_wasm_wrapper.js must be loaded before
|
||||
// DracoModule is created.
|
||||
if (typeof dracoDecoderType === 'undefined')
|
||||
dracoDecoderType = {};
|
||||
dracoDecoderType['onModuleLoaded'] = function(module) {
|
||||
enableButtons();
|
||||
};
|
||||
const create_t0 = performance.now();
|
||||
|
||||
dracoDecoder = DracoModule(dracoDecoderType);
|
||||
const create_t1 = performance.now();
|
||||
addCell('DracoModule', true);
|
||||
addCell(' ' + (create_t1 - create_t0), false);
|
||||
}
|
||||
|
||||
// This function will test if the browser has support for WebAssembly. If it
|
||||
// does it will download the WebAssembly Draco decoder, if not it will download
|
||||
// the asmjs Draco decoder.
|
||||
function loadDracoDecoder() {
|
||||
if (typeof WebAssembly !== 'object') {
|
||||
// No WebAssembly support. DracoModule must be called with no parameters
|
||||
// or an empty object to create a JavaScript decoder.
|
||||
loadJavaScriptFile('draco_decoder.js', createDracoDecoder);
|
||||
} else {
|
||||
loadJavaScriptFile('draco_wasm_wrapper.js', loadWebAssemblyDecoder);
|
||||
}
|
||||
}
|
||||
|
||||
// Functions to handle logging output.
|
||||
// String to hold table output.
|
||||
let dt = '';
|
||||
const dracoDecoder = DracoModule();
|
||||
|
||||
function startTable() {
|
||||
dt += '<table><tr>';
|
||||
@ -31,27 +94,47 @@ function finishTable() {
|
||||
document.getElementById('tableOutput').innerHTML = dt;
|
||||
}
|
||||
|
||||
function s_log(str, end_line, reset) {
|
||||
if (reset)
|
||||
document.getElementById('status').innerHTML = '';
|
||||
document.getElementById('status').innerHTML += str;
|
||||
if (end_line)
|
||||
document.getElementById('status').innerHTML += "<br/>";
|
||||
}
|
||||
|
||||
// Functions to handle the input from the buttons.
|
||||
function enableButtons() {
|
||||
document.getElementById('decodeOne').disabled = false;
|
||||
document.getElementById('decodeMult').disabled = false;
|
||||
}
|
||||
|
||||
function onDecodeClick() {
|
||||
startTable();
|
||||
const inputs = document.getElementById('u').value.split(',');
|
||||
s_log('Decoding ' + inputs.length + ' files...', true, true);
|
||||
TestMeshDecodingAsync(inputs, 0);
|
||||
const build =
|
||||
(typeof WebAssembly !== 'object') ? 'JavaScript' : 'WebAssembly';
|
||||
s_log('Decoding ' + inputs.length + ' files... using ' + build, true, true);
|
||||
testMeshDecodingAsync(inputs, 0);
|
||||
}
|
||||
|
||||
function onDecodeMultipleClick() {
|
||||
startTable();
|
||||
const inputs = document.getElementById('u').value.split(',');
|
||||
const decode_count = parseInt(document.getElementById('decode_count').value);
|
||||
s_log('Decoding ' + (decode_count * inputs.length) + ' files...', true, true);
|
||||
const build =
|
||||
(typeof WebAssembly !== 'object') ? 'JavaScript' : 'WebAssembly';
|
||||
s_log('Decoding ' + (decode_count * inputs.length) + ' files... using ' +
|
||||
build, true, true);
|
||||
|
||||
let fileList = [];
|
||||
for (let i = 0; i < decode_count; ++i) {
|
||||
fileList = fileList.concat(inputs);
|
||||
}
|
||||
TestMeshDecodingAsync(fileList, 0);
|
||||
testMeshDecodingAsync(fileList, 0);
|
||||
}
|
||||
|
||||
function TestMeshDecodingAsync(filenameList, index) {
|
||||
// Decode geometry.
|
||||
function testMeshDecodingAsync(filenameList, index) {
|
||||
const xhr = new XMLHttpRequest();
|
||||
xhr.open("GET", filenameList[index], true);
|
||||
xhr.responseType = "arraybuffer";
|
||||
@ -90,7 +173,7 @@ function TestMeshDecodingAsync(filenameList, index) {
|
||||
|
||||
if (index < filenameList.length - 1) {
|
||||
index = index + 1;
|
||||
TestMeshDecodingAsync(filenameList, index);
|
||||
testMeshDecodingAsync(filenameList, index);
|
||||
} else {
|
||||
finishTable();
|
||||
}
|
||||
@ -100,21 +183,17 @@ function TestMeshDecodingAsync(filenameList, index) {
|
||||
xhr.send(null);
|
||||
}
|
||||
|
||||
function s_log(str, end_line, reset) {
|
||||
if (reset)
|
||||
document.getElementById('status').innerHTML = '';
|
||||
document.getElementById('status').innerHTML += str;
|
||||
if (end_line)
|
||||
document.getElementById('status').innerHTML += "<br/>";
|
||||
}
|
||||
loadDracoDecoder();
|
||||
|
||||
</script>
|
||||
</head>
|
||||
<body>
|
||||
<H1>Draco Javascript Decode Timing</H1>
|
||||
<H1>Draco Decode Timing</H1>
|
||||
Draco file to be decoded. If more than one file, add as comma separated list. E.g. "file1.drc,file2.drc,file3.drc"</br>
|
||||
<input id="u" type="text" size="80" value="input.drc"/><input type="button" value="Decode" onClick="onDecodeClick();">
|
||||
<input id="decode_count" type="text" size="10" value="10"/><input type="button" value="Decode Multiple" onClick="onDecodeMultipleClick();">
|
||||
<input id="u" type="text" size="80" value="input.drc"/>
|
||||
<input type="button" value="Decode" id="decodeOne" onClick="onDecodeClick();" disabled>
|
||||
<input id="decode_count" type="text" size="10" value="10"/>
|
||||
<input type="button" value="Decode Multiple" id="decodeMult" onClick="onDecodeMultipleClick();" disabled>
|
||||
<br/>
|
||||
<div id="status"> </div></br>
|
||||
<div id="tableOutput"> </div>
|
||||
|
@ -41,10 +41,13 @@ bool CornerTable::Initialize(
|
||||
return true;
|
||||
}
|
||||
|
||||
void CornerTable::Reset(int num_faces) {
|
||||
bool CornerTable::Reset(int num_faces) {
|
||||
if (num_faces < 0)
|
||||
return false;
|
||||
faces_.assign(num_faces, kInvalidFace);
|
||||
opposite_corners_.assign(num_faces * 3, kInvalidCornerIndex);
|
||||
vertex_corners_.reserve(num_faces * 3);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool CornerTable::ComputeOppositeCorners(int *num_vertices) {
|
||||
|
@ -58,7 +58,7 @@ class CornerTable {
|
||||
bool Initialize(const IndexTypeVector<FaceIndex, FaceType> &faces);
|
||||
|
||||
// Resets the corner table to the given number of invalid faces.
|
||||
void Reset(int num_faces);
|
||||
bool Reset(int num_faces);
|
||||
|
||||
inline int num_vertices() const { return vertex_corners_.size(); }
|
||||
inline int num_corners() const { return faces_.size() * 3; }
|
||||
|
@ -81,6 +81,7 @@ template <class TraversalProcessorT, class TraversalObserverT,
|
||||
class EdgeBreakerTraverser {
|
||||
public:
|
||||
typedef TraversalProcessorT TraversalProcessor;
|
||||
typedef TraversalObserverT TraversalObserver;
|
||||
typedef typename TraversalProcessorT::CornerTable CornerTable;
|
||||
|
||||
EdgeBreakerTraverser() {}
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user