mirror of
https://git.mirrors.martin98.com/https://github.com/google/draco
synced 2025-04-22 13:49:57 +08:00
Version 1.3.1 release
* Fix issue with multiple attributes when skipping an attribute transform
This commit is contained in: parent 1029ea6a59, commit 56c2e6b46c
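For readers who want to exercise the fixed code path, the skip behaviour is requested on the decoder before decoding. The sketch below is a minimal standalone example modeled on the unit test added by this commit; the `draco/compression/decode.h` include and the use of `pc_color.drc` (one of the test files added here) in the working directory are assumptions, not part of the commit itself.

// Minimal sketch (not part of the commit): decode a point cloud while keeping
// positions in their quantized integer form.
#include <fstream>
#include <iterator>
#include <memory>
#include <vector>

#include "draco/compression/decode.h"

int main() {
  std::ifstream input("pc_color.drc", std::ios::binary);
  std::vector<char> data((std::istreambuf_iterator<char>(input)),
                         std::istreambuf_iterator<char>());

  draco::DecoderBuffer buffer;
  buffer.Init(data.data(), data.size());

  draco::Decoder decoder;
  // Keep POSITION quantized; other attributes (e.g. COLOR) are still
  // converted back to their original format.
  decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);

  std::unique_ptr<draco::PointCloud> pc =
      decoder.DecodePointCloudFromBuffer(&buffer).value();
  return pc != nullptr ? 0 : 1;
}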
@@ -5,6 +5,9 @@
 
 News
 =======
+### Version 1.3.1 release
+* Fix issue with multiple attributes when skipping an attribute transform
+
 ### Version 1.3.0 release
 * Improved kD-tree based point cloud encoding
 * Now applicable to point clouds with any number of attributes
@@ -333,7 +336,7 @@ this writing, requires that libc++ is used for the stl argument when the
 toolchain is generated.
 
 After building the tools they can be moved to an android device via the use of
-`adb push`, and then run with an `adb shell` instance.
+`adb push`, and then run within an `adb shell` instance.
 
 
 Usage
File diff suppressed because one or more lines are too long
Binary file not shown.
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -5,6 +5,9 @@
 
 News
 =======
+### Version 1.3.1 release
+* Fix issue with multiple attributes when skipping an attribute transform
+
 ### Version 1.3.0 release
 * Improved kD-tree based point cloud encoding
 * Now applicable to point clouds with any number of attributes
File diff suppressed because one or more lines are too long
@@ -1,6 +1,6 @@
 {
   "name": "draco3d",
-  "version": "1.3.0",
+  "version": "1.3.1",
   "description": "Draco is a library for compressing and decompressing 3D geometric meshes and point clouds. It is intended to improve the storage and transmission of 3D graphics.",
   "main": "draco3d.js",
   "scripts": {
@@ -16,6 +16,9 @@ TODO: Add glTF branch url.
 
 News
 =======
+### Version 1.3.1 release
+* Fix issue with multiple attributes when skipping an attribute transform
+
 ### Version 1.3.0 release
 * Improved kD-tree based point cloud encoding
 * Now applicable to point clouds with any number of attributes
File diff suppressed because one or more lines are too long
@@ -1,6 +1,6 @@
 {
   "name": "draco3dgltf",
-  "version": "1.3.0",
+  "version": "1.3.1",
   "description": "This package contains a specific version of Draco 3D geometric compression library that is used for glTF Draco mesh compression extension.",
   "main": "draco3dgltf.js",
   "scripts": {
@@ -426,7 +426,7 @@ bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
   if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) {
     return true;
   }
-  int num_processed_attributes = 0;
+  int num_processed_quantized_attributes = 0;
   int num_processed_signed_components = 0;
   // Dequantize attributes that needed it.
   for (int i = 0; i < GetNumAttributes(); ++i) {
@@ -456,10 +456,26 @@ bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
       // transform and shared with the SequentialQuantizationAttributeDecoder.
 
       const PointAttribute *const src_att =
-          quantized_portable_attributes_[num_processed_attributes].get();
+          quantized_portable_attributes_[num_processed_quantized_attributes]
+              .get();
+
       const AttributeQuantizationTransform &transform =
-          attribute_quantization_transforms_[num_processed_attributes];
+          attribute_quantization_transforms_
+              [num_processed_quantized_attributes];
+
+      num_processed_quantized_attributes++;
+
+      if (GetDecoder()->options()->GetAttributeBool(
+              att->attribute_type(), "skip_attribute_transform", false)) {
+        // Attribute transform should not be performed. In this case, we replace
+        // the output geometry attribute with the portable attribute.
+        // TODO(ostava): We can potentially avoid this copy by introducing a new
+        // mechanism that would allow to use the final attributes as portable
+        // attributes for predictors that may need them.
+        att->CopyFrom(*src_att);
+        continue;
+      }
 
       // Convert all quantized values back to floats.
       const int32_t max_quantized_value =
           (1u << static_cast<uint32_t>(transform.quantization_bits())) - 1;
@@ -485,7 +501,6 @@ bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
         att->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
         out_byte_pos += entry_size;
       }
-      num_processed_attributes++;
     }
   }
   return true;
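The two kd-tree hunks above carry the substance of the fix: the decoder now honors the `skip_attribute_transform` option for kd-tree-coded attributes, and the counter that indexes the parallel containers `quantized_portable_attributes_` and `attribute_quantization_transforms_` is advanced before the new early-out. The snippet below is not Draco code; it is a generic, self-contained illustration with hypothetical names of why such a shared index has to move forward even when an item is skipped.

#include <cstdio>
#include <string>
#include <vector>

int main() {
  // Two parallel containers, one entry per quantized attribute.
  std::vector<std::string> portable = {"position", "normal"};
  std::vector<int> quantization_bits = {11, 8};
  std::vector<bool> skip_transform = {true, false};

  int num_processed = 0;
  for (std::size_t i = 0; i < portable.size(); ++i) {
    const std::string &src = portable[num_processed];
    const int bits = quantization_bits[num_processed];
    // Advance the shared index before any early-out so the next attribute
    // does not re-read entry 0 of both containers.
    ++num_processed;
    if (skip_transform[i]) {
      std::printf("keeping %s quantized\n", src.c_str());
      continue;
    }
    std::printf("dequantizing %s (%d bits)\n", src.c_str(), bits);
  }
  return 0;
}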
@@ -13,7 +13,6 @@
 // limitations under the License.
 //
 #include "draco/compression/attributes/point_d_vector.h"
-
 #include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
 #include "draco/core/draco_test_base.h"
 
@@ -149,7 +149,7 @@ class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
   // the edges are processed. For better compression, the flags are stored in
   // in separate contexts based on the number of available parallelograms at a
   // given vertex.
-  // TODO() reconsider std::vector<bool> (performance/space).
+  // TODO(draco-eng) reconsider std::vector<bool> (performance/space).
   std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
   Mode selected_mode_;
 
@@ -284,7 +284,7 @@ bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
     // Mark all parallelograms as excluded.
     std::fill(exluded_parallelograms,
               exluded_parallelograms + num_parallelograms, true);
-    // TODO(scottgodfrey) maybe this should be another std::fill.
+    // TODO(draco-eng) maybe this should be another std::fill.
     // Mark the first |num_used_parallelograms| as not excluded.
     for (int j = 0; j < num_used_parallelograms; ++j) {
       exluded_parallelograms[j] = false;
@@ -22,7 +22,7 @@
 
 namespace draco {
 
-// TODO() consolidate Vertex/next/previous queries to one call
+// TODO(draco-eng) consolidate Vertex/next/previous queries to one call
 // (performance).
 template <class CornerTableT>
 inline void GetParallelogramEntries(
@@ -98,7 +98,7 @@ bool SequentialAttributeDecodersController::
         // attributes for predictors that may need them.
         sequential_decoders_[i]->attribute()->CopyFrom(
            *sequential_decoders_[i]->GetPortableAttribute());
-        return true;
+        continue;
       }
     }
     if (!sequential_decoders_[i]->TransformAttributeToOriginalFormat(
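This one-word change matters because the surrounding code loops over all sequential attribute decoders: returning from inside the loop after the first attribute whose transform is skipped would leave the remaining attributes unprocessed, while `continue` only bypasses the current one. Below is a tiny generic sketch of that difference, with hypothetical names; it is not the Draco controller itself.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for a per-attribute decoder.
struct AttDecoder {
  bool skip;
  bool Transform() const {
    std::printf("transformed attribute\n");
    return true;
  }
};

bool TransformAll(const std::vector<AttDecoder> &decoders) {
  for (const AttDecoder &d : decoders) {
    if (d.skip) {
      // A "return true;" here (the pre-1.3.1 behaviour in the controller)
      // would stop the loop and leave the remaining attributes untouched.
      continue;
    }
    if (!d.Transform()) return false;
  }
  return true;
}

int main() {
  // First attribute skipped, second one must still be transformed.
  return TransformAll({{true}, {false}}) ? 0 : 1;
}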
@@ -41,7 +41,7 @@ static constexpr uint16_t kDracoMeshBitstreamVersion = DRACO_BITSTREAM_VERSION(
     kDracoMeshBitstreamVersionMajor, kDracoMeshBitstreamVersionMinor);
 
 // Currently, we support point cloud and triangular mesh encoding.
-// TODO() convert enum to enum class (safety, not performance).
+// TODO(draco-eng) convert enum to enum class (safety, not performance).
 enum EncodedGeometryType {
   INVALID_GEOMETRY_TYPE = -1,
   POINT_CLOUD = 0,
@@ -76,4 +76,78 @@ TEST_F(DecodeTest, TestSkipAttributeTransform) {
 }
 #endif
 
+void TestSkipAttributeTransformOnPointCloudWithColor(const std::string &file) {
+  std::ifstream input_file(draco::GetTestFileFullPath(file), std::ios::binary);
+  ASSERT_TRUE(input_file);
+
+  // Read the file stream into a buffer.
+  std::streampos file_size = 0;
+  input_file.seekg(0, std::ios::end);
+  file_size = input_file.tellg() - file_size;
+  input_file.seekg(0, std::ios::beg);
+  std::vector<char> data(file_size);
+  input_file.read(data.data(), file_size);
+
+  ASSERT_FALSE(data.empty());
+
+  // Create a draco decoding buffer. Note that no data is copied in this step.
+  draco::DecoderBuffer buffer;
+  buffer.Init(data.data(), data.size());
+
+  draco::Decoder decoder;
+  // Make sure we skip dequantization for the position attribute.
+  decoder.SetSkipAttributeTransform(draco::GeometryAttribute::POSITION);
+
+  // Decode the input data into a geometry.
+  std::unique_ptr<draco::PointCloud> pc =
+      decoder.DecodePointCloudFromBuffer(&buffer).value();
+  ASSERT_NE(pc, nullptr);
+
+  const draco::PointAttribute *const pos_att =
+      pc->GetNamedAttribute(draco::GeometryAttribute::POSITION);
+  ASSERT_NE(pos_att, nullptr);
+
+  // Ensure the position attribute is of type int32_t or uint32_t and that it
+  // has a valid attribute transform.
+  ASSERT_TRUE(pos_att->data_type() == draco::DT_INT32 ||
+              pos_att->data_type() == draco::DT_UINT32);
+  ASSERT_NE(pos_att->GetAttributeTransformData(), nullptr);
+
+  const draco::PointAttribute *const clr_att =
+      pc->GetNamedAttribute(draco::GeometryAttribute::COLOR);
+  ASSERT_EQ(clr_att->data_type(), draco::DT_UINT8);
+
+  // Ensure the color attribute was decoded correctly. Perform the decoding
+  // again without skipping the position dequantization and compare the
+  // attribute values.
+
+  draco::DecoderBuffer buffer_2;
+  buffer_2.Init(data.data(), data.size());
+
+  draco::Decoder decoder_2;
+
+  // Decode the input data into a geometry.
+  std::unique_ptr<draco::PointCloud> pc_2 =
+      decoder_2.DecodePointCloudFromBuffer(&buffer_2).value();
+  ASSERT_NE(pc_2, nullptr);
+
+  const draco::PointAttribute *const clr_att_2 =
+      pc_2->GetNamedAttribute(draco::GeometryAttribute::COLOR);
+  ASSERT_NE(clr_att_2, nullptr);
+  for (draco::PointIndex pi(0); pi < pc_2->num_points(); ++pi) {
+    // Colors should be exactly the same for both cases.
+    ASSERT_EQ(std::memcmp(clr_att->GetAddress(clr_att->mapped_index(pi)),
+                          clr_att_2->GetAddress(clr_att_2->mapped_index(pi)),
+                          clr_att->byte_stride()),
+              0);
+  }
+}
+
+TEST_F(DecodeTest, TestSkipAttributeTransformOnPointCloud) {
+  // Tests that decoders can successfully skip attribute transform on a point
+  // cloud with multiple attributes encoded with one attributes encoder.
+  TestSkipAttributeTransformOnPointCloudWithColor("pc_color.drc");
+  TestSkipAttributeTransformOnPointCloudWithColor("pc_kd_color.drc");
+}
+
 } // namespace
@@ -28,7 +28,7 @@
 #include "draco/mesh/prediction_degree_traverser.h"
 
 namespace draco {
-// TODO() consider converting 'typedef' to 'using' and deduplicate.
+// TODO(draco-eng) consider converting 'typedef' to 'using' and deduplicate.
 typedef CornerIndex CornerIndex;
 typedef FaceIndex FaceIndex;
 typedef VertexIndex VertexIndex;
@@ -28,7 +28,7 @@ namespace draco {
 // draco_index_type.h .
 // TODO(ostava): Make the interface more complete. It's currently missing
 // features such as iterators.
-// TODO(): Make unit tests for this class.
+// TODO(draco-eng): Make unit tests for this class.
 template <class IndexTypeT, class ValueTypeT>
 class IndexTypeVector {
  public:
@@ -18,7 +18,7 @@
 namespace draco {
 
 // Draco version is comprised of <major>.<minor>.<revision>.
-static const char kDracoVersion[] = "1.3.0";
+static const char kDracoVersion[] = "1.3.1";
 
 const char *Version() { return kDracoVersion; }
 
@@ -113,7 +113,7 @@ class MeshAttributeCornerTable {
   bool no_interior_seams() const { return no_interior_seams_; }
   const CornerTable *corner_table() const { return corner_table_; }
 
-  // TODO(): extract valence functions into a reusable class/object
+  // TODO(draco-eng): extract valence functions into a reusable class/object
   // also from 'corner_table.*'
 
   // Returns the valence (or degree) of a vertex.
@@ -100,7 +100,7 @@ class ValenceCache {
     return vertex_valence_cache_8_bit_[v];
   }
 
-  // TODO() Add unit tests for ValenceCache functions.
+  // TODO(draco-eng) Add unit tests for ValenceCache functions.
   inline int32_t ValenceFromCache(VertexIndex v) const {
     DRACO_DCHECK_EQ(vertex_valence_cache_32_bit_.size(), table_.num_vertices());
     if (v == kInvalidVertexIndex || v.value() >= table_.num_vertices())
BIN testdata/pc_color.drc (new vendored file; binary file not shown)
BIN testdata/pc_kd_color.drc (new vendored file; binary file not shown)