glTF: add Draco shared library for mesh compression.

Draco is added as a library under extern/ and builds a shared library that is
installed into the Python site-packages. This is then loaded by the glTF add-on
to do mesh compression.

Differential Revision: https://developer.blender.org/D4501
This commit is contained in:
Benjamin Schmithüsen 2019-04-11 11:26:23 +02:00 committed by Brecht Van Lommel
parent a9d6356fee
commit 4bad4bfc6a
350 changed files with 41438 additions and 0 deletions

View File

@ -344,6 +344,7 @@ option(WITH_LZMA "Enable best LZMA compression, (used for pointcache)"
if(UNIX AND NOT APPLE)
option(WITH_SYSTEM_LZO "Use the system LZO library" OFF)
endif()
option(WITH_DRACO "Enable Draco mesh compression Python module (used for glTF)" ON)
# Camera/motion tracking
option(WITH_LIBMV "Enable Libmv structure from motion library" ON)
@ -636,6 +637,7 @@ endif()
if(NOT WITH_PYTHON)
set(WITH_CYCLES OFF)
set(WITH_DRACO OFF)
endif()
# enable boost for cycles, audaspace or i18n

View File

@ -12,6 +12,7 @@ set(WITH_CODEC_FFMPEG ON CACHE BOOL "" FORCE)
set(WITH_CODEC_SNDFILE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)

View File

@ -17,6 +17,7 @@ set(WITH_CODEC_FFMPEG OFF CACHE BOOL "" FORCE)
set(WITH_CODEC_SNDFILE OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES OFF CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL OFF CACHE BOOL "" FORCE)
set(WITH_DRACO OFF CACHE BOOL "" FORCE)
set(WITH_FFTW3 OFF CACHE BOOL "" FORCE)
set(WITH_LIBMV OFF CACHE BOOL "" FORCE)
set(WITH_LLVM OFF CACHE BOOL "" FORCE)

View File

@ -13,6 +13,7 @@ set(WITH_CODEC_FFMPEG ON CACHE BOOL "" FORCE)
set(WITH_CODEC_SNDFILE ON CACHE BOOL "" FORCE)
set(WITH_CYCLES ON CACHE BOOL "" FORCE)
set(WITH_CYCLES_OSL ON CACHE BOOL "" FORCE)
set(WITH_DRACO ON CACHE BOOL "" FORCE)
set(WITH_FFTW3 ON CACHE BOOL "" FORCE)
set(WITH_LIBMV ON CACHE BOOL "" FORCE)
set(WITH_LIBMV_SCHUR_SPECIALIZATIONS ON CACHE BOOL "" FORCE)

View File

@ -41,6 +41,10 @@ if(WITH_BULLET)
endif()
endif()
if(WITH_DRACO)
add_subdirectory(draco)
endif()
# now only available in a branch
#if(WITH_MOD_CLOTH_ELTOPO)
# add_subdirectory(eltopo)

29
extern/draco/CMakeLists.txt vendored Normal file
View File

@ -0,0 +1,29 @@
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2019, Blender Foundation
# All rights reserved.
# ***** END GPL LICENSE BLOCK *****
# Draco uses C++14; require it rather than allowing CMake to silently
# decay to an older standard when the compiler lacks support.
set(CMAKE_CXX_STANDARD 14)
set(CMAKE_CXX_STANDARD_REQUIRED ON)

# Build Draco library.
add_subdirectory(dracoenc)

# Build blender-draco-exporter module, the shared library that is installed
# into the Python site-packages and loaded by the glTF add-on to do mesh
# compression (see commit message).
add_library(extern_draco SHARED src/draco-compressor.cpp)
# Make the include path explicit to the current directory so this still works
# if the file is ever included from another directory scope.
target_include_directories(extern_draco PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/dracoenc/src")
target_link_libraries(extern_draco PUBLIC dracoenc)

7
extern/draco/dracoenc/AUTHORS vendored Normal file
View File

@ -0,0 +1,7 @@
# This is the list of Draco authors for copyright purposes.
#
# This does not necessarily list everyone who has contributed code, since in
# some cases, their employer may be the copyright holder. To see the full list
# of contributors, see the revision history in source control.
Google Inc.
and other contributors

185
extern/draco/dracoenc/CMakeLists.txt vendored Normal file
View File

@ -0,0 +1,185 @@
# Build the bundled Draco encoder library (third-party code, Apache-2.0).
# remove_strict_flags() is a Blender build macro — presumably it relaxes the
# strict warning flags for external code; TODO confirm in build_files/cmake.
remove_strict_flags()
# Encoder-only subset of upstream Draco (decoding is not needed for export).
set(SRC
  # Keyframe animation encoding.
  src/draco/animation/keyframe_animation.cc
  src/draco/animation/keyframe_animation_encoder.cc
  src/draco/animation/keyframe_animation_encoder.h
  src/draco/animation/keyframe_animation.h
  # Geometry attributes and the transforms applied before encoding.
  src/draco/attributes/attribute_octahedron_transform.cc
  src/draco/attributes/attribute_octahedron_transform.h
  src/draco/attributes/attribute_quantization_transform.cc
  src/draco/attributes/attribute_quantization_transform.h
  src/draco/attributes/attribute_transform.cc
  src/draco/attributes/attribute_transform_data.h
  src/draco/attributes/attribute_transform.h
  src/draco/attributes/attribute_transform_type.h
  src/draco/attributes/geometry_attribute.cc
  src/draco/attributes/geometry_attribute.h
  src/draco/attributes/geometry_indices.h
  src/draco/attributes/point_attribute.cc
  src/draco/attributes/point_attribute.h
  # Per-attribute encoders, sequencers and prediction schemes.
  src/draco/compression/attributes/attributes_encoder.cc
  src/draco/compression/attributes/attributes_encoder.h
  src/draco/compression/attributes/kd_tree_attributes_encoder.cc
  src/draco/compression/attributes/kd_tree_attributes_encoder.h
  src/draco/compression/attributes/linear_sequencer.h
  src/draco/compression/attributes/points_sequencer.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h
  src/draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.cc
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_encoding_transform.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_canonicalized_transform_base.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_encoding_transform.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_normal_octahedron_transform_base.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_encoding_transform.h
  src/draco/compression/attributes/prediction_schemes/prediction_scheme_wrap_transform_base.h
  src/draco/compression/attributes/sequential_attribute_encoder.cc
  src/draco/compression/attributes/sequential_attribute_encoder.h
  src/draco/compression/attributes/sequential_attribute_encoders_controller.cc
  src/draco/compression/attributes/sequential_attribute_encoders_controller.h
  src/draco/compression/attributes/sequential_integer_attribute_encoder.cc
  src/draco/compression/attributes/sequential_integer_attribute_encoder.h
  src/draco/compression/attributes/sequential_normal_attribute_encoder.cc
  src/draco/compression/attributes/sequential_normal_attribute_encoder.h
  src/draco/compression/attributes/sequential_quantization_attribute_encoder.cc
  src/draco/compression/attributes/sequential_quantization_attribute_encoder.h
  # Entropy (rANS) bit coders.
  src/draco/compression/bit_coders/adaptive_rans_bit_coding_shared.h
  src/draco/compression/bit_coders/adaptive_rans_bit_encoder.cc
  src/draco/compression/bit_coders/adaptive_rans_bit_encoder.h
  src/draco/compression/bit_coders/direct_bit_encoder.cc
  src/draco/compression/bit_coders/direct_bit_encoder.h
  src/draco/compression/bit_coders/folded_integer_bit_encoder.h
  src/draco/compression/bit_coders/rans_bit_encoder.cc
  src/draco/compression/bit_coders/rans_bit_encoder.h
  src/draco/compression/bit_coders/symbol_bit_encoder.cc
  src/draco/compression/bit_coders/symbol_bit_encoder.h
  # Encoder configuration.
  src/draco/compression/config/compression_shared.h
  src/draco/compression/config/draco_options.h
  src/draco/compression/config/encoder_options.h
  src/draco/compression/config/encoding_features.h
  # Top-level encode entry points.
  src/draco/compression/encode_base.h
  src/draco/compression/encode.cc
  src/draco/compression/encode.h
  src/draco/compression/entropy/ans.h
  src/draco/compression/entropy/rans_symbol_coding.h
  src/draco/compression/entropy/rans_symbol_encoder.h
  src/draco/compression/entropy/shannon_entropy.cc
  src/draco/compression/entropy/shannon_entropy.h
  src/draco/compression/entropy/symbol_encoding.cc
  src/draco/compression/entropy/symbol_encoding.h
  src/draco/compression/expert_encode.cc
  src/draco/compression/expert_encode.h
  # Mesh connectivity (edgebreaker / sequential) encoders.
  src/draco/compression/mesh/mesh_edgebreaker_encoder.cc
  src/draco/compression/mesh/mesh_edgebreaker_encoder.h
  src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.cc
  src/draco/compression/mesh/mesh_edgebreaker_encoder_impl.h
  src/draco/compression/mesh/mesh_edgebreaker_encoder_impl_interface.h
  src/draco/compression/mesh/mesh_edgebreaker_shared.h
  src/draco/compression/mesh/mesh_edgebreaker_traversal_encoder.h
  src/draco/compression/mesh/mesh_edgebreaker_traversal_predictive_encoder.h
  src/draco/compression/mesh/mesh_edgebreaker_traversal_valence_encoder.h
  src/draco/compression/mesh/mesh_encoder.cc
  src/draco/compression/mesh/mesh_encoder.h
  src/draco/compression/mesh/mesh_encoder_helpers.h
  src/draco/compression/mesh/mesh_sequential_encoder.cc
  src/draco/compression/mesh/mesh_sequential_encoder.h
  src/draco/compression/mesh/traverser/depth_first_traverser.h
  src/draco/compression/mesh/traverser/max_prediction_degree_traverser.h
  src/draco/compression/mesh/traverser/mesh_attribute_indices_encoding_observer.h
  src/draco/compression/mesh/traverser/mesh_traversal_sequencer.h
  src/draco/compression/mesh/traverser/traverser_base.h
  # Point-cloud (kd-tree) encoders.
  src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.cc
  src/draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h
  src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.cc
  src/draco/compression/point_cloud/algorithms/float_points_tree_encoder.h
  src/draco/compression/point_cloud/algorithms/point_cloud_compression_method.h
  src/draco/compression/point_cloud/algorithms/point_cloud_types.h
  src/draco/compression/point_cloud/algorithms/quantize_points_3.h
  src/draco/compression/point_cloud/algorithms/queuing_policy.h
  src/draco/compression/point_cloud/point_cloud_encoder.cc
  src/draco/compression/point_cloud/point_cloud_encoder.h
  src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.cc
  src/draco/compression/point_cloud/point_cloud_kd_tree_encoder.h
  src/draco/compression/point_cloud/point_cloud_sequential_encoder.cc
  src/draco/compression/point_cloud/point_cloud_sequential_encoder.h
  # Core utilities (buffers, math, status types).
  src/draco/core/bit_utils.cc
  src/draco/core/bit_utils.h
  src/draco/core/bounding_box.cc
  src/draco/core/bounding_box.h
  src/draco/core/cycle_timer.cc
  src/draco/core/cycle_timer.h
  src/draco/core/data_buffer.cc
  src/draco/core/data_buffer.h
  src/draco/core/divide.cc
  src/draco/core/divide.h
  src/draco/core/draco_index_type.h
  src/draco/core/draco_index_type_vector.h
  src/draco/core/draco_types.cc
  src/draco/core/draco_types.h
  src/draco/core/encoder_buffer.cc
  src/draco/core/encoder_buffer.h
  src/draco/core/hash_utils.cc
  src/draco/core/hash_utils.h
  src/draco/core/macros.h
  src/draco/core/math_utils.h
  src/draco/core/options.cc
  src/draco/core/options.h
  src/draco/core/quantization_utils.cc
  src/draco/core/quantization_utils.h
  src/draco/core/status.h
  src/draco/core/statusor.h
  src/draco/core/varint_encoding.h
  src/draco/core/vector_d.h
  # Mesh data structures.
  src/draco/mesh/corner_table.cc
  src/draco/mesh/corner_table.h
  src/draco/mesh/corner_table_iterators.h
  src/draco/mesh/mesh_are_equivalent.cc
  src/draco/mesh/mesh_are_equivalent.h
  src/draco/mesh/mesh_attribute_corner_table.cc
  src/draco/mesh/mesh_attribute_corner_table.h
  src/draco/mesh/mesh.cc
  src/draco/mesh/mesh_cleanup.cc
  src/draco/mesh/mesh_cleanup.h
  src/draco/mesh/mesh.h
  src/draco/mesh/mesh_misc_functions.cc
  src/draco/mesh/mesh_misc_functions.h
  src/draco/mesh/mesh_stripifier.cc
  src/draco/mesh/mesh_stripifier.h
  src/draco/mesh/triangle_soup_mesh_builder.cc
  src/draco/mesh/triangle_soup_mesh_builder.h
  src/draco/mesh/valence_cache.h
  # Metadata attached to geometry.
  src/draco/metadata/geometry_metadata.cc
  src/draco/metadata/geometry_metadata.h
  src/draco/metadata/metadata.cc
  src/draco/metadata/metadata_encoder.cc
  src/draco/metadata/metadata_encoder.h
  src/draco/metadata/metadata.h
  # Point-cloud data structures.
  src/draco/point_cloud/point_cloud_builder.cc
  src/draco/point_cloud/point_cloud_builder.h
  src/draco/point_cloud/point_cloud.cc
  src/draco/point_cloud/point_cloud.h
)
set(INC
  src
)
# blender_add_lib(name sources includes includes_sys) — Blender build macro;
# presumably defined in build_files/cmake/macros.cmake (TODO confirm).
blender_add_lib(dracoenc "${SRC}" "${INC}" "")

202
extern/draco/dracoenc/LICENSE vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,3 @@
# Package configuration template. @PACKAGE_INIT@ and the @PACKAGE_*@ path
# placeholders are expanded by configure_package_config_file(); the expansion
# of @PACKAGE_INIT@ defines the set_and_check() helper used below, which
# errors out at find_package() time if a referenced path does not exist.
@PACKAGE_INIT@
set_and_check(draco_INCLUDE_DIR "@PACKAGE_draco_include_install_dir@")
set_and_check(draco_LIBRARY_DIR "@PACKAGE_draco_lib_install_dir@")

View File

@ -0,0 +1,58 @@
# Finddraco
#
# Locates draco and sets the following variables:
#
# draco_FOUND
# draco_INCLUDE_DIRS
# draco_LIBRARY_DIRS  (fixed: was documented as "draco_LIBARY_DIRS")
# draco_LIBRARIES
# draco_VERSION_STRING
#
# draco_FOUND is set to YES only when all other variables are successfully
# configured.
unset(draco_FOUND)
unset(draco_INCLUDE_DIRS)
unset(draco_LIBRARY_DIRS)
unset(draco_LIBRARIES)
unset(draco_VERSION_STRING)
mark_as_advanced(draco_FOUND)
mark_as_advanced(draco_INCLUDE_DIRS)
mark_as_advanced(draco_LIBRARY_DIRS)
mark_as_advanced(draco_LIBRARIES)
mark_as_advanced(draco_VERSION_STRING)

set(draco_version_file_no_prefix "draco/src/draco/core/draco_version.h")

# Set draco_INCLUDE_DIRS by locating the version header.
find_path(draco_INCLUDE_DIRS NAMES "${draco_version_file_no_prefix}")

# Extract the version string from draco_version.h.
if(draco_INCLUDE_DIRS)
  set(draco_version_file
      "${draco_INCLUDE_DIRS}/draco/src/draco/core/draco_version.h")
  file(STRINGS "${draco_version_file}" draco_version
       REGEX "kdracoVersion")
  list(GET draco_version 0 draco_version)
  # Strip the declaration text, trailing semicolon and quotes so only the
  # bare version value remains.
  string(REPLACE "static const char kdracoVersion[] = " "" draco_version
         "${draco_version}")
  string(REPLACE ";" "" draco_version "${draco_version}")
  string(REPLACE "\"" "" draco_version "${draco_version}")
  set(draco_VERSION_STRING ${draco_version})
endif()

# Find the library (shared vs. static names differ per platform).
if(BUILD_SHARED_LIBS)
  find_library(draco_LIBRARIES NAMES draco.dll libdraco.dylib libdraco.so)
else()
  find_library(draco_LIBRARIES NAMES draco.lib libdraco.a)
endif()

# Store path to library. Quoted so a draco_LIBRARIES-NOTFOUND or empty result
# still expands to exactly one argument.
get_filename_component(draco_LIBRARY_DIRS "${draco_LIBRARIES}" DIRECTORY)

if(draco_INCLUDE_DIRS AND draco_LIBRARY_DIRS AND draco_LIBRARIES AND
   draco_VERSION_STRING)
  set(draco_FOUND YES)
endif()

View File

@ -0,0 +1,216 @@
if (NOT DRACO_CMAKE_COMPILER_FLAGS_CMAKE_)
set(DRACO_CMAKE_COMPILER_FLAGS_CMAKE_ 1)
include(CheckCCompilerFlag)
include(CheckCXXCompilerFlag)
include("${draco_root}/cmake/compiler_tests.cmake")
# Strings used to cache failed C/CXX flags; a flag recorded here is not
# probed again on later configure runs.
set(DRACO_FAILED_C_FLAGS)
set(DRACO_FAILED_CXX_FLAGS)
# Checks C compiler for support of $c_flag. Adds $c_flag to $CMAKE_C_FLAGS when
# the compile test passes. Caches $c_flag in $DRACO_FAILED_C_FLAGS when the test
# fails.
macro (add_c_flag_if_supported c_flag)
  # Skip the probe when the flag is already in CMAKE_C_FLAGS or already known
  # to fail (substring search via string(FIND), -1 means "not present").
  unset(C_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
  unset(C_FLAG_FAILED CACHE)
  string(FIND "${DRACO_FAILED_C_FLAGS}" "${c_flag}" C_FLAG_FAILED)
  if (${C_FLAG_FOUND} EQUAL -1 AND ${C_FLAG_FAILED} EQUAL -1)
    unset(C_FLAG_SUPPORTED CACHE)
    message("Checking C compiler flag support for: " ${c_flag})
    check_c_compiler_flag("${c_flag}" C_FLAG_SUPPORTED)
    if (${C_FLAG_SUPPORTED})
      # Supported: append to the cached C flags.
      set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${c_flag}" CACHE STRING "")
    else ()
      # Unsupported: remember the failure so the probe is not repeated.
      set(DRACO_FAILED_C_FLAGS "${DRACO_FAILED_C_FLAGS} ${c_flag}" CACHE STRING
          "" FORCE)
    endif ()
  endif ()
endmacro ()
# Checks C++ compiler for support of $cxx_flag. Adds $cxx_flag to
# $CMAKE_CXX_FLAGS when the compile test passes. Caches $cxx_flag in
# $DRACO_FAILED_CXX_FLAGS when the test fails.
macro (add_cxx_flag_if_supported cxx_flag)
  unset(CXX_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
  unset(CXX_FLAG_FAILED CACHE)
  string(FIND "${DRACO_FAILED_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FAILED)
  if (${CXX_FLAG_FOUND} EQUAL -1 AND ${CXX_FLAG_FAILED} EQUAL -1)
    unset(CXX_FLAG_SUPPORTED CACHE)
    message("Checking CXX compiler flag support for: " ${cxx_flag})
    check_cxx_compiler_flag("${cxx_flag}" CXX_FLAG_SUPPORTED)
    if (${CXX_FLAG_SUPPORTED})
      set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${cxx_flag}" CACHE STRING "")
    else()
      set(DRACO_FAILED_CXX_FLAGS "${DRACO_FAILED_CXX_FLAGS} ${cxx_flag}" CACHE
          STRING "" FORCE)
    endif ()
  endif ()
endmacro ()
# Convenience method for adding a flag to both the C and C++ compiler command
# lines.
macro (add_compiler_flag_if_supported flag)
  add_c_flag_if_supported(${flag})
  add_cxx_flag_if_supported(${flag})
endmacro ()
# Checks C compiler for support of $c_flag and terminates generation when
# support is not present.
macro (require_c_flag c_flag update_c_flags)
  unset(C_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_C_FLAGS}" "${c_flag}" C_FLAG_FOUND)
  if (${C_FLAG_FOUND} EQUAL -1)
    unset(HAVE_C_FLAG CACHE)
    message("Checking C compiler flag support for: " ${c_flag})
    check_c_compiler_flag("${c_flag}" HAVE_C_FLAG)
    if (NOT ${HAVE_C_FLAG})
      message(FATAL_ERROR
              "${PROJECT_NAME} requires support for C flag: ${c_flag}.")
    endif ()
    if (${update_c_flags})
      # Required flags are prepended (unlike add_c_flag_if_supported, which
      # appends) and forced into the cache.
      set(CMAKE_C_FLAGS "${c_flag} ${CMAKE_C_FLAGS}" CACHE STRING "" FORCE)
    endif ()
  endif ()
endmacro ()
# Checks CXX compiler for support of $cxx_flag and terminates generation when
# support is not present.
macro (require_cxx_flag cxx_flag update_cxx_flags)
  unset(CXX_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_CXX_FLAGS}" "${cxx_flag}" CXX_FLAG_FOUND)
  if (${CXX_FLAG_FOUND} EQUAL -1)
    unset(HAVE_CXX_FLAG CACHE)
    message("Checking CXX compiler flag support for: " ${cxx_flag})
    check_cxx_compiler_flag("${cxx_flag}" HAVE_CXX_FLAG)
    if (NOT ${HAVE_CXX_FLAG})
      message(FATAL_ERROR
              "${PROJECT_NAME} requires support for CXX flag: ${cxx_flag}.")
    endif ()
    if (${update_cxx_flags})
      set(CMAKE_CXX_FLAGS "${cxx_flag} ${CMAKE_CXX_FLAGS}" CACHE STRING ""
          FORCE)
    endif ()
  endif ()
endmacro ()
# Checks for support of $flag by both the C and CXX compilers. Terminates
# generation when support is not present in both compilers.
macro (require_compiler_flag flag update_cmake_flags)
  require_c_flag(${flag} ${update_cmake_flags})
  require_cxx_flag(${flag} ${update_cmake_flags})
endmacro ()
# Checks only non-MSVC targets for support of $c_flag and terminates generation
# when support is not present.
macro (require_c_flag_nomsvc c_flag update_c_flags)
  if (NOT MSVC)
    require_c_flag(${c_flag} ${update_c_flags})
  endif ()
endmacro ()
# Checks only non-MSVC targets for support of $cxx_flag and terminates
# generation when support is not present.
macro (require_cxx_flag_nomsvc cxx_flag update_cxx_flags)
  if (NOT MSVC)
    require_cxx_flag(${cxx_flag} ${update_cxx_flags})
  endif ()
endmacro ()
# Checks only non-MSVC targets for support of $flag by both the C and CXX
# compilers. Terminates generation when support is not present in both
# compilers.
macro (require_compiler_flag_nomsvc flag update_cmake_flags)
  require_c_flag_nomsvc(${flag} ${update_cmake_flags})
  require_cxx_flag_nomsvc(${flag} ${update_cmake_flags})
endmacro ()
# Appends $flag to the assembler command line (DRACO_AS_FLAGS) unless the
# flag string already occurs in it.
macro(append_as_flag flag)
  # string(FIND) yields -1 when the flag is not yet present.
  unset(as_flag_pos CACHE)
  string(FIND "${DRACO_AS_FLAGS}" "${flag}" as_flag_pos)
  if(as_flag_pos EQUAL -1)
    set(DRACO_AS_FLAGS "${DRACO_AS_FLAGS} ${flag}")
  endif()
endmacro()
# Adds $flag to the C compiler command line.
# Note: duplicate detection is a plain substring search, so a flag that is a
# substring of an already-present flag is treated as present.
macro (append_c_flag flag)
  unset(C_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_C_FLAGS}" "${flag}" C_FLAG_FOUND)
  if (${C_FLAG_FOUND} EQUAL -1)
    set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${flag}")
  endif ()
endmacro ()
# Adds $flag to the CXX compiler command line.
macro (append_cxx_flag flag)
  unset(CXX_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_CXX_FLAGS}" "${flag}" CXX_FLAG_FOUND)
  if (${CXX_FLAG_FOUND} EQUAL -1)
    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${flag}")
  endif ()
endmacro ()
# Adds $flag to the C and CXX compiler command lines.
macro (append_compiler_flag flag)
  append_c_flag(${flag})
  append_cxx_flag(${flag})
endmacro ()
# Adds $flag to the executable linker command line.
macro (append_exe_linker_flag flag)
  unset(LINKER_FLAG_FOUND CACHE)
  string(FIND "${CMAKE_EXE_LINKER_FLAGS}" "${flag}" LINKER_FLAG_FOUND)
  if (${LINKER_FLAG_FOUND} EQUAL -1)
    set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${flag}")
  endif ()
endmacro ()
# Adds $flags to the LINK_FLAGS property of $target, unless the property
# already contains the $flags string.
function (append_link_flag_to_target target flags)
  unset(target_link_flags)
  # Returns <var>-NOTFOUND (falsy in if()) when the property is unset.
  get_target_property(target_link_flags ${target} LINK_FLAGS)
  if (target_link_flags)
    unset(link_flag_found)
    string(FIND "${target_link_flags}" "${flags}" link_flag_found)
    if (NOT ${link_flag_found} EQUAL -1)
      # Already present: nothing to do.
      return()
    endif ()
    set(target_link_flags "${target_link_flags} ${flags}")
  else ()
    set(target_link_flags "${flags}")
  endif ()
  # Quote the value: an unquoted expansion is split on semicolons into
  # multiple arguments, and an empty value would leave LINK_FLAGS with no
  # value argument at all — both make set_target_properties() fail.
  set_target_properties(${target} PROPERTIES LINK_FLAGS "${target_link_flags}")
endfunction ()
# Adds $flag to the executable linker flags and verifies that both C and CXX
# test programs still build; generation stops with a fatal error otherwise.
macro(require_linker_flag flag)
  append_exe_linker_flag(${flag})
  unset(link_c_ok)
  unset(link_cxx_ok)
  draco_check_c_compiles("LINKER_FLAG_C_TEST(${flag})" "" link_c_ok)
  draco_check_cxx_compiles("LINKER_FLAG_CXX_TEST(${flag})" "" link_cxx_ok)
  if(NOT link_c_ok OR NOT link_cxx_ok)
    message(FATAL_ERROR "Linker flag test for ${flag} failed.")
  endif()
endmacro()
endif () # DRACO_CMAKE_COMPILER_FLAGS_CMAKE_

View File

@ -0,0 +1,124 @@
# Include guard: the matching endif() is at the bottom of this file.
if (NOT DRACO_CMAKE_COMPILER_TESTS_CMAKE_)
set(DRACO_CMAKE_COMPILER_TESTS_CMAKE_ 1)
include(CheckCSourceCompiles)
include(CheckCXXSourceCompiles)
# The basic main() macro used in all compile tests.
set(DRACO_C_MAIN "\nint main(void) { return 0; }")
set(DRACO_CXX_MAIN "\nint main() { return 0; }")
# Strings containing the names of passed and failed tests.
# NOTE(review): set() with no value unsets the variable rather than defining
# an empty string; the test macros below only ever append, so this works.
set(DRACO_C_PASSED_TESTS)
set(DRACO_C_FAILED_TESTS)
set(DRACO_CXX_PASSED_TESTS)
set(DRACO_CXX_FAILED_TESTS)
# Saves the current value of variable $var (passed by name) into SAVED_${var}
# and then sets ${var} to $new_value. Restore with draco_pop_var().
macro(draco_push_var var new_value)
  # Bug fix: save the *value* of ${var} (i.e. ${${var}}), not the variable's
  # name string, so draco_pop_var() can actually restore it.
  set(SAVED_${var} ${${var}})
  set(${var} ${new_value})
endmacro ()
# Restores the value of variable $var (passed by name) saved by
# draco_push_var(), then discards the saved copy.
macro(draco_pop_var var)
  # Bug fix: restore into ${var} (the named variable), not into a literal
  # variable called "var".
  set(${var} ${SAVED_${var}})
  unset(SAVED_${var})
endmacro ()
# Confirms $test_source compiles and stores $test_name in one of
# $DRACO_C_PASSED_TESTS or $DRACO_C_FAILED_TESTS depending on out come. When the
# test passes $result_var is set to 1. When it fails $result_var is unset.
# The test is not run if the test name is found in either of the passed or
# failed test variables.
macro(draco_check_c_compiles test_name test_source result_var)
# Clear any cache entries that would shadow the string(FIND) results below.
unset(C_TEST_PASSED CACHE)
unset(C_TEST_FAILED CACHE)
string(FIND "${DRACO_C_PASSED_TESTS}" "${test_name}" C_TEST_PASSED)
string(FIND "${DRACO_C_FAILED_TESTS}" "${test_name}" C_TEST_FAILED)
# -1 for both finds: this test has not run before in this configure pass.
if (${C_TEST_PASSED} EQUAL -1 AND ${C_TEST_FAILED} EQUAL -1)
unset(C_TEST_COMPILED CACHE)
message("Running C compiler test: ${test_name}")
# DRACO_C_MAIN supplies the int main(void) body every test needs to link.
check_c_source_compiles("${test_source} ${DRACO_C_MAIN}" C_TEST_COMPILED)
set(${result_var} ${C_TEST_COMPILED})
if (${C_TEST_COMPILED})
set(DRACO_C_PASSED_TESTS "${DRACO_C_PASSED_TESTS} ${test_name}")
else ()
set(DRACO_C_FAILED_TESTS "${DRACO_C_FAILED_TESTS} ${test_name}")
message("C Compiler test ${test_name} failed.")
endif ()
elseif (NOT ${C_TEST_PASSED} EQUAL -1)
# Cached pass from an earlier invocation.
set(${result_var} 1)
else () # ${C_TEST_FAILED} NOT EQUAL -1
# Cached failure: the result variable is left unset.
unset(${result_var})
endif ()
endmacro ()
# Confirms $test_source compiles and stores $test_name in one of
# $DRACO_CXX_PASSED_TESTS or $DRACO_CXX_FAILED_TESTS depending on out come. When
# the test passes $result_var is set to 1. When it fails $result_var is unset.
# The test is not run if the test name is found in either of the passed or
# failed test variables.
# CXX counterpart of draco_check_c_compiles(); identical structure.
macro(draco_check_cxx_compiles test_name test_source result_var)
# Clear any cache entries that would shadow the string(FIND) results below.
unset(CXX_TEST_PASSED CACHE)
unset(CXX_TEST_FAILED CACHE)
string(FIND "${DRACO_CXX_PASSED_TESTS}" "${test_name}" CXX_TEST_PASSED)
string(FIND "${DRACO_CXX_FAILED_TESTS}" "${test_name}" CXX_TEST_FAILED)
# -1 for both finds: this test has not run before in this configure pass.
if (${CXX_TEST_PASSED} EQUAL -1 AND ${CXX_TEST_FAILED} EQUAL -1)
unset(CXX_TEST_COMPILED CACHE)
message("Running CXX compiler test: ${test_name}")
check_cxx_source_compiles("${test_source} ${DRACO_CXX_MAIN}"
CXX_TEST_COMPILED)
set(${result_var} ${CXX_TEST_COMPILED})
if (${CXX_TEST_COMPILED})
set(DRACO_CXX_PASSED_TESTS "${DRACO_CXX_PASSED_TESTS} ${test_name}")
else ()
set(DRACO_CXX_FAILED_TESTS "${DRACO_CXX_FAILED_TESTS} ${test_name}")
message("CXX Compiler test ${test_name} failed.")
endif ()
elseif (NOT ${CXX_TEST_PASSED} EQUAL -1)
# Cached pass from an earlier invocation.
set(${result_var} 1)
else () # ${CXX_TEST_FAILED} NOT EQUAL -1
# Cached failure: the result variable is left unset.
unset(${result_var})
endif ()
endmacro ()
# Convenience macro that confirms $test_source compiles as C and C++.
# $result_var is set to 1 when both tests are successful, and 0 when one or
# both tests fail.
# Note: This macro is intended to be used to write to result variables that
# are expanded via configure_file(). $result_var is set to 1 or 0 to allow
# direct usage of the value in generated source files.
macro(draco_check_source_compiles test_name test_source result_var)
  unset(C_PASSED)
  unset(CXX_PASSED)
  draco_check_c_compiles(${test_name} ${test_source} C_PASSED)
  draco_check_cxx_compiles(${test_name} ${test_source} CXX_PASSED)
  # Bug fix: test the variables by name and let if() dereference them. The
  # previous form expanded ${C_PASSED}/${CXX_PASSED} inline, which produced a
  # malformed condition ("if(AND ...)") whenever a failed check left one of
  # the variables unset.
  if (C_PASSED AND CXX_PASSED)
    set(${result_var} 1)
  else ()
    set(${result_var} 0)
  endif ()
endmacro ()
# When inline support is detected for the current compiler the supported
# inlining keyword is written to $result in caller scope.
macro (draco_get_inline result)
  draco_check_source_compiles("inline_check_1"
                              "static inline void macro(void) {}"
                              HAVE_INLINE_1)
  if (HAVE_INLINE_1 EQUAL 1)
    set(${result} "inline")
  else ()
    # Bug fix: the original used return() for the early exit above. Inside a
    # macro, return() returns from the *caller's* scope and would abort
    # processing of the file that invoked draco_get_inline(). Fall back to
    # checking __inline instead.
    draco_check_source_compiles("inline_check_2"
                                "static __inline void macro(void) {}"
                                HAVE_INLINE_2)
    if (HAVE_INLINE_2 EQUAL 1)
      set(${result} "__inline")
    endif ()
  endif ()
endmacro ()
endif () # DRACO_CMAKE_COMPILER_TESTS_CMAKE_

View File

@ -0,0 +1,57 @@
# Include guard: the matching endif() is at the bottom of this file.
if (NOT DRACO_CMAKE_DRACO_FEATURES_CMAKE_)
set(DRACO_CMAKE_DRACO_FEATURES_CMAKE_ 1)
# Output path of the generated feature header. NOTE(review): draco_build_dir
# is expected to be defined by the including listfile — confirm.
set(draco_features_file_name "${draco_build_dir}/draco/draco_features.h")
# Accumulated list of feature preprocessor symbols (see draco_enable_feature).
set(draco_features_list)
# Macro that handles tracking of Draco preprocessor symbols for the purpose of
# producing draco_features.h.
#
# draco_enable_feature(FEATURE <feature_name> [TARGETS <target_name>])
# FEATURE is required. It should be a Draco preprocessor symbol.
# TARGETS is optional. It can be one or more draco targets.
#
# When the TARGETS argument is not present the preproc symbol is added to
# draco_features.h. When it is draco_features.h is unchanged, and
# target_compile_options() is called for each target specified.
macro (draco_enable_feature)
  set(def_flags)
  set(def_single_arg_opts FEATURE)
  set(def_multi_arg_opts TARGETS)
  cmake_parse_arguments(DEF "${def_flags}" "${def_single_arg_opts}"
                        "${def_multi_arg_opts}" ${ARGN})
  if ("${DEF_FEATURE}" STREQUAL "")
    message(FATAL_ERROR "Empty FEATURE passed to draco_enable_feature().")
  endif ()

  # Do nothing if $DEF_FEATURE is already in the list.
  # Bug fix: the original used return() here; inside a macro return() exits
  # the *caller's* scope, aborting whatever file or function invoked this
  # macro. Guard the body with a condition instead.
  list(FIND draco_features_list ${DEF_FEATURE} df_index)
  if (df_index EQUAL -1)
    list(LENGTH DEF_TARGETS df_targets_list_length)
    if (${df_targets_list_length} EQUAL 0)
      list(APPEND draco_features_list ${DEF_FEATURE})
    else ()
      foreach (target ${DEF_TARGETS})
        target_compile_definitions(${target} PRIVATE ${DEF_FEATURE})
      endforeach ()
    endif ()
  endif ()
endmacro ()
# Writes draco_features.h: a header guard plus one #define per symbol
# collected in $draco_features_list via draco_enable_feature().
function(draco_generate_features_h)
  # Assemble the full header text first, then write it in one pass.
  set(features_h_text
      "// GENERATED FILE -- DO NOT EDIT\n\n#ifndef DRACO_FEATURES_H_\n#define DRACO_FEATURES_H_\n\n")
  foreach(feature ${draco_features_list})
    set(features_h_text "${features_h_text}#define ${feature}\n")
  endforeach()
  set(features_h_text "${features_h_text}\n#endif // DRACO_FEATURES_H_")
  file(WRITE "${draco_features_file_name}" "${features_h_text}")
endfunction()
endif () # DRACO_CMAKE_DRACO_FEATURES_CMAKE_

View File

@ -0,0 +1,13 @@
// Template for the test configuration header. The ${...} placeholders below
// are substituted by CMake (configure_file()) at generation time.
#ifndef DRACO_TESTING_DRACO_TEST_CONFIG_H_
#define DRACO_TESTING_DRACO_TEST_CONFIG_H_
// If this file is named draco_test_config.h.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_test_config.h:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
#define DRACO_TEST_DATA_DIR "${DRACO_TEST_DATA_DIR}"
#define DRACO_TEST_TEMP_DIR "${DRACO_TEST_TEMP_DIR}"
#endif // DRACO_TESTING_DRACO_TEST_CONFIG_H_

View File

@ -0,0 +1,21 @@
// If this file is named draco_version.cc.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_version.cc:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
#include "draco_version.h"

// Values substituted by CMake at generation time from the git repository
// state (hash and `git describe` output). NOTE(review): presumably empty when
// not building from a git checkout — confirm against the CMake side.
static const char kDracoGitHash[] = "${draco_git_hash}";
static const char kDracoGitDesc[] = "${draco_git_desc}";

// Returns the git hash baked in at generation time.
const char *draco_git_hash() {
return kDracoGitHash;
}

// Returns the `git describe` string baked in at generation time.
const char *draco_git_version() {
return kDracoGitDesc;
}

// Returns the library version string from core/draco_version.h.
const char* draco_version() {
return draco::Version();
}

View File

@ -0,0 +1,21 @@
// If this file is named draco_version.h.cmake:
// This file is used as input at cmake generation time.
// If this file is named draco_version.h:
// GENERATED FILE, DO NOT EDIT. SEE ABOVE.
#ifndef DRACO_DRACO_VERSION_H_
#define DRACO_DRACO_VERSION_H_
#include "draco/core/draco_version.h"

// Returns git hash of Draco git repository.
const char *draco_git_hash();

// Returns the output of the git describe command when run from the Draco git
// repository.
const char *draco_git_version();

// Returns the version string from core/draco_version.h.
const char* draco_version();
#endif // DRACO_DRACO_VERSION_H_

View File

@ -0,0 +1,14 @@
cmake_minimum_required(VERSION 3.2)
if (MSVC)
# Use statically linked versions of the MS standard libraries.
# Rewrites /MD (DLL runtime) to /MT (static runtime) in every CXX flag
# variable unless the user opted into the DLL runtime via MSVC_RUNTIME=dll.
# NOTE(review): only CMAKE_CXX_FLAGS* are rewritten; CMAKE_C_FLAGS* are left
# untouched — presumably fine for a C++-only build, but confirm.
if (NOT "${MSVC_RUNTIME}" STREQUAL "dll")
foreach (flag_var
CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if (${flag_var} MATCHES "/MD")
string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
endif ()
endforeach ()
endif ()
endif ()

View File

@ -0,0 +1,19 @@
# Enables a compiler sanitizer when the SANITIZE variable names one (e.g.
# "address", "undefined"). No-op for MSVC or when SANITIZE is unset.
if (NOT DRACO_CMAKE_SANITIZERS_CMAKE_)
set(DRACO_CMAKE_SANITIZERS_CMAKE_ 1)
if (MSVC OR NOT SANITIZE)
# return() at file scope ends processing of this included file only.
return ()
endif ()
include("${draco_root}/cmake/compiler_flags.cmake")
string(TOLOWER ${SANITIZE} SANITIZE)
# Require the sanitizer requested.
require_linker_flag("-fsanitize=${SANITIZE}")
require_compiler_flag("-fsanitize=${SANITIZE}" YES)
# Make callstacks accurate.
require_compiler_flag("-fno-omit-frame-pointer -fno-optimize-sibling-calls" YES)
endif() # DRACO_CMAKE_SANITIZERS_CMAKE_

View File

@ -0,0 +1,13 @@
# Shared settings for the per-architecture iOS arm toolchain files, which set
# CMAKE_SYSTEM_PROCESSOR before including this file.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_)
# Bug fix: the guard variable set here (DRACO_CMAKE_ARM_IOS_COMMON_CMAKE_)
# did not match the variable tested above, so the include guard never took
# effect and the file was reprocessed on every include.
set(DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Darwin")
set(CMAKE_OSX_SYSROOT iphoneos)
set(CMAKE_C_COMPILER clang)
set(CMAKE_C_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
set(CMAKE_CXX_COMPILER clang++)
set(CMAKE_CXX_COMPILER_ARG1 "-arch ${CMAKE_SYSTEM_PROCESSOR}")
# TODO(tomfinegan): Handle bit code embedding.
endif () # DRACO_CMAKE_TOOLCHAINS_ARM_IOS_COMMON_CMAKE_

View File

@ -0,0 +1,12 @@
# Toolchain for cross-compiling Draco for Android arm64-v8a with the NDK and
# libc++. CMAKE_ANDROID_NDK must be set in the environment (see util.cmake).
if (NOT DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/../util.cmake")
set(CMAKE_SYSTEM_NAME Android)
set(CMAKE_ANDROID_ARCH_ABI arm64-v8a)
require_variable(CMAKE_ANDROID_NDK)
# Defaults below are overridable via same-named environment variables.
set_variable_if_unset(CMAKE_SYSTEM_VERSION 21)
set_variable_if_unset(CMAKE_ANDROID_STL_TYPE c++_static)
endif () # DRACO_CMAKE_TOOLCHAINS_ARM64_ANDROID_NDK_LIBCPP_CMAKE_

View File

@ -0,0 +1,14 @@
# Toolchain for cross-compiling Draco for iOS arm64.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_ 1)
if (XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif ()
set(CMAKE_SYSTEM_PROCESSOR "arm64")
set(CMAKE_OSX_ARCHITECTURES "arm64")
# Compiler/sysroot settings shared by all iOS arm variants.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")
endif () # DRACO_CMAKE_TOOLCHAINS_ARM64_IOS_CMAKE_

View File

@ -0,0 +1,18 @@
# Toolchain for cross-compiling Draco for Linux arm64 with a GCC cross
# toolchain. The CROSS variable selects the tool prefix.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if ("${CROSS}" STREQUAL "")
# Default the cross compiler prefix to something known to work.
set(CROSS aarch64-linux-gnu-)
endif ()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_CXX_COMPILER_ARG1 "-march=armv8-a")
set(CMAKE_SYSTEM_PROCESSOR "arm64")
endif () # DRACO_CMAKE_TOOLCHAINS_ARM64_LINUX_GCC_CMAKE_

View File

@ -0,0 +1,12 @@
# Toolchain for cross-compiling Draco for Android armeabi-v7a with the NDK
# and libc++. CMAKE_ANDROID_NDK must be set in the environment.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/../util.cmake")
set(CMAKE_SYSTEM_NAME Android)
set(CMAKE_ANDROID_ARCH_ABI armeabi-v7a)
require_variable(CMAKE_ANDROID_NDK)
# Defaults below are overridable via same-named environment variables.
set_variable_if_unset(CMAKE_SYSTEM_VERSION 18)
set_variable_if_unset(CMAKE_ANDROID_STL_TYPE c++_static)
endif () # DRACO_CMAKE_TOOLCHAINS_ARMV7_ANDROID_NDK_LIBCPP_CMAKE_

View File

@ -0,0 +1,14 @@
# Toolchain for cross-compiling Draco for iOS armv7.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_ 1)
if (XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif ()
set(CMAKE_SYSTEM_PROCESSOR "armv7")
set(CMAKE_OSX_ARCHITECTURES "armv7")
# Compiler/sysroot settings shared by all iOS arm variants.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")
endif () # DRACO_CMAKE_TOOLCHAINS_ARMV7_IOS_CMAKE_

View File

@ -0,0 +1,24 @@
# Toolchain for cross-compiling Draco for Linux armv7 with a GCC cross
# toolchain. The CROSS variable selects the tool prefix.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_ 1)
set(CMAKE_SYSTEM_NAME "Linux")
if ("${CROSS}" STREQUAL "")
# Default the cross compiler prefix to something known to work.
set(CROSS arm-linux-gnueabihf-)
endif ()
# Use the softfp float ABI unless the tool prefix is a hard-float ("...hf-")
# triple.
if (NOT ${CROSS} MATCHES hf-$)
set(DRACO_EXTRA_TOOLCHAIN_FLAGS "-mfloat-abi=softfp")
endif ()
set(CMAKE_C_COMPILER ${CROSS}gcc)
set(CMAKE_CXX_COMPILER ${CROSS}g++)
set(AS_EXECUTABLE ${CROSS}as)
set(CMAKE_C_COMPILER_ARG1
"-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_CXX_COMPILER_ARG1
"-march=armv7-a -mfpu=neon ${DRACO_EXTRA_TOOLCHAIN_FLAGS}")
set(CMAKE_SYSTEM_PROCESSOR "armv7")
endif () # DRACO_CMAKE_TOOLCHAINS_ARMV7_LINUX_GCC_CMAKE_

View File

@ -0,0 +1,14 @@
# Toolchain for cross-compiling Draco for iOS armv7s.
if (NOT DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_ 1)
if (XCODE)
# TODO(tomfinegan): Handle arm builds in Xcode.
message(FATAL_ERROR "This toolchain does not support Xcode.")
endif ()
set(CMAKE_SYSTEM_PROCESSOR "armv7s")
set(CMAKE_OSX_ARCHITECTURES "armv7s")
# Compiler/sysroot settings shared by all iOS arm variants.
include("${CMAKE_CURRENT_LIST_DIR}/arm-ios-common.cmake")
endif () # DRACO_CMAKE_TOOLCHAINS_ARMV7S_IOS_CMAKE_

View File

@ -0,0 +1,12 @@
# Toolchain for cross-compiling Draco for Android x86 with the NDK and
# libc++. CMAKE_ANDROID_NDK must be set in the environment.
if (NOT DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/../util.cmake")
set(CMAKE_SYSTEM_NAME Android)
set(CMAKE_ANDROID_ARCH_ABI x86)
require_variable(CMAKE_ANDROID_NDK)
# Defaults below are overridable via same-named environment variables.
set_variable_if_unset(CMAKE_SYSTEM_VERSION 18)
set_variable_if_unset(CMAKE_ANDROID_STL_TYPE c++_static)
endif () # DRACO_CMAKE_TOOLCHAINS_X86_ANDROID_NDK_LIBCPP_CMAKE_

View File

@ -0,0 +1,12 @@
# Toolchain for cross-compiling Draco for Android x86_64 with the NDK and
# libc++. CMAKE_ANDROID_NDK must be set in the environment.
if (NOT DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_)
set(DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_ 1)
include("${CMAKE_CURRENT_LIST_DIR}/../util.cmake")
set(CMAKE_SYSTEM_NAME Android)
set(CMAKE_ANDROID_ARCH_ABI x86_64)
require_variable(CMAKE_ANDROID_NDK)
# Defaults below are overridable via same-named environment variables.
set_variable_if_unset(CMAKE_SYSTEM_VERSION 21)
set_variable_if_unset(CMAKE_ANDROID_STL_TYPE c++_static)
endif () # DRACO_CMAKE_TOOLCHAINS_X86_64_ANDROID_NDK_LIBCPP_CMAKE_

74
extern/draco/dracoenc/cmake/util.cmake vendored Normal file
View File

@ -0,0 +1,74 @@
if (NOT DRACO_CMAKE_UTIL_CMAKE_)
set(DRACO_CMAKE_UTIL_CMAKE_ 1)
# Creates dummy source file in $draco_build_dir named $basename.$extension and
# returns the full path to the dummy source file via the $out_file_path
# parameter.
function (create_dummy_source_file basename extension out_file_path)
  set(dummy_source_file "${draco_build_dir}/${basename}.${extension}")
  # Bug fix: the generated text previously expanded ${target_name}, a
  # variable from the *caller's* scope that is not a parameter of this
  # function. Use the basename parameter instead; the existing wrapper
  # (add_dummy_source_file_to_target) passes the target name as basename, so
  # output is unchanged for current callers.
  file(WRITE "${dummy_source_file}"
       "// Generated file. DO NOT EDIT!\n"
       "// ${basename} needs a ${extension} file to force link language, \n"
       "// or to silence a harmless CMake warning: Ignore me.\n"
       "void ${basename}_dummy_function(void) {}\n")
  set(${out_file_path} ${dummy_source_file} PARENT_SCOPE)
endfunction ()
# Convenience function for adding a dummy source file to $target_name using
# $extension as the file extension. Wraps create_dummy_source_file().
function (add_dummy_source_file_to_target target_name extension)
# create_dummy_source_file() writes the generated path into the variable
# named by its third argument ("dummy_source_file") in this function's scope.
create_dummy_source_file("${target_name}" "${extension}" "dummy_source_file")
target_sources(${target_name} PRIVATE ${dummy_source_file})
endfunction ()
# Extracts the version number from $version_file and returns it to the user via
# $version_string_out_var. This is achieved by finding the first instance of
# the kDracoVersion variable and then removing everything but the string literal
# assigned to the variable. Quotes and semicolon are stripped from the returned
# string.
function (extract_version_string version_file version_string_out_var)
# Pull only the line(s) mentioning kDracoVersion from the file.
file(STRINGS "${version_file}" draco_version REGEX "kDracoVersion")
list(GET draco_version 0 draco_version)
# Strip the declaration prefix, then the trailing semicolon and the quotes,
# leaving just the version text.
string(REPLACE "static const char kDracoVersion[] = " "" draco_version
"${draco_version}")
string(REPLACE ";" "" draco_version "${draco_version}")
string(REPLACE "\"" "" draco_version "${draco_version}")
set("${version_string_out_var}" "${draco_version}" PARENT_SCOPE)
endfunction ()
# Sets CMake compiler launcher to $launcher_name when $launcher_name is found in
# $PATH. Warns user about ignoring build flag $launcher_flag when $launcher_name
# is not found in $PATH.
function (set_compiler_launcher launcher_flag launcher_name)
# find_program caches launcher_path; empty/NOTFOUND when not on $PATH.
find_program(launcher_path "${launcher_name}")
if (launcher_path)
# PARENT_SCOPE is required: this is a function, so plain set() would only
# affect this function's scope.
set(CMAKE_C_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
set(CMAKE_CXX_COMPILER_LAUNCHER "${launcher_path}" PARENT_SCOPE)
message("--- Using ${launcher_name} as compiler launcher.")
else ()
message(WARNING
"--- Cannot find ${launcher_name}, ${launcher_flag} ignored.")
endif ()
endfunction ()
# Terminates CMake execution when $var_name is unset in the environment. Sets
# CMake variable to the value of the environment variable when the variable is
# present in the environment.
macro(require_variable var_name)
if ("$ENV{${var_name}}" STREQUAL "")
message(FATAL_ERROR "${var_name} must be set in environment.")
endif ()
# Copy the (known non-empty) environment value into a same-named CMake
# variable; the empty default is never used here.
set_variable_if_unset(${var_name} "")
endmacro ()
# Sets $var_name to $default_value if not already set in the environment.
macro (set_variable_if_unset var_name default_value)
if (NOT "$ENV{${var_name}}" STREQUAL "")
set(${var_name} $ENV{${var_name}})
else ()
# NOTE(review): only the *environment* is consulted here; a pre-existing
# CMake variable (e.g. one passed with -D) is overwritten with the default.
# Confirm this precedence is intended.
set(${var_name} ${default_value})
endif ()
endmacro ()
endif() # DRACO_CMAKE_UTIL_CMAKE_

View File

@ -0,0 +1,55 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
namespace draco {
KeyframeAnimation::KeyframeAnimation() {}

// Installs |timestamp| as the timestamp attribute (attribute id 0, one float
// per frame). Returns false when a timestamp attribute was already added, or
// when |timestamp|'s size disagrees with the frame count established by
// previously added keyframe data.
bool KeyframeAnimation::SetTimestamps(
const std::vector<TimestampType> &timestamp) {
// Already added attributes.
const int32_t num_frames = timestamp.size();
if (num_attributes() > 0) {
// Timestamp attribute could be added only once.
if (timestamps()->size()) {
return false;
} else {
// Check if the number of frames is consistent with
// the existing keyframes.
if (num_frames != num_points())
return false;
}
} else {
// This is the first attribute.
set_num_frames(num_frames);
}
// Add attribute for time stamp data.
std::unique_ptr<PointAttribute> timestamp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
// Single DT_FLOAT32 component per point, identity point-to-value mapping.
timestamp_att->Init(GeometryAttribute::GENERIC, nullptr, 1, DT_FLOAT32, false,
sizeof(float), 0);
timestamp_att->SetIdentityMapping();
timestamp_att->Reset(num_frames);
for (PointIndex i(0); i < num_frames; ++i) {
timestamp_att->SetAttributeValue(timestamp_att->mapped_index(i),
&timestamp[i.value()]);
}
this->SetAttribute(kTimestampId, std::move(timestamp_att));
return true;
}
} // namespace draco

View File

@ -0,0 +1,108 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_H_
#include <vector>
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Class for holding keyframe animation data. It will have two or more
// attributes as a point cloud. The first attribute is always the timestamp
// of the animation. Each KeyframeAnimation could have multiple animations with
// the same number of frames. Each animation will be treated as a point
// attribute.
class KeyframeAnimation : public PointCloud {
public:
// Force time stamp to be float type.
using TimestampType = float;
KeyframeAnimation();
// Animation must have only one timestamp attribute.
// This function must be called before adding any animation data.
// Returns false if timestamp already exists.
bool SetTimestamps(const std::vector<TimestampType> &timestamp);
// Returns an id for the added animation data. This id will be used to
// identify this animation.
// Returns -1 if error, e.g. number of frames is not consistent.
// Type |T| should be consistent with |DataType|, e.g:
// float - DT_FLOAT32,
// int32_t - DT_INT32, ...
template <typename T>
int32_t AddKeyframes(DataType data_type, uint32_t num_components,
const std::vector<T> &data);
// Accessor for the timestamp attribute (unique id kTimestampId).
const PointAttribute *timestamps() const {
return GetAttributeByUniqueId(kTimestampId);
}
// Accessor for the keyframe attribute with unique id |animation_id|.
const PointAttribute *keyframes(int32_t animation_id) const {
return GetAttributeByUniqueId(animation_id);
}
// Number of frames should be equal to number points in the point cloud.
void set_num_frames(int32_t num_frames) { set_num_points(num_frames); }
int32_t num_frames() const { return static_cast<int32_t>(num_points()); }
// Every attribute except the timestamp attribute is an animation.
int32_t num_animations() const { return num_attributes() - 1; }
private:
// Attribute id of timestamp is fixed to 0.
static constexpr int32_t kTimestampId = 0;
};
// Adds |data| (num_frames * num_components values, row per frame) as a new
// keyframe attribute and returns its attribute id. Returns -1 on error:
// zero components, or a data size inconsistent with the frame count.
template <typename T>
int32_t KeyframeAnimation::AddKeyframes(DataType data_type,
uint32_t num_components,
const std::vector<T> &data) {
// TODO(draco-eng): Verify T is consistent with |data_type|.
if (num_components == 0)
return -1;
// If timestamps is not added yet, then reserve attribute 0 for timestamps.
if (!num_attributes()) {
// Add a temporary attribute with 0 points to fill attribute id 0.
std::unique_ptr<PointAttribute> temp_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
temp_att->Init(GeometryAttribute::GENERIC, nullptr, num_components,
data_type, false, DataTypeLength(data_type), 0);
temp_att->Reset(0);
this->AddAttribute(std::move(temp_att));
// First data added establishes the frame count for all later additions.
set_num_frames(data.size() / num_components);
}
if (data.size() != num_components * num_frames())
return -1;
std::unique_ptr<PointAttribute> keyframe_att =
std::unique_ptr<PointAttribute>(new PointAttribute());
keyframe_att->Init(GeometryAttribute::GENERIC, nullptr, num_components,
data_type, false, DataTypeLength(data_type), 0);
keyframe_att->SetIdentityMapping();
keyframe_att->Reset(num_frames());
// Copy one num_components-wide row of |data| into each frame's value.
const size_t stride = num_components;
for (PointIndex i(0); i < num_frames(); ++i) {
keyframe_att->SetAttributeValue(keyframe_att->mapped_index(i),
&data[i.value() * stride]);
}
return this->AddAttribute(std::move(keyframe_att));
}
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_H_

View File

@ -0,0 +1,29 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_decoder.h"
namespace draco {
// Decodes |animation| from |in_buffer| by delegating to the sequential point
// cloud decoder (a KeyframeAnimation is decoded as a plain point cloud).
Status KeyframeAnimationDecoder::Decode(const DecoderOptions &options,
DecoderBuffer *in_buffer,
KeyframeAnimation *animation) {
const auto status = PointCloudSequentialDecoder::Decode(
options, in_buffer, static_cast<PointCloud *>(animation));
if (!status.ok())
return status;
return OkStatus();
}
} // namespace draco

View File

@ -0,0 +1,34 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_decoder.h"
namespace draco {
// Class for decoding keyframe animation. Counterpart of
// KeyframeAnimationEncoder; decodes the animation as a sequential point
// cloud.
class KeyframeAnimationDecoder : private PointCloudSequentialDecoder {
 public:
  KeyframeAnimationDecoder() {}

  // Decodes |animation| from |in_buffer| using |options|.
  // (Fix: removed the stray semicolon that followed the constructor body —
  // an extra empty declaration that trips -Wextra-semi/pedantic builds.)
  Status Decode(const DecoderOptions &options, DecoderBuffer *in_buffer,
                KeyframeAnimation *animation);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_DECODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation_encoder.h"
namespace draco {
KeyframeAnimationEncoder::KeyframeAnimationEncoder() {}

// Encodes |animation| into |out_buffer| by treating it as a point cloud and
// delegating to the sequential point cloud encoder.
Status KeyframeAnimationEncoder::EncodeKeyframeAnimation(
const KeyframeAnimation &animation, const EncoderOptions &options,
EncoderBuffer *out_buffer) {
SetPointCloud(animation);
return Encode(options, out_buffer);
}
} // namespace draco

View File

@ -0,0 +1,39 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#define DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_
#include "draco/animation/keyframe_animation.h"
#include "draco/compression/point_cloud/point_cloud_sequential_encoder.h"
namespace draco {
// Class for encoding keyframe animation. It takes KeyframeAnimation as a
// PointCloud and compress it. It's mostly a wrapper around PointCloudEncoder so
// that the animation module could be separated from geometry compression when
// exposed to developers.
class KeyframeAnimationEncoder : private PointCloudSequentialEncoder {
public:
KeyframeAnimationEncoder();
// Encode an animation to a buffer.
Status EncodeKeyframeAnimation(const KeyframeAnimation &animation,
const EncoderOptions &options,
EncoderBuffer *out_buffer);
};
} // namespace draco
#endif // DRACO_ANIMATION_KEYFRAME_ANIMATION_ENCODER_H_

View File

@ -0,0 +1,168 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/animation/keyframe_animation_decoder.h"
#include "draco/animation/keyframe_animation_encoder.h"
#include "draco/core/draco_test_base.h"
#include "draco/core/draco_test_utils.h"
namespace draco {
class KeyframeAnimationEncodingTest : public ::testing::Test {
protected:
KeyframeAnimationEncodingTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i)
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i)
animation_data_[i] = static_cast<float>(i);
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData(const KeyframeAnimation &animation0,
const KeyframeAnimation &animation1,
bool quantized) {
ASSERT_EQ(animation0.num_frames(), animation1.num_frames());
ASSERT_EQ(animation0.num_animations(), animation1.num_animations());
if (quantized) {
// TODO(hemmer) : Add test for stable quantization.
// Quantization will result in slightly different values.
// Skip comparing values.
return;
}
// Compare time stamp.
const auto timestamp_att0 = animation0.timestamps();
const auto timestamp_att1 = animation0.timestamps();
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, 1> att_value0;
std::array<float, 1> att_value1;
ASSERT_TRUE((timestamp_att0->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((timestamp_att1->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value1)));
ASSERT_FLOAT_EQ(att_value0[0], att_value1[0]);
}
for (int animation_id = 1; animation_id < animation0.num_animations();
++animation_id) {
// Compare keyframe data.
const auto keyframe_att0 = animation0.keyframes(animation_id);
const auto keyframe_att1 = animation1.keyframes(animation_id);
ASSERT_EQ(keyframe_att0->num_components(),
keyframe_att1->num_components());
for (int i = 0; i < animation0.num_frames(); ++i) {
std::array<float, num_components_t> att_value0;
std::array<float, num_components_t> att_value1;
ASSERT_TRUE((keyframe_att0->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value0)));
ASSERT_TRUE((keyframe_att1->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value1)));
for (int j = 0; j < att_value0.size(); ++j) {
ASSERT_FLOAT_EQ(att_value0[j], att_value1[j]);
}
}
}
}
template <int num_components_t>
void TestKeyframeAnimationEncoding() {
TestKeyframeAnimationEncoding<num_components_t>(false);
}
// Encodes |keyframe_animation_|, decodes it back, and verifies that the
// decoded animation matches the source (exactly, unless |quantized| is set).
template <int num_components_t>
void TestKeyframeAnimationEncoding(bool quantized) {
  // Encode animation class.
  draco::EncoderBuffer buffer;
  draco::KeyframeAnimationEncoder encoder;
  EncoderOptions options = EncoderOptions::CreateDefaultOptions();
  if (quantized) {
    // Set quantization for timestamps.
    options.SetAttributeInt(0, "quantization_bits", 20);
    // Set quantization for keyframes.
    for (int i = 1; i <= keyframe_animation_.num_animations(); ++i) {
      options.SetAttributeInt(i, "quantization_bits", 20);
    }
  }
  ASSERT_TRUE(
      encoder.EncodeKeyframeAnimation(keyframe_animation_, options, &buffer)
          .ok());
  // (An unused DecoderBuffer local was removed here.)
  draco::KeyframeAnimationDecoder decoder;
  DecoderBuffer dec_buffer;
  dec_buffer.Init(buffer.data(), buffer.size());
  // Decode animation class.
  std::unique_ptr<KeyframeAnimation> decoded_animation(
      new KeyframeAnimation());
  DecoderOptions dec_options;
  ASSERT_TRUE(
      decoder.Decode(dec_options, &dec_buffer, decoded_animation.get()).ok());
  // Verify if animation before and after compression is identical.
  CompareAnimationData<num_components_t>(keyframe_animation_,
                                         *decoded_animation, quantized);
}
// Animation under test, populated by the CreateAndAdd* helpers.
draco::KeyframeAnimation keyframe_animation_;
// Source timestamps, one entry per frame.
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
// Source keyframe values (num_frames * num_components entries).
std::vector<float> animation_data_;
};
// Round-trips a single-frame animation with one component per keyframe.
TEST_F(KeyframeAnimationEncodingTest, OneComponent) {
  const int kNumFrames = 1;
  ASSERT_TRUE(CreateAndAddTimestamps(kNumFrames));
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/1), 1);
  TestKeyframeAnimationEncoding<1>();
}
// Round-trips 100 frames with 100 components per keyframe.
TEST_F(KeyframeAnimationEncodingTest, ManyComponents) {
  const int kNumFrames = 100;
  ASSERT_TRUE(CreateAndAddTimestamps(kNumFrames));
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/100), 1);
  TestKeyframeAnimationEncoding<100>();
}
// Round-trips with quantization enabled; decoded values are only loosely
// checked because quantization is lossy.
TEST_F(KeyframeAnimationEncodingTest, ManyComponentsWithQuantization) {
  const int kNumFrames = 100;
  ASSERT_TRUE(CreateAndAddTimestamps(kNumFrames));
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/4), 1);
  // Test compression with quantization.
  TestKeyframeAnimationEncoding<4>(true);
}
// Round-trips an animation carrying two keyframe attributes.
TEST_F(KeyframeAnimationEncodingTest, MultipleAnimations) {
  const int kNumFrames = 5;
  ASSERT_TRUE(CreateAndAddTimestamps(kNumFrames));
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/3), 1);
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/3), 2);
  TestKeyframeAnimationEncoding<3>();
}
} // namespace draco

View File

@ -0,0 +1,102 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/animation/keyframe_animation.h"
#include "draco/core/draco_test_base.h"
namespace {
class KeyframeAnimationTest : public ::testing::Test {
protected:
KeyframeAnimationTest() {}
bool CreateAndAddTimestamps(int32_t num_frames) {
timestamps_.resize(num_frames);
for (int i = 0; i < timestamps_.size(); ++i)
timestamps_[i] = static_cast<draco::KeyframeAnimation::TimestampType>(i);
return keyframe_animation_.SetTimestamps(timestamps_);
}
int32_t CreateAndAddAnimationData(int32_t num_frames,
uint32_t num_components) {
// Create and add animation data with.
animation_data_.resize(num_frames * num_components);
for (int i = 0; i < animation_data_.size(); ++i)
animation_data_[i] = static_cast<float>(i);
return keyframe_animation_.AddKeyframes(draco::DT_FLOAT32, num_components,
animation_data_);
}
template <int num_components_t>
void CompareAnimationData() {
// Compare time stamp.
const auto timestamp_att = keyframe_animation_.timestamps();
for (int i = 0; i < timestamps_.size(); ++i) {
std::array<float, 1> att_value;
ASSERT_TRUE((timestamp_att->GetValue<float, 1>(
draco::AttributeValueIndex(i), &att_value)));
ASSERT_FLOAT_EQ(att_value[0], i);
}
// Compare keyframe data.
const auto keyframe_att = keyframe_animation_.keyframes(1);
for (int i = 0; i < animation_data_.size() / num_components_t; ++i) {
std::array<float, num_components_t> att_value;
ASSERT_TRUE((keyframe_att->GetValue<float, num_components_t>(
draco::AttributeValueIndex(i), &att_value)));
for (int j = 0; j < num_components_t; ++j) {
ASSERT_FLOAT_EQ(att_value[j], i * num_components_t + j);
}
}
}
template <int num_components_t>
void TestKeyframeAnimation(int32_t num_frames) {
ASSERT_TRUE(CreateAndAddTimestamps(num_frames));
ASSERT_EQ(CreateAndAddAnimationData(num_frames, num_components_t), 1);
CompareAnimationData<num_components_t>();
}
draco::KeyframeAnimation keyframe_animation_;
std::vector<draco::KeyframeAnimation::TimestampType> timestamps_;
std::vector<float> animation_data_;
};
// Animation with a single component and 10 frames.
TEST_F(KeyframeAnimationTest, OneComponent) {
  TestKeyframeAnimation<1>(10);
}
// Animation with four components and 10 frames.
TEST_F(KeyframeAnimationTest, FourComponent) {
  TestKeyframeAnimation<4>(10);
}
// Adding keyframe data before the timestamps must also succeed.
TEST_F(KeyframeAnimationTest, AddingAnimationFirst) {
  ASSERT_EQ(CreateAndAddAnimationData(/*num_frames=*/5, /*num_components=*/1),
            1);
  ASSERT_TRUE(CreateAndAddTimestamps(5));
}
// Timestamps may only be set once; a second attempt must fail.
TEST_F(KeyframeAnimationTest, ErrorAddingTimestampsTwice) {
  ASSERT_TRUE(CreateAndAddTimestamps(5));
  ASSERT_FALSE(CreateAndAddTimestamps(5));
}
// Several keyframe attributes can be attached to one animation; ids are
// assigned sequentially starting at 1.
TEST_F(KeyframeAnimationTest, MultipleAnimationData) {
  const int kNumFrames = 5;
  ASSERT_TRUE(CreateAndAddTimestamps(kNumFrames));
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/1), 1);
  ASSERT_EQ(CreateAndAddAnimationData(kNumFrames, /*num_components=*/2), 2);
}
} // namespace

View File

@ -0,0 +1,86 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_octahedron_transform.h"
#include "draco/attributes/attribute_transform_type.h"
#include "draco/compression/attributes/normal_compression_utils.h"
namespace draco {
bool AttributeOctahedronTransform::InitFromAttribute(
const PointAttribute &attribute) {
const AttributeTransformData *const transform_data =
attribute.GetAttributeTransformData();
if (!transform_data ||
transform_data->transform_type() != ATTRIBUTE_OCTAHEDRON_TRANSFORM)
return false; // Wrong transform type.
quantization_bits_ = transform_data->GetParameterValue<int32_t>(0);
return true;
}
// Stores the transform type and the quantization bit count into |out_data|.
void AttributeOctahedronTransform::CopyToAttributeTransformData(
    AttributeTransformData *out_data) const {
  out_data->set_transform_type(ATTRIBUTE_OCTAHEDRON_TRANSFORM);
  out_data->AppendParameterValue(quantization_bits_);
}
// Sets the number of quantization bits used for the octahedral coordinates.
void AttributeOctahedronTransform::SetParameters(int quantization_bits) {
  quantization_bits_ = quantization_bits;
}
// Writes the quantization bit count into |encoder_buffer|. Returns false
// when the transform has not been initialized yet.
bool AttributeOctahedronTransform::EncodeParameters(
    EncoderBuffer *encoder_buffer) const {
  if (!is_initialized())
    return false;
  encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
  return true;
}
// Builds an integer attribute holding the octahedrally encoded (s, t)
// coordinates of |attribute|, in the order given by |point_ids|. Returns
// nullptr when |quantization_bits_| is rejected by the converter.
std::unique_ptr<PointAttribute>
AttributeOctahedronTransform::GeneratePortableAttribute(
    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
    int num_points) const {
  DRACO_DCHECK(is_initialized());
  OctahedronToolBox converter;
  if (!converter.SetQuantizationBits(quantization_bits_))
    return nullptr;
  // Allocate the portable attribute with two components (s and t).
  const int num_entries = static_cast<int>(point_ids.size());
  std::unique_ptr<PointAttribute> portable_attribute =
      InitPortableAttribute(num_entries, 2, num_points, attribute, true);
  int32_t *const out_values = reinterpret_cast<int32_t *>(
      portable_attribute->GetAddress(AttributeValueIndex(0)));
  float att_val[3];
  int32_t out_index = 0;
  for (uint32_t i = 0; i < point_ids.size(); ++i) {
    const AttributeValueIndex value_index =
        attribute.mapped_index(point_ids[i]);
    attribute.GetValue(value_index, att_val);
    // Convert the vector into quantized octahedral (s, t) coordinates.
    int32_t s, t;
    converter.FloatVectorToQuantizedOctahedralCoords(att_val, &s, &t);
    out_values[out_index++] = s;
    out_values[out_index++] = t;
  }
  return portable_attribute;
}
} // namespace draco

View File

@ -0,0 +1,60 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for attributes transformed to octahedral coordinates.
// A 3-component vector is converted to two quantized octahedral coordinates
// (s, t); see GeneratePortableAttribute in the .cc file.
class AttributeOctahedronTransform : public AttributeTransform {
 public:
  // Starts uninitialized; -1 marks "quantization bits not set yet".
  AttributeOctahedronTransform() : quantization_bits_(-1) {}
  // Return attribute transform type.
  AttributeTransformType Type() const override {
    return ATTRIBUTE_OCTAHEDRON_TRANSFORM;
  }
  // Try to init transform from attribute.
  bool InitFromAttribute(const PointAttribute &attribute) override;
  // Copy parameter values into the provided AttributeTransformData instance.
  void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const override;
  // Set number of quantization bits.
  void SetParameters(int quantization_bits);
  // Encode relevant parameters into buffer.
  bool EncodeParameters(EncoderBuffer *encoder_buffer) const;
  // True once SetParameters() or InitFromAttribute() has succeeded.
  bool is_initialized() const { return quantization_bits_ != -1; }
  int32_t quantization_bits() const { return quantization_bits_; }
  // Create portable attribute.
  std::unique_ptr<PointAttribute> GeneratePortableAttribute(
      const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
      int num_points) const;

 private:
  // Number of quantization bits; -1 while uninitialized.
  int32_t quantization_bits_;
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_OCTAHEDRON_TRANSFORM_H_

View File

@ -0,0 +1,173 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/quantization_utils.h"
namespace draco {
bool AttributeQuantizationTransform::InitFromAttribute(
const PointAttribute &attribute) {
const AttributeTransformData *const transform_data =
attribute.GetAttributeTransformData();
if (!transform_data ||
transform_data->transform_type() != ATTRIBUTE_QUANTIZATION_TRANSFORM)
return false; // Wrong transform type.
int32_t byte_offset = 0;
quantization_bits_ = transform_data->GetParameterValue<int32_t>(byte_offset);
byte_offset += 4;
min_values_.resize(attribute.num_components());
for (int i = 0; i < attribute.num_components(); ++i) {
min_values_[i] = transform_data->GetParameterValue<float>(byte_offset);
byte_offset += 4;
}
range_ = transform_data->GetParameterValue<float>(byte_offset);
return true;
}
// Copy parameter values into the provided AttributeTransformData instance.
// Layout matches InitFromAttribute(): bits, minima, range.
void AttributeQuantizationTransform::CopyToAttributeTransformData(
    AttributeTransformData *out_data) const {
  out_data->set_transform_type(ATTRIBUTE_QUANTIZATION_TRANSFORM);
  out_data->AppendParameterValue(quantization_bits_);
  for (const float min_value : min_values_) {
    out_data->AppendParameterValue(min_value);
  }
  out_data->AppendParameterValue(range_);
}
// Directly sets all quantization parameters.
// |min_values| must point to |num_components| floats.
void AttributeQuantizationTransform::SetParameters(int quantization_bits,
                                                   const float *min_values,
                                                   int num_components,
                                                   float range) {
  quantization_bits_ = quantization_bits;
  min_values_.assign(min_values, min_values + num_components);
  range_ = range;
}
// Derives quantization parameters from |attribute|: the per-component
// minimum values and the largest per-component extent (|range_|). Fails
// when the transform was already initialized.
bool AttributeQuantizationTransform::ComputeParameters(
    const PointAttribute &attribute, const int quantization_bits) {
  if (quantization_bits_ != -1) {
    return false;  // already initialized.
  }
  quantization_bits_ = quantization_bits;
  const int num_components = attribute.num_components();
  range_ = 0.f;
  min_values_ = std::vector<float>(num_components, 0.f);
  const std::unique_ptr<float[]> max_values(new float[num_components]);
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  // Seed minima and maxima with the first attribute entry. (A redundant
  // initial read into |att_val| was removed; the loop below overwrites it
  // before any use.)
  attribute.GetValue(AttributeValueIndex(0), min_values_.data());
  attribute.GetValue(AttributeValueIndex(0), max_values.get());
  for (AttributeValueIndex i(1); i < static_cast<uint32_t>(attribute.size());
       ++i) {
    attribute.GetValue(i, att_val.get());
    for (int c = 0; c < num_components; ++c) {
      if (min_values_[c] > att_val[c])
        min_values_[c] = att_val[c];
      if (max_values[c] < att_val[c])
        max_values[c] = att_val[c];
    }
  }
  // The range is the largest extent over all components.
  for (int c = 0; c < num_components; ++c) {
    const float dif = max_values[c] - min_values_[c];
    if (dif > range_)
      range_ = dif;
  }
  return true;
}
// Serializes the minima, range and bit count into |encoder_buffer|.
// Returns false when the transform has not been initialized.
bool AttributeQuantizationTransform::EncodeParameters(
    EncoderBuffer *encoder_buffer) const {
  if (!is_initialized())
    return false;
  encoder_buffer->Encode(min_values_.data(),
                         sizeof(float) * min_values_.size());
  encoder_buffer->Encode(range_);
  encoder_buffer->Encode(static_cast<uint8_t>(quantization_bits_));
  return true;
}
// Quantizes |attribute| into a new integer attribute using the identity
// point mapping (one output entry per point).
std::unique_ptr<PointAttribute>
AttributeQuantizationTransform::GeneratePortableAttribute(
    const PointAttribute &attribute, int num_points) const {
  DRACO_DCHECK(is_initialized());
  const int num_components = attribute.num_components();
  // num_points == 0 in InitPortableAttribute selects the identity mapping.
  std::unique_ptr<PointAttribute> portable_attribute =
      InitPortableAttribute(num_points, num_components, 0, attribute, true);
  int32_t *const out_values = reinterpret_cast<int32_t *>(
      portable_attribute->GetAddress(AttributeValueIndex(0)));
  const uint32_t max_quantized_value = (1 << quantization_bits_) - 1;
  Quantizer quantizer;
  quantizer.Init(range(), max_quantized_value);
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  int32_t out_index = 0;
  for (PointIndex i(0); i < num_points; ++i) {
    attribute.GetValue(attribute.mapped_index(i), att_val.get());
    for (int c = 0; c < num_components; ++c) {
      // Shift into [0, range] before quantizing.
      out_values[out_index++] =
          quantizer.QuantizeFloat(att_val[c] - min_values()[c]);
    }
  }
  return portable_attribute;
}
// Quantizes |attribute| into a new integer attribute whose entries follow
// the order of |point_ids|, with an explicit mapping over |num_points|.
std::unique_ptr<PointAttribute>
AttributeQuantizationTransform::GeneratePortableAttribute(
    const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
    int num_points) const {
  DRACO_DCHECK(is_initialized());
  const int num_components = attribute.num_components();
  std::unique_ptr<PointAttribute> portable_attribute = InitPortableAttribute(
      static_cast<int>(point_ids.size()), num_components, num_points,
      attribute, true);
  int32_t *const out_values = reinterpret_cast<int32_t *>(
      portable_attribute->GetAddress(AttributeValueIndex(0)));
  const uint32_t max_quantized_value = (1 << quantization_bits_) - 1;
  Quantizer quantizer;
  quantizer.Init(range(), max_quantized_value);
  const std::unique_ptr<float[]> att_val(new float[num_components]);
  int32_t out_index = 0;
  for (uint32_t i = 0; i < point_ids.size(); ++i) {
    attribute.GetValue(attribute.mapped_index(point_ids[i]), att_val.get());
    for (int c = 0; c < num_components; ++c) {
      // Shift into [0, range] before quantizing.
      out_values[out_index++] =
          quantizer.QuantizeFloat(att_val[c] - min_values()[c]);
    }
  }
  return portable_attribute;
}
} // namespace draco

View File

@ -0,0 +1,78 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_
#include <vector>
#include "draco/attributes/attribute_transform.h"
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Attribute transform for quantized attributes.
// Stores the quantization bit count, the per-component minimum values and
// the largest per-component extent needed to dequantize the data.
class AttributeQuantizationTransform : public AttributeTransform {
 public:
  // Starts uninitialized; -1 marks "quantization bits not set yet".
  AttributeQuantizationTransform() : quantization_bits_(-1), range_(0.f) {}
  // Return attribute transform type.
  AttributeTransformType Type() const override {
    return ATTRIBUTE_QUANTIZATION_TRANSFORM;
  }
  // Try to init transform from attribute.
  bool InitFromAttribute(const PointAttribute &attribute) override;
  // Copy parameter values into the provided AttributeTransformData instance.
  void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const override;
  // Directly sets all parameters; |min_values| must hold |num_components|
  // floats.
  void SetParameters(int quantization_bits, const float *min_values,
                     int num_components, float range);
  // Derives the parameters from the attribute data; fails if already
  // initialized.
  bool ComputeParameters(const PointAttribute &attribute,
                         const int quantization_bits);
  // Encode relevant parameters into buffer.
  bool EncodeParameters(EncoderBuffer *encoder_buffer) const;
  int32_t quantization_bits() const { return quantization_bits_; }
  float min_value(int axis) const { return min_values_[axis]; }
  const std::vector<float> &min_values() const { return min_values_; }
  float range() const { return range_; }
  // True once SetParameters(), ComputeParameters() or InitFromAttribute()
  // has succeeded.
  bool is_initialized() const { return quantization_bits_ != -1; }
  // Create portable attribute using 1:1 mapping between points in the input
  // and output attribute.
  std::unique_ptr<PointAttribute> GeneratePortableAttribute(
      const PointAttribute &attribute, int num_points) const;
  // Create portable attribute using custom mapping between input and output
  // points.
  std::unique_ptr<PointAttribute> GeneratePortableAttribute(
      const PointAttribute &attribute, const std::vector<PointIndex> &point_ids,
      int num_points) const;

 private:
  int32_t quantization_bits_;
  // Minimal dequantized value for each component of the attribute.
  std::vector<float> min_values_;
  // Bounds of the dequantized attribute (max delta over all components).
  float range_;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_QUANTIZATION_TRANSFORM_H_

View File

@ -0,0 +1,44 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/attribute_transform.h"
namespace draco {
// Attaches a copy of this transform's parameters to |attribute|.
bool AttributeTransform::TransferToAttribute(PointAttribute *attribute) const {
  std::unique_ptr<AttributeTransformData> data(new AttributeTransformData());
  CopyToAttributeTransformData(data.get());
  attribute->SetAttributeTransformData(std::move(data));
  return true;
}
// Allocates an integer attribute with |num_entries| values of
// |num_components| components each, mirroring the type of |attribute|.
// With |num_points| == 0 an identity point mapping is used; otherwise an
// explicit mapping over |num_points| points is allocated. |is_unsigned|
// selects DT_UINT32 over DT_INT32 storage.
std::unique_ptr<PointAttribute> AttributeTransform::InitPortableAttribute(
    int num_entries, int num_components, int num_points,
    const PointAttribute &attribute, bool is_unsigned) const {
  const DataType dt = is_unsigned ? DT_UINT32 : DT_INT32;
  GeometryAttribute va;
  va.Init(attribute.attribute_type(), nullptr, num_components, dt, false,
          num_components * DataTypeLength(dt), 0);
  std::unique_ptr<PointAttribute> portable_attribute(new PointAttribute(va));
  portable_attribute->Reset(num_entries);
  if (num_points == 0) {
    portable_attribute->SetIdentityMapping();
  } else {
    portable_attribute->SetExplicitMapping(num_points);
  }
  return portable_attribute;
}
} // namespace draco

View File

@ -0,0 +1,46 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/point_attribute.h"
namespace draco {
// Virtual base class for various attribute transforms, enforcing common
// interface where possible.
class AttributeTransform {
 public:
  virtual ~AttributeTransform() = default;
  // Return attribute transform type.
  virtual AttributeTransformType Type() const = 0;
  // Try to init transform from attribute.
  virtual bool InitFromAttribute(const PointAttribute &attribute) = 0;
  // Copy parameter values into the provided AttributeTransformData instance.
  virtual void CopyToAttributeTransformData(
      AttributeTransformData *out_data) const = 0;
  // Attaches a copy of this transform's parameters to |attribute|.
  bool TransferToAttribute(PointAttribute *attribute) const;

 protected:
  // Allocates an integer attribute used as the quantized storage target.
  // With |num_points| == 0 an identity point mapping is used, otherwise an
  // explicit mapping over |num_points| points is allocated.
  std::unique_ptr<PointAttribute> InitPortableAttribute(
      int num_entries, int num_components, int num_points,
      const PointAttribute &attribute, bool is_unsigned) const;
};
} // namespace draco
#endif  // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_H_

View File

@ -0,0 +1,71 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_
#include <memory>
#include "draco/attributes/attribute_transform_type.h"
#include "draco/core/data_buffer.h"
namespace draco {
// Class for holding parameter values for an attribute transform of a
// PointAttribute. This can be for example quantization data for an attribute
// that holds quantized values. This class provides only a basic storage for
// attribute transform parameters and it should be accessed only through wrapper
// classes for a specific transform (e.g. AttributeQuantizationTransform).
class AttributeTransformData {
 public:
  AttributeTransformData() : transform_type_(ATTRIBUTE_INVALID_TRANSFORM) {}
  AttributeTransformData(const AttributeTransformData &data) = default;
  // Returns the type of the attribute transform that is described by the class.
  AttributeTransformType transform_type() const { return transform_type_; }
  void set_transform_type(AttributeTransformType type) {
    transform_type_ = type;
  }
  // Returns a parameter value on a given |byte_offset|.
  template <typename DataTypeT>
  DataTypeT GetParameterValue(int byte_offset) const {
    DataTypeT out_data;
    buffer_.Read(byte_offset, &out_data, sizeof(DataTypeT));
    return out_data;
  }
  // Sets a parameter value on a given |byte_offset|, growing |buffer_| when
  // the value would not fit.
  template <typename DataTypeT>
  void SetParameterValue(int byte_offset, const DataTypeT &in_data) {
    if (byte_offset + sizeof(DataTypeT) > buffer_.data_size()) {
      buffer_.Resize(byte_offset + sizeof(DataTypeT));
    }
    buffer_.Write(byte_offset, &in_data, sizeof(DataTypeT));
  }
  // Sets a parameter value at the end of the |buffer_|.
  template <typename DataTypeT>
  void AppendParameterValue(const DataTypeT &in_data) {
    SetParameterValue(static_cast<int>(buffer_.data_size()), in_data);
  }

 private:
  AttributeTransformType transform_type_;
  // Raw storage for the transform parameters.
  DataBuffer buffer_;
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_DATA_H_

View File

@ -0,0 +1,30 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
#define DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_
namespace draco {
// List of all currently supported attribute transforms.
enum AttributeTransformType {
  // No transform data is attached, or the data is invalid.
  ATTRIBUTE_INVALID_TRANSFORM = -1,
  // Values are stored without any transformation.
  ATTRIBUTE_NO_TRANSFORM = 0,
  // Values are quantized (see AttributeQuantizationTransform).
  ATTRIBUTE_QUANTIZATION_TRANSFORM = 1,
  // Vectors stored as quantized octahedral (s, t) coordinates (see
  // AttributeOctahedronTransform).
  ATTRIBUTE_OCTAHEDRON_TRANSFORM = 2,
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_ATTRIBUTE_TRANSFORM_TYPE_H_

View File

@ -0,0 +1,91 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/geometry_attribute.h"
using std::array;
namespace draco {
// Creates an invalid attribute (no buffer attached; IsValid() is false).
GeometryAttribute::GeometryAttribute()
    : buffer_(nullptr),
      num_components_(1),
      data_type_(DT_FLOAT32),
      // |normalized_| was previously left uninitialized by this constructor
      // even though Init()/CopyFrom() assign it; default it to false.
      // NOTE(review): keep this entry in member-declaration order to avoid a
      // -Wreorder warning — confirm against the header.
      normalized_(false),
      byte_stride_(0),
      byte_offset_(0),
      attribute_type_(INVALID),
      unique_id_(0) {}
// Initializes and enables the attribute over |buffer|.
// |byte_stride| is the distance between consecutive entries and
// |byte_offset| the position of the first entry within the buffer.
// |buffer| may be null, in which case the attribute remains invalid.
void GeometryAttribute::Init(GeometryAttribute::Type attribute_type,
                             DataBuffer *buffer, int8_t num_components,
                             DataType data_type, bool normalized,
                             int64_t byte_stride, int64_t byte_offset) {
  buffer_ = buffer;
  if (buffer) {
    // Remember the buffer identity/version so operator== can compare
    // descriptors instead of buffer contents.
    buffer_descriptor_.buffer_id = buffer->buffer_id();
    buffer_descriptor_.buffer_update_count = buffer->update_count();
  }
  num_components_ = num_components;
  data_type_ = data_type;
  normalized_ = normalized;
  byte_stride_ = byte_stride;
  byte_offset_ = byte_offset;
  attribute_type_ = attribute_type;
}
// Copies the buffer contents and all layout fields from |src_att| into this
// attribute. Both attributes must already have a buffer attached.
// NOTE(review): |unique_id_| is not copied here — confirm this is intended.
bool GeometryAttribute::CopyFrom(const GeometryAttribute &src_att) {
  // Both sides need a backing buffer before data can be transferred.
  if (buffer_ == nullptr)
    return false;
  if (src_att.buffer_ == nullptr)
    return false;
  buffer_->Update(src_att.buffer_->data(), src_att.buffer_->data_size());
  // Mirror the source layout description.
  attribute_type_ = src_att.attribute_type_;
  num_components_ = src_att.num_components_;
  data_type_ = src_att.data_type_;
  normalized_ = src_att.normalized_;
  byte_stride_ = src_att.byte_stride_;
  byte_offset_ = src_att.byte_offset_;
  buffer_descriptor_ = src_att.buffer_descriptor_;
  return true;
}
// Compares the attribute layout and buffer identity.
// It's OK to compare just the buffer descriptors here. We don't need to
// compare the buffers themselves.
// NOTE(review): |normalized_| is not part of the comparison — confirm that
// this is intended.
bool GeometryAttribute::operator==(const GeometryAttribute &va) const {
  return attribute_type_ == va.attribute_type_ &&
         buffer_descriptor_.buffer_id == va.buffer_descriptor_.buffer_id &&
         buffer_descriptor_.buffer_update_count ==
             va.buffer_descriptor_.buffer_update_count &&
         num_components_ == va.num_components_ &&
         data_type_ == va.data_type_ && byte_stride_ == va.byte_stride_ &&
         byte_offset_ == va.byte_offset_;
}
// Rebinds the attribute to a different |buffer| with new stride/offset,
// keeping component count, data type and attribute type unchanged.
// |buffer| must be non-null.
void GeometryAttribute::ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
                                    int64_t byte_offset) {
  buffer_ = buffer;
  buffer_descriptor_.buffer_id = buffer->buffer_id();
  buffer_descriptor_.buffer_update_count = buffer->update_count();
  byte_stride_ = byte_stride;
  byte_offset_ = byte_offset;
}
} // namespace draco

View File

@ -0,0 +1,304 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_
#include <array>
#include <limits>
#include "draco/attributes/geometry_indices.h"
#include "draco/core/data_buffer.h"
#include "draco/core/hash_utils.h"
namespace draco {
// The class provides access to a specific attribute which is stored in a
// DataBuffer, such as normals or coordinates. However, the GeometryAttribute
// class does not own the buffer and the buffer itself may store other data
// unrelated to this attribute (such as data for other attributes in which case
// we can have multiple GeometryAttributes accessing one buffer). Typically,
// all attributes for a point (or corner, face) are stored in one block, which
// is advantageous in terms of memory access. The length of the entire block is
// given by the byte_stride, the position where the attribute starts is given by
// the byte_offset, the actual number of bytes that the attribute occupies is
// given by the data_type and the number of components.
class GeometryAttribute {
 public:
  // Supported attribute types.
  enum Type {
    INVALID = -1,
    // Named attributes start here. The difference between named and generic
    // attributes is that for named attributes we know their purpose and we
    // can apply some special methods when dealing with them (e.g. during
    // encoding).
    POSITION = 0,
    NORMAL,
    COLOR,
    TEX_COORD,
    // A special id used to mark attributes that are not assigned to any known
    // predefined use case. Such attributes are often used for a shader specific
    // data.
    GENERIC,
    // Total number of different attribute types.
    // Always keep behind all named attributes.
    NAMED_ATTRIBUTES_COUNT,
  };
  GeometryAttribute();
  // Initializes and enables the attribute.
  void Init(Type attribute_type, DataBuffer *buffer, int8_t num_components,
            DataType data_type, bool normalized, int64_t byte_stride,
            int64_t byte_offset);
  // The attribute is valid once it has been attached to a data buffer.
  bool IsValid() const { return buffer_ != nullptr; }
  // Copies data from the source attribute to the this attribute.
  // This attribute must have a valid buffer allocated otherwise the operation
  // is going to fail and return false.
  bool CopyFrom(const GeometryAttribute &src_att);
  // Function for getting a attribute value with a specific format.
  // Unsafe. Caller must ensure the accessed memory is valid (no bounds check
  // is performed on the computed byte position).
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  template <typename T, int att_components_t>
  std::array<T, att_components_t> GetValue(
      AttributeValueIndex att_index) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    std::array<T, att_components_t> out;
    buffer_->Read(byte_pos, &(out[0]), sizeof(out));
    return out;
  }
  // Function for getting a attribute value with a specific format.
  // Bounds-checked variant: returns false instead of reading out of range.
  // T is the attribute data type.
  // att_components_t is the number of attribute components.
  template <typename T, int att_components_t>
  bool GetValue(AttributeValueIndex att_index,
                std::array<T, att_components_t> *out) const {
    // Byte address of the attribute index.
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    // Check we are not reading past end of data.
    if (byte_pos + sizeof(*out) > buffer_->data_size())
      return false;
    buffer_->Read(byte_pos, &((*out)[0]), sizeof(*out));
    return true;
  }
  // Returns the byte position of the attribute entry in the data buffer.
  inline int64_t GetBytePos(AttributeValueIndex att_index) const {
    return byte_offset_ + byte_stride_ * att_index.value();
  }
  // Read-only pointer to the raw bytes of the given attribute entry.
  inline const uint8_t *GetAddress(AttributeValueIndex att_index) const {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  // Mutable pointer to the raw bytes of the given attribute entry.
  inline uint8_t *GetAddress(AttributeValueIndex att_index) {
    const int64_t byte_pos = GetBytePos(att_index);
    return buffer_->data() + byte_pos;
  }
  // Fills out_data with the raw value of the requested attribute entry.
  // out_data must be at least byte_stride_ long.
  void GetValue(AttributeValueIndex att_index, void *out_data) const {
    const int64_t byte_pos = byte_offset_ + byte_stride_ * att_index.value();
    buffer_->Read(byte_pos, out_data, byte_stride_);
  }
  // DEPRECATED: Use
  //   ConvertValue(AttributeValueIndex att_id,
  //                int out_num_components,
  //                OutT *out_val);
  //
  // Function for conversion of a attribute to a specific output format.
  // OutT is the desired data type of the attribute.
  // out_att_components_t is the number of components of the output format.
  // Returns false when the conversion failed.
  template <typename OutT, int out_att_components_t>
  bool ConvertValue(AttributeValueIndex att_id, OutT *out_val) const {
    return ConvertValue(att_id, out_att_components_t, out_val);
  }
  // Function for conversion of a attribute to a specific output format.
  // |out_val| needs to be able to store |out_num_components| values.
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed (null output pointer or an
  // unsupported stored data type).
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_id, int8_t out_num_components,
                    OutT *out_val) const {
    if (out_val == nullptr)
      return false;
    // Dispatch on the stored data type so ConvertTypedValue can reinterpret
    // the raw bytes correctly.
    switch (data_type_) {
      case DT_INT8:
        return ConvertTypedValue<int8_t, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_UINT8:
        return ConvertTypedValue<uint8_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_INT16:
        return ConvertTypedValue<int16_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT16:
        return ConvertTypedValue<uint16_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT32:
        return ConvertTypedValue<int32_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT32:
        return ConvertTypedValue<uint32_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_INT64:
        return ConvertTypedValue<int64_t, OutT>(att_id, out_num_components,
                                                out_val);
      case DT_UINT64:
        return ConvertTypedValue<uint64_t, OutT>(att_id, out_num_components,
                                                 out_val);
      case DT_FLOAT32:
        return ConvertTypedValue<float, OutT>(att_id, out_num_components,
                                              out_val);
      case DT_FLOAT64:
        return ConvertTypedValue<double, OutT>(att_id, out_num_components,
                                               out_val);
      case DT_BOOL:
        return ConvertTypedValue<bool, OutT>(att_id, out_num_components,
                                             out_val);
      default:
        // Wrong attribute type.
        return false;
    }
  }
  // Function for conversion of a attribute to a specific output format.
  // The |out_value| must be able to store all components of a single attribute
  // entry (i.e. num_components() values).
  // OutT is the desired data type of the attribute.
  // Returns false when the conversion failed.
  template <typename OutT>
  bool ConvertValue(AttributeValueIndex att_index, OutT *out_value) const {
    return ConvertValue<OutT>(att_index, num_components_, out_value);
  }
  bool operator==(const GeometryAttribute &va) const;
  // Returns the type of the attribute indicating the nature of the attribute.
  Type attribute_type() const { return attribute_type_; }
  void set_attribute_type(Type type) { attribute_type_ = type; }
  // Returns the data type that is stored in the attribute.
  DataType data_type() const { return data_type_; }
  // Returns the number of components that are stored for each entry.
  // For position attribute this is usually three (x,y,z),
  // while texture coordinates have two components (u,v).
  int8_t num_components() const { return num_components_; }
  // Indicates whether the data type should be normalized before interpretation,
  // that is, it should be divided by the max value of the data type.
  bool normalized() const { return normalized_; }
  // The buffer storing the entire data of the attribute.
  const DataBuffer *buffer() const { return buffer_; }
  // Returns the number of bytes between two attribute entries, this is, at
  // least size of the data types times number of components.
  int64_t byte_stride() const { return byte_stride_; }
  // The offset where the attribute starts within the block of size byte_stride.
  int64_t byte_offset() const { return byte_offset_; }
  void set_byte_offset(int64_t byte_offset) { byte_offset_ = byte_offset; }
  DataBufferDescriptor buffer_descriptor() const { return buffer_descriptor_; }
  uint32_t unique_id() const { return unique_id_; }
  void set_unique_id(uint32_t id) { unique_id_ = id; }

 protected:
  // Sets a new internal storage for the attribute.
  void ResetBuffer(DataBuffer *buffer, int64_t byte_stride,
                   int64_t byte_offset);

 private:
  // Function for conversion of an attribute to a specific output format given a
  // format of the stored attribute.
  // T is the stored attribute data type.
  // OutT is the desired data type of the attribute.
  template <typename T, typename OutT>
  bool ConvertTypedValue(AttributeValueIndex att_id, int8_t out_num_components,
                         OutT *out_value) const {
    const uint8_t *src_address = GetAddress(att_id);
    // Convert all components available in both the original and output formats.
    for (int i = 0; i < std::min(num_components_, out_num_components); ++i) {
      const T in_value = *reinterpret_cast<const T *>(src_address);
      out_value[i] = static_cast<OutT>(in_value);
      // When converting integer to floating point, normalize the value if
      // necessary.
      if (std::is_integral<T>::value && std::is_floating_point<OutT>::value &&
          normalized_) {
        out_value[i] /= static_cast<OutT>(std::numeric_limits<T>::max());
      }
      // TODO(ostava): Add handling of normalized attributes when converting
      // between different integer representations. If the attribute is
      // normalized, integer values should be converted as if they represent 0-1
      // range. E.g. when we convert uint16 to uint8, the range <0, 2^16 - 1>
      // should be converted to range <0, 2^8 - 1>.
      src_address += sizeof(T);
    }
    // Fill empty data for unused output components if needed.
    for (int i = num_components_; i < out_num_components; ++i) {
      out_value[i] = static_cast<OutT>(0);
    }
    return true;
  }
  // Backing storage; not owned by this class (see class comment).
  DataBuffer *buffer_;
  // The buffer descriptor is stored at the time the buffer is attached to this
  // attribute. The purpose is to detect if any changes happened to the buffer
  // since the time it was attached.
  DataBufferDescriptor buffer_descriptor_;
  int8_t num_components_;
  DataType data_type_;
  bool normalized_;
  int64_t byte_stride_;
  int64_t byte_offset_;
  Type attribute_type_;
  // Unique id of this attribute. No two attributes could have the same unique
  // id. It is used to identify each attribute, especially when there are
  // multiple attribute of the same type in a point cloud.
  uint32_t unique_id_;
  friend struct GeometryAttributeHasher;
};
// Hashing support
// Functor that computes a hash for a GeometryAttribute so it can be used as a
// key in unordered containers. The hash covers the buffer descriptor and the
// format-defining fields (component count, data type, attribute type, stride,
// offset) — not the buffer contents.
struct GeometryAttributeHasher {
  size_t operator()(const GeometryAttribute &va) const {
    // Seed from the identity of the attached buffer (id + update counter).
    size_t h = HashCombine(va.buffer_descriptor_.buffer_id,
                           va.buffer_descriptor_.buffer_update_count);
    // Fold in the attribute's format description, field by field.
    h = HashCombine(va.num_components_, h);
    h = HashCombine(static_cast<int8_t>(va.data_type_), h);
    h = HashCombine(static_cast<int8_t>(va.attribute_type_), h);
    h = HashCombine(va.byte_stride_, h);
    h = HashCombine(va.byte_offset_, h);
    return h;
  }
};
// Functor for hashing GeometryAttribute::Type enum values so they can serve
// as keys in unordered containers.
struct GeometryAttributeTypeHasher {
  size_t operator()(const GeometryAttribute::Type &at) const {
    const size_t type_as_index = static_cast<size_t>(at);
    return type_as_index;
  }
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_ATTRIBUTE_H_

View File

@ -0,0 +1,54 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#define DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_
#include <inttypes.h>
#include <limits>
#include "draco/core/draco_index_type.h"
namespace draco {
// Strongly typed index types. Each macro invocation defines a distinct
// wrapper around uint32_t so the different index kinds cannot be mixed up
// at compile time.
// Index of an attribute value entry stored in a GeometryAttribute.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, AttributeValueIndex)
// Index of a point in a PointCloud.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, PointIndex)
// Vertex index in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, VertexIndex);
// Corner index that identifies a corner in a Mesh or CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, CornerIndex);
// Face index for Mesh and CornerTable.
DEFINE_NEW_DRACO_INDEX_TYPE(uint32_t, FaceIndex);
// NOTE(review): trailing ';' usage is inconsistent across the macro calls
// above — presumably harmless; confirm against the macro definition.
// Constants denoting invalid indices. Each wraps the maximum uint32_t value,
// so valid indices effectively range from 0 to 2^32 - 2.
static constexpr AttributeValueIndex kInvalidAttributeValueIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr PointIndex kInvalidPointIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr VertexIndex kInvalidVertexIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr CornerIndex kInvalidCornerIndex(
    std::numeric_limits<uint32_t>::max());
static constexpr FaceIndex kInvalidFaceIndex(
    std::numeric_limits<uint32_t>::max());
// TODO(ostava): Add strongly typed indices for attribute id and unique
// attribute id.
}  // namespace draco
#endif // DRACO_ATTRIBUTES_GEOMETRY_INDICES_H_

View File

@ -0,0 +1,205 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include <unordered_map>
using std::unordered_map;
// Shortcut for typed conditionals.
// (Matches C++14 std::conditional_t; defined locally — presumably to keep the
// code buildable on pre-C++14 toolchains; confirm the project's minimum
// standard.)
template <bool B, class T, class F>
using conditional_t = typename std::conditional<B, T, F>::type;
namespace draco {
// Creates an empty attribute: no unique values, explicit (non-identity)
// point-to-value mapping.
PointAttribute::PointAttribute()
    : num_unique_entries_(0), identity_mapping_(false) {}
// Creates a point attribute that copies the format description of |att|.
// The new attribute starts with no values and an explicit mapping.
PointAttribute::PointAttribute(const GeometryAttribute &att)
    : GeometryAttribute(att),
      num_unique_entries_(0),
      identity_mapping_(false) {}
// Copies all data from |src_att| into this attribute, allocating an owned
// data buffer first if this attribute does not have one yet.
void PointAttribute::CopyFrom(const PointAttribute &src_att) {
  if (buffer() == nullptr) {
    // If the destination attribute doesn't have a valid buffer, create it.
    attribute_buffer_ = std::unique_ptr<DataBuffer>(new DataBuffer());
    ResetBuffer(attribute_buffer_.get(), 0, 0);
  }
  // NOTE(review): failure of the base-class copy is silent (void return);
  // the attribute is left partially updated and callers cannot detect it.
  if (!GeometryAttribute::CopyFrom(src_att))
    return;
  identity_mapping_ = src_att.identity_mapping_;
  num_unique_entries_ = src_att.num_unique_entries_;
  indices_map_ = src_att.indices_map_;
  if (src_att.attribute_transform_data_) {
    // Deep-copy the optional transform data (e.g. quantization parameters).
    attribute_transform_data_ = std::unique_ptr<AttributeTransformData>(
        new AttributeTransformData(*src_att.attribute_transform_data_.get()));
  } else {
    attribute_transform_data_ = nullptr;
  }
}
// Allocates (or resizes) the attribute's owned storage so it can hold
// |num_attribute_values| entries. Returns false if the buffer update fails.
bool PointAttribute::Reset(size_t num_attribute_values) {
  // Lazily create the owned buffer on first use.
  if (!attribute_buffer_) {
    attribute_buffer_.reset(new DataBuffer());
  }
  // Size in bytes of one attribute entry (all components).
  const int64_t bytes_per_entry = DataTypeLength(data_type()) * num_components();
  if (!attribute_buffer_->Update(nullptr, num_attribute_values * bytes_per_entry))
    return false;
  // Attach the freshly sized buffer to the parent GeometryAttribute.
  ResetBuffer(attribute_buffer_.get(), bytes_per_entry, 0);
  num_unique_entries_ = static_cast<uint32_t>(num_attribute_values);
  return true;
}
#ifdef DRACO_ATTRIBUTE_DEDUPLICATION_SUPPORTED
// Convenience overload: deduplicates starting at the first value of |in_att|.
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
    const GeometryAttribute &in_att) {
  const AttributeValueIndex no_offset(0);
  return DeduplicateValues(in_att, no_offset);
}
// Deduplicates the values of |in_att| (read starting at |in_att_offset|) into
// this attribute. Returns the resulting number of unique values, or -1 on
// failure / unsupported data type.
AttributeValueIndex::ValueType PointAttribute::DeduplicateValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  AttributeValueIndex::ValueType unique_vals = 0;
  // Dispatch on the stored data type. Supported types: float32 and 8/16/32-bit
  // signed and unsigned integers (bool is treated as uint8).
  switch (in_att.data_type()) {
    case DT_FLOAT32:
      unique_vals = DeduplicateTypedValues<float>(in_att, in_att_offset);
      break;
    case DT_INT8:
      unique_vals = DeduplicateTypedValues<int8_t>(in_att, in_att_offset);
      break;
    case DT_UINT8:
    case DT_BOOL:
      unique_vals = DeduplicateTypedValues<uint8_t>(in_att, in_att_offset);
      break;
    case DT_UINT16:
      unique_vals = DeduplicateTypedValues<uint16_t>(in_att, in_att_offset);
      break;
    case DT_INT16:
      unique_vals = DeduplicateTypedValues<int16_t>(in_att, in_att_offset);
      break;
    case DT_UINT32:
      unique_vals = DeduplicateTypedValues<uint32_t>(in_att, in_att_offset);
      break;
    case DT_INT32:
      unique_vals = DeduplicateTypedValues<int32_t>(in_att, in_att_offset);
      break;
    default:
      return -1;  // Unsupported data type.
  }
  if (unique_vals == 0)
    return -1;  // Unexpected error.
  return unique_vals;
}
// Helper function for calling DeduplicateFormattedValues<T,num_components_t>
// with the correct template arguments.
// Returns the number of unique attribute values (0 on an unsupported
// component count).
template <typename T>
AttributeValueIndex::ValueType PointAttribute::DeduplicateTypedValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  // Select the correct method to call based on the number of attribute
  // components (only 1-4 components are supported).
  switch (in_att.num_components()) {
    case 1:
      return DeduplicateFormattedValues<T, 1>(in_att, in_att_offset);
    case 2:
      return DeduplicateFormattedValues<T, 2>(in_att, in_att_offset);
    case 3:
      return DeduplicateFormattedValues<T, 3>(in_att, in_att_offset);
    case 4:
      return DeduplicateFormattedValues<T, 4>(in_att, in_att_offset);
    default:
      return 0;
  }
}
// Deduplicates values of a fully resolved format (element type T with
// num_components_t components). Unique values are compacted in place and the
// point-to-value mapping is rewritten accordingly. Returns the number of
// unique values that remain.
template <typename T, int num_components_t>
AttributeValueIndex::ValueType PointAttribute::DeduplicateFormattedValues(
    const GeometryAttribute &in_att, AttributeValueIndex in_att_offset) {
  // We want to detect duplicates using a hash map but we cannot hash floating
  // point numbers directly so bit-copy floats to the same sized integers and
  // hash them.
  // First we need to determine which int type to use (1, 2, 4 or 8 bytes).
  // Note, this is done at compile time using std::conditional struct.
  // Conditional is in form <bool-expression, true, false>. If bool-expression
  // is true the "true" branch is used and vice versa. All at compile time.
  typedef conditional_t<sizeof(T) == 1, uint8_t,
                        conditional_t<sizeof(T) == 2, uint16_t,
                                      conditional_t<sizeof(T) == 4, uint32_t,
                                                    /*else*/ uint64_t>>>
      HashType;
  AttributeValueIndex unique_vals(0);
  typedef std::array<T, num_components_t> AttributeValue;
  typedef std::array<HashType, num_components_t> AttributeHashableValue;
  // Hash map storing index of the first attribute with a given value.
  unordered_map<AttributeHashableValue, AttributeValueIndex,
                HashArray<AttributeHashableValue>>
      value_to_index_map;
  AttributeValue att_value;
  AttributeHashableValue hashable_value;
  // Maps each old value index to the index of its surviving unique value.
  IndexTypeVector<AttributeValueIndex, AttributeValueIndex> value_map(
      num_unique_entries_);
  for (AttributeValueIndex i(0); i < num_unique_entries_; ++i) {
    const AttributeValueIndex att_pos = i + in_att_offset;
    att_value = in_att.GetValue<T, num_components_t>(att_pos);
    // Convert the value to hashable type. Bit-copy real attributes to integers.
    memcpy(&(hashable_value[0]), &(att_value[0]), sizeof(att_value));
    // Check if the given attribute value has been used before already.
    auto it = value_to_index_map.find(hashable_value);
    if (it != value_to_index_map.end()) {
      // Duplicated value found. Update index mapping.
      value_map[i] = it->second;
    } else {
      // New unique value.
      // Update the hash map with a new entry pointing to the latest unique
      // vertex index.
      value_to_index_map.insert(
          std::pair<AttributeHashableValue, AttributeValueIndex>(hashable_value,
                                                                 unique_vals));
      // Add the unique value to the mesh builder.
      SetAttributeValue(unique_vals, &att_value);
      // Update index mapping.
      value_map[i] = unique_vals;
      ++unique_vals;
    }
  }
  if (unique_vals == num_unique_entries_)
    return unique_vals.value();  // Nothing has changed.
  if (is_mapping_identity()) {
    // Change identity mapping to the explicit one.
    // The number of points is equal to the number of old unique values.
    SetExplicitMapping(num_unique_entries_);
    // Update the explicit map.
    for (uint32_t i = 0; i < num_unique_entries_; ++i) {
      SetPointMapEntry(PointIndex(i), value_map[AttributeValueIndex(i)]);
    }
  } else {
    // Update point to value map using the mapping between old and new values.
    for (PointIndex i(0); i < static_cast<uint32_t>(indices_map_.size()); ++i) {
      SetPointMapEntry(i, value_map[indices_map_[i]]);
    }
  }
  num_unique_entries_ = unique_vals.value();
  return num_unique_entries_;
}
#endif
} // namespace draco

View File

@ -0,0 +1,186 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#define DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_
#include <memory>
#include "draco/draco_features.h"
#include "draco/attributes/attribute_transform_data.h"
#include "draco/attributes/geometry_attribute.h"
#include "draco/core/draco_index_type_vector.h"
#include "draco/core/hash_utils.h"
#include "draco/core/macros.h"
namespace draco {
// Class for storing point specific data about each attribute. In general,
// multiple points stored in a point cloud can share the same attribute value
// and this class provides the necessary mapping between point ids and attribute
// value ids.
class PointAttribute : public GeometryAttribute {
 public:
  PointAttribute();
  explicit PointAttribute(const GeometryAttribute &att);
  // Make sure the move constructor is defined (needed for better performance
  // when new attributes are added to PointCloud).
  PointAttribute(PointAttribute &&attribute) = default;
  PointAttribute &operator=(PointAttribute &&attribute) = default;
  // Copies attribute data from the provided |src_att| attribute.
  void CopyFrom(const PointAttribute &src_att);
  // Prepares the attribute storage for the specified number of entries.
  bool Reset(size_t num_attribute_values);
  // Number of unique attribute values currently stored.
  size_t size() const { return num_unique_entries_; }
  // Maps a point index to the index of its attribute value, either via the
  // identity mapping or through the explicit indices_map_.
  AttributeValueIndex mapped_index(PointIndex point_index) const {
    if (identity_mapping_)
      return AttributeValueIndex(point_index.value());
    return indices_map_[point_index];
  }
  // The attribute's owned data buffer (may be null if the attribute wraps an
  // external buffer).
  DataBuffer *buffer() const { return attribute_buffer_.get(); }
  bool is_mapping_identity() const { return identity_mapping_; }
  // Size of the explicit mapping; 0 when the identity mapping is used.
  size_t indices_map_size() const {
    if (is_mapping_identity())
      return 0;
    return indices_map_.size();
  }
  const uint8_t *GetAddressOfMappedIndex(PointIndex point_index) const {
    return GetAddress(mapped_index(point_index));
  }
  // Sets the new number of unique attribute entries for the attribute.
  // Note: this only updates the bookkeeping count; it does not reallocate the
  // underlying buffer.
  void Resize(size_t new_num_unique_entries) {
    num_unique_entries_ = static_cast<uint32_t>(new_num_unique_entries);
  }
  // Functions for setting the type of mapping between point indices and
  // attribute entry ids.
  // This function sets the mapping to implicit, where point indices are equal
  // to attribute entry indices.
  void SetIdentityMapping() {
    identity_mapping_ = true;
    indices_map_.clear();
  }
  // This function sets the mapping to be explicitly using the indices_map_
  // array that needs to be initialized by the caller.
  void SetExplicitMapping(size_t num_points) {
    identity_mapping_ = false;
    indices_map_.resize(num_points, kInvalidAttributeValueIndex);
  }
  // Set an explicit map entry for a specific point index.
  void SetPointMapEntry(PointIndex point_index,
                        AttributeValueIndex entry_index) {
    DRACO_DCHECK(!identity_mapping_);
    indices_map_[point_index] = entry_index;
  }
  // Sets a value of an attribute entry. The input value must be allocated to
  // cover all components of a single attribute entry.
  void SetAttributeValue(AttributeValueIndex entry_index, const void *value) {
    const int64_t byte_pos = entry_index.value() * byte_stride();
    buffer()->Write(byte_pos, value, byte_stride());
  }
  // Same as GeometryAttribute::GetValue(), but using point id as the input.
  // Mapping to attribute value index is performed automatically.
  void GetMappedValue(PointIndex point_index, void *out_data) const {
    return GetValue(mapped_index(point_index), out_data);
  }
#ifdef DRACO_ATTRIBUTE_DEDUPLICATION_SUPPORTED
  // Deduplicate |in_att| values into |this| attribute. |in_att| can be equal
  // to |this|.
  // Returns -1 if the deduplication failed.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att);
  // Same as above but the values read from |in_att| are sampled with the
  // provided offset |in_att_offset|.
  AttributeValueIndex::ValueType DeduplicateValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
  // Set attribute transform data for the attribute. The data is used to store
  // the type and parameters of the transform that is applied on the attribute
  // data (optional).
  void SetAttributeTransformData(
      std::unique_ptr<AttributeTransformData> transform_data) {
    attribute_transform_data_ = std::move(transform_data);
  }
  const AttributeTransformData *GetAttributeTransformData() const {
    return attribute_transform_data_.get();
  }

 private:
#ifdef DRACO_ATTRIBUTE_DEDUPLICATION_SUPPORTED
  template <typename T>
  AttributeValueIndex::ValueType DeduplicateTypedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
  template <typename T, int COMPONENTS_COUNT>
  AttributeValueIndex::ValueType DeduplicateFormattedValues(
      const GeometryAttribute &in_att, AttributeValueIndex in_att_offset);
#endif
  // Data storage for attribute values. GeometryAttribute itself doesn't own its
  // buffer so we need to allocate it here.
  std::unique_ptr<DataBuffer> attribute_buffer_;
  // Mapping between point ids and attribute value ids.
  IndexTypeVector<PointIndex, AttributeValueIndex> indices_map_;
  AttributeValueIndex::ValueType num_unique_entries_;
  // Flag when the mapping between point ids and attribute values is identity.
  bool identity_mapping_;
  // If an attribute contains transformed data (e.g. quantized), we can specify
  // the attribute transform here and use it to transform the attribute back to
  // its original format.
  std::unique_ptr<AttributeTransformData> attribute_transform_data_;
  friend struct PointAttributeHasher;
};
// Hash functor for the PointAttribute class.
struct PointAttributeHasher {
size_t operator()(const PointAttribute &attribute) const {
GeometryAttributeHasher base_hasher;
size_t hash = base_hasher(attribute);
hash = HashCombine(attribute.identity_mapping_, hash);
hash = HashCombine(attribute.num_unique_entries_, hash);
hash = HashCombine(attribute.indices_map_.size(), hash);
if (attribute.indices_map_.size() > 0) {
const uint64_t indices_hash = FingerprintString(
reinterpret_cast<const char *>(attribute.indices_map_.data()),
attribute.indices_map_.size());
hash = HashCombine(indices_hash, hash);
}
if (attribute.attribute_buffer_ != nullptr) {
const uint64_t buffer_hash = FingerprintString(
reinterpret_cast<const char *>(attribute.attribute_buffer_->data()),
attribute.attribute_buffer_->data_size());
hash = HashCombine(buffer_hash, hash);
}
return hash;
}
};
} // namespace draco
#endif // DRACO_ATTRIBUTES_POINT_ATTRIBUTE_H_

View File

@ -0,0 +1,129 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/attributes/point_attribute.h"
#include "draco/core/draco_test_base.h"
namespace {
// Test fixture for PointAttribute tests. Holds no shared state; it exists
// only to group the tests under one name.
class PointAttributeTest : public ::testing::Test {
 protected:
  PointAttributeTest() {}
};
TEST_F(PointAttributeTest, TestCopy) {
  // This test verifies that CopyFrom() replicates both the descriptor and the
  // raw values of another point attribute.
  draco::GeometryAttribute pos_att;
  pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 1, draco::DT_INT32,
               false, 4, 0);
  draco::PointAttribute src(pos_att);
  src.SetIdentityMapping();
  src.Reset(10);
  // Store the values 0..9, one per entry.
  for (int32_t value = 0; value < 10; ++value) {
    src.SetAttributeValue(draco::AttributeValueIndex(value), &value);
  }
  draco::PointAttribute copy;
  copy.CopyFrom(src);
  // Source and copy must hash identically.
  draco::PointAttributeHasher hasher;
  ASSERT_EQ(hasher(src), hasher(copy));
  // The hash function does not actually compute the hash from attribute
  // values, so also verify the data itself was copied correctly.
  for (int32_t i = 0; i < 10; ++i) {
    int32_t copied_value;
    copy.GetValue(draco::AttributeValueIndex(i), &copied_value);
    ASSERT_EQ(copied_value, i);
  }
}
TEST_F(PointAttributeTest, TestGetValueFloat) {
  // Round-trips float triples through SetAttributeValue/GetValue.
  draco::GeometryAttribute pos_att;
  pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
               draco::DT_FLOAT32, false, 4, 0);
  draco::PointAttribute pa(pos_att);
  pa.SetIdentityMapping();
  pa.Reset(5);
  // Fill each entry i with the distinct triple (3i, 3i+1, 3i+2).
  float entry[3];
  for (int32_t i = 0; i < 5; ++i) {
    entry[0] = i * 3.0;
    entry[1] = (i * 3.0) + 1.0;
    entry[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &entry);
  }
  // Read the entries back and verify every component.
  for (int32_t i = 0; i < 5; ++i) {
    pa.GetValue(draco::AttributeValueIndex(i), &entry);
    ASSERT_FLOAT_EQ(entry[0], i * 3.0);
    ASSERT_FLOAT_EQ(entry[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(entry[2], (i * 3.0) + 2.0);
  }
}
TEST_F(PointAttributeTest, TestGetArray) {
  // Exercises both std::array accessors: the unchecked one and the
  // bounds-checked one.
  draco::GeometryAttribute pos_att;
  pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
               draco::DT_FLOAT32, false, 4, 0);
  draco::PointAttribute pa(pos_att);
  pa.SetIdentityMapping();
  pa.Reset(5);
  // Fill each entry i with the distinct triple (3i, 3i+1, 3i+2).
  float entry[3];
  for (int32_t i = 0; i < 5; ++i) {
    entry[0] = i * 3.0;
    entry[1] = (i * 3.0) + 1.0;
    entry[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &entry);
  }
  // Unchecked accessor returns the array by value.
  for (int32_t i = 0; i < 5; ++i) {
    const std::array<float, 3> value =
        pa.GetValue<float, 3>(draco::AttributeValueIndex(i));
    ASSERT_FLOAT_EQ(value[0], i * 3.0);
    ASSERT_FLOAT_EQ(value[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(value[2], (i * 3.0) + 2.0);
  }
  // Bounds-checked accessor fills an out-parameter and reports success.
  for (int32_t i = 0; i < 5; ++i) {
    std::array<float, 3> value;
    EXPECT_TRUE(
        (pa.GetValue<float, 3>(draco::AttributeValueIndex(i), &value)));
    ASSERT_FLOAT_EQ(value[0], i * 3.0);
    ASSERT_FLOAT_EQ(value[1], (i * 3.0) + 1.0);
    ASSERT_FLOAT_EQ(value[2], (i * 3.0) + 2.0);
  }
}
TEST_F(PointAttributeTest, TestArrayReadError) {
  // Reading one entry past the end must fail through the bounds-checked
  // array accessor.
  draco::GeometryAttribute pos_att;
  pos_att.Init(draco::GeometryAttribute::POSITION, nullptr, 3,
               draco::DT_FLOAT32, false, 4, 0);
  draco::PointAttribute pa(pos_att);
  pa.SetIdentityMapping();
  pa.Reset(5);
  float entry[3];
  for (int32_t i = 0; i < 5; ++i) {
    entry[0] = i * 3.0;
    entry[1] = (i * 3.0) + 1.0;
    entry[2] = (i * 3.0) + 2.0;
    pa.SetAttributeValue(draco::AttributeValueIndex(i), &entry);
  }
  // Only indices 0..4 are valid; index 5 is out of range.
  std::array<float, 3> value;
  EXPECT_FALSE(
      (pa.GetValue<float, 3>(draco::AttributeValueIndex(5), &value)));
}
} // namespace

View File

@ -0,0 +1,97 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_decoder.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// Constructs a decoder that is not yet attached to a parent decoder or a
// point cloud; Init() must be called before decoding.
AttributesDecoder::AttributesDecoder()
    : point_cloud_decoder_(nullptr), point_cloud_(nullptr) {}
// Binds this attributes decoder to its parent |decoder| and to the point
// cloud |pc| that decoded attributes will be added to. Always succeeds.
bool AttributesDecoder::Init(PointCloudDecoder *decoder, PointCloud *pc) {
  point_cloud_decoder_ = decoder;
  point_cloud_ = pc;
  return true;
}
// Decodes the attribute descriptors (type, data type, component count,
// normalization flag, unique id) from |in_buffer| and registers the
// corresponding PointAttributes on the attached point cloud.
// Returns false on any malformed or truncated input.
bool AttributesDecoder::DecodeAttributesDecoderData(DecoderBuffer *in_buffer) {
  // Decode and create attributes.
  uint32_t num_attributes;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Pre-2.0 bitstreams stored the count as a fixed-width integer.
  if (point_cloud_decoder_->bitstream_version() <
      DRACO_BITSTREAM_VERSION(2, 0)) {
    if (!in_buffer->Decode(&num_attributes))
      return false;
  } else
#endif
  {
    if (!DecodeVarint(&num_attributes, in_buffer))
      return false;
  }
  if (num_attributes == 0)
    return false;
  point_attribute_ids_.resize(num_attributes);
  PointCloud *pc = point_cloud_;
  for (uint32_t i = 0; i < num_attributes; ++i) {
    // Decode attribute descriptor data.
    uint8_t att_type, data_type, num_components, normalized;
    if (!in_buffer->Decode(&att_type))
      return false;
    if (!in_buffer->Decode(&data_type))
      return false;
    if (!in_buffer->Decode(&num_components))
      return false;
    if (!in_buffer->Decode(&normalized))
      return false;
    // Reject data types outside the valid enum range.
    if (data_type <= DT_INVALID || data_type >= DT_TYPES_COUNT)
      return false;
    const DataType draco_dt = static_cast<DataType>(data_type);
    // Add the attribute to the point cloud.
    GeometryAttribute ga;
    ga.Init(static_cast<GeometryAttribute::Type>(att_type), nullptr,
            num_components, draco_dt, normalized > 0,
            DataTypeLength(draco_dt) * num_components, 0);
    uint32_t unique_id;
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
    if (point_cloud_decoder_->bitstream_version() <
        DRACO_BITSTREAM_VERSION(1, 3)) {
      // Pre-1.3 bitstreams used a 16-bit "custom id" instead of a varint
      // unique id.
      uint16_t custom_id;
      if (!in_buffer->Decode(&custom_id))
        return false;
      // TODO(draco-eng): Add "custom_id" to attribute metadata.
      unique_id = static_cast<uint32_t>(custom_id);
      ga.set_unique_id(unique_id);
    } else
#endif
    {
      // BUGFIX: this varint decode was previously unchecked; on a truncated
      // or corrupt buffer |unique_id| would be used uninitialized.
      if (!DecodeVarint(&unique_id, in_buffer))
        return false;
      ga.set_unique_id(unique_id);
    }
    const int att_id = pc->AddAttribute(
        std::unique_ptr<PointAttribute>(new PointAttribute(ga)));
    pc->attribute(att_id)->set_unique_id(unique_id);
    point_attribute_ids_[i] = att_id;
    // Update the inverse map (attribute id -> local decoder index).
    if (att_id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size()))
      point_attribute_to_local_id_map_.resize(att_id + 1, -1);
    point_attribute_to_local_id_map_[att_id] = i;
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,94 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_
#include <vector>
#include "draco/draco_features.h"
#include "draco/compression/attributes/attributes_decoder_interface.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/decoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
// Base class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. Implements the parts of
// AttributesDecoderInterface that are common to all attribute decoders:
// descriptor decoding, attribute-id bookkeeping, and the three-phase
// DecodeAttributes() pipeline.
class AttributesDecoder : public AttributesDecoderInterface {
 public:
  AttributesDecoder();
  virtual ~AttributesDecoder() = default;

  // Called after all attribute decoders are created. It can be used to perform
  // any custom initialization.
  bool Init(PointCloudDecoder *decoder, PointCloud *pc) override;

  // Decodes any attribute decoder specific data from the |in_buffer|.
  bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) override;

  // Returns the point-cloud attribute id of the |i|-th attribute handled by
  // this decoder.
  int32_t GetAttributeId(int i) const override {
    return point_attribute_ids_[i];
  }
  // Returns how many attributes this decoder is responsible for.
  int32_t GetNumAttributes() const override {
    return static_cast<int32_t>(point_attribute_ids_.size());
  }
  // Returns the parent point-cloud decoder set by Init().
  PointCloudDecoder *GetDecoder() const override {
    return point_cloud_decoder_;
  }

  // Decodes attribute data from the source buffer by running the three
  // decoding phases in order, failing as soon as any phase fails.
  bool DecodeAttributes(DecoderBuffer *in_buffer) override {
    return DecodePortableAttributes(in_buffer) &&
           DecodeDataNeededByPortableTransforms(in_buffer) &&
           TransformAttributesToOriginalFormat();
  }

 protected:
  // Maps a point-cloud attribute id back to this decoder's local index, or -1
  // when the attribute is not handled by this decoder.
  int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
    if (point_attribute_id >=
        static_cast<int>(point_attribute_to_local_id_map_.size())) {
      return -1;
    }
    return point_attribute_to_local_id_map_[point_attribute_id];
  }
  // Phase 1: losslessly decodes the portable (integer) form of the attributes.
  virtual bool DecodePortableAttributes(DecoderBuffer *in_buffer) = 0;
  // Phase 2: decodes data needed to revert the portable transform (e.g.
  // quantization parameters). No-op by default.
  virtual bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) {
    return true;
  }
  // Phase 3: converts portable attributes back to their original format.
  // No-op by default.
  virtual bool TransformAttributesToOriginalFormat() { return true; }

 private:
  // List of attribute ids that need to be decoded with this decoder.
  std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of
  // |point_attribute_ids_|).
  std::vector<int32_t> point_attribute_to_local_id_map_;
  PointCloudDecoder *point_cloud_decoder_;
  PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_H_

View File

@ -0,0 +1,62 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_
#include <vector>
#include "draco/core/decoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudDecoder;
// Interface class for decoding one or more attributes that were encoded with a
// matching AttributesEncoder. It provides only the basic interface
// that is used by the PointCloudDecoder. The actual decoding must be
// implemented in derived classes using the DecodeAttributes() method.
class AttributesDecoderInterface {
 public:
  AttributesDecoderInterface() = default;
  virtual ~AttributesDecoderInterface() = default;

  // Called after all attribute decoders are created. It can be used to perform
  // any custom initialization.
  virtual bool Init(PointCloudDecoder *decoder, PointCloud *pc) = 0;

  // Decodes any attribute decoder specific data from the |in_buffer|.
  virtual bool DecodeAttributesDecoderData(DecoderBuffer *in_buffer) = 0;

  // Decode attribute data from the source buffer. Needs to be implemented by
  // the derived classes.
  virtual bool DecodeAttributes(DecoderBuffer *in_buffer) = 0;

  // Returns the point-cloud attribute id of the |i|-th decoded attribute.
  virtual int32_t GetAttributeId(int i) const = 0;
  // Returns the number of attributes decoded by this decoder.
  virtual int32_t GetNumAttributes() const = 0;
  // Returns the parent point-cloud decoder.
  virtual PointCloudDecoder *GetDecoder() const = 0;

  // Returns an attribute containing data processed by the attribute transform.
  // (see TransformToPortableFormat() method). This data is guaranteed to be
  // same for encoder and decoder and it can be used by predictors.
  virtual const PointAttribute *GetPortableAttribute(
      int32_t /* point_attribute_id */) {
    return nullptr;
  }
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_DECODER_INTERFACE_H_

View File

@ -0,0 +1,49 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// Constructs an encoder that is not yet associated with any attribute.
AttributesEncoder::AttributesEncoder()
    : point_cloud_encoder_(nullptr), point_cloud_(nullptr) {}
// Constructs an encoder responsible for the single attribute |att_id|.
AttributesEncoder::AttributesEncoder(int att_id) : AttributesEncoder() {
  AddAttributeId(att_id);
}
// Attaches this attributes encoder to its parent |encoder| and to the source
// point cloud |pc| whose attributes will be encoded. Always succeeds.
bool AttributesEncoder::Init(PointCloudEncoder *encoder, const PointCloud *pc) {
  point_cloud_ = pc;
  point_cloud_encoder_ = encoder;
  return true;
}
// Writes the descriptor (attribute type, data type, component count,
// normalization flag and unique id) of every attribute handled by this
// encoder into |out_buffer|, preceded by the attribute count.
bool AttributesEncoder::EncodeAttributesEncoderData(EncoderBuffer *out_buffer) {
  // Encode data about all attributes.
  const uint32_t att_count = num_attributes();
  EncodeVarint(att_count, out_buffer);
  for (uint32_t i = 0; i < att_count; ++i) {
    const PointAttribute *const pa =
        point_cloud_->attribute(point_attribute_ids_[i]);
    out_buffer->Encode(static_cast<uint8_t>(pa->attribute_type()));
    out_buffer->Encode(static_cast<uint8_t>(pa->data_type()));
    out_buffer->Encode(static_cast<uint8_t>(pa->num_components()));
    out_buffer->Encode(static_cast<uint8_t>(pa->normalized()));
    EncodeVarint(pa->unique_id(), out_buffer);
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,149 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/point_attribute.h"
#include "draco/core/encoder_buffer.h"
#include "draco/point_cloud/point_cloud.h"
namespace draco {
class PointCloudEncoder;
// Base class for encoding one or more attributes of a PointCloud (or other
// geometry). This base class provides only the basic interface that is used
// by the PointCloudEncoder.
class AttributesEncoder {
 public:
  AttributesEncoder();
  // Constructs an attribute encoder associated with a given point attribute.
  explicit AttributesEncoder(int point_attrib_id);
  virtual ~AttributesEncoder() = default;

  // Called after all attribute encoders are created. It can be used to perform
  // any custom initialization, including setting up attribute dependencies.
  // Note: no data should be encoded in this function, because the decoder may
  // process encoders in a different order than the encoder did.
  virtual bool Init(PointCloudEncoder *encoder, const PointCloud *pc);

  // Encodes data needed by the target attribute decoder.
  virtual bool EncodeAttributesEncoderData(EncoderBuffer *out_buffer);

  // Returns a unique identifier of the given encoder type, that is used during
  // decoding to construct the corresponding attribute decoder.
  virtual uint8_t GetUniqueId() const = 0;

  // Encode attribute data to the target buffer. Runs the transform-then-encode
  // pipeline; fails as soon as any step fails.
  virtual bool EncodeAttributes(EncoderBuffer *out_buffer) {
    if (!TransformAttributesToPortableFormat())
      return false;
    if (!EncodePortableAttributes(out_buffer))
      return false;
    // Encode data needed by portable transforms after the attribute is encoded.
    // This corresponds to the order in which the data is going to be decoded by
    // the decoder.
    if (!EncodeDataNeededByPortableTransforms(out_buffer))
      return false;
    return true;
  }

  // Returns the number of attributes that need to be encoded before the
  // specified attribute is encoded.
  // Note that the attribute is specified by its point attribute id.
  virtual int NumParentAttributes(int32_t /* point_attribute_id */) const {
    return 0;
  }
  // Returns the id of the |parent_i|-th parent attribute, or -1 when there is
  // no such parent.
  virtual int GetParentAttributeId(int32_t /* point_attribute_id */,
                                   int32_t /* parent_i */) const {
    return -1;
  }

  // Marks a given attribute as a parent of another attribute. Returns false
  // when the encoder does not support parent attributes.
  virtual bool MarkParentAttribute(int32_t /* point_attribute_id */) {
    return false;
  }

  // Returns an attribute containing data processed by the attribute transform.
  // (see TransformToPortableFormat() method). This data is guaranteed to be
  // encoded losslessly and it can be safely used for predictors.
  virtual const PointAttribute *GetPortableAttribute(
      int32_t /* point_attribute_id */) {
    return nullptr;
  }

  // Registers attribute |id| with this encoder and records its local index in
  // the inverse map (growing the map as needed).
  void AddAttributeId(int32_t id) {
    point_attribute_ids_.push_back(id);
    if (id >= static_cast<int32_t>(point_attribute_to_local_id_map_.size()))
      point_attribute_to_local_id_map_.resize(id + 1, -1);
    point_attribute_to_local_id_map_[id] =
        static_cast<int32_t>(point_attribute_ids_.size()) - 1;
  }

  // Sets new attribute point ids (replacing the existing ones).
  void SetAttributeIds(const std::vector<int32_t> &point_attribute_ids) {
    point_attribute_ids_.clear();
    point_attribute_to_local_id_map_.clear();
    for (int32_t att_id : point_attribute_ids) {
      AddAttributeId(att_id);
    }
  }

  // Returns the point-cloud attribute id of the |i|-th handled attribute.
  int32_t GetAttributeId(int i) const { return point_attribute_ids_[i]; }
  // Returns how many attributes this encoder is responsible for.
  uint32_t num_attributes() const {
    return static_cast<uint32_t>(point_attribute_ids_.size());
  }
  // Returns the parent point-cloud encoder set by Init().
  PointCloudEncoder *encoder() const { return point_cloud_encoder_; }

 protected:
  // Transforms the input attribute data into a form that should be losslessly
  // encoded (transform itself can be lossy).
  virtual bool TransformAttributesToPortableFormat() { return true; }

  // Losslessly encodes data of all portable attributes.
  // Precondition: All attributes must have been transformed into portable
  // format at this point (see TransformAttributesToPortableFormat() method).
  virtual bool EncodePortableAttributes(EncoderBuffer *out_buffer) = 0;

  // Encodes any data needed to revert the transform to portable format for each
  // attribute (e.g. data needed for dequantization of quantized values).
  virtual bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) {
    return true;
  }

  // Maps a point-cloud attribute id back to this encoder's local index, or -1
  // when the attribute is not handled by this encoder.
  int32_t GetLocalIdForPointAttribute(int32_t point_attribute_id) const {
    const int id_map_size =
        static_cast<int>(point_attribute_to_local_id_map_.size());
    if (point_attribute_id >= id_map_size)
      return -1;
    return point_attribute_to_local_id_map_[point_attribute_id];
  }

 private:
  // List of attribute ids that need to be encoded with this encoder.
  std::vector<int32_t> point_attribute_ids_;
  // Map between point attribute id and the local id (i.e., the inverse of
  // |point_attribute_ids_|).
  std::vector<int32_t> point_attribute_to_local_id_map_;
  PointCloudEncoder *point_cloud_encoder_;
  const PointCloud *point_cloud_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_ATTRIBUTES_ENCODER_H_

View File

@ -0,0 +1,515 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_decoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_decoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_decoder.h"
#include "draco/compression/point_cloud/point_cloud_decoder.h"
#include "draco/core/draco_types.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// attribute, offset_dimensionality, data_type, data_size, num_components
using AttributeTuple =
std::tuple<PointAttribute *, uint32_t, DataType, uint32_t, uint32_t>;
// Output iterator that is used to decode values directly into the data buffer
// of the modified PointAttribute.
// The extension of this iterator beyond the DT_UINT32 concerns itself only with
// the size of the data for efficiency, not the type. DataType is conveyed in
// but is an unused field populated for any future logic/special casing.
// DT_UINT32 and all other 4-byte types are naturally supported from the size of
// data in the kd tree encoder. DT_UINT16 and DT_UINT8 are supported by way
// of byte copies into a temporary memory buffer.
template <class CoeffT>
class PointAttributeVectorOutputIterator {
  typedef PointAttributeVectorOutputIterator<CoeffT> Self;

 public:
  PointAttributeVectorOutputIterator(
      PointAttributeVectorOutputIterator &&that) = default;

  // |atts| describes every target attribute as
  // (attribute, offset_dimensionality, data_type, data_size, num_components).
  // Sizes the scratch buffer to the largest per-attribute value footprint so
  // sub-4-byte types can be repacked without reallocation.
  explicit PointAttributeVectorOutputIterator(
      const std::vector<AttributeTuple> &atts)
      : attributes_(atts), point_id_(0) {
    DRACO_DCHECK_GE(atts.size(), 1);
    uint32_t required_decode_bytes = 0;
    for (auto index = 0; index < attributes_.size(); index++) {
      const AttributeTuple &att = attributes_[index];
      required_decode_bytes = (std::max)(required_decode_bytes,
                                         std::get<3>(att) * std::get<4>(att));
    }
    memory_.resize(required_decode_bytes);
    data_ = memory_.data();
  }

  // Advances to the next point; the iterator itself is the "value" that is
  // assigned to (see operator= below).
  const Self &operator++() {
    ++point_id_;
    return *this;
  }

  // We do not want to do ANY copying of this constructor so this particular
  // operator is disabled for performance reasons.
  // Self operator++(int) {
  //   Self copy = *this;
  //   ++point_id_;
  //   return copy;
  // }

  Self &operator*() { return *this; }

  // Still needed in some cases.
  // TODO(hemmer): remove.
  // hardcoded to 3 based on legacy usage.
  const Self &operator=(const VectorD<CoeffT, 3> &val) {
    DRACO_DCHECK_EQ(attributes_.size(), 1);  // Expect only ONE attribute.
    AttributeTuple &att = attributes_[0];
    PointAttribute *attribute = std::get<0>(att);
    const uint32_t &offset = std::get<1>(att);
    DRACO_DCHECK_EQ(offset, 0);  // expected to be zero
    attribute->SetAttributeValue(attribute->mapped_index(point_id_),
                                 &val[0] + offset);
    return *this;
  }

  // Additional operator taking std::vector as argument.
  // Scatters one decoded point (|val| holds all attributes' components,
  // concatenated at each attribute's offset) into the target attributes.
  const Self &operator=(const std::vector<CoeffT> &val) {
    for (auto index = 0; index < attributes_.size(); index++) {
      AttributeTuple &att = attributes_[index];
      PointAttribute *attribute = std::get<0>(att);
      const uint32_t &offset = std::get<1>(att);
      const uint32_t &data_size = std::get<3>(att);
      const uint32_t &num_components = std::get<4>(att);
      const uint32_t *data_source = val.data() + offset;
      if (data_size != 4) {  // handle uint16_t, uint8_t
        // selectively copy data bytes
        uint8_t *data_counter = data_;
        for (uint32_t index = 0; index < num_components;
             index += 1, data_counter += data_size) {
          std::memcpy(data_counter, data_source + index, data_size);
        }
        // redirect to copied data
        data_source = reinterpret_cast<uint32_t *>(data_);
      }
      const AttributeValueIndex avi = attribute->mapped_index(point_id_);
      // Silently skip out-of-range indices rather than writing out of bounds.
      if (avi >= static_cast<uint32_t>(attribute->size()))
        return *this;
      attribute->SetAttributeValue(avi, data_source);
    }
    return *this;
  }

 private:
  // preallocated memory for buffering different data sizes. Never reallocated.
  std::vector<uint8_t> memory_;
  uint8_t *data_;
  std::vector<AttributeTuple> attributes_;
  PointIndex point_id_;

  // NO COPY
  PointAttributeVectorOutputIterator(
      const PointAttributeVectorOutputIterator &that) = delete;
  PointAttributeVectorOutputIterator &operator=(
      PointAttributeVectorOutputIterator const &) = delete;
};
KdTreeAttributesDecoder::KdTreeAttributesDecoder() {}
// Decodes the integer (portable) form of all attributes using the dynamic
// kd-tree decoder. For bitstreams older than 2.3 this is a no-op; those
// streams do all the work in DecodeDataNeededByPortableTransforms().
bool KdTreeAttributesDecoder::DecodePortableAttributes(
    DecoderBuffer *in_buffer) {
  if (in_buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 3)) {
    // Old bitstream does everything in the
    // DecodeDataNeededByPortableTransforms() method.
    return true;
  }
  uint8_t compression_level = 0;
  if (!in_buffer->Decode(&compression_level))
    return false;
  const int32_t num_points = GetDecoder()->point_cloud()->num_points();
  // Decode data using the kd tree decoding into integer (portable) attributes.
  // We first need to go over all attributes and create a new portable storage
  // for those attributes that need it (floating point attributes that have to
  // be dequantized after decoding).
  const int num_attributes = GetNumAttributes();
  uint32_t total_dimensionality = 0;  // position is a required dimension
  std::vector<AttributeTuple> atts(num_attributes);
  for (int i = 0; i < GetNumAttributes(); ++i) {
    const int att_id = GetAttributeId(i);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    // All attributes have the same number of values and identity mapping
    // between PointIndex and AttributeValueIndex.
    att->Reset(num_points);
    att->SetIdentityMapping();
    PointAttribute *target_att = nullptr;
    if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
        att->data_type() == DT_UINT8) {
      // We can decode to these attributes directly.
      target_att = att;
    } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
               att->data_type() == DT_INT8) {
      // Prepare storage for data that is used to convert unsigned values back
      // to the signed ones.
      for (int c = 0; c < att->num_components(); ++c) {
        min_signed_values_.push_back(0);
      }
      target_att = att;
    } else if (att->data_type() == DT_FLOAT32) {
      // Create a portable attribute that will hold the decoded data. We will
      // dequantize the decoded data to the final attribute later on.
      const int num_components = att->num_components();
      GeometryAttribute va;
      va.Init(att->attribute_type(), nullptr, num_components, DT_UINT32, false,
              num_components * DataTypeLength(DT_UINT32), 0);
      std::unique_ptr<PointAttribute> port_att(new PointAttribute(va));
      port_att->SetIdentityMapping();
      port_att->Reset(num_points);
      quantized_portable_attributes_.push_back(std::move(port_att));
      target_att = quantized_portable_attributes_.back().get();
    } else {
      // Unsupported type.
      return false;
    }
    // Add attribute to the output iterator used by the core algorithm.
    const DataType data_type = target_att->data_type();
    const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
    const uint32_t num_components = target_att->num_components();
    atts[i] = std::make_tuple(target_att, total_dimensionality, data_type,
                              data_size, num_components);
    total_dimensionality += num_components;
  }
  PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
  // The compression level selects which template specialization of the
  // kd-tree decoder is instantiated (0..6).
  switch (compression_level) {
    case 0: {
      DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 1: {
      DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 2: {
      DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 3: {
      DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 4: {
      DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 5: {
      DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    case 6: {
      DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
      if (!decoder.DecodePoints(in_buffer, out_it))
        return false;
      break;
    }
    default:
      return false;
  }
  return true;
}
// Decodes the data needed to revert the portable transforms: quantization
// parameters for float attributes and per-component minima for signed integer
// attributes. For bitstreams older than 2.3 this method instead performs the
// full kd-tree decoding (old streams did everything in this one step).
bool KdTreeAttributesDecoder::DecodeDataNeededByPortableTransforms(
    DecoderBuffer *in_buffer) {
  if (in_buffer->bitstream_version() >= DRACO_BITSTREAM_VERSION(2, 3)) {
    // Decode quantization data for each attribute that need it.
    // TODO(ostava): This should be moved to AttributeQuantizationTransform.
    std::vector<float> min_value;
    for (int i = 0; i < GetNumAttributes(); ++i) {
      const int att_id = GetAttributeId(i);
      const PointAttribute *const att =
          GetDecoder()->point_cloud()->attribute(att_id);
      if (att->data_type() == DT_FLOAT32) {
        const int num_components = att->num_components();
        min_value.resize(num_components);
        if (!in_buffer->Decode(&min_value[0], sizeof(float) * num_components))
          return false;
        float max_value_dif;
        if (!in_buffer->Decode(&max_value_dif))
          return false;
        uint8_t quantization_bits;
        // Quantization bits above 31 would overflow the 32-bit quantized
        // values.
        if (!in_buffer->Decode(&quantization_bits) || quantization_bits > 31)
          return false;
        AttributeQuantizationTransform transform;
        transform.SetParameters(quantization_bits, min_value.data(),
                                num_components, max_value_dif);
        const int num_transforms =
            static_cast<int>(attribute_quantization_transforms_.size());
        if (!transform.TransferToAttribute(
                quantized_portable_attributes_[num_transforms].get()))
          return false;
        attribute_quantization_transforms_.push_back(transform);
      }
    }
    // Decode transform data for signed integer attributes.
    for (size_t i = 0; i < min_signed_values_.size(); ++i) {
      int32_t val;
      // Check the decode result; a truncated buffer would otherwise leave
      // |val| uninitialized and be silently accepted.
      if (!DecodeVarint(&val, in_buffer))
        return false;
      min_signed_values_[i] = val;
    }
    return true;
  }
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  // Handle old bitstream
  // Figure out the total dimensionality of the point cloud
  const uint32_t attribute_count = GetNumAttributes();
  uint32_t total_dimensionality = 0;  // position is a required dimension
  std::vector<AttributeTuple> atts(attribute_count);
  for (auto attribute_index = 0;
       static_cast<uint32_t>(attribute_index) < attribute_count;
       attribute_index += 1)  // increment the dimensionality as needed...
  {
    const int att_id = GetAttributeId(attribute_index);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    const DataType data_type = att->data_type();
    const uint32_t data_size = (std::max)(0, DataTypeLength(data_type));
    const uint32_t num_components = att->num_components();
    atts[attribute_index] = std::make_tuple(
        att, total_dimensionality, data_type, data_size, num_components);
    // everything is treated as 32bit in the encoder.
    total_dimensionality += num_components;
  }
  const int att_id = GetAttributeId(0);
  PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
  att->SetIdentityMapping();
  // Decode method
  uint8_t method;
  if (!in_buffer->Decode(&method))
    return false;
  if (method == KdTreeAttributesEncodingMethod::kKdTreeQuantizationEncoding) {
    uint8_t compression_level = 0;
    if (!in_buffer->Decode(&compression_level))
      return false;
    uint32_t num_points = 0;
    if (!in_buffer->Decode(&num_points))
      return false;
    att->Reset(num_points);
    FloatPointsTreeDecoder decoder;
    PointAttributeVectorOutputIterator<float> out_it(atts);
    if (!decoder.DecodePointCloud(in_buffer, out_it))
      return false;
  } else if (method == KdTreeAttributesEncodingMethod::kKdTreeIntegerEncoding) {
    uint8_t compression_level = 0;
    if (!in_buffer->Decode(&compression_level))
      return false;
    if (6 < compression_level) {
      LOGE("KdTreeAttributesDecoder: compression level %i not supported.\n",
           compression_level);
      return false;
    }
    uint32_t num_points;
    if (!in_buffer->Decode(&num_points))
      return false;
    for (auto attribute_index = 0;
         static_cast<uint32_t>(attribute_index) < attribute_count;
         attribute_index += 1) {
      const int att_id = GetAttributeId(attribute_index);
      PointAttribute *const attr =
          GetDecoder()->point_cloud()->attribute(att_id);
      attr->Reset(num_points);
      attr->SetIdentityMapping();
    };
    PointAttributeVectorOutputIterator<uint32_t> out_it(atts);
    // Dispatch to the kd-tree decoder specialization for this level (0..6).
    switch (compression_level) {
      case 0: {
        DynamicIntegerPointsKdTreeDecoder<0> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 1: {
        DynamicIntegerPointsKdTreeDecoder<1> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 2: {
        DynamicIntegerPointsKdTreeDecoder<2> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 3: {
        DynamicIntegerPointsKdTreeDecoder<3> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 4: {
        DynamicIntegerPointsKdTreeDecoder<4> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 5: {
        DynamicIntegerPointsKdTreeDecoder<5> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      case 6: {
        DynamicIntegerPointsKdTreeDecoder<6> decoder(total_dimensionality);
        if (!decoder.DecodePoints(in_buffer, out_it))
          return false;
        break;
      }
      default:
        return false;
    }
  } else {
    // Invalid method.
    return false;
  }
  return true;
#else
  return false;
#endif
}
// Shifts the decoded unsigned values of |att| back into the signed range of
// |SignedDataTypeT| by adding the per-component minima decoded earlier,
// starting at offset |num_processed_signed_components| in
// |min_signed_values_|.
template <typename SignedDataTypeT>
bool KdTreeAttributesDecoder::TransformAttributeBackToSignedType(
    PointAttribute *att, int num_processed_signed_components) {
  typedef typename std::make_unsigned<SignedDataTypeT>::type UnsignedType;
  const int num_components = att->num_components();
  std::vector<UnsignedType> decoded(num_components);
  std::vector<SignedDataTypeT> converted(num_components);
  const uint32_t num_values = static_cast<uint32_t>(att->size());
  for (AttributeValueIndex avi(0); avi < num_values; ++avi) {
    att->GetValue(avi, &decoded[0]);
    for (int c = 0; c < num_components; ++c) {
      // Compute in int32_t so the smaller unsigned types don't overflow
      // before the cast back to the signed type.
      converted[c] = static_cast<SignedDataTypeT>(
          static_cast<int32_t>(decoded[c]) +
          min_signed_values_[num_processed_signed_components + c]);
    }
    att->SetAttributeValue(avi, &converted[0]);
  }
  return true;
}
// Converts the decoded portable attributes back to their original format:
// integer attributes that were shifted to unsigned during encoding are moved
// back to their signed range, and quantized float attributes are dequantized.
// Fixes: removed two unused local vectors (leftover from before the
// signed-type conversion was factored into TransformAttributeBackToSignedType)
// and renamed the inner value loop index which shadowed the outer |i|.
bool KdTreeAttributesDecoder::TransformAttributesToOriginalFormat() {
  if (quantized_portable_attributes_.empty() && min_signed_values_.empty()) {
    return true;  // No attribute needed a transform during decoding.
  }
  int num_processed_quantized_attributes = 0;
  int num_processed_signed_components = 0;
  // Dequantize attributes that needed it.
  for (int i = 0; i < GetNumAttributes(); ++i) {
    const int att_id = GetAttributeId(i);
    PointAttribute *const att = GetDecoder()->point_cloud()->attribute(att_id);
    if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
        att->data_type() == DT_INT8) {
      // Values are stored as unsigned in the attribute, make them signed again.
      if (att->data_type() == DT_INT32) {
        if (!TransformAttributeBackToSignedType<int32_t>(
                att, num_processed_signed_components))
          return false;
      } else if (att->data_type() == DT_INT16) {
        if (!TransformAttributeBackToSignedType<int16_t>(
                att, num_processed_signed_components))
          return false;
      } else if (att->data_type() == DT_INT8) {
        if (!TransformAttributeBackToSignedType<int8_t>(
                att, num_processed_signed_components))
          return false;
      }
      num_processed_signed_components += att->num_components();
    } else if (att->data_type() == DT_FLOAT32) {
      // TODO(ostava): This code should be probably moved out to attribute
      // transform and shared with the SequentialQuantizationAttributeDecoder.
      const PointAttribute *const src_att =
          quantized_portable_attributes_[num_processed_quantized_attributes]
              .get();
      const AttributeQuantizationTransform &transform =
          attribute_quantization_transforms_
              [num_processed_quantized_attributes];
      num_processed_quantized_attributes++;
      if (GetDecoder()->options()->GetAttributeBool(
              att->attribute_type(), "skip_attribute_transform", false)) {
        // Attribute transform should not be performed. In this case, we replace
        // the output geometry attribute with the portable attribute.
        // TODO(ostava): We can potentially avoid this copy by introducing a new
        // mechanism that would allow to use the final attributes as portable
        // attributes for predictors that may need them.
        att->CopyFrom(*src_att);
        continue;
      }
      // Convert all quantized values back to floats.
      const int32_t max_quantized_value =
          (1u << static_cast<uint32_t>(transform.quantization_bits())) - 1;
      const int num_components = att->num_components();
      const int entry_size = sizeof(float) * num_components;
      const std::unique_ptr<float[]> att_val(new float[num_components]);
      int quant_val_id = 0;
      int out_byte_pos = 0;
      Dequantizer dequantizer;
      if (!dequantizer.Init(transform.range(), max_quantized_value))
        return false;
      const uint32_t *const portable_attribute_data =
          reinterpret_cast<const uint32_t *>(
              src_att->GetAddress(AttributeValueIndex(0)));
      for (uint32_t value_index = 0; value_index < src_att->size();
           ++value_index) {
        for (int c = 0; c < num_components; ++c) {
          float value = dequantizer.DequantizeFloat(
              portable_attribute_data[quant_val_id++]);
          value = value + transform.min_value(c);
          att_val[c] = value;
        }
        // Store the floating point value into the attribute buffer.
        att->buffer()->Write(out_byte_pos, att_val.get(), entry_size);
        out_byte_pos += entry_size;
      }
    }
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_decoder.h"
namespace draco {
// Decodes attributes encoded with the KdTreeAttributesEncoder.
// Mirrors the encoder: quantization parameters and per-component minimum
// signed values are read first, and after the integer kd-tree data has been
// decoded the attributes are transformed back to their original formats.
class KdTreeAttributesDecoder : public AttributesDecoder {
 public:
  KdTreeAttributesDecoder();
 protected:
  // Decodes the kd-tree compressed data into portable (integer) attributes.
  bool DecodePortableAttributes(DecoderBuffer *in_buffer) override;
  // Reads the quantization transforms and min signed values stored by the
  // encoder's EncodeDataNeededByPortableTransforms().
  bool DecodeDataNeededByPortableTransforms(DecoderBuffer *in_buffer) override;
  // Converts decoded portable attributes back to their original data types
  // (dequantization for floats, re-shifting for signed integers).
  bool TransformAttributesToOriginalFormat() override;
 private:
  // Shifts unsigned decoded values back into the signed range using the
  // stored per-component minimum values.
  template <typename SignedDataTypeT>
  bool TransformAttributeBackToSignedType(PointAttribute *att,
                                          int num_processed_signed_components);
  // Quantization transform of every quantized (float) attribute.
  std::vector<AttributeQuantizationTransform>
      attribute_quantization_transforms_;
  // Minimum value of each component of the signed integer attributes.
  std::vector<int32_t> min_signed_values_;
  // Portable (quantized) versions of the float attributes.
  std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_DECODER_H_

View File

@ -0,0 +1,289 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/kd_tree_attributes_encoder.h"
#include "draco/compression/attributes/kd_tree_attributes_shared.h"
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/dynamic_integer_points_kd_tree_encoder.h"
#include "draco/compression/point_cloud/algorithms/float_points_tree_encoder.h"
#include "draco/compression/point_cloud/point_cloud_encoder.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// Creates an encoder with no attribute assigned yet.
KdTreeAttributesEncoder::KdTreeAttributesEncoder() : num_components_(0) {}
// Creates an encoder responsible for the attribute with id |att_id|.
KdTreeAttributesEncoder::KdTreeAttributesEncoder(int att_id)
    : AttributesEncoder(att_id), num_components_(0) {}
// Prepares all attributes for the integer kd-tree coder: float attributes are
// quantized into portable integer attributes, and per-component minimum
// values are collected for signed integer attributes (later used to shift
// them into the unsigned range). Returns false when a float attribute has no
// valid "quantization_bits" option set.
bool KdTreeAttributesEncoder::TransformAttributesToPortableFormat() {
  // Convert any of the input attributes into a format that can be processed by
  // the kd tree encoder (quantization of floating attributes for now).
  const size_t num_points = encoder()->point_cloud()->num_points();
  int num_components = 0;
  // First pass: sum the component counts of all attributes; this becomes the
  // dimensionality of the kd-tree points.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    num_components += att->num_components();
  }
  num_components_ = num_components;
  // Go over all attributes and quantize them if needed.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    if (att->data_type() == DT_FLOAT32) {
      // Quantization path.
      AttributeQuantizationTransform attribute_quantization_transform;
      const int quantization_bits = encoder()->options()->GetAttributeInt(
          att_id, "quantization_bits", -1);
      if (quantization_bits < 1)
        return false;
      if (encoder()->options()->IsAttributeOptionSet(att_id,
                                                     "quantization_origin") &&
          encoder()->options()->IsAttributeOptionSet(att_id,
                                                     "quantization_range")) {
        // Quantization settings are explicitly specified in the provided
        // options.
        std::vector<float> quantization_origin(att->num_components());
        encoder()->options()->GetAttributeVector(att_id, "quantization_origin",
                                                 att->num_components(),
                                                 &quantization_origin[0]);
        const float range = encoder()->options()->GetAttributeFloat(
            att_id, "quantization_range", 1.f);
        attribute_quantization_transform.SetParameters(
            quantization_bits, quantization_origin.data(),
            att->num_components(), range);
      } else {
        // Compute quantization settings from the attribute values.
        attribute_quantization_transform.ComputeParameters(*att,
                                                           quantization_bits);
      }
      attribute_quantization_transforms_.push_back(
          attribute_quantization_transform);
      // Store the quantized attribute in an array that will be used when we do
      // the actual encoding of the data.
      quantized_portable_attributes_.push_back(
          attribute_quantization_transform.GeneratePortableAttribute(
              *att, static_cast<int>(num_points)));
    } else if (att->data_type() == DT_INT32 || att->data_type() == DT_INT16 ||
               att->data_type() == DT_INT8) {
      // For signed types, find the minimum value for each component. These
      // values are going to be used to transform the attribute values to
      // unsigned integers that can be processed by the core kd tree algorithm.
      std::vector<int32_t> min_value(att->num_components(),
                                     std::numeric_limits<int32_t>::max());
      std::vector<int32_t> act_value(att->num_components());
      for (AttributeValueIndex avi(0); avi < static_cast<uint32_t>(att->size());
           ++avi) {
        att->ConvertValue<int32_t>(avi, &act_value[0]);
        for (int c = 0; c < att->num_components(); ++c) {
          if (min_value[c] > act_value[c])
            min_value[c] = act_value[c];
        }
      }
      // Append this attribute's per-component minimums to the flat list that
      // is consumed in attribute order during encoding.
      for (int c = 0; c < att->num_components(); ++c) {
        min_signed_values_.push_back(min_value[c]);
      }
    }
    // Unsigned integer attributes need no transform and are used directly.
  }
  return true;
}
// Stores the data the decoder needs to undo the portable transforms applied
// in TransformAttributesToPortableFormat():
//  - the quantization parameters of every quantized (float) attribute, and
//  - the per-component minimum values of every signed integer attribute.
// Always returns true.
bool KdTreeAttributesEncoder::EncodeDataNeededByPortableTransforms(
    EncoderBuffer *out_buffer) {
  // Store quantization settings for all attributes that need it.
  // Range-based loops avoid the previous signed/unsigned comparison of an
  // int counter against the containers' size_type.
  for (AttributeQuantizationTransform &transform :
       attribute_quantization_transforms_) {
    transform.EncodeParameters(out_buffer);
  }
  // Encode data needed for transforming signed integers to unsigned ones.
  for (const int32_t min_value : min_signed_values_) {
    EncodeVarint<int32_t>(min_value, out_buffer);
  }
  return true;
}
// Copies all portable attribute values into a single PointDVector (one row of
// |num_components_| unsigned integers per point) and compresses it with the
// dynamic integer kd-tree encoder. Returns false for unsupported attribute
// data types or an out-of-range compression level.
bool KdTreeAttributesEncoder::EncodePortableAttributes(
    EncoderBuffer *out_buffer) {
  // Encode the data using the kd tree encoder algorithm. The data is first
  // copied to a PointDVector that provides all the API expected by the core
  // encoding algorithm.
  // We limit the maximum value of compression_level to 6 as we don't currently
  // have viable algorithms for higher compression levels.
  uint8_t compression_level =
      std::min(10 - encoder()->options()->GetSpeed(), 6);
  DRACO_DCHECK_LE(compression_level, 6);
  if (compression_level == 6 && num_components_ > 15) {
    // Compression level 6 encodes the axis selection with only 4 bits, which
    // cannot address more than 16 dimensions; fall back to level 5.
    compression_level = 5;
  }
  out_buffer->Encode(compression_level);
  // Init PointDVector. The number of dimensions is equal to the total number
  // of dimensions across all attributes.
  const int num_points = encoder()->point_cloud()->num_points();
  PointDVector<uint32_t> point_vector(num_points, num_components_);
  int num_processed_components = 0;
  int num_processed_quantized_attributes = 0;
  int num_processed_signed_components = 0;
  // Copy data to the point vector.
  for (uint32_t i = 0; i < num_attributes(); ++i) {
    const int att_id = GetAttributeId(i);
    const PointAttribute *const att =
        encoder()->point_cloud()->attribute(att_id);
    const PointAttribute *source_att = nullptr;
    if (att->data_type() == DT_UINT32 || att->data_type() == DT_UINT16 ||
        att->data_type() == DT_UINT8 || att->data_type() == DT_INT32 ||
        att->data_type() == DT_INT16 || att->data_type() == DT_INT8) {
      // Use the original attribute.
      source_att = att;
    } else if (att->data_type() == DT_FLOAT32) {
      // Use the portable (quantized) attribute instead.
      source_att =
          quantized_portable_attributes_[num_processed_quantized_attributes]
              .get();
      num_processed_quantized_attributes++;
    } else {
      // Unsupported data type.
      return false;
    }
    if (source_att == nullptr)
      return false;
    // Copy source_att to the vector.
    if (source_att->data_type() == DT_UINT32) {
      // If the data type is the same as the one used by the point vector, we
      // can directly copy individual elements.
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        const uint8_t *const att_value_address = source_att->GetAddress(avi);
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   att_value_address);
      }
    } else if (source_att->data_type() == DT_INT32 ||
               source_att->data_type() == DT_INT16 ||
               source_att->data_type() == DT_INT8) {
      // Signed values need to be converted to unsigned before they are stored
      // in the point vector.
      std::vector<int32_t> signed_point(source_att->num_components());
      std::vector<uint32_t> unsigned_point(source_att->num_components());
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        source_att->ConvertValue<int32_t>(avi, &signed_point[0]);
        // Shift by the per-component minimum gathered in
        // TransformAttributesToPortableFormat() so all values become >= 0.
        for (int c = 0; c < source_att->num_components(); ++c) {
          unsigned_point[c] =
              signed_point[c] -
              min_signed_values_[num_processed_signed_components + c];
        }
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   &unsigned_point[0]);
      }
      num_processed_signed_components += source_att->num_components();
    } else {
      // If the data type of the attribute is different, we have to convert the
      // value before we put it to the point vector.
      std::vector<uint32_t> point(source_att->num_components());
      for (PointIndex pi(0); pi < num_points; ++pi) {
        const AttributeValueIndex avi = source_att->mapped_index(pi);
        source_att->ConvertValue<uint32_t>(avi, &point[0]);
        point_vector.CopyAttribute(source_att->num_components(),
                                   num_processed_components, pi.value(),
                                   point.data());
      }
    }
    num_processed_components += source_att->num_components();
  }
  // Compute the maximum bit length needed for the kd tree encoding.
  int num_bits = 0;
  const uint32_t *data = point_vector[0];
  for (int i = 0; i < num_points * num_components_; ++i) {
    if (data[i] > 0) {
      const int msb = MostSignificantBit(data[i]) + 1;
      if (msb > num_bits) {
        num_bits = msb;
      }
    }
  }
  // Dispatch to the kd-tree encoder specialized for the selected compression
  // level (the level is a template parameter, hence the explicit cases).
  switch (compression_level) {
    case 6: {
      DynamicIntegerPointsKdTreeEncoder<6> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 5: {
      DynamicIntegerPointsKdTreeEncoder<5> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 4: {
      DynamicIntegerPointsKdTreeEncoder<4> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 3: {
      DynamicIntegerPointsKdTreeEncoder<3> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 2: {
      DynamicIntegerPointsKdTreeEncoder<2> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 1: {
      DynamicIntegerPointsKdTreeEncoder<1> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    case 0: {
      DynamicIntegerPointsKdTreeEncoder<0> points_encoder(num_components_);
      if (!points_encoder.EncodePoints(point_vector.begin(), point_vector.end(),
                                       num_bits, out_buffer))
        return false;
      break;
    }
    // Compression level and/or encoding speed seem wrong.
    default:
      return false;
  }
  return true;
}
} // namespace draco

View File

@ -0,0 +1,51 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_
#include "draco/attributes/attribute_quantization_transform.h"
#include "draco/compression/attributes/attributes_encoder.h"
#include "draco/compression/config/compression_shared.h"
namespace draco {
// Encodes all attributes of a given PointCloud using one of the available
// Kd-tree compression methods.
// See compression/point_cloud/point_cloud_kd_tree_encoder.h for more details.
class KdTreeAttributesEncoder : public AttributesEncoder {
 public:
  KdTreeAttributesEncoder();
  explicit KdTreeAttributesEncoder(int att_id);
  // Identifies this encoder type in the encoded bit-stream.
  uint8_t GetUniqueId() const override { return KD_TREE_ATTRIBUTE_ENCODER; }
 protected:
  // Quantizes float attributes and collects min values of signed integer
  // attributes so that all data can be fed to the integer kd-tree coder.
  bool TransformAttributesToPortableFormat() override;
  // Compresses the portable (integer) point data with the kd-tree encoder.
  bool EncodePortableAttributes(EncoderBuffer *out_buffer) override;
  // Stores quantization parameters and min signed values so the decoder can
  // undo the portable transforms.
  bool EncodeDataNeededByPortableTransforms(EncoderBuffer *out_buffer) override;
 private:
  // Quantization transform of every quantized (float) attribute.
  std::vector<AttributeQuantizationTransform>
      attribute_quantization_transforms_;
  // Min signed values are used to transform signed integers into unsigned ones
  // (by subtracting the min signed value for each component).
  std::vector<int32_t> min_signed_values_;
  // Quantized versions of all float attributes, generated by
  // TransformAttributesToPortableFormat().
  std::vector<std::unique_ptr<PointAttribute>> quantized_portable_attributes_;
  // Total number of components (dimensions) across all encoded attributes.
  int num_components_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_CLOUD_KD_TREE_ATTRIBUTES_ENCODER_H_

View File

@ -0,0 +1,28 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_
namespace draco {
// Defines the types of kD-tree compression methods.
enum KdTreeAttributesEncodingMethod {
  // Attribute values are quantized before being passed to the kd-tree coder.
  kKdTreeQuantizationEncoding = 0,
  // Integer attribute values are passed to the kd-tree coder directly.
  kKdTreeIntegerEncoding
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_KD_TREE_ATTRIBUTES_SHARED_H_

View File

@ -0,0 +1,50 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_
#include "draco/compression/attributes/points_sequencer.h"
namespace draco {
// A simple sequencer that generates a linear sequence [0, num_points - 1].
// I.e., the order of the points is preserved for the input data.
class LinearSequencer : public PointsSequencer {
public:
explicit LinearSequencer(int32_t num_points) : num_points_(num_points) {}
bool UpdatePointToAttributeIndexMapping(PointAttribute *attribute) override {
attribute->SetIdentityMapping();
return true;
}
protected:
bool GenerateSequenceInternal() override {
if (num_points_ < 0)
return false;
out_point_ids()->resize(num_points_);
for (int i = 0; i < num_points_; ++i) {
out_point_ids()->at(i) = PointIndex(i);
}
return true;
}
private:
int32_t num_points_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_LINEAR_SEQUENCER_H_

View File

@ -0,0 +1,58 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_
#include <inttypes.h>
#include <vector>
#include "draco/attributes/geometry_indices.h"
namespace draco {
// Data used for encoding and decoding of mesh attributes.
struct MeshAttributeIndicesEncodingData {
  MeshAttributeIndicesEncodingData() : num_values(0) {}
  // Pre-sizes the mapping arrays for a mesh with |num_vertices| vertices.
  void Init(int num_vertices) {
    vertex_to_encoded_attribute_value_index_map.resize(num_vertices);
    // We expect to store one value for each vertex.
    encoded_attribute_value_index_to_corner_map.reserve(num_vertices);
  }
  // Array for storing the corner ids in the order their associated attribute
  // entries were encoded/decoded. For every encoded attribute value entry we
  // store exactly one corner. I.e., this is the mapping between an encoded
  // attribute entry ids and corner ids. This map is needed for example by
  // prediction schemes. Note that not all corners are included in this map,
  // e.g., if multiple corners share the same attribute value, only one of these
  // corners will be usually included.
  std::vector<CornerIndex> encoded_attribute_value_index_to_corner_map;
  // Map for storing encoding order of attribute entries for each vertex.
  // i.e. Mapping between vertices and their corresponding attribute entry ids
  // that are going to be used by the decoder.
  // -1 if an attribute entry hasn't been encoded/decoded yet.
  std::vector<int32_t> vertex_to_encoded_attribute_value_index_map;
  // Total number of encoded/decoded attribute entries.
  int num_values;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_MESH_ATTRIBUTE_INDICES_ENCODING_DATA_H_

View File

@ -0,0 +1,335 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Utilities for converting unit vectors to octahedral coordinates and back.
// For more details about octahedral coordinates, see for example Cigolle
// et al.'14 “A Survey of Efficient Representations for Independent Unit
// Vectors”.
//
// In short this is motivated by an octahedron inscribed into a sphere. The
// direction of the normal vector can be defined by a point on the octahedron.
// On the right hemisphere (x > 0) this point is projected onto the x = 0 plane,
// that is, the right side of the octahedron forms a diamond like shape. The
// left side of the octahedron is also projected onto the x = 0 plane, however,
// in this case we flap the triangles of the diamond outward. Afterwards we
// shift the resulting square such that all values are positive.
//
// Important values in this file:
// * q: number of quantization bits
// * max_quantized_value: the max value representable with q bits (odd)
// * max_value: max value of the diamond = max_quantized_value - 1 (even)
// * center_value: center of the diamond after shift
//
// Note that the parameter space is somewhat periodic, e.g. (0, 0) ==
// (max_value, max_value), which is also why the diamond is one smaller than the
// maximal representable value in order to have an odd range of values.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#define DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_
#include <inttypes.h>
#include <algorithm>
#include <cmath>
#include "draco/core/macros.h"
namespace draco {
// Stateful helper that converts between unit vectors and (quantized)
// octahedral coordinates. It must be configured with SetQuantizationBits()
// before use; see the file-level comment above for the meaning of
// max_quantized_value / max_value / center_value.
class OctahedronToolBox {
 public:
  OctahedronToolBox()
      : quantization_bits_(-1),
        max_quantized_value_(-1),
        max_value_(-1),
        center_value_(-1) {}
  // Sets the number of quantization bits |q| (valid range [2, 30]) and derives
  // the diamond parameters from it. Returns false for out-of-range |q|.
  bool SetQuantizationBits(int32_t q) {
    if (q < 2 || q > 30)
      return false;
    quantization_bits_ = q;
    max_quantized_value_ = (1 << quantization_bits_) - 1;
    max_value_ = max_quantized_value_ - 1;
    center_value_ = max_value_ / 2;
    return true;
  }
  // True once SetQuantizationBits() has succeeded.
  bool IsInitialized() const { return quantization_bits_ != -1; }
  // Convert all edge points in the top left and bottom right quadrants to
  // their corresponding position in the bottom left and top right quadrants.
  // Convert all corner edge points to the top right corner.
  inline void CanonicalizeOctahedralCoords(int32_t s, int32_t t, int32_t *out_s,
                                           int32_t *out_t) const {
    if ((s == 0 && t == 0) || (s == 0 && t == max_value_) ||
        (s == max_value_ && t == 0)) {
      s = max_value_;
      t = max_value_;
    } else if (s == 0 && t > center_value_) {
      t = center_value_ - (t - center_value_);
    } else if (s == max_value_ && t < center_value_) {
      t = center_value_ + (center_value_ - t);
    } else if (t == max_value_ && s < center_value_) {
      s = center_value_ + (center_value_ - s);
    } else if (t == 0 && s > center_value_) {
      s = center_value_ - (s - center_value_);
    }
    *out_s = s;
    *out_t = t;
  }
  // Converts an integer vector to octahedral coordinates.
  // Precondition: |int_vec| abs sum must equal center value.
  inline void IntegerVectorToQuantizedOctahedralCoords(const int32_t *int_vec,
                                                       int32_t *out_s,
                                                       int32_t *out_t) const {
    DRACO_DCHECK_EQ(
        std::abs(int_vec[0]) + std::abs(int_vec[1]) + std::abs(int_vec[2]),
        center_value_);
    int32_t s, t;
    if (int_vec[0] >= 0) {
      // Right hemisphere.
      s = (int_vec[1] + center_value_);
      t = (int_vec[2] + center_value_);
    } else {
      // Left hemisphere.
      if (int_vec[1] < 0) {
        s = std::abs(int_vec[2]);
      } else {
        s = (max_value_ - std::abs(int_vec[2]));
      }
      if (int_vec[2] < 0) {
        t = std::abs(int_vec[1]);
      } else {
        t = (max_value_ - std::abs(int_vec[1]));
      }
    }
    CanonicalizeOctahedralCoords(s, t, out_s, out_t);
  }
  // Projects |vector| onto the octahedron surface, rounds it to an integer
  // vector whose abs sum equals center_value_, and outputs the quantized
  // octahedral coordinates.
  template <class T>
  void FloatVectorToQuantizedOctahedralCoords(const T *vector, int32_t *out_s,
                                              int32_t *out_t) const {
    const double abs_sum = std::abs(static_cast<double>(vector[0])) +
                           std::abs(static_cast<double>(vector[1])) +
                           std::abs(static_cast<double>(vector[2]));
    // Adjust values such that abs sum equals 1.
    double scaled_vector[3];
    if (abs_sum > 1e-6) {
      // Scale needed to project the vector to the surface of an octahedron.
      const double scale = 1.0 / abs_sum;
      scaled_vector[0] = vector[0] * scale;
      scaled_vector[1] = vector[1] * scale;
      scaled_vector[2] = vector[2] * scale;
    } else {
      // Degenerate (near-zero) input; fall back to a fixed direction.
      scaled_vector[0] = 1.0;
      scaled_vector[1] = 0;
      scaled_vector[2] = 0;
    }
    // Scale vector such that the sum equals the center value.
    int32_t int_vec[3];
    int_vec[0] =
        static_cast<int32_t>(floor(scaled_vector[0] * center_value_ + 0.5));
    int_vec[1] =
        static_cast<int32_t>(floor(scaled_vector[1] * center_value_ + 0.5));
    // Make sure the sum is exactly the center value.
    int_vec[2] = center_value_ - std::abs(int_vec[0]) - std::abs(int_vec[1]);
    if (int_vec[2] < 0) {
      // If the sum of first two coordinates is too large, we need to decrease
      // the length of one of the coordinates.
      if (int_vec[1] > 0) {
        int_vec[1] += int_vec[2];
      } else {
        int_vec[1] -= int_vec[2];
      }
      int_vec[2] = 0;
    }
    // Take care of the sign.
    if (scaled_vector[2] < 0)
      int_vec[2] *= -1;
    IntegerVectorToQuantizedOctahedralCoords(int_vec, out_s, out_t);
  }
  // Normalize |vec| such that its abs sum is equal to the center value;
  template <class T>
  void CanonicalizeIntegerVector(T *vec) const {
    static_assert(std::is_integral<T>::value, "T must be an integral type.");
    static_assert(std::is_signed<T>::value, "T must be a signed type.");
    const int64_t abs_sum = static_cast<int64_t>(std::abs(vec[0])) +
                            static_cast<int64_t>(std::abs(vec[1])) +
                            static_cast<int64_t>(std::abs(vec[2]));
    if (abs_sum == 0) {
      vec[0] = center_value_;  // vec[1] == v[2] == 0
    } else {
      vec[0] =
          (static_cast<int64_t>(vec[0]) * static_cast<int64_t>(center_value_)) /
          abs_sum;
      vec[1] =
          (static_cast<int64_t>(vec[1]) * static_cast<int64_t>(center_value_)) /
          abs_sum;
      // The last component absorbs the rounding error so the abs sum is
      // exactly center_value_.
      if (vec[2] >= 0) {
        vec[2] = center_value_ - std::abs(vec[0]) - std::abs(vec[1]);
      } else {
        vec[2] = -(center_value_ - std::abs(vec[0]) - std::abs(vec[1]));
      }
    }
  }
  // Converts octahedral coordinates |in_s|, |in_t| in [0, 1] back to a
  // normalized unit vector (or the zero vector for degenerate input).
  // NOTE(review): the "Octaherdal" spelling is a typo, but it is part of the
  // public API and therefore kept.
  template <typename T>
  void OctaherdalCoordsToUnitVector(T in_s, T in_t, T *out_vector) const {
    DRACO_DCHECK_GE(in_s, 0);
    DRACO_DCHECK_GE(in_t, 0);
    DRACO_DCHECK_LE(in_s, 1);
    DRACO_DCHECK_LE(in_t, 1);
    T s = in_s;
    T t = in_t;
    T spt = s + t;
    T smt = s - t;
    T x_sign = 1.0;
    if (spt >= 0.5 && spt <= 1.5 && smt >= -0.5 && smt <= 0.5) {
      // Right hemisphere. Don't do anything.
    } else {
      // Left hemisphere.
      x_sign = -1.0;
      if (spt <= 0.5) {
        s = 0.5 - in_t;
        t = 0.5 - in_s;
      } else if (spt >= 1.5) {
        s = 1.5 - in_t;
        t = 1.5 - in_s;
      } else if (smt <= -0.5) {
        s = in_t - 0.5;
        t = in_s + 0.5;
      } else {
        s = in_t + 0.5;
        t = in_s - 0.5;
      }
      spt = s + t;
      smt = s - t;
    }
    const T y = 2.0 * s - 1.0;
    const T z = 2.0 * t - 1.0;
    const T x = std::min(std::min(2.0 * spt - 1.0, 3.0 - 2.0 * spt),
                         std::min(2.0 * smt + 1.0, 1.0 - 2.0 * smt)) *
                x_sign;
    // Normalize the computed vector.
    const T normSquared = x * x + y * y + z * z;
    if (normSquared < 1e-6) {
      out_vector[0] = 0;
      out_vector[1] = 0;
      out_vector[2] = 0;
    } else {
      const T d = 1.0 / std::sqrt(normSquared);
      out_vector[0] = x * d;
      out_vector[1] = y * d;
      out_vector[2] = z * d;
    }
  }
  // Converts quantized octahedral coordinates to a unit vector by first
  // rescaling them into [0, 1].
  template <typename T>
  void QuantizedOctaherdalCoordsToUnitVector(int32_t in_s, int32_t in_t,
                                             T *out_vector) const {
    T scale = 1.0 / static_cast<T>(max_value_);
    OctaherdalCoordsToUnitVector(in_s * scale, in_t * scale, out_vector);
  }
  // |s| and |t| are expected to be signed values.
  // Returns true when (s, t) lies inside (or on) the centered diamond.
  inline bool IsInDiamond(const int32_t &s, const int32_t &t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(s, center_value_);
    DRACO_DCHECK_LE(t, center_value_);
    DRACO_DCHECK_GE(s, -center_value_);
    DRACO_DCHECK_GE(t, -center_value_);
    return std::abs(s) + std::abs(t) <= center_value_;
  }
  // NOTE(review): appears to mirror (s, t) across the diamond edge of its
  // quadrant (see IsInDiamond for the diamond definition) — confirm against
  // the corresponding decoder usage.
  void InvertDiamond(int32_t *s, int32_t *t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(*s, center_value_);
    DRACO_DCHECK_LE(*t, center_value_);
    DRACO_DCHECK_GE(*s, -center_value_);
    DRACO_DCHECK_GE(*t, -center_value_);
    int32_t sign_s = 0;
    int32_t sign_t = 0;
    if (*s >= 0 && *t >= 0) {
      sign_s = 1;
      sign_t = 1;
    } else if (*s <= 0 && *t <= 0) {
      sign_s = -1;
      sign_t = -1;
    } else {
      sign_s = (*s > 0) ? 1 : -1;
      sign_t = (*t > 0) ? 1 : -1;
    }
    const int32_t corner_point_s = sign_s * center_value_;
    const int32_t corner_point_t = sign_t * center_value_;
    *s = 2 * *s - corner_point_s;
    *t = 2 * *t - corner_point_t;
    if (sign_s * sign_t >= 0) {
      int32_t temp = *s;
      *s = -*t;
      *t = -temp;
    } else {
      std::swap(*s, *t);
    }
    *s = (*s + corner_point_s) / 2;
    *t = (*t + corner_point_t) / 2;
  }
  // Negates (s, t) and re-applies InvertDiamond().
  void InvertDirection(int32_t *s, int32_t *t) const {
    // Expect center already at origin.
    DRACO_DCHECK_LE(*s, center_value_);
    DRACO_DCHECK_LE(*t, center_value_);
    DRACO_DCHECK_GE(*s, -center_value_);
    DRACO_DCHECK_GE(*t, -center_value_);
    *s *= -1;
    *t *= -1;
    this->InvertDiamond(s, t);
  }
  // For correction values.
  // Wraps |x| into [-center_value, center_value], exploiting the periodicity
  // of the parameter space (see file-level comment).
  int32_t ModMax(int32_t x) const {
    if (x > this->center_value())
      return x - this->max_quantized_value();
    if (x < -this->center_value())
      return x + this->max_quantized_value();
    return x;
  }
  // For correction values.
  // Maps a possibly-negative correction value into the non-negative range by
  // adding the (odd) modulus.
  int32_t MakePositive(int32_t x) const {
    DRACO_DCHECK_LE(x, this->center_value() * 2);
    if (x < 0)
      return x + this->max_quantized_value();
    return x;
  }
  int32_t quantization_bits() const { return quantization_bits_; }
  int32_t max_quantized_value() const { return max_quantized_value_; }
  int32_t max_value() const { return max_value_; }
  int32_t center_value() const { return center_value_; }
 private:
  int32_t quantization_bits_;    // q; -1 until SetQuantizationBits() succeeds.
  int32_t max_quantized_value_;  // (1 << q) - 1 (odd).
  int32_t max_value_;            // max_quantized_value_ - 1 (even).
  int32_t center_value_;         // max_value_ / 2.
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_NORMAL_COMPRESSION_UTILS_H_

View File

@ -0,0 +1,275 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_
#include <cstring>
#include <memory>
#include <vector>
#include "draco/core/macros.h"
namespace draco {
// The main class of this file is PointDVector providing an interface similar to
// std::vector<PointD> for arbitrary number of dimensions (without a template
// argument). PointDVectorIterator is a random access iterator, which allows for
// compatibility with existing algorithms. PseudoPointD provides for a view on
// the individual items in a contiguous block of memory, which is compatible
// with the swap function and is returned by a dereference of
// PointDVectorIterator. Swap functions provide for compatibility/specialization
// that allows these classes to work with currently utilized STL functions.
// Value-like view over |dimension| consecutive components starting at |mem|.
// Supplies the swap support required by random-access iteration (see the swap
// overloads below). It seems problematic to bring this inside PointDVector
// due to templating.
template <typename internal_t>
class PseudoPointD {
 public:
  PseudoPointD(internal_t *mem, internal_t dimension)
      : mem_(mem), dimension_(dimension) {}
  // The copy aliases the same underlying memory as |other|.
  PseudoPointD(const PseudoPointD &other)
      : mem_(other.mem_), dimension_(other.dimension_) {}

  // Swaps the referenced memory contents element-wise; the views themselves
  // keep pointing at their original locations.
  void swap(PseudoPointD &other) noexcept {
    for (internal_t i = 0; i < dimension_; ++i) {
      std::swap(mem_[i], other.mem_[i]);
    }
  }

  const internal_t &operator[](const size_t &n) const {
    DRACO_DCHECK_LT(n, dimension_);
    return mem_[n];
  }
  internal_t &operator[](const size_t &n) {
    DRACO_DCHECK_LT(n, dimension_);
    return mem_[n];
  }

  // Two points compare equal when all referenced components match.
  bool operator==(const PseudoPointD &other) const {
    for (internal_t i = 0; i < dimension_; ++i) {
      if (mem_[i] != other.mem_[i]) {
        return false;
      }
    }
    return true;
  }
  bool operator!=(const PseudoPointD &other) const {
    return !(*this == other);
  }

 private:
  internal_t *const mem_;
  const internal_t dimension_;
};
// It seems problematic to bring this inside PointDVector due to templating.
// swap() overload for rvalue PseudoPointD views — the temporaries produced by
// dereferencing a PointDVectorIterator — needed because std::iter_swap is
// called inside of std::partition. Swaps the underlying point data.
// (Also drops the stray semicolons that previously followed each function
// body — they formed redundant empty declarations.)
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &&a,
          draco::PseudoPointD<internal_t> &&b) noexcept {
  a.swap(b);
}
// swap() overload for lvalue PseudoPointD views; swaps the underlying data.
template <typename internal_t>
void swap(draco::PseudoPointD<internal_t> &a,
          draco::PseudoPointD<internal_t> &b) noexcept {
  a.swap(b);
}
// Flat, contiguous storage for |n_items| points of |dimensionality|
// components each (type internal_t). The buffer is allocated once at
// construction and never resized. Exposes a random-access iterator whose
// operator* yields a PseudoPointD proxy so that algorithms such as
// std::partition (via std::iter_swap) can rearrange the packed data.
template <typename internal_t>
class PointDVector {
 public:
  // Allocates n_items * dimensionality components up front.
  PointDVector(const uint32_t n_items, const uint32_t dimensionality)
      : n_items_(n_items),
        dimensionality_(dimensionality),
        item_size_bytes_(dimensionality * sizeof(internal_t)),
        data_(n_items * dimensionality),
        data0_(data_.data()) {}
  // random access iterator
  // NOTE(review): std::iterator is deprecated since C++17 — consider
  // declaring the iterator member typedefs directly.
  class PointDVectorIterator
      : public std::iterator<std::random_access_iterator_tag, size_t, size_t> {
    friend class PointDVector;

   public:
    // std::iter_swap is called inside of std::partition and needs this
    // specialized support
    PseudoPointD<internal_t> operator*() const {
      return PseudoPointD<internal_t>(vec_->data0_ + item_ * dimensionality_,
                                      dimensionality_);
    }
    const PointDVectorIterator &operator++() {
      item_ += 1;
      return *this;
    }
    const PointDVectorIterator &operator--() {
      item_ -= 1;
      return *this;
    }
    PointDVectorIterator operator++(int32_t) {
      PointDVectorIterator copy(*this);
      item_ += 1;
      return copy;
    }
    PointDVectorIterator operator--(int32_t) {
      PointDVectorIterator copy(*this);
      item_ -= 1;
      return copy;
    }
    // Assignment copies only the item position; vec_ and dimensionality_ are
    // const members and are expected to already agree between the iterators.
    PointDVectorIterator &operator=(const PointDVectorIterator &other) {
      this->item_ = other.item_;
      return *this;
    }
    // All comparisons are by item position only (not by owning vector).
    bool operator==(const PointDVectorIterator &ref) const {
      return item_ == ref.item_;
    }
    bool operator!=(const PointDVectorIterator &ref) const {
      return item_ != ref.item_;
    }
    bool operator<(const PointDVectorIterator &ref) const {
      return item_ < ref.item_;
    }
    bool operator>(const PointDVectorIterator &ref) const {
      return item_ > ref.item_;
    }
    bool operator<=(const PointDVectorIterator &ref) const {
      return item_ <= ref.item_;
    }
    bool operator>=(const PointDVectorIterator &ref) const {
      return item_ >= ref.item_;
    }
    PointDVectorIterator operator+(const int32_t &add) const {
      PointDVectorIterator copy(vec_, item_ + add);
      return copy;
    }
    PointDVectorIterator &operator+=(const int32_t &add) {
      item_ += add;
      return *this;
    }
    PointDVectorIterator operator-(const int32_t &sub) const {
      PointDVectorIterator copy(vec_, item_ - sub);
      return copy;
    }
    size_t operator-(const PointDVectorIterator &sub) const {
      return (item_ - sub.item_);
    }
    PointDVectorIterator &operator-=(const int32_t &sub) {
      item_ -= sub;
      return *this;
    }
    // Returns a raw pointer to the first component of item (item_ + n).
    internal_t *operator[](const size_t &n) const {
      return vec_->data0_ + (item_ + n) * dimensionality_;
    }

   protected:
    // Only PointDVector constructs iterators (via begin()/end()).
    explicit PointDVectorIterator(PointDVector *vec, size_t start_item)
        : item_(start_item), vec_(vec), dimensionality_(vec->dimensionality_) {}

   private:
    size_t item_;  // this counts the item that should be referenced.
    PointDVector *const vec_;        // the thing that we're iterating on
    const uint32_t dimensionality_;  // local copy from vec_
  };
  PointDVectorIterator begin() { return PointDVectorIterator(this, 0); }
  PointDVectorIterator end() { return PointDVectorIterator(this, n_items_); }
  // operator[] allows for unprotected user-side usage of operator[] on the
  // return value AS IF it were a natively indexable type like Point3*
  internal_t *operator[](const uint32_t index) {
    DRACO_DCHECK_LT(index, n_items_);
    return data0_ + index * dimensionality_;
  }
  const internal_t *operator[](const uint32_t index) const {
    DRACO_DCHECK_LT(index, n_items_);
    return data0_ + index * dimensionality_;
  }
  // Number of items (points) stored.
  uint32_t size() const { return n_items_; }
  // Total number of components stored (n_items * dimensionality).
  size_t GetBufferSize() const { return data_.size(); }
  // copy a single contiguous 'item' from one PointDVector into this one.
  void CopyItem(const PointDVector &source, const internal_t source_index,
                const internal_t destination_index) {
    DRACO_DCHECK(&source != this ||
                 (&source == this && source_index != destination_index));
    DRACO_DCHECK_LT(destination_index, n_items_);
    DRACO_DCHECK_LT(source_index, source.n_items_);
    // DRACO_DCHECK_EQ(source.n_items_, n_items_); // not technically necessary
    DRACO_DCHECK_EQ(source.dimensionality_, dimensionality_);
    const internal_t *ref = source[source_index];
    internal_t *const dest = this->operator[](destination_index);
    std::memcpy(dest, ref, item_size_bytes_);
  }
  // Copy data directly off of an attribute buffer interleaved into internal
  // memory. Writes one item's worth of attribute components at
  // |offset_dimensionality| within item |index|.
  void CopyAttribute(
      // The dimensionality of the attribute being integrated
      const internal_t attribute_dimensionality,
      // The offset in dimensions to insert this attribute.
      const internal_t offset_dimensionality, const internal_t index,
      // The direct pointer to the data
      const void *const attribute_item_data) {
    // chunk copy
    const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
    // a multiply and add can be optimized away with an iterator
    std::memcpy(data0_ + index * dimensionality_ + offset_dimensionality,
                attribute_item_data, copy_size);
  }
  // Copy data off of a contiguous buffer interleaved into internal memory
  void CopyAttribute(
      // The dimensionality of the attribute being integrated
      const internal_t attribute_dimensionality,
      // The offset in dimensions to insert this attribute.
      const internal_t offset_dimensionality,
      const internal_t *const attribute_mem) {
    // NOTE(review): when dimensionality_ == attribute_dimensionality this
    // check requires offset_dimensionality < 0, yet the branch below
    // explicitly supports that degenerate case with offset 0 — this looks
    // like it was meant to be DRACO_DCHECK_LE; confirm against upstream.
    DRACO_DCHECK_LT(offset_dimensionality,
                    dimensionality_ - attribute_dimensionality);
    // degenerate case block copy the whole buffer.
    if (dimensionality_ == attribute_dimensionality) {
      DRACO_DCHECK_EQ(offset_dimensionality, 0);
      const size_t copy_size =
          sizeof(internal_t) * attribute_dimensionality * n_items_;
      std::memcpy(data0_, attribute_mem, copy_size);
    } else {  // chunk copy
      const size_t copy_size = sizeof(internal_t) * attribute_dimensionality;
      internal_t *internal_data;
      const internal_t *attribute_data;
      internal_t item;
      // One memcpy per item; both pointers stride by their own width.
      for (internal_data = data0_ + offset_dimensionality,
          attribute_data = attribute_mem, item = 0;
           item < n_items_; internal_data += dimensionality_,
          attribute_data += attribute_dimensionality, item += 1) {
        std::memcpy(internal_data, attribute_data, copy_size);
      }
    }
  }

 private:
  // internal parameters.
  const uint32_t n_items_;
  const uint32_t dimensionality_;  // The dimension of the points in the buffer
  const uint32_t item_size_bytes_;
  std::vector<internal_t> data_;  // contiguously stored data. Never resized.
  internal_t *const data0_;       // raw pointer to base data.
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINT_D_VECTOR_H_

View File

@ -0,0 +1,359 @@
// Copyright 2018 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/point_d_vector.h"
#include "draco/compression/point_cloud/algorithms/point_cloud_types.h"
#include "draco/core/draco_test_base.h"
namespace draco {
// Test fixture exercising PointDVector and its PseudoPointD proxy.
// Each helper sweeps item counts, dimensionalities, attribute
// dimensionalities and insertion offsets, filling attribute buffers with
// the item index as the component value and verifying round-trips.
class PointDVectorTest : public ::testing::Test {
 protected:
  // Placeholder; currently unused.
  template <typename PT>
  void TestIntegrity() {}
  // Verifies size() and GetBufferSize() for a grid of shapes.
  template <typename PT>
  void TestSize() {
    for (uint32_t n_items = 0; n_items <= 10; ++n_items) {
      for (uint32_t dimensionality = 1; dimensionality <= 10;
           ++dimensionality) {
        draco::PointDVector<PT> var(n_items, dimensionality);
        ASSERT_EQ(n_items, var.size());
        ASSERT_EQ(n_items * dimensionality, var.GetBufferSize());
      }
    }
  }
  // Verifies the contiguous-buffer CopyAttribute overload (whole attribute
  // stream copied in one call).
  template <typename PT>
  void TestContentsContiguous() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            // Fill each item's components with the item index.
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Verifies the per-item CopyAttribute overload (one call per item).
  template <typename PT>
  void TestContentsDiscrete() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            // Copy each item individually.
            for (PT item = 0; item < n_items; item += 1) {
              var.CopyAttribute(att_dimensionality, offset_dimensionality, item,
                                attribute_data + item * att_dimensionality);
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Verifies CopyItem by duplicating every item of |var| into |dest| and
  // checking both vectors.
  template <typename PT>
  void TestContentsCopy() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            PointDVector<PT> dest(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            for (PT item = 0; item < n_items; item += 1) {
              dest.CopyItem(var, item, item);
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Walks two vectors in lock-step with the random-access iterator,
  // comparing the PseudoPointD proxies and their components.
  template <typename PT>
  void TestIterator() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      for (uint32_t dimensionality = 1; dimensionality < 10;
           dimensionality += 2) {
        for (uint32_t att_dimensionality = 1;
             att_dimensionality <= dimensionality; att_dimensionality += 2) {
          for (uint32_t offset_dimensionality = 0;
               offset_dimensionality < dimensionality - att_dimensionality;
               ++offset_dimensionality) {
            PointDVector<PT> var(n_items, dimensionality);
            PointDVector<PT> dest(n_items, dimensionality);
            std::vector<PT> att(n_items * att_dimensionality);
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                att[val * att_dimensionality + att_dim] = val;
              }
            }
            const PT *const attribute_data = att.data();
            var.CopyAttribute(att_dimensionality, offset_dimensionality,
                              attribute_data);
            for (PT item = 0; item < n_items; item += 1) {
              dest.CopyItem(var, item, item);
            }
            auto V0 = var.begin();
            auto VE = var.end();
            auto D0 = dest.begin();
            auto DE = dest.end();
            while (V0 != VE && D0 != DE) {
              ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
              // verify elemental values
              for (auto index = 0; index < dimensionality; index += 1) {
                ASSERT_EQ((*D0)[index], (*V0)[index]);
              }
              ++V0;
              ++D0;
            }
            for (PT val = 0; val < n_items; val += 1) {
              for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
                ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
                ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
              }
            }
          }
        }
      }
    }
  }
  // Same as TestIterator but fixed at dimensionality 3, additionally
  // cross-checking against a std::vector<draco::Point3ui>.
  // NOTE(review): with dimensionality == att_dimensionality == 3 the loop
  // condition `offset_dimensionality < dimensionality - att_dimensionality`
  // is `0 < 0`, so the body below never executes and this test is a no-op —
  // probably intended to be `<=`; confirm against upstream.
  template <typename PT>
  void TestPoint3Iterator() {
    for (uint32_t n_items = 1; n_items <= 1000; n_items *= 10) {
      const uint32_t dimensionality = 3;
      // for (uint32_t dimensionality = 1; dimensionality < 10;
      // dimensionality += 2) {
      const uint32_t att_dimensionality = 3;
      // for (uint32_t att_dimensionality = 1;
      //      att_dimensionality <= dimensionality; att_dimensionality += 2) {
      for (uint32_t offset_dimensionality = 0;
           offset_dimensionality < dimensionality - att_dimensionality;
           ++offset_dimensionality) {
        PointDVector<PT> var(n_items, dimensionality);
        PointDVector<PT> dest(n_items, dimensionality);
        std::vector<PT> att(n_items * att_dimensionality);
        std::vector<draco::Point3ui> att3(n_items);
        for (PT val = 0; val < n_items; val += 1) {
          att3[val][0] = val;
          att3[val][1] = val;
          att3[val][2] = val;
          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
            att[val * att_dimensionality + att_dim] = val;
          }
        }
        const PT *const attribute_data = att.data();
        var.CopyAttribute(att_dimensionality, offset_dimensionality,
                          attribute_data);
        for (PT item = 0; item < n_items; item += 1) {
          dest.CopyItem(var, item, item);
        }
        auto aV0 = att3.begin();
        auto aVE = att3.end();
        auto V0 = var.begin();
        auto VE = var.end();
        auto D0 = dest.begin();
        auto DE = dest.end();
        while (aV0 != aVE && V0 != VE && D0 != DE) {
          ASSERT_EQ(*D0, *V0);  // compare PseudoPointD
          // verify elemental values
          for (auto index = 0; index < dimensionality; index += 1) {
            ASSERT_EQ((*D0)[index], (*V0)[index]);
            ASSERT_EQ((*D0)[index], (*aV0)[index]);
            ASSERT_EQ((*aV0)[index], (*V0)[index]);
          }
          ++aV0;
          ++V0;
          ++D0;
        }
        for (PT val = 0; val < n_items; val += 1) {
          for (PT att_dim = 0; att_dim < att_dimensionality; att_dim += 1) {
            ASSERT_EQ(var[val][offset_dimensionality + att_dim], val);
            ASSERT_EQ(dest[val][offset_dimensionality + att_dim], val);
          }
        }
      }
    }
  }
  // Verifies the free swap() overloads exchange the pointed-to components
  // of two PseudoPointD proxies.
  void TestPseudoPointDSwap() {
    draco::Point3ui val = {0, 1, 2};
    draco::Point3ui dest = {10, 11, 12};
    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
    ASSERT_EQ(val_src1[0], 0);
    ASSERT_EQ(val_src1[1], 1);
    ASSERT_EQ(val_src1[2], 2);
    ASSERT_EQ(dest_src1[0], 10);
    ASSERT_EQ(dest_src1[1], 11);
    ASSERT_EQ(dest_src1[2], 12);
    ASSERT_NE(val_src1, dest_src1);
    swap(val_src1, dest_src1);
    ASSERT_EQ(dest_src1[0], 0);
    ASSERT_EQ(dest_src1[1], 1);
    ASSERT_EQ(dest_src1[2], 2);
    ASSERT_EQ(val_src1[0], 10);
    ASSERT_EQ(val_src1[1], 11);
    ASSERT_EQ(val_src1[2], 12);
    ASSERT_NE(val_src1, dest_src1);
  }
  // Equality of proxies is by component values, regardless of which
  // underlying buffer the proxy points into.
  void TestPseudoPointDEquality() {
    draco::Point3ui val = {0, 1, 2};
    draco::Point3ui dest = {0, 1, 2};
    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
    draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
    draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
    ASSERT_EQ(val_src1, val_src1);
    ASSERT_EQ(val_src1, val_src2);
    ASSERT_EQ(dest_src1, val_src1);
    ASSERT_EQ(dest_src1, val_src2);
    ASSERT_EQ(val_src2, val_src1);
    ASSERT_EQ(val_src2, val_src2);
    ASSERT_EQ(dest_src2, val_src1);
    ASSERT_EQ(dest_src2, val_src2);
    for (auto i = 0; i < 3; i++) {
      ASSERT_EQ(val_src1[i], val_src1[i]);
      ASSERT_EQ(val_src1[i], val_src2[i]);
      ASSERT_EQ(dest_src1[i], val_src1[i]);
      ASSERT_EQ(dest_src1[i], val_src2[i]);
      ASSERT_EQ(val_src2[i], val_src1[i]);
      ASSERT_EQ(val_src2[i], val_src2[i]);
      ASSERT_EQ(dest_src2[i], val_src1[i]);
      ASSERT_EQ(dest_src2[i], val_src2[i]);
    }
  }
  // Proxies over buffers with differing component values compare unequal.
  void TestPseudoPointDInequality() {
    draco::Point3ui val = {0, 1, 2};
    draco::Point3ui dest = {1, 2, 3};
    draco::PseudoPointD<uint32_t> val_src1(&val[0], 3);
    draco::PseudoPointD<uint32_t> val_src2(&val[0], 3);
    draco::PseudoPointD<uint32_t> dest_src1(&dest[0], 3);
    draco::PseudoPointD<uint32_t> dest_src2(&dest[0], 3);
    ASSERT_EQ(val_src1, val_src1);
    ASSERT_EQ(val_src1, val_src2);
    ASSERT_NE(dest_src1, val_src1);
    ASSERT_NE(dest_src1, val_src2);
    ASSERT_EQ(val_src2, val_src1);
    ASSERT_EQ(val_src2, val_src2);
    ASSERT_NE(dest_src2, val_src1);
    ASSERT_NE(dest_src2, val_src2);
    for (auto i = 0; i < 3; i++) {
      ASSERT_EQ(val_src1[i], val_src1[i]);
      ASSERT_EQ(val_src1[i], val_src2[i]);
      ASSERT_NE(dest_src1[i], val_src1[i]);
      ASSERT_NE(dest_src1[i], val_src2[i]);
      ASSERT_EQ(val_src2[i], val_src1[i]);
      ASSERT_EQ(val_src2[i], val_src2[i]);
      ASSERT_NE(dest_src2[i], val_src1[i]);
      ASSERT_NE(dest_src2[i], val_src2[i]);
    }
  }
};
// Runs the full PointDVector suite with uint32_t components: sizes, the two
// CopyAttribute overloads, CopyItem, and iterator traversal.
TEST_F(PointDVectorTest, VectorTest) {
  TestSize<uint32_t>();
  TestContentsDiscrete<uint32_t>();
  TestContentsContiguous<uint32_t>();
  TestContentsCopy<uint32_t>();
  TestIterator<uint32_t>();
  TestPoint3Iterator<uint32_t>();
}
// Covers the PseudoPointD proxy directly: swap, equality and inequality.
TEST_F(PointDVectorTest, PseudoPointDTest) {
  TestPseudoPointDSwap();
  TestPseudoPointDEquality();
  TestPseudoPointDInequality();
}
} // namespace draco

View File

@ -0,0 +1,63 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_
#include <vector>
#include "draco/attributes/point_attribute.h"
namespace draco {
// Class for generating a sequence of point ids that can be used to encode
// or decode attribute values in a specific order.
// See sequential_attribute_encoders/decoders_controller.h for more details.
class PointsSequencer {
 public:
  PointsSequencer() : out_point_ids_(nullptr) {}
  virtual ~PointsSequencer() = default;

  // Fills the |out_point_ids| with the generated sequence of point ids.
  // Returns the result of the derived class's GenerateSequenceInternal();
  // the output pointer is retained for use by AddPointId().
  bool GenerateSequence(std::vector<PointIndex> *out_point_ids) {
    out_point_ids_ = out_point_ids;
    return GenerateSequenceInternal();
  }
  // Appends a point to the sequence. Only valid during GenerateSequence(),
  // i.e. after out_point_ids_ has been set.
  void AddPointId(PointIndex point_id) { out_point_ids_->push_back(point_id); }
  // Sets the correct mapping between point ids and value ids. I.e., the inverse
  // of the |out_point_ids|. In general, |out_point_ids_| does not contain
  // sufficient information to compute the inverse map, because not all point
  // ids are necessarily contained within the map.
  // Must be implemented for sequencers that are used by attribute decoders.
  // Base implementation returns false (not supported).
  virtual bool UpdatePointToAttributeIndexMapping(PointAttribute * /* attr */) {
    return false;
  }

 protected:
  // Method that needs to be implemented by the derived classes. The
  // implementation is responsible for filling |out_point_ids_| with the valid
  // sequence of point ids.
  virtual bool GenerateSequenceInternal() = 0;
  // Accessor for derived classes; owned by the caller of GenerateSequence().
  std::vector<PointIndex> *out_point_ids() const { return out_point_ids_; }

 private:
  std::vector<PointIndex> *out_point_ids_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_POINTS_SEQUENCER_H_

View File

@ -0,0 +1,227 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_
#include <algorithm>
#include <cmath>
#include "draco/draco_features.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
#include "draco/core/varint_decoding.h"
namespace draco {
// Decoder for predictions encoded with the constrained multi-parallelogram
// encoder. See the corresponding encoder for more details about the prediction
// method.
// Decoder counterpart of the constrained multi-parallelogram prediction
// scheme: reads per-context crease-edge flags from the bitstream and uses
// them to select which parallelograms contribute to each prediction.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
  MeshPredictionSchemeConstrainedMultiParallelogramDecoder(
      const PointAttribute *attribute, const TransformT &transform,
      const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}

  // Reconstructs original attribute values from the decoded corrections.
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  // Reads the per-context crease-edge flags from |buffer|.
  bool DecodePredictionData(DecoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
  }
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }

 private:
  typedef constrained_multi_parallelogram::Mode Mode;
  static constexpr int kMaxNumParallelograms =
      constrained_multi_parallelogram::kMaxNumParallelograms;
  // Crease edges are used to store whether any given edge should be used for
  // parallelogram prediction or not. New values are added in the order in which
  // the edges are processed. For better compression, the flags are stored in
  // in separate contexts based on the number of available parallelograms at a
  // given vertex.
  std::vector<bool> is_crease_edge_[kMaxNumParallelograms];
  Mode selected_mode_;
};
// Reconstructs original attribute values. For each entry after the first,
// gathers up to kMaxNumParallelograms parallelogram predictions around the
// vertex (swinging left then right through the corner table), averages the
// ones whose crease-edge flag says "use", and applies the transform to undo
// the correction. Falls back to delta-from-previous when no parallelogram
// is usable.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
    DataTypeT, TransformT, MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(num_components);

  // Predicted values for all simple parallelograms encountered at any given
  // vertex.
  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    pred_vals[i].resize(num_components, 0);
  }
  // The first entry has no predecessor: decode it against a zero prediction.
  this->transform().ComputeOriginalValue(pred_vals[0].data(), in_corr,
                                         out_data);

  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();

  // Current position in the |is_crease_edge_| array for each context.
  std::vector<int> is_crease_edge_pos(kMaxNumParallelograms, 0);

  // Used to store predicted value for multi-parallelogram prediction.
  std::vector<DataTypeT> multi_pred_vals(num_components);

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 1; p < corner_map_size; ++p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    bool first_pass = true;
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, out_data,
              num_components, &(pred_vals[num_parallelograms][0]))) {
        // Parallelogram prediction applied and stored in
        // |pred_vals[num_parallelograms]|
        ++num_parallelograms;
        // Stop processing when we reach the maximum number of allowed
        // parallelograms.
        if (num_parallelograms == kMaxNumParallelograms)
          break;
      }

      // Proceed to the next corner attached to the vertex. First swing left
      // and if we reach a boundary, swing right from the start corner.
      if (first_pass) {
        corner_id = table->SwingLeft(corner_id);
      } else {
        corner_id = table->SwingRight(corner_id);
      }
      if (corner_id == start_corner_id) {
        break;
      }
      if (corner_id == kInvalidCornerIndex && first_pass) {
        first_pass = false;
        corner_id = table->SwingRight(start_corner_id);
      }
    }

    // Check which of the available parallelograms are actually used and compute
    // the final predicted value.
    int num_used_parallelograms = 0;
    if (num_parallelograms > 0) {
      for (int i = 0; i < num_components; ++i) {
        multi_pred_vals[i] = 0;
      }
      // Check which parallelograms are actually used.
      // Context is chosen by how many parallelograms were available here;
      // flags within a context are consumed in encounter order.
      for (int i = 0; i < num_parallelograms; ++i) {
        const int context = num_parallelograms - 1;
        const int pos = is_crease_edge_pos[context]++;
        // Bitstream is corrupt if it carries fewer flags than required.
        if (is_crease_edge_[context].size() <= pos)
          return false;
        const bool is_crease = is_crease_edge_[context][pos];
        if (!is_crease) {
          ++num_used_parallelograms;
          for (int j = 0; j < num_components; ++j) {
            multi_pred_vals[j] += pred_vals[i][j];
          }
        }
      }
    }
    const int dst_offset = p * num_components;
    if (num_used_parallelograms == 0) {
      // No parallelogram was valid.
      // We use the last decoded point as a reference.
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeOriginalValue(
          out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
    } else {
      // Compute the correction from the predicted value.
      for (int c = 0; c < num_components; ++c) {
        multi_pred_vals[c] /= num_used_parallelograms;
      }
      this->transform().ComputeOriginalValue(
          multi_pred_vals.data(), in_corr + dst_offset, out_data + dst_offset);
    }
  }
  return true;
}
// Decodes the per-context crease-edge flags that drive parallelogram
// selection: for each of the kMaxNumParallelograms contexts, a varint flag
// count followed by a rans-coded bit stream. Delegates remaining prediction
// data to the base class. Returns false on malformed input.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    // Decode prediction mode.
    uint8_t mode;
    if (!buffer->Decode(&mode)) {
      return false;
    }
    if (mode != Mode::OPTIMAL_MULTI_PARALLELOGRAM) {
      // Unsupported mode.
      return false;
    }
  }
#endif

  // Decode selected edges using separate rans bit coder for each context.
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    uint32_t num_flags;
    // Bug fix: the return value of DecodeVarint was previously ignored, so a
    // truncated/corrupt buffer would proceed with an uninitialized
    // |num_flags|.
    if (!DecodeVarint<uint32_t>(&num_flags, buffer)) {
      return false;
    }
    if (num_flags > 0) {
      is_crease_edge_[i].resize(num_flags);
      RAnsBitDecoder decoder;
      if (!decoder.StartDecoding(buffer))
        return false;
      for (uint32_t j = 0; j < num_flags; ++j) {
        is_crease_edge_[i][j] = decoder.DecodeNextBit();
      }
      decoder.EndDecoding();
    }
  }
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_DECODER_H_

View File

@ -0,0 +1,410 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_
#include <algorithm>
#include <cmath>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_shared.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/compression/entropy/shannon_entropy.h"
#include "draco/core/varint_encoding.h"
namespace draco {
// Compared to standard multi-parallelogram, constrained multi-parallelogram can
// explicitly select which of the available parallelograms are going to be used
// for the prediction by marking crease edges between two triangles. This
// requires storing extra data, but it allows the predictor to avoid using
// parallelograms that would lead to poor predictions. For improved efficiency,
// our current implementation limits the maximum number of used parallelograms
// to four, which covers >95% of the cases (on average, there are only two
// parallelograms available for any given vertex).
// All bits of the explicitly chosen configuration are stored together in a
// single context chosen by the total number of parallelograms available to
// choose from.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeConstrainedMultiParallelogramEncoder
: public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
public:
using CorrType =
typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
using CornerTable = typename MeshDataT::CornerTable;
explicit MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
const PointAttribute *attribute)
: MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
attribute),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
MeshPredictionSchemeConstrainedMultiParallelogramEncoder(
const PointAttribute *attribute, const TransformT &transform,
const MeshDataT &mesh_data)
: MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
attribute, transform, mesh_data),
selected_mode_(Mode::OPTIMAL_MULTI_PARALLELOGRAM) {}
bool ComputeCorrectionValues(
const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components, const PointIndex *entry_to_point_id_map) override;
bool EncodePredictionData(EncoderBuffer *buffer) override;
PredictionSchemeMethod GetPredictionMethod() const override {
return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
}
bool IsInitialized() const override {
return this->mesh_data().IsInitialized();
}
private:
// Function used to compute number of bits needed to store overhead of the
// predictor. In this case, we consider overhead to be all bits that mark
// whether a parallelogram should be used for prediction or not. The input
// to this method is the total number of parallelograms that were evaluated so
// far(total_parallelogram), and the number of parallelograms we decided to
// use for prediction (total_used_parallelograms).
// Returns number of bits required to store the overhead.
int64_t ComputeOverheadBits(int64_t total_used_parallelograms,
int64_t total_parallelogram) const {
// For now we assume RAns coding for the bits where the total required size
// is directly correlated to the binary entropy of the input stream.
// TODO(ostava): This should be generalized in case we use other binary
// coding scheme.
const double entropy = ComputeBinaryShannonEntropy(
static_cast<uint32_t>(total_parallelogram),
static_cast<uint32_t>(total_used_parallelograms));
// Round up to the nearest full bit.
return static_cast<int64_t>(
ceil(static_cast<double>(total_parallelogram) * entropy));
}
// Struct that contains data used for measuring the error of each available
// parallelogram configuration.
struct Error {
Error() : num_bits(0), residual_error(0) {}
// Primary metric: number of bits required to store the data as a result of
// the selected prediction configuration.
int num_bits;
// Secondary metric: absolute difference of residuals for the given
// configuration.
int residual_error;
bool operator<(const Error &e) const {
if (num_bits < e.num_bits)
return true;
if (num_bits > e.num_bits)
return false;
return residual_error < e.residual_error;
}
};
// Computes error for predicting |predicted_val| instead of |actual_val|.
// Error is computed as the number of bits needed to encode the difference
// between the values.
Error ComputeError(const DataTypeT *predicted_val,
                   const DataTypeT *actual_val, int *out_residuals,
                   int num_components) {
  Error error;
  for (int c = 0; c < num_components; ++c) {
    const int residual = (predicted_val[c] - actual_val[c]);
    out_residuals[c] = residual;
    error.residual_error += std::abs(residual);
    // Entropy needs unsigned symbols, so convert the signed difference to an
    // unsigned symbol.
    entropy_symbols_[c] = ConvertSignedIntToSymbol(residual);
  }
  // Estimate the bit cost as if this configuration were selected. Peek() does
  // NOT commit the symbols to the entropy stream.
  const auto entropy_data =
      entropy_tracker_.Peek(entropy_symbols_.data(), num_components);
  error.num_bits = entropy_tracker_.GetNumberOfRAnsTableBits(entropy_data) +
                   entropy_tracker_.GetNumberOfDataBits(entropy_data);
  return error;
}
typedef constrained_multi_parallelogram::Mode Mode;

// Maximum number of simple parallelograms that may be combined into one
// prediction (shared constant with the decoder).
static constexpr int kMaxNumParallelograms =
    constrained_multi_parallelogram::kMaxNumParallelograms;

// Crease edges are used to store whether any given edge should be used for
// parallelogram prediction or not. New values are added in the order in which
// the edges are processed. For better compression, the flags are stored in
// separate contexts based on the number of available parallelograms at a
// given vertex.
// TODO(draco-eng) reconsider std::vector<bool> (performance/space).
std::vector<bool> is_crease_edge_[kMaxNumParallelograms];

// Prediction mode used by this encoder instance.
Mode selected_mode_;

// Tracks entropy of residuals committed so far; used to estimate the bit
// cost of candidate configurations in ComputeError().
ShannonEntropyTracker entropy_tracker_;

// Temporary storage for symbols that are fed into the |entropy_stream|.
// Always contains only |num_components| entries.
std::vector<uint32_t> entropy_symbols_;
// Computes transformed corrections |out_corr| for all attribute entries in
// |in_data|. For each vertex (processed back to front), every combination of
// the available parallelogram predictions is evaluated and the configuration
// with the lowest estimated bit cost (residual entropy + overhead of storing
// the per-edge flags) is selected.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
    DataTypeT, TransformT, MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(in_data, size, num_components);
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();

  // Predicted values for all simple parallelograms encountered at any given
  // vertex.
  std::vector<DataTypeT> pred_vals[kMaxNumParallelograms];
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    pred_vals[i].resize(num_components);
  }
  // Used to store predicted value for various multi-parallelogram predictions
  // (combinations of simple parallelogram predictions).
  std::vector<DataTypeT> multi_pred_vals(num_components);
  entropy_symbols_.resize(num_components);

  // Struct for holding data about prediction configuration for different sets
  // of used parallelograms.
  struct PredictionConfiguration {
    PredictionConfiguration()
        : error(), configuration(0), num_used_parallelograms(0) {}
    Error error;
    uint8_t configuration;  // Bitfield, 1 use parallelogram, 0 don't use it.
    int num_used_parallelograms;
    std::vector<DataTypeT> predicted_value;
    std::vector<int32_t> residuals;
  };

  // Bit-field used for computing permutations of excluded edges
  // (parallelograms).
  bool excluded_parallelograms[kMaxNumParallelograms];

  // Data about the number of used parallelogram and total number of available
  // parallelogram for each context. Used to compute overhead needed for
  // storing the parallelogram choices made by the encoder.
  int64_t total_used_parallelograms[kMaxNumParallelograms] = {0};
  int64_t total_parallelograms[kMaxNumParallelograms] = {0};

  std::vector<int> current_residuals(num_components);

  // We start processing the vertices from the end because this prediction uses
  // data from previous entries that could be overwritten when an entry is
  // processed.
  for (int p =
           static_cast<int>(this->mesh_data().data_to_corner_map()->size()) - 1;
       p > 0; --p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);

    // Go over all corners attached to the vertex and compute the predicted
    // value from the parallelograms defined by their opposite faces.
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    bool first_pass = true;
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, in_data, num_components,
              &(pred_vals[num_parallelograms][0]))) {
        // Parallelogram prediction applied and stored in
        // |pred_vals[num_parallelograms]|.
        ++num_parallelograms;
        // Stop processing when we reach the maximum number of allowed
        // parallelograms.
        if (num_parallelograms == kMaxNumParallelograms)
          break;
      }

      // Proceed to the next corner attached to the vertex. First swing left
      // and if we reach a boundary, swing right from the start corner.
      if (first_pass) {
        corner_id = table->SwingLeft(corner_id);
      } else {
        corner_id = table->SwingRight(corner_id);
      }
      if (corner_id == start_corner_id) {
        break;
      }
      if (corner_id == kInvalidCornerIndex && first_pass) {
        first_pass = false;
        corner_id = table->SwingRight(start_corner_id);
      }
    }

    // Offset to the target (destination) vertex.
    const int dst_offset = p * num_components;
    Error error;

    // Compute all prediction errors for all possible configurations of
    // available parallelograms.

    // Variable for holding the best configuration that has been found so far.
    PredictionConfiguration best_prediction;

    // Compute delta coding error (configuration when no parallelogram is
    // selected).
    const int src_offset = (p - 1) * num_components;
    error = ComputeError(in_data + src_offset, in_data + dst_offset,
                         &current_residuals[0], num_components);

    if (num_parallelograms > 0) {
      total_parallelograms[num_parallelograms - 1] += num_parallelograms;
      const int64_t new_overhead_bits =
          ComputeOverheadBits(total_used_parallelograms[num_parallelograms - 1],
                              total_parallelograms[num_parallelograms - 1]);
      error.num_bits += new_overhead_bits;
    }

    best_prediction.error = error;
    best_prediction.configuration = 0;
    best_prediction.num_used_parallelograms = 0;
    best_prediction.predicted_value.assign(
        in_data + src_offset, in_data + src_offset + num_components);
    best_prediction.residuals.assign(current_residuals.begin(),
                                     current_residuals.end());

    // Compute prediction error for different cases of used parallelograms.
    for (int num_used_parallelograms = 1;
         num_used_parallelograms <= num_parallelograms;
         ++num_used_parallelograms) {
      // Mark all parallelograms as excluded.
      std::fill(excluded_parallelograms,
                excluded_parallelograms + num_parallelograms, true);
      // TODO(draco-eng) maybe this should be another std::fill.
      // Mark the first |num_used_parallelograms| as not excluded.
      for (int j = 0; j < num_used_parallelograms; ++j) {
        excluded_parallelograms[j] = false;
      }

      // Permute over the excluded edges and compute error for each
      // configuration (permutation of excluded parallelograms).
      do {
        // Reset the multi-parallelogram predicted values.
        for (int j = 0; j < num_components; ++j) {
          multi_pred_vals[j] = 0;
        }
        uint8_t configuration = 0;
        for (int j = 0; j < num_parallelograms; ++j) {
          if (excluded_parallelograms[j])
            continue;
          for (int c = 0; c < num_components; ++c) {
            multi_pred_vals[c] += pred_vals[j][c];
          }
          // Set jth bit of the configuration.
          configuration |= (1 << j);
        }

        // Average the summed predictions of the included parallelograms.
        for (int j = 0; j < num_components; ++j) {
          multi_pred_vals[j] /= num_used_parallelograms;
        }
        error = ComputeError(multi_pred_vals.data(), in_data + dst_offset,
                             &current_residuals[0], num_components);
        if (num_parallelograms > 0) {
          const int64_t new_overhead_bits = ComputeOverheadBits(
              total_used_parallelograms[num_parallelograms - 1] +
                  num_used_parallelograms,
              total_parallelograms[num_parallelograms - 1]);

          // Add overhead bits to the total error.
          error.num_bits += new_overhead_bits;
        }
        if (error < best_prediction.error) {
          best_prediction.error = error;
          best_prediction.configuration = configuration;
          best_prediction.num_used_parallelograms = num_used_parallelograms;
          best_prediction.predicted_value.assign(multi_pred_vals.begin(),
                                                 multi_pred_vals.end());
          best_prediction.residuals.assign(current_residuals.begin(),
                                           current_residuals.end());
        }
      } while (std::next_permutation(
          excluded_parallelograms,
          excluded_parallelograms + num_parallelograms));
    }
    if (num_parallelograms > 0) {
      total_used_parallelograms[num_parallelograms - 1] +=
          best_prediction.num_used_parallelograms;
    }

    // Update the entropy stream by adding selected residuals as symbols to the
    // stream.
    for (int i = 0; i < num_components; ++i) {
      entropy_symbols_[i] =
          ConvertSignedIntToSymbol(best_prediction.residuals[i]);
    }
    entropy_tracker_.Push(entropy_symbols_.data(), num_components);

    for (int i = 0; i < num_parallelograms; ++i) {
      if ((best_prediction.configuration & (1 << i)) == 0) {
        // Parallelogram not used, mark the edge as crease.
        is_crease_edge_[num_parallelograms - 1].push_back(true);
      } else {
        // Parallelogram used. Add it to the predicted value and mark the
        // edge as not a crease.
        is_crease_edge_[num_parallelograms - 1].push_back(false);
      }
    }
    this->transform().ComputeCorrection(in_data + dst_offset,
                                        best_prediction.predicted_value.data(),
                                        out_corr + dst_offset);
  }

  // First element is always fixed because it cannot be predicted.
  for (int i = 0; i < num_components; ++i) {
    pred_vals[0][i] = static_cast<DataTypeT>(0);
  }
  this->transform().ComputeCorrection(in_data, pred_vals[0].data(), out_corr);
  return true;
}
// Encodes the per-context crease-edge flags, then delegates to the base class
// to encode the remaining prediction data.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
    DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
                                                                *buffer) {
  // Encode selected edges using separate rans bit coder for each context.
  for (int i = 0; i < kMaxNumParallelograms; ++i) {
    // |i| is the context based on the number of available parallelograms,
    // which is always equal to |i + 1|.
    const int num_used_parallelograms = i + 1;
    // Explicit cast: |size()| is size_t while EncodeVarint expects uint32_t;
    // avoids an implicit narrowing conversion.
    EncodeVarint<uint32_t>(static_cast<uint32_t>(is_crease_edge_[i].size()),
                           buffer);
    if (is_crease_edge_[i].size()) {
      RAnsBitEncoder encoder;
      encoder.StartEncoding();
      // Encode the crease edge flags in the reverse vertex order that is
      // needed by the decoder. Note that for the currently supported mode,
      // each vertex has exactly |num_used_parallelograms| edges that need to
      // be encoded.
      for (int j = static_cast<int>(is_crease_edge_[i].size()) -
                   num_used_parallelograms;
           j >= 0; j -= num_used_parallelograms) {
        // Go over all edges of the current vertex.
        for (int k = 0; k < num_used_parallelograms; ++k) {
          encoder.EncodeBit(is_crease_edge_[i][j + k]);
        }
      }
      encoder.EndEncoding(buffer);
    }
  }
  return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                     MeshDataT>::EncodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_ENCODER_H_

View File

@ -0,0 +1,34 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_
namespace draco {

// Data shared between constrained multi-parallelogram encoder and decoder.
namespace constrained_multi_parallelogram {

// Prediction modes supported by the constrained multi-parallelogram scheme.
enum Mode {
  // Selects the optimal multi-parallelogram from up to 4 available
  // parallelograms.
  OPTIMAL_MULTI_PARALLELOGRAM = 0,
};

// Maximum number of simple parallelograms that may be combined into a single
// prediction.
static constexpr int kMaxNumParallelograms = 4;

}  // namespace constrained_multi_parallelogram
}  // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_CONSTRAINED_MULTI_PARALLELOGRAM_SHARED_H_

View File

@ -0,0 +1,72 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_MESH_PREDICTION_SCHEMES_PREDICTION_SCHEME_DATA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_MESH_PREDICTION_SCHEMES_PREDICTION_SCHEME_DATA_H_
#include "draco/mesh/corner_table.h"
#include "draco/mesh/mesh.h"
namespace draco {
// Class stores data about the connectivity data of the mesh and information
// about how the connectivity was encoded/decoded.
template <class CornerTableT>
class MeshPredictionSchemeData {
 public:
  typedef CornerTableT CornerTable;

  // All pointers start out null; call Set() before use.
  MeshPredictionSchemeData()
      : mesh_(nullptr),
        corner_table_(nullptr),
        vertex_to_data_map_(nullptr),
        data_to_corner_map_(nullptr) {}

  // Stores the provided pointers. The class does not take ownership of any of
  // the passed objects; callers must keep them alive for the lifetime of this
  // instance.
  void Set(const Mesh *mesh, const CornerTable *table,
           const std::vector<CornerIndex> *data_to_corner_map,
           const std::vector<int32_t> *vertex_to_data_map) {
    mesh_ = mesh;
    corner_table_ = table;
    data_to_corner_map_ = data_to_corner_map;
    vertex_to_data_map_ = vertex_to_data_map;
  }

  const Mesh *mesh() const { return mesh_; }
  const CornerTable *corner_table() const { return corner_table_; }
  const std::vector<int32_t> *vertex_to_data_map() const {
    return vertex_to_data_map_;
  }
  const std::vector<CornerIndex> *data_to_corner_map() const {
    return data_to_corner_map_;
  }

  // True once all four pointers have been set to non-null values via Set().
  bool IsInitialized() const {
    return mesh_ != nullptr && corner_table_ != nullptr &&
           vertex_to_data_map_ != nullptr && data_to_corner_map_ != nullptr;
  }

 private:
  const Mesh *mesh_;
  const CornerTable *corner_table_;

  // Mapping between vertices and their encoding order. I.e. when an attribute
  // entry on a given vertex was encoded.
  const std::vector<int32_t> *vertex_to_data_map_;

  // Array that stores which corner was processed when a given attribute entry
  // was encoded or decoded.
  const std::vector<CornerIndex> *data_to_corner_map_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DATA_H_

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
namespace draco {
// Base class for all mesh prediction scheme decoders that use the mesh
// connectivity data. |MeshDataT| can be any class that provides the same
// interface as the PredictionSchemeMeshData class.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeDecoder
    : public PredictionSchemeDecoder<DataTypeT, TransformT> {
 public:
  typedef MeshDataT MeshData;

  // |mesh_data| is copied into the decoder and exposed to derived classes via
  // the mesh_data() accessor.
  MeshPredictionSchemeDecoder(const PointAttribute *attribute,
                              const TransformT &transform,
                              const MeshDataT &mesh_data)
      : PredictionSchemeDecoder<DataTypeT, TransformT>(attribute, transform),
        mesh_data_(mesh_data) {}

 protected:
  // Connectivity data available to derived prediction schemes.
  const MeshData &mesh_data() const { return mesh_data_; }

 private:
  MeshData mesh_data_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_DECODER_H_

View File

@ -0,0 +1,46 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
namespace draco {
// Base class for all mesh prediction scheme encoders that use the mesh
// connectivity data. |MeshDataT| can be any class that provides the same
// interface as the PredictionSchemeMeshData class.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeEncoder
    : public PredictionSchemeEncoder<DataTypeT, TransformT> {
 public:
  typedef MeshDataT MeshData;

  // |mesh_data| is copied into the encoder and exposed to derived classes via
  // the mesh_data() accessor.
  MeshPredictionSchemeEncoder(const PointAttribute *attribute,
                              const TransformT &transform,
                              const MeshDataT &mesh_data)
      : PredictionSchemeEncoder<DataTypeT, TransformT>(attribute, transform),
        mesh_data_(mesh_data) {}

 protected:
  // Connectivity data available to derived prediction schemes.
  const MeshData &mesh_data() const { return mesh_data_; }

 private:
  MeshData mesh_data_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_ENCODER_H_

View File

@ -0,0 +1,163 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_
#include "draco/draco_features.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
namespace draco {
// See MeshPredictionSchemeGeometricNormalEncoder for documentation.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeGeometricNormalDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeGeometricNormalDecoder(const PointAttribute *attribute,
                                             const TransformT &transform,
                                             const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}

 private:
  // Default construction is disallowed; a decoder always needs the attribute
  // and mesh data.
  MeshPredictionSchemeGeometricNormalDecoder() {}

 public:
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;

  bool DecodePredictionData(DecoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_GEOMETRIC_NORMAL;
  }

  // Initialized only when the predictor, the mesh data, and the octahedron
  // tool box are all ready.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized())
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    if (!octahedron_tool_box_.IsInitialized())
      return false;
    return true;
  }

  // Normals are predicted from positions, so the scheme depends on exactly
  // one parent attribute: POSITION.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  // Accepts only a 3-component POSITION attribute as the parent.
  bool SetParentAttribute(const PointAttribute *att) override {
    if (att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    predictor_.SetPositionAttribute(*att);
    return true;
  }
  void SetQuantizationBits(int q) {
    octahedron_tool_box_.SetQuantizationBits(q);
  }

 private:
  MeshPredictionSchemeGeometricNormalPredictorArea<DataTypeT, TransformT,
                                                   MeshDataT>
      predictor_;
  OctahedronToolBox octahedron_tool_box_;
  // Decodes the per-normal flip bits written by the encoder.
  RAnsBitDecoder flip_normal_bit_decoder_;
};
// Reconstructs the original octahedral normal values from the corrections
// |in_corr|: for each entry the geometric prediction is recomputed from the
// already-decoded positions, optionally flipped (per the decoded flip bit),
// and combined with the stored correction.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalDecoder<
    DataTypeT, TransformT,
    MeshDataT>::ComputeOriginalValues(const CorrType *in_corr,
                                      DataTypeT *out_data, int /* size */,
                                      int num_components,
                                      const PointIndex *entry_to_point_id_map) {
  this->SetQuantizationBits(this->transform().quantization_bits());
  predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
  DRACO_DCHECK(this->IsInitialized());

  // Expecting in_data in octahedral coordinates, i.e., portable attribute.
  DRACO_DCHECK_EQ(num_components, 2);

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());

  VectorD<int32_t, 3> pred_normal_3d;
  int32_t pred_normal_oct[2];

  for (int data_id = 0; data_id < corner_map_size; ++data_id) {
    const CornerIndex corner_id =
        this->mesh_data().data_to_corner_map()->at(data_id);
    predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data());

    // Compute predicted octahedral coordinates.
    octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data());
    DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(),
                    octahedron_tool_box_.center_value());

    // The flip bits must be consumed in the same order the encoder emitted
    // them (one per entry).
    if (flip_normal_bit_decoder_.DecodeNextBit()) {
      pred_normal_3d = -pred_normal_3d;
    }
    octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
        pred_normal_3d.data(), pred_normal_oct, pred_normal_oct + 1);

    const int data_offset = data_id * 2;
    this->transform().ComputeOriginalValue(
        pred_normal_oct, in_corr + data_offset, out_data + data_offset);
  }
  flip_normal_bit_decoder_.EndDecoding();
  return true;
}
// Decodes the transform data and (for pre-2.2 bitstreams) the explicit normal
// prediction mode, then initializes the flip-bit decoder. The decode order
// must mirror EncodePredictionData() on the encoder side.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
  // Get data needed for transform
  if (!this->transform().DecodeTransformData(buffer))
    return false;

#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    uint8_t prediction_mode;
    // NOTE(review): the return value of Decode() is ignored here — confirm a
    // truncated buffer is caught elsewhere.
    buffer->Decode(&prediction_mode);

    if (!predictor_.SetNormalPredictionMode(
            NormalPredictionMode(prediction_mode)))
      return false;
  }
#endif

  // Init normal flips.
  if (!flip_normal_bit_decoder_.StartDecoding(buffer))
    return false;

  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_DECODER_H_

View File

@ -0,0 +1,175 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_area.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/compression/config/compression_shared.h"
namespace draco {
// Prediction scheme for normals based on the underlying geometry.
// At a smooth vertices normals are computed by weighting the normals of
// adjacent faces with the area of these faces. At seams, the same approach
// applies for seam corners.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeGeometricNormalEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeGeometricNormalEncoder(const PointAttribute *attribute,
                                             const TransformT &transform,
                                             const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}

  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;

  bool EncodePredictionData(EncoderBuffer *buffer) override;

  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_GEOMETRIC_NORMAL;
  }

  // Initialized only when both the predictor and the mesh data are ready.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized())
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    return true;
  }

  // Normals are predicted from positions, so the scheme depends on exactly
  // one parent attribute: POSITION.
  int GetNumParentAttributes() const override { return 1; }

  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }

  // Accepts only a 3-component POSITION attribute as the parent.
  bool SetParentAttribute(const PointAttribute *att) override {
    if (att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    predictor_.SetPositionAttribute(*att);
    return true;
  }

 private:
  void SetQuantizationBits(int q) {
    DRACO_DCHECK_GE(q, 2);
    DRACO_DCHECK_LE(q, 30);
    octahedron_tool_box_.SetQuantizationBits(q);
  }
  MeshPredictionSchemeGeometricNormalPredictorArea<DataTypeT, TransformT,
                                                   MeshDataT>
      predictor_;
  OctahedronToolBox octahedron_tool_box_;
  // Collects one bit per entry stating whether the predicted normal had to be
  // flipped to better match the input.
  RAnsBitEncoder flip_normal_bit_encoder_;
};
// Computes corrections between the geometric normal prediction and the input
// octahedral normals. For each entry, both the predicted direction and its
// negation are tried; the one yielding the smaller correction is kept and a
// flip bit recording the choice is emitted for the decoder.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalEncoder<DataTypeT, TransformT,
                                                MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex *entry_to_point_id_map) {
  this->SetQuantizationBits(this->transform().quantization_bits());
  predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
  DRACO_DCHECK(this->IsInitialized());
  // Expecting in_data in octahedral coordinates, i.e., portable attribute.
  DRACO_DCHECK_EQ(num_components, 2);

  flip_normal_bit_encoder_.StartEncoding();

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());

  VectorD<int32_t, 3> pred_normal_3d;
  VectorD<int32_t, 2> pos_pred_normal_oct;
  VectorD<int32_t, 2> neg_pred_normal_oct;
  VectorD<int32_t, 2> pos_correction;
  VectorD<int32_t, 2> neg_correction;
  for (int data_id = 0; data_id < corner_map_size; ++data_id) {
    const CornerIndex corner_id =
        this->mesh_data().data_to_corner_map()->at(data_id);
    predictor_.ComputePredictedValue(corner_id, pred_normal_3d.data());

    // Compute predicted octahedral coordinates.
    octahedron_tool_box_.CanonicalizeIntegerVector(pred_normal_3d.data());
    DRACO_DCHECK_EQ(pred_normal_3d.AbsSum(),
                    octahedron_tool_box_.center_value());

    // Compute octahedral coordinates for both possible directions.
    octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
        pred_normal_3d.data(), pos_pred_normal_oct.data(),
        pos_pred_normal_oct.data() + 1);
    pred_normal_3d = -pred_normal_3d;
    octahedron_tool_box_.IntegerVectorToQuantizedOctahedralCoords(
        pred_normal_3d.data(), neg_pred_normal_oct.data(),
        neg_pred_normal_oct.data() + 1);

    // Choose the one with the best correction value.
    const int data_offset = data_id * 2;
    this->transform().ComputeCorrection(in_data + data_offset,
                                        pos_pred_normal_oct.data(),
                                        pos_correction.data());
    this->transform().ComputeCorrection(in_data + data_offset,
                                        neg_pred_normal_oct.data(),
                                        neg_correction.data());
    // Wrap corrections into the octahedron's value range before comparing
    // their magnitudes.
    pos_correction[0] = octahedron_tool_box_.ModMax(pos_correction[0]);
    pos_correction[1] = octahedron_tool_box_.ModMax(pos_correction[1]);
    neg_correction[0] = octahedron_tool_box_.ModMax(neg_correction[0]);
    neg_correction[1] = octahedron_tool_box_.ModMax(neg_correction[1]);
    if (pos_correction.AbsSum() < neg_correction.AbsSum()) {
      // Un-flipped prediction wins; record a 0 flip bit.
      flip_normal_bit_encoder_.EncodeBit(false);
      (out_corr + data_offset)[0] =
          octahedron_tool_box_.MakePositive(pos_correction[0]);
      (out_corr + data_offset)[1] =
          octahedron_tool_box_.MakePositive(pos_correction[1]);
    } else {
      // Flipped prediction wins; record a 1 flip bit.
      flip_normal_bit_encoder_.EncodeBit(true);
      (out_corr + data_offset)[0] =
          octahedron_tool_box_.MakePositive(neg_correction[0]);
      (out_corr + data_offset)[1] =
          octahedron_tool_box_.MakePositive(neg_correction[1]);
    }
  }
  return true;
}
// Writes the transform data followed by the rans-coded normal flip bits.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeGeometricNormalEncoder<
    DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
                                                                *buffer) {
  // The transform data must precede the flip bits in the stream.
  if (!this->transform().EncodeTransformData(buffer)) {
    return false;
  }
  // Flush the flip bits gathered during ComputeCorrectionValues().
  flip_normal_bit_encoder_.EndEncoding(buffer);
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_ENCODER_H_

View File

@ -0,0 +1,110 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_predictor_base.h"
namespace draco {
// This predictor estimates the normal via the surrounding triangles of the
// given corner. Triangles are weighted according to their area.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeGeometricNormalPredictorArea
    : public MeshPredictionSchemeGeometricNormalPredictorBase<
          DataTypeT, TransformT, MeshDataT> {
  typedef MeshPredictionSchemeGeometricNormalPredictorBase<
      DataTypeT, TransformT, MeshDataT>
      Base;

 public:
  explicit MeshPredictionSchemeGeometricNormalPredictorArea(const MeshDataT &md)
      : Base(md) {
    this->SetNormalPredictionMode(TRIANGLE_AREA);
  }
  virtual ~MeshPredictionSchemeGeometricNormalPredictorArea() {}

  // Computes predicted octahedral coordinates on a given corner by summing
  // the (area-weighted) cross products of the triangles around the corner's
  // vertex and clamping the result into a 32-bit-safe range.
  void ComputePredictedValue(CornerIndex corner_id,
                             DataTypeT *prediction) override {
    DRACO_DCHECK(this->IsInitialized());
    typedef typename MeshDataT::CornerTable CornerTable;
    const CornerTable *const corner_table = this->mesh_data_.corner_table();
    // Going to compute the predicted normal from the surrounding triangles
    // according to the connectivity of the given corner table.
    VertexCornersIterator<CornerTable> cit(corner_table, corner_id);
    // Position of central vertex does not change in loop.
    const VectorD<int64_t, 3> pos_cent = this->GetPositionForCorner(corner_id);
    // Computing normals for triangles and adding them up. Zero-initialize the
    // accumulator explicitly so correctness does not depend on VectorD's
    // default constructor initializing its components.
    VectorD<int64_t, 3> normal(0, 0, 0);
    CornerIndex c_next, c_prev;
    while (!cit.End()) {
      // Getting corners.
      if (this->normal_prediction_mode_ == ONE_TRIANGLE) {
        c_next = corner_table->Next(corner_id);
        c_prev = corner_table->Previous(corner_id);
      } else {
        c_next = corner_table->Next(cit.Corner());
        c_prev = corner_table->Previous(cit.Corner());
      }
      const VectorD<int64_t, 3> pos_next = this->GetPositionForCorner(c_next);
      const VectorD<int64_t, 3> pos_prev = this->GetPositionForCorner(c_prev);

      // Computing delta vectors to next and prev.
      const VectorD<int64_t, 3> delta_next = pos_next - pos_cent;
      const VectorD<int64_t, 3> delta_prev = pos_prev - pos_cent;

      // Computing cross product; its magnitude is proportional to the
      // triangle's area, which provides the area weighting.
      const VectorD<int64_t, 3> cross = CrossProduct(delta_next, delta_prev);
      normal = normal + cross;
      cit.Next();
    }

    // Convert to int32_t, make sure entries are not too large.
    constexpr int64_t upper_bound = 1 << 29;
    if (this->normal_prediction_mode_ == ONE_TRIANGLE) {
      // NOTE(review): the cast assumes the single-triangle sum fits in 32
      // bits — confirm the position inputs are bounded accordingly.
      const int32_t abs_sum = static_cast<int32_t>(normal.AbsSum());
      if (abs_sum > upper_bound) {
        const int64_t quotient = abs_sum / upper_bound;
        normal = normal / quotient;
      }
    } else {
      const int64_t abs_sum = normal.AbsSum();
      if (abs_sum > upper_bound) {
        const int64_t quotient = abs_sum / upper_bound;
        normal = normal / quotient;
      }
    }
    DRACO_DCHECK_LE(normal.AbsSum(), upper_bound);
    prediction[0] = static_cast<int32_t>(normal[0]);
    prediction[1] = static_cast<int32_t>(normal[1]);
    prediction[2] = static_cast<int32_t>(normal[2]);
  }

  // Accepts only the ONE_TRIANGLE and TRIANGLE_AREA modes; any other mode is
  // rejected and leaves the current mode unchanged.
  bool SetNormalPredictionMode(NormalPredictionMode mode) override {
    if (mode == ONE_TRIANGLE) {
      this->normal_prediction_mode_ = mode;
      return true;
    } else if (mode == TRIANGLE_AREA) {
      this->normal_prediction_mode_ = mode;
      return true;
    }
    return false;
  }
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_AREA_H_

View File

@ -0,0 +1,94 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_
#include <math.h>
#include "draco/attributes/point_attribute.h"
#include "draco/compression/attributes/normal_compression_utils.h"
#include "draco/compression/config/compression_shared.h"
#include "draco/core/math_utils.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"
#include "draco/mesh/corner_table_iterators.h"
namespace draco {
// Base class for geometric normal predictors using position attribute.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeGeometricNormalPredictorBase {
protected:
explicit MeshPredictionSchemeGeometricNormalPredictorBase(const MeshDataT &md)
: pos_attribute_(nullptr),
entry_to_point_id_map_(nullptr),
mesh_data_(md) {}
virtual ~MeshPredictionSchemeGeometricNormalPredictorBase() {}
public:
void SetPositionAttribute(const PointAttribute &position_attribute) {
pos_attribute_ = &position_attribute;
}
void SetEntryToPointIdMap(const PointIndex *map) {
entry_to_point_id_map_ = map;
}
bool IsInitialized() const {
if (pos_attribute_ == nullptr)
return false;
if (entry_to_point_id_map_ == nullptr)
return false;
return true;
}
virtual bool SetNormalPredictionMode(NormalPredictionMode mode) = 0;
virtual NormalPredictionMode GetNormalPredictionMode() const {
return normal_prediction_mode_;
}
protected:
VectorD<int64_t, 3> GetPositionForDataId(int data_id) const {
DRACO_DCHECK(this->IsInitialized());
const auto point_id = entry_to_point_id_map_[data_id];
const auto pos_val_id = pos_attribute_->mapped_index(point_id);
VectorD<int64_t, 3> pos;
pos_attribute_->ConvertValue(pos_val_id, &pos[0]);
return pos;
}
VectorD<int64_t, 3> GetPositionForCorner(CornerIndex ci) const {
DRACO_DCHECK(this->IsInitialized());
const auto corner_table = mesh_data_.corner_table();
const auto vert_id = corner_table->Vertex(ci).value();
const auto data_id = mesh_data_.vertex_to_data_map()->at(vert_id);
return GetPositionForDataId(data_id);
}
VectorD<int32_t, 2> GetOctahedralCoordForDataId(int data_id,
const DataTypeT *data) const {
DRACO_DCHECK(this->IsInitialized());
const int data_offset = data_id * 2;
return VectorD<int32_t, 2>(data[data_offset], data[data_offset + 1]);
}
// Computes predicted octahedral coordinates on a given corner.
virtual void ComputePredictedValue(CornerIndex corner_id,
DataTypeT *prediction) = 0;
const PointAttribute *pos_attribute_;
const PointIndex *entry_to_point_id_map_;
MeshDataT mesh_data_;
NormalPredictionMode normal_prediction_mode_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_GEOMETRIC_NORMAL_PREDICTOR_BASE_H_

View File

@ -0,0 +1,127 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
#include "draco/draco_features.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
namespace draco {
// Decoder for predictions encoded by multi-parallelogram encoding scheme.
// See the corresponding encoder for method description.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeMultiParallelogramDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeMultiParallelogramDecoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute) {}
  MeshPredictionSchemeMultiParallelogramDecoder(const PointAttribute *attribute,
                                                const TransformT &transform,
                                                const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data) {}

  // Reconstructs the original attribute values into |out_data| from the
  // decoded corrections |in_corr| (definition follows out of line).
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_MULTI_PARALLELOGRAM;
  }
  // Ready as soon as the underlying mesh data (corner table + maps) is set.
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }
};
// Reconstructs original attribute values by averaging the predictions of all
// valid parallelograms around each vertex and applying the stored correction.
// Must mirror the encoder's traversal and integer arithmetic exactly.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeMultiParallelogramDecoder<DataTypeT, TransformT,
                                                   MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(num_components);

  // For storage of prediction values (already initialized to zero).
  std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
  std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
      new DataTypeT[num_components]());

  // The first entry has no predecessors: restore it from its correction and
  // an all-zero prediction.
  this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data);

  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 1; p < corner_map_size; ++p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    for (int i = 0; i < num_components; ++i) {
      pred_vals[i] = static_cast<DataTypeT>(0);
    }
    // Accumulate predictions from every valid parallelogram attached to the
    // vertex (one candidate per incident corner).
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, out_data,
              num_components, parallelogram_pred_vals.get())) {
        for (int c = 0; c < num_components; ++c) {
          pred_vals[c] += parallelogram_pred_vals[c];
        }
        ++num_parallelograms;
      }

      // Proceed to the next corner attached to the vertex.
      corner_id = table->SwingRight(corner_id);
      if (corner_id == start_corner_id) {
        corner_id = kInvalidCornerIndex;
      }
    }

    const int dst_offset = p * num_components;
    if (num_parallelograms == 0) {
      // No parallelogram was valid.
      // We use the last decoded point as a reference.
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeOriginalValue(
          out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
    } else {
      // Compute the correction from the predicted value.
      // Truncating integer division — bitstream-defined; must match encoder.
      for (int c = 0; c < num_components; ++c) {
        pred_vals[c] /= num_parallelograms;
      }
      this->transform().ComputeOriginalValue(
          pred_vals.get(), in_corr + dst_offset, out_data + dst_offset);
    }
  }
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_DECODER_H_
#endif

View File

@ -0,0 +1,133 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
namespace draco {
// Multi parallelogram prediction predicts attribute values using information
// from all opposite faces to the predicted vertex, compared to the standard
// prediction scheme, where only one opposite face is used (see
// prediction_scheme_parallelogram.h). This approach is generally slower than
// the standard parallelogram prediction, but it usually results in better
// prediction (5 - 20% based on the quantization level. Better gains can be
// achieved when more aggressive quantization is used).
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeMultiParallelogramEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeMultiParallelogramEncoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute) {}
  MeshPredictionSchemeMultiParallelogramEncoder(const PointAttribute *attribute,
                                                const TransformT &transform,
                                                const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data) {}

  // Computes the corrections |out_corr| for |in_data| using the averaged
  // multi-parallelogram prediction (definition follows out of line).
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_MULTI_PARALLELOGRAM;
  }
  // Ready as soon as the underlying mesh data (corner table + maps) is set.
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }
};
// Computes one correction per attribute entry: the averaged prediction of all
// valid parallelograms around the entry's vertex is subtracted (via the
// transform) from the actual value. Traversal order and integer division must
// match the decoder exactly.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeMultiParallelogramEncoder<DataTypeT, TransformT,
                                                   MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(in_data, size, num_components);
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();

  // For storage of prediction values (already initialized to zero).
  std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
  std::unique_ptr<DataTypeT[]> parallelogram_pred_vals(
      new DataTypeT[num_components]());

  // We start processing from the end because this prediction uses data from
  // previous entries that could be overwritten when an entry is processed.
  for (int p =
           static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
       p > 0; --p) {
    const CornerIndex start_corner_id =
        this->mesh_data().data_to_corner_map()->at(p);

    // Go over all corners attached to the vertex and compute the predicted
    // value from the parallelograms defined by their opposite faces.
    CornerIndex corner_id(start_corner_id);
    int num_parallelograms = 0;
    for (int i = 0; i < num_components; ++i) {
      pred_vals[i] = static_cast<DataTypeT>(0);
    }
    while (corner_id != kInvalidCornerIndex) {
      if (ComputeParallelogramPrediction(
              p, corner_id, table, *vertex_to_data_map, in_data, num_components,
              parallelogram_pred_vals.get())) {
        for (int c = 0; c < num_components; ++c) {
          pred_vals[c] += parallelogram_pred_vals[c];
        }
        ++num_parallelograms;
      }

      // Proceed to the next corner attached to the vertex.
      corner_id = table->SwingRight(corner_id);
      if (corner_id == start_corner_id) {
        corner_id = kInvalidCornerIndex;
      }
    }
    const int dst_offset = p * num_components;
    if (num_parallelograms == 0) {
      // No parallelogram was valid.
      // We use the last encoded point as a reference.
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeCorrection(
          in_data + dst_offset, in_data + src_offset, out_corr + dst_offset);
    } else {
      // Compute the correction from the predicted value.
      // Truncating integer division — bitstream-defined; must match decoder.
      for (int c = 0; c < num_components; ++c) {
        pred_vals[c] /= num_parallelograms;
      }
      this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
                                          out_corr + dst_offset);
    }
  }
  // First element is always fixed because it cannot be predicted.
  for (int i = 0; i < num_components; ++i) {
    pred_vals[i] = static_cast<DataTypeT>(0);
  }
  this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr);
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_MULTI_PARALLELOGRAM_ENCODER_H_

View File

@ -0,0 +1,98 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
namespace draco {
// Decoder for attribute values encoded with the standard parallelogram
// prediction. See the description of the corresponding encoder for more
// details.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeParallelogramDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeParallelogramDecoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute) {}
  MeshPredictionSchemeParallelogramDecoder(const PointAttribute *attribute,
                                           const TransformT &transform,
                                           const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data) {}

  // Reconstructs original attribute values from the decoded corrections
  // |in_corr| (definition follows out of line).
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_PARALLELOGRAM;
  }
  // Ready as soon as the underlying mesh data (corner table + maps) is set.
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }
};
// Reconstructs original attribute values using the single-parallelogram
// prediction; falls back to delta coding from the previous entry when the
// parallelogram cannot be formed. Must mirror the encoder exactly.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeParallelogramDecoder<DataTypeT, TransformT,
                                              MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(num_components);
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();

  // For storage of prediction values (already initialized to zero).
  std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());

  // Restore the first value.
  this->transform().ComputeOriginalValue(pred_vals.get(), in_corr, out_data);

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 1; p < corner_map_size; ++p) {
    const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
    const int dst_offset = p * num_components;
    if (!ComputeParallelogramPrediction(p, corner_id, table,
                                        *vertex_to_data_map, out_data,
                                        num_components, pred_vals.get())) {
      // Parallelogram could not be computed, Possible because some of the
      // vertices are not valid (not encoded yet).
      // We use the last encoded point as a reference (delta coding).
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeOriginalValue(
          out_data + src_offset, in_corr + dst_offset, out_data + dst_offset);
    } else {
      // Apply the parallelogram prediction.
      this->transform().ComputeOriginalValue(
          pred_vals.get(), in_corr + dst_offset, out_data + dst_offset);
    }
  }
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_DECODER_H_

View File

@ -0,0 +1,111 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_shared.h"
namespace draco {
// Parallelogram prediction predicts an attribute value V from three vertices
// on the opposite face to the predicted vertex. The values on the three
// vertices are used to construct a parallelogram V' = O - A - B, where O is the
// value on the opposite vertex, and A, B are values on the shared vertices:
// V
// / \
// / \
// / \
// A-------B
// \ /
// \ /
// \ /
// O
//
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeParallelogramEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType =
      typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
  using CornerTable = typename MeshDataT::CornerTable;

  explicit MeshPredictionSchemeParallelogramEncoder(
      const PointAttribute *attribute)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute) {}
  MeshPredictionSchemeParallelogramEncoder(const PointAttribute *attribute,
                                           const TransformT &transform,
                                           const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data) {}

  // Computes the corrections |out_corr| for |in_data| using the standard
  // single-parallelogram prediction (definition follows out of line).
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_PARALLELOGRAM;
  }
  // Ready as soon as the underlying mesh data (corner table + maps) is set.
  bool IsInitialized() const override {
    return this->mesh_data().IsInitialized();
  }
};
// Computes one correction per attribute entry using the single-parallelogram
// rule V' = Next + Prev - Opposite; falls back to delta coding from the
// previous entry when the parallelogram cannot be formed. Traversal order
// must match the decoder exactly.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeParallelogramEncoder<DataTypeT, TransformT,
                                              MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex * /* entry_to_point_id_map */) {
  this->transform().Init(in_data, size, num_components);
  // For storage of prediction values (already initialized to zero).
  std::unique_ptr<DataTypeT[]> pred_vals(new DataTypeT[num_components]());
  // We start processing from the end because this prediction uses data from
  // previous entries that could be overwritten when an entry is processed.
  const CornerTable *const table = this->mesh_data().corner_table();
  const std::vector<int32_t> *const vertex_to_data_map =
      this->mesh_data().vertex_to_data_map();
  for (int p =
           static_cast<int>(this->mesh_data().data_to_corner_map()->size() - 1);
       p > 0; --p) {
    const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
    const int dst_offset = p * num_components;
    if (!ComputeParallelogramPrediction(p, corner_id, table,
                                        *vertex_to_data_map, in_data,
                                        num_components, pred_vals.get())) {
      // Parallelogram could not be computed, Possible because some of the
      // vertices are not valid (not encoded yet).
      // We use the last encoded point as a reference (delta coding).
      const int src_offset = (p - 1) * num_components;
      this->transform().ComputeCorrection(
          in_data + dst_offset, in_data + src_offset, out_corr + dst_offset);
    } else {
      // Apply the parallelogram prediction.
      this->transform().ComputeCorrection(in_data + dst_offset, pred_vals.get(),
                                          out_corr + dst_offset);
    }
  }
  // First element is always fixed because it cannot be predicted.
  for (int i = 0; i < num_components; ++i) {
    pred_vals[i] = static_cast<DataTypeT>(0);
  }
  this->transform().ComputeCorrection(in_data, pred_vals.get(), out_corr);
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_ENCODER_H_

View File

@ -0,0 +1,72 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Shared functionality for different parallelogram prediction schemes.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_
#include "draco/mesh/corner_table.h"
#include "draco/mesh/mesh.h"
namespace draco {
// TODO(draco-eng) consolidate Vertex/next/previous queries to one call
// (performance).
// Looks up the attribute-entry ids of the three parallelogram vertices for
// corner |ci|: the corner's own vertex (the "opposite" point of the
// parallelogram) plus the next and previous vertices of the same face.
template <class CornerTableT>
inline void GetParallelogramEntries(
    const CornerIndex ci, const CornerTableT *table,
    const std::vector<int32_t> &vertex_to_data_map, int *opp_entry,
    int *next_entry, int *prev_entry) {
  // Each vertex of |table| corresponds to exactly one attribute value entry;
  // |table| may be a CornerTable (per-vertex attributes) or a
  // MeshAttributeCornerTable (attributes with interior seams).
  const int32_t opp_vert = table->Vertex(ci).value();
  const int32_t next_vert = table->Vertex(table->Next(ci)).value();
  const int32_t prev_vert = table->Vertex(table->Previous(ci)).value();
  *opp_entry = vertex_to_data_map[opp_vert];
  *next_entry = vertex_to_data_map[next_vert];
  *prev_entry = vertex_to_data_map[prev_vert];
}
// Computes parallelogram prediction for a given corner and data entry id.
// The prediction is stored in |out_prediction|.
// Function returns false when the prediction couldn't be computed, e.g. because
// not all entry points were available.
// Computes the parallelogram prediction for entry |data_entry_id| from the
// face opposite to corner |ci|, writing it to |out_prediction|. Returns
// false when the prediction cannot be formed — either no opposite corner
// exists, or one of the required entries has not been processed yet.
template <class CornerTableT, typename DataTypeT>
inline bool ComputeParallelogramPrediction(
    int data_entry_id, const CornerIndex ci, const CornerTableT *table,
    const std::vector<int32_t> &vertex_to_data_map, const DataTypeT *in_data,
    int num_components, DataTypeT *out_prediction) {
  const CornerIndex oci = table->Opposite(ci);
  if (oci == kInvalidCornerIndex)
    return false;
  int vert_opp, vert_next, vert_prev;
  GetParallelogramEntries<CornerTableT>(oci, table, vertex_to_data_map,
                                        &vert_opp, &vert_next, &vert_prev);
  // All three entries must precede |data_entry_id| in processing order,
  // otherwise their values are not available yet.
  if (vert_opp >= data_entry_id || vert_next >= data_entry_id ||
      vert_prev >= data_entry_id) {
    return false;  // Not all data is available for prediction
  }
  // Parallelogram rule: V = Next + Prev - Opposite, per component.
  const int opp_off = vert_opp * num_components;
  const int next_off = vert_next * num_components;
  const int prev_off = vert_prev * num_components;
  for (int c = 0; c < num_components; ++c) {
    out_prediction[c] =
        (in_data[next_off + c] + in_data[prev_off + c]) - in_data[opp_off + c];
  }
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_PARALLELOGRAM_SHARED_H_

View File

@ -0,0 +1,335 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
#include <math.h>
#include "draco/draco_features.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
#include "draco/core/varint_decoding.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"
namespace draco {
// Decoder for predictions of UV coordinates encoded by our specialized texture
// coordinate predictor. See the corresponding encoder for more details. Note
// that this predictor is not portable and should not be used anymore. See
// MeshPredictionSchemeTexCoordsPortableEncoder/Decoder for a portable version
// of this prediction scheme.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  // |version| selects the bitstream variant being decoded.
  MeshPredictionSchemeTexCoordsDecoder(const PointAttribute *attribute,
                                       const TransformT &transform,
                                       const MeshDataT &mesh_data, int version)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        num_components_(0),
        version_(version) {}

  // Reconstructs UV values from corrections; requires the parent position
  // attribute to be set first (definitions follow out of line).
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  // Decodes the orientation (UV flip) bits preceding the correction data.
  bool DecodePredictionData(DecoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
  }
  bool IsInitialized() const override {
    if (pos_attribute_ == nullptr)
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    return true;
  }
  // This scheme predicts UVs from positions, so it has one parent attribute.
  int GetNumParentAttributes() const override { return 1; }
  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }
  bool SetParentAttribute(const PointAttribute *att) override {
    if (att == nullptr)
      return false;
    if (att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    pos_attribute_ = att;
    return true;
  }

 protected:
  // Returns the position of the point mapped to attribute entry |entry_id|,
  // converted to floats.
  Vector3f GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    Vector3f pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }
  // Reads the already-decoded UV pair for |entry_id| from the raw |data|
  // buffer as floats.
  Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
    const int data_offset = entry_id * num_components_;
    return Vector2f(static_cast<float>(data[data_offset]),
                    static_cast<float>(data[data_offset + 1]));
  }
  void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

 private:
  const PointAttribute *pos_attribute_;
  const PointIndex *entry_to_point_id_map_;
  // Scratch buffer holding the prediction for the entry being decoded.
  std::unique_ptr<DataTypeT[]> predicted_value_;
  int num_components_;
  // Encoded / decoded array of UV flips.
  std::vector<bool> orientations_;
  int version_;
};
// Reconstructs the original UV values: for each entry (in corner-map order)
// a prediction is computed from already-decoded data and combined with the
// stored correction via the transform.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                          int /* size */, int num_components,
                          const PointIndex *entry_to_point_id_map) {
  num_components_ = num_components;
  entry_to_point_id_map_ = entry_to_point_id_map;
  predicted_value_ =
      std::unique_ptr<DataTypeT[]>(new DataTypeT[num_components]);
  this->transform().Init(num_components);

  const int corner_map_size =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int p = 0; p < corner_map_size; ++p) {
    const CornerIndex corner_id = this->mesh_data().data_to_corner_map()->at(p);
    // Fills |predicted_value_| for entry |p| (reads previously decoded
    // entries of |out_data|).
    ComputePredictedValue(corner_id, out_data, p);

    const int dst_offset = p * num_components;
    this->transform().ComputeOriginalValue(
        predicted_value_.get(), in_corr + dst_offset, out_data + dst_offset);
  }
  return true;
}
// Decodes the per-entry UV orientation (flip) bits from |buffer|, then
// forwards to the base class to decode any remaining prediction data.
// Returns false on malformed input (zero count or decode failure).
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    DecodePredictionData(DecoderBuffer *buffer) {
  // Decode the delta coded orientations.
  // Older bitstreams store the count as a fixed-width integer; newer ones
  // (>= 2.2) use a varint.
  uint32_t num_orientations = 0;
  if (buffer->bitstream_version() < DRACO_BITSTREAM_VERSION(2, 2)) {
    if (!buffer->Decode(&num_orientations))
      return false;
  } else {
    if (!DecodeVarint(&num_orientations, buffer))
      return false;
  }
  if (num_orientations == 0)
    return false;
  orientations_.resize(num_orientations);
  // Orientations are delta coded: each decoded bit toggles the previous
  // orientation, starting from true.
  bool last_orientation = true;
  RAnsBitDecoder decoder;
  if (!decoder.StartDecoding(buffer))
    return false;
  for (uint32_t i = 0; i < num_orientations; ++i) {
    if (!decoder.DecodeNextBit())
      last_orientation = !last_orientation;
    orientations_[i] = last_orientation;
  }
  decoder.EndDecoding();
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}
// Predicts the UV value for |corner_id| into |predicted_value_|, using the
// vertex positions of the triangle when both neighboring corners already have
// decoded UVs, and falling back to delta coding otherwise. Consumes one entry
// from the back of |orientations_| when the geometric prediction is used.
template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;
  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();
  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);
  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerated UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }
    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture coordinate
    // on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // bases vectors S, T, where S is vector prev_pos - next_pos and T is an
    // perpendicular vector to S in the same plane as vector the
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /   \
    //          | /       \
    //          |/           \
    //          N--------------P
    //          0              1
    //
    // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is
    // at (1, 0). Our goal is to compute the position of the tip_pos point (C)
    // in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is simply the dot product of the
    // normalized vectors |pn| and |cn| (normalized by the length of |pn|).
    // Since both of these vectors are normalized, we don't need to perform the
    // normalization explicitly and instead we can just use the squared norm
    // of |pn| as a denominator of the resulting dot product of non normalized
    // vectors.
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
    // the same positions (e.g. because they were quantized to the same
    // location).
    // Pre-1.2 bitstreams always took the division path (possibly dividing by
    // zero); the version check preserves that legacy behavior for old data.
    if (version_ < DRACO_BITSTREAM_VERSION(1, 2) || pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }
    // Now we need to transform the point (s, t) to the texture coordinate space
    // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
    // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can
    // be used to define transformation from the normalized coordinate system
    // to the texture coordinate system using a 3x3 affine matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t)
    // as the prediction.
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;
    // When decoding the data, we already know which orientation to use.
    // NOTE(review): |orientations_| is popped without an emptiness check; a
    // malformed stream that signals fewer orientations than geometric
    // predictions would underflow here — TODO confirm upstream validation.
    const bool orientation = orientations_.back();
    orientations_.pop_back();
    if (orientation)
      predicted_uv = Vector2f(pnus - pnvt, pnvs + pnut);
    else
      predicted_uv = Vector2f(pnus + pnvt, pnvs - pnut);
    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types.
      // A NaN prediction (e.g. from the legacy divide-by-zero path above) is
      // clamped to INT_MIN instead of invoking undefined float-to-int
      // conversion.
      if (std::isnan(predicted_uv[0])) {
        predicted_value_[0] = INT_MIN;
      } else {
        predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      }
      if (std::isnan(predicted_uv[1])) {
        predicted_value_[1] = INT_MIN;
      } else {
        predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
      }
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Else we don't have available textures on both corners. For such case we
  // can't use positions for predicting the uv value and we resort to delta
  // coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_DECODER_H_
#endif

View File

@ -0,0 +1,313 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_
#include <math.h>
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
#include "draco/core/varint_encoding.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"
namespace draco {
// Prediction scheme designed for predicting texture coordinates from known
// spatial position of vertices. For good parametrization, the ratios between
// triangle edge lengths should be about the same in both the spatial and UV
// coordinate spaces, which makes the positions a good predictor for the UV
// coordinates.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsEncoder(const PointAttribute *attribute,
                                       const TransformT &transform,
                                       const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        num_components_(0) {}
  // Computes per-entry prediction corrections; defined out of class below.
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;
  // Writes the orientation bits gathered during ComputeCorrectionValues().
  bool EncodePredictionData(EncoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_DEPRECATED;
  }
  // The scheme is usable once a position attribute was attached and the mesh
  // data is valid.
  bool IsInitialized() const override {
    if (pos_attribute_ == nullptr)
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    return true;
  }
  // This scheme requires exactly one parent attribute: the vertex positions.
  int GetNumParentAttributes() const override { return 1; }
  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }
  bool SetParentAttribute(const PointAttribute *att) override {
    // Guard against a null attribute before dereferencing it (consistent with
    // the portable tex coords decoder's SetParentAttribute()).
    if (att == nullptr || att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    pos_attribute_ = att;
    return true;
  }

 protected:
  // Returns the (float-converted) position of the point mapped to |entry_id|.
  Vector3f GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    Vector3f pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }
  // Returns the two UV components stored for |entry_id| in |data|.
  Vector2f GetTexCoordForEntryId(int entry_id, const DataTypeT *data) const {
    const int data_offset = entry_id * num_components_;
    return Vector2f(static_cast<float>(data[data_offset]),
                    static_cast<float>(data[data_offset + 1]));
  }
  // Predicts the UV value on |corner_id| into |predicted_value_| and records
  // the chosen orientation.
  void ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);

 private:
  // Parent position attribute; set via SetParentAttribute().
  const PointAttribute *pos_attribute_;
  // Maps prediction-entry ids to point ids; owned by the caller.
  const PointIndex *entry_to_point_id_map_;
  // Scratch buffer for the most recent prediction (num_components_ entries).
  std::unique_ptr<DataTypeT[]> predicted_value_;
  int num_components_;
  // Encoded / decoded array of UV flips.
  std::vector<bool> orientations_;
};
// Computes the correction values for all UV entries. Entries are visited
// back-to-front because each prediction reads earlier data entries, which
// must still hold their original (unprocessed) values.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex *entry_to_point_id_map) {
  num_components_ = num_components;
  entry_to_point_id_map_ = entry_to_point_id_map;
  predicted_value_.reset(new DataTypeT[num_components]);
  this->transform().Init(in_data, size, num_components);
  const int num_entries =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int i = num_entries - 1; i >= 0; --i) {
    const CornerIndex corner = this->mesh_data().data_to_corner_map()->at(i);
    ComputePredictedValue(corner, in_data, i);
    const int offset = i * num_components;
    this->transform().ComputeCorrection(
        in_data + offset, predicted_value_.get(), out_corr + offset);
  }
  return true;
}
// Serializes the orientation flags (delta coded, rANS compressed) and then
// lets the base class encode the transform's own prediction data.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    EncodePredictionData(EncoderBuffer *buffer) {
  // Encode the delta-coded orientations using arithmetic coding.
  const uint32_t orientation_count =
      static_cast<uint32_t>(orientations_.size());
  EncodeVarint(orientation_count, buffer);
  RAnsBitEncoder bit_encoder;
  bit_encoder.StartEncoding();
  bool previous = true;
  for (const bool current : orientations_) {
    // A set bit means "same orientation as the previous entry".
    bit_encoder.EncodeBit(current == previous);
    previous = current;
  }
  bit_encoder.EndEncoding(buffer);
  return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                     MeshDataT>::EncodePredictionData(buffer);
}
// Predicts the UV value for |corner_id| into |predicted_value_|, using the
// triangle's vertex positions when both neighboring corners already have
// encoded UVs (appending the chosen flip to |orientations_|), and falling back
// to delta coding otherwise.
template <typename DataTypeT, class TransformT, class MeshDataT>
void MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT, MeshDataT>::
    ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                          int data_id) {
  // Compute the predicted UV coordinate from the positions on all corners
  // of the processed triangle. For the best prediction, the UV coordinates
  // on the next/previous corners need to be already encoded/decoded.
  const CornerIndex next_corner_id =
      this->mesh_data().corner_table()->Next(corner_id);
  const CornerIndex prev_corner_id =
      this->mesh_data().corner_table()->Previous(corner_id);
  // Get the encoded data ids from the next and previous corners.
  // The data id is the encoding order of the UV coordinates.
  int next_data_id, prev_data_id;
  int next_vert_id, prev_vert_id;
  next_vert_id =
      this->mesh_data().corner_table()->Vertex(next_corner_id).value();
  prev_vert_id =
      this->mesh_data().corner_table()->Vertex(prev_corner_id).value();
  next_data_id = this->mesh_data().vertex_to_data_map()->at(next_vert_id);
  prev_data_id = this->mesh_data().vertex_to_data_map()->at(prev_vert_id);
  if (prev_data_id < data_id && next_data_id < data_id) {
    // Both other corners have available UV coordinates for prediction.
    const Vector2f n_uv = GetTexCoordForEntryId(next_data_id, data);
    const Vector2f p_uv = GetTexCoordForEntryId(prev_data_id, data);
    if (p_uv == n_uv) {
      // We cannot do a reliable prediction on degenerated UV triangles.
      predicted_value_[0] = static_cast<int>(p_uv[0]);
      predicted_value_[1] = static_cast<int>(p_uv[1]);
      return;
    }
    // Get positions at all corners.
    const Vector3f tip_pos = GetPositionForEntryId(data_id);
    const Vector3f next_pos = GetPositionForEntryId(next_data_id);
    const Vector3f prev_pos = GetPositionForEntryId(prev_data_id);
    // Use the positions of the above triangle to predict the texture coordinate
    // on the tip corner C.
    // Convert the triangle into a new coordinate system defined by orthogonal
    // bases vectors S, T, where S is vector prev_pos - next_pos and T is an
    // perpendicular vector to S in the same plane as vector the
    // tip_pos - next_pos.
    // The transformed triangle in the new coordinate system is then going to
    // be represented as:
    //
    //        1 ^
    //          |
    //          |
    //          |   C
    //          |  /   \
    //          | /       \
    //          |/           \
    //          N--------------P
    //          0              1
    //
    // Where next_pos point (N) is at position (0, 0), prev_pos point (P) is
    // at (1, 0). Our goal is to compute the position of the tip_pos point (C)
    // in this new coordinate space (s, t).
    //
    const Vector3f pn = prev_pos - next_pos;
    const Vector3f cn = tip_pos - next_pos;
    const float pn_norm2_squared = pn.SquaredNorm();
    // Coordinate s of the tip corner C is simply the dot product of the
    // normalized vectors |pn| and |cn| (normalized by the length of |pn|).
    // Since both of these vectors are normalized, we don't need to perform the
    // normalization explicitly and instead we can just use the squared norm
    // of |pn| as a denominator of the resulting dot product of non normalized
    // vectors.
    float s, t;
    // |pn_norm2_squared| can be exactly 0 when the next_pos and prev_pos are
    // the same positions (e.g. because they were quantized to the same
    // location).
    if (pn_norm2_squared > 0) {
      s = pn.Dot(cn) / pn_norm2_squared;
      // To get the coordinate t, we can use formula:
      //      t = |C-N - (P-N) * s| / |P-N|
      // Do not use std::sqrt to avoid changes in the bitstream.
      t = sqrt((cn - pn * s).SquaredNorm() / pn_norm2_squared);
    } else {
      s = 0;
      t = 0;
    }
    // Now we need to transform the point (s, t) to the texture coordinate space
    // UV. We know the UV coordinates on points N and P (N_UV and P_UV). Lets
    // denote P_UV - N_UV = PN_UV. PN_UV is then 2 dimensional vector that can
    // be used to define transformation from the normalized coordinate system
    // to the texture coordinate system using a 3x3 affine matrix M:
    //
    //  M = | PN_UV[0]  -PN_UV[1]  N_UV[0] |
    //      | PN_UV[1]   PN_UV[0]  N_UV[1] |
    //      | 0          0         1       |
    //
    // The predicted point C_UV in the texture space is then equal to
    // C_UV = M * (s, t, 1). Because the triangle in UV space may be flipped
    // around the PN_UV axis, we also need to consider point C_UV' = M * (s, -t)
    // as the prediction.
    const Vector2f pn_uv = p_uv - n_uv;
    const float pnus = pn_uv[0] * s + n_uv[0];
    const float pnut = pn_uv[0] * t;
    const float pnvs = pn_uv[1] * s + n_uv[1];
    const float pnvt = pn_uv[1] * t;
    Vector2f predicted_uv;
    // When encoding compute both possible vectors and determine which one
    // results in a better prediction.
    const Vector2f predicted_uv_0(pnus - pnvt, pnvs + pnut);
    const Vector2f predicted_uv_1(pnus + pnvt, pnvs - pnut);
    const Vector2f c_uv = GetTexCoordForEntryId(data_id, data);
    if ((c_uv - predicted_uv_0).SquaredNorm() <
        (c_uv - predicted_uv_1).SquaredNorm()) {
      predicted_uv = predicted_uv_0;
      orientations_.push_back(true);
    } else {
      predicted_uv = predicted_uv_1;
      orientations_.push_back(false);
    }
    if (std::is_integral<DataTypeT>::value) {
      // Round the predicted value for integer types.
      // NOTE(review): unlike the matching decoder, there is no NaN check here
      // before the float-to-int conversion — TODO confirm this asymmetry is
      // intended for the deprecated bitstream.
      predicted_value_[0] = static_cast<int>(floor(predicted_uv[0] + 0.5));
      predicted_value_[1] = static_cast<int>(floor(predicted_uv[1] + 0.5));
    } else {
      predicted_value_[0] = static_cast<int>(predicted_uv[0]);
      predicted_value_[1] = static_cast<int>(predicted_uv[1]);
    }
    return;
  }
  // Else we don't have available textures on both corners. For such case we
  // can't use positions for predicting the uv value and we resort to delta
  // coding.
  int data_offset = 0;
  if (prev_data_id < data_id) {
    // Use the value on the previous corner as the prediction.
    data_offset = prev_data_id * num_components_;
  }
  if (next_data_id < data_id) {
    // Use the value on the next corner as the prediction.
    data_offset = next_data_id * num_components_;
  } else {
    // None of the other corners have a valid value. Use the last encoded value
    // as the prediction if possible.
    if (data_id > 0) {
      data_offset = (data_id - 1) * num_components_;
    } else {
      // We are encoding the first value. Predict 0.
      for (int i = 0; i < num_components_; ++i) {
        predicted_value_[i] = 0;
      }
      return;
    }
  }
  for (int i = 0; i < num_components_; ++i) {
    predicted_value_[i] = data[data_offset + i];
  }
}
} // namespace draco
#endif  // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_ENCODER_H_

View File

@ -0,0 +1,131 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
#include "draco/compression/bit_coders/rans_bit_decoder.h"
namespace draco {
// Decoder for predictions of UV coordinates encoded by our specialized and
// portable texture coordinate predictor. See the corresponding encoder for more
// details.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortableDecoder
    : public MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsPortableDecoder(const PointAttribute *attribute,
                                               const TransformT &transform,
                                               const MeshDataT &mesh_data)
      : MeshPredictionSchemeDecoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}
  // Reconstructs original UV values from the corrections; defined out of
  // class below.
  bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
                             int size, int num_components,
                             const PointIndex *entry_to_point_id_map) override;
  // Reads the orientation bits consumed by the shared predictor.
  bool DecodePredictionData(DecoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_PORTABLE;
  }
  // Usable once the predictor has a position attribute and the mesh data is
  // valid.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized())
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    return true;
  }
  // This scheme requires exactly one parent attribute: the vertex positions.
  int GetNumParentAttributes() const override { return 1; }
  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }
  // Attaches the position attribute; rejects null, non-position, or
  // non-3-component attributes.
  bool SetParentAttribute(const PointAttribute *att) override {
    if (!att || att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    predictor_.SetPositionAttribute(*att);
    return true;
  }

 private:
  // Shared encoder/decoder prediction logic.
  MeshPredictionSchemeTexCoordsPortablePredictor<DataTypeT, MeshDataT>
      predictor_;
};
// Decodes all UV entries in encoding order: each corner's value is predicted
// from already-decoded data and combined with its transmitted correction.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsPortableDecoder<
    DataTypeT, TransformT,
    MeshDataT>::ComputeOriginalValues(const CorrType *in_corr,
                                      DataTypeT *out_data, int /* size */,
                                      int num_components,
                                      const PointIndex *entry_to_point_id_map) {
  predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
  this->transform().Init(num_components);
  const int num_entries =
      static_cast<int>(this->mesh_data().data_to_corner_map()->size());
  for (int i = 0; i < num_entries; ++i) {
    const CornerIndex corner = this->mesh_data().data_to_corner_map()->at(i);
    // <false> selects the decoder code path inside the shared predictor.
    if (!predictor_.template ComputePredictedValue<false>(corner, out_data, i))
      return false;
    const int offset = i * num_components;
    this->transform().ComputeOriginalValue(predictor_.predicted_value(),
                                           in_corr + offset,
                                           out_data + offset);
  }
  return true;
}
// Reads the delta-coded orientation bits into the predictor, then lets the
// base class decode its own prediction data.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsPortableDecoder<
    DataTypeT, TransformT, MeshDataT>::DecodePredictionData(DecoderBuffer
                                                                *buffer) {
  // Decode the delta coded orientations.
  int32_t num_orientations = 0;
  if (!buffer->Decode(&num_orientations) || num_orientations < 0)
    return false;
  predictor_.ResizeOrientations(num_orientations);
  RAnsBitDecoder bit_decoder;
  if (!bit_decoder.StartDecoding(buffer))
    return false;
  bool previous = true;
  for (int i = 0; i < num_orientations; ++i) {
    // A cleared bit means the orientation flipped relative to the previous
    // entry; the sequence starts from |true|.
    if (!bit_decoder.DecodeNextBit())
      previous = !previous;
    predictor_.set_orientation(i, previous);
  }
  bit_decoder.EndDecoding();
  return MeshPredictionSchemeDecoder<DataTypeT, TransformT,
                                     MeshDataT>::DecodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_DECODER_H_

View File

@ -0,0 +1,129 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_predictor.h"
#include "draco/compression/bit_coders/rans_bit_encoder.h"
namespace draco {
// Prediction scheme designed for predicting texture coordinates from known
// spatial position of vertices. For isometric parametrizations, the ratios
// between triangle edge lengths should be about the same in both the spatial
// and UV coordinate spaces, which makes the positions a good predictor for the
// UV coordinates. Note that this may not be the optimal approach for other
// parametrizations such as projective ones.
template <typename DataTypeT, class TransformT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortableEncoder
    : public MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT> {
 public:
  using CorrType = typename MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                                        MeshDataT>::CorrType;
  MeshPredictionSchemeTexCoordsPortableEncoder(const PointAttribute *attribute,
                                               const TransformT &transform,
                                               const MeshDataT &mesh_data)
      : MeshPredictionSchemeEncoder<DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data),
        predictor_(mesh_data) {}
  // Computes per-entry prediction corrections; defined out of class below.
  bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrType *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) override;
  // Writes the orientation flags gathered by the predictor.
  bool EncodePredictionData(EncoderBuffer *buffer) override;
  PredictionSchemeMethod GetPredictionMethod() const override {
    return MESH_PREDICTION_TEX_COORDS_PORTABLE;
  }
  // Usable once the predictor has a position attribute and the mesh data is
  // valid.
  bool IsInitialized() const override {
    if (!predictor_.IsInitialized())
      return false;
    if (!this->mesh_data().IsInitialized())
      return false;
    return true;
  }
  // This scheme requires exactly one parent attribute: the vertex positions.
  int GetNumParentAttributes() const override { return 1; }
  GeometryAttribute::Type GetParentAttributeType(int i) const override {
    DRACO_DCHECK_EQ(i, 0);
    (void)i;
    return GeometryAttribute::POSITION;
  }
  bool SetParentAttribute(const PointAttribute *att) override {
    // Guard against a null attribute before dereferencing it, matching the
    // corresponding portable decoder's SetParentAttribute().
    if (!att || att->attribute_type() != GeometryAttribute::POSITION)
      return false;  // Invalid attribute type.
    if (att->num_components() != 3)
      return false;  // Currently works only for 3 component positions.
    predictor_.SetPositionAttribute(*att);
    return true;
  }

 private:
  // Shared encoder/decoder prediction logic.
  MeshPredictionSchemeTexCoordsPortablePredictor<DataTypeT, MeshDataT>
      predictor_;
};
// Computes corrections for all UV entries. Processing runs back-to-front
// because predictions read earlier entries that must still hold their
// original values.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsPortableEncoder<DataTypeT, TransformT,
                                                  MeshDataT>::
    ComputeCorrectionValues(const DataTypeT *in_data, CorrType *out_corr,
                            int size, int num_components,
                            const PointIndex *entry_to_point_id_map) {
  predictor_.SetEntryToPointIdMap(entry_to_point_id_map);
  this->transform().Init(in_data, size, num_components);
  for (int i = static_cast<int>(
           this->mesh_data().data_to_corner_map()->size() - 1);
       i >= 0; --i) {
    const CornerIndex corner = this->mesh_data().data_to_corner_map()->at(i);
    // <true> selects the encoder code path inside the shared predictor.
    predictor_.template ComputePredictedValue<true>(corner, in_data, i);
    const int offset = i * num_components;
    this->transform().ComputeCorrection(in_data + offset,
                                        predictor_.predicted_value(),
                                        out_corr + offset);
  }
  return true;
}
// Writes the orientation count followed by the delta-coded, rANS-compressed
// flags, then forwards to the base class for the remaining prediction data.
template <typename DataTypeT, class TransformT, class MeshDataT>
bool MeshPredictionSchemeTexCoordsPortableEncoder<
    DataTypeT, TransformT, MeshDataT>::EncodePredictionData(EncoderBuffer
                                                                *buffer) {
  // Encode the delta-coded orientations using arithmetic coding.
  const int32_t num_orientations = predictor_.num_orientations();
  buffer->Encode(num_orientations);
  RAnsBitEncoder bit_encoder;
  bit_encoder.StartEncoding();
  bool previous = true;
  for (int i = 0; i < num_orientations; ++i) {
    const bool current = predictor_.orientation(i);
    // A set bit encodes "same orientation as the previous entry".
    bit_encoder.EncodeBit(current == previous);
    previous = current;
  }
  bit_encoder.EndEncoding(buffer);
  return MeshPredictionSchemeEncoder<DataTypeT, TransformT,
                                     MeshDataT>::EncodePredictionData(buffer);
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_ENCODER_H_

View File

@ -0,0 +1,252 @@
// Copyright 2017 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_
#include <math.h>
#include "draco/attributes/point_attribute.h"
#include "draco/core/math_utils.h"
#include "draco/core/vector_d.h"
#include "draco/mesh/corner_table.h"
namespace draco {
// Predictor functionality used for portable UV prediction by both encoder and
// decoder.
template <typename DataTypeT, class MeshDataT>
class MeshPredictionSchemeTexCoordsPortablePredictor {
 public:
  explicit MeshPredictionSchemeTexCoordsPortablePredictor(const MeshDataT &md)
      : pos_attribute_(nullptr),
        entry_to_point_id_map_(nullptr),
        mesh_data_(md) {}
  // Sets the position attribute used as prediction input; the attribute must
  // outlive this predictor (only a pointer is stored).
  void SetPositionAttribute(const PointAttribute &position_attribute) {
    pos_attribute_ = &position_attribute;
  }
  // Sets the entry-id -> point-id mapping; owned by the caller.
  void SetEntryToPointIdMap(const PointIndex *map) {
    entry_to_point_id_map_ = map;
  }
  // The predictor is ready once a position attribute was attached.
  bool IsInitialized() const { return pos_attribute_ != nullptr; }
  // Returns the integer-converted position of the point mapped to |entry_id|.
  VectorD<int64_t, 3> GetPositionForEntryId(int entry_id) const {
    const PointIndex point_id = entry_to_point_id_map_[entry_id];
    VectorD<int64_t, 3> pos;
    pos_attribute_->ConvertValue(pos_attribute_->mapped_index(point_id),
                                 &pos[0]);
    return pos;
  }
  // Returns the two UV components stored for |entry_id| in |data| as 64-bit
  // integers.
  VectorD<int64_t, 2> GetTexCoordForEntryId(int entry_id,
                                            const DataTypeT *data) const {
    const int data_offset = entry_id * kNumComponents;
    return VectorD<int64_t, 2>(data[data_offset], data[data_offset + 1]);
  }
  // Computes predicted UV coordinates on a given corner. The coordinates are
  // stored in |predicted_value_| member.
  template <bool is_encoder_t>
  bool ComputePredictedValue(CornerIndex corner_id, const DataTypeT *data,
                             int data_id);
  const DataTypeT *predicted_value() const { return predicted_value_; }
  bool orientation(int i) const { return orientations_[i]; }
  void set_orientation(int i, bool v) { orientations_[i] = v; }
  // NOTE(review): returns size_t while the portable encoder stores it into an
  // int32_t — narrowing for > 2^31 orientations; TODO confirm acceptable.
  size_t num_orientations() const { return orientations_.size(); }
  void ResizeOrientations(int num_orientations) {
    orientations_.resize(num_orientations);
  }

 private:
  // Position attribute used as prediction input (not owned).
  const PointAttribute *pos_attribute_;
  // Entry-id -> point-id mapping (not owned).
  const PointIndex *entry_to_point_id_map_;
  // This predictor always works on 2-component UV values.
  static constexpr int kNumComponents = 2;
  // Most recently computed prediction.
  DataTypeT predicted_value_[kNumComponents];
  // Encoded / decoded array of UV flips.
  // TODO(ostava): We should remove this and replace this with in-place encoding
  // and decoding to avoid unnecessary copy.
  std::vector<bool> orientations_;
  MeshDataT mesh_data_;
};
// Computes the predicted UV coordinate for the tip corner |corner_id| from
// the positions of all three triangle corners and the already
// encoded/decoded UVs on the next/previous corners. Falls back to delta
// prediction when those UVs are not yet available. On the decoder side this
// returns false when the orientation stack is exhausted (corrupted input);
// otherwise it returns true with the result stored in |predicted_value_|.
template <typename DataTypeT, class MeshDataT>
template <bool is_encoder_t>
bool MeshPredictionSchemeTexCoordsPortablePredictor<
DataTypeT, MeshDataT>::ComputePredictedValue(CornerIndex corner_id,
const DataTypeT *data,
int data_id) {
// Compute the predicted UV coordinate from the positions on all corners
// of the processed triangle. For the best prediction, the UV coordinates
// on the next/previous corners need to be already encoded/decoded.
const CornerIndex next_corner_id = mesh_data_.corner_table()->Next(corner_id);
const CornerIndex prev_corner_id =
mesh_data_.corner_table()->Previous(corner_id);
// Get the encoded data ids from the next and previous corners.
// The data id is the encoding order of the UV coordinates.
int next_data_id, prev_data_id;
int next_vert_id, prev_vert_id;
next_vert_id = mesh_data_.corner_table()->Vertex(next_corner_id).value();
prev_vert_id = mesh_data_.corner_table()->Vertex(prev_corner_id).value();
next_data_id = mesh_data_.vertex_to_data_map()->at(next_vert_id);
prev_data_id = mesh_data_.vertex_to_data_map()->at(prev_vert_id);
// A data id smaller than |data_id| means that entry has already been
// encoded/decoded and can therefore be used for prediction.
if (prev_data_id < data_id && next_data_id < data_id) {
// Both other corners have available UV coordinates for prediction.
const VectorD<int64_t, 2> n_uv = GetTexCoordForEntryId(next_data_id, data);
const VectorD<int64_t, 2> p_uv = GetTexCoordForEntryId(prev_data_id, data);
if (p_uv == n_uv) {
// We cannot do a reliable prediction on degenerated UV triangles.
predicted_value_[0] = p_uv[0];
predicted_value_[1] = p_uv[1];
return true;
}
// Get positions at all corners.
const VectorD<int64_t, 3> tip_pos = GetPositionForEntryId(data_id);
const VectorD<int64_t, 3> next_pos = GetPositionForEntryId(next_data_id);
const VectorD<int64_t, 3> prev_pos = GetPositionForEntryId(prev_data_id);
// We use the positions of the above triangle to predict the texture
// coordinate on the tip corner C.
// To convert the triangle into the UV coordinate system we first compute
// position X on the vector |prev_pos - next_pos| that is the projection of
// point C onto vector |prev_pos - next_pos|:
//
//          C
//         /. \
//        / .  \
//       /  .   \
//      N---X----------P
//
// Where next_pos is point (N), prev_pos is point (P) and tip_pos is the
// position of predicted coordinate (C).
//
const VectorD<int64_t, 3> pn = prev_pos - next_pos;
const uint64_t pn_norm2_squared = pn.SquaredNorm();
// A zero |pn_norm2_squared| means N and P coincide; skip to delta coding.
if (pn_norm2_squared != 0) {
// Compute the projection of C onto PN by computing dot product of CN with
// PN and normalizing it by length of PN. This gives us a factor |s| where
// |s = PN.Dot(CN) / PN.SquaredNorm2()|. This factor can be used to
// compute X in UV space |X_UV| as |X_UV = N_UV + s * PN_UV|.
const VectorD<int64_t, 3> cn = tip_pos - next_pos;
const int64_t cn_dot_pn = pn.Dot(cn);
const VectorD<int64_t, 2> pn_uv = p_uv - n_uv;
// Because we perform all computations with integers, we don't explicitly
// compute the normalized factor |s|, but rather we perform all operations
// over UV vectors in a non-normalized coordinate system scaled with a
// scaling factor |pn_norm2_squared|:
//
//      x_uv = X_UV * PN.Norm2Squared()
//
const VectorD<int64_t, 2> x_uv =
n_uv * pn_norm2_squared + (cn_dot_pn * pn_uv);
// Compute squared length of vector CX in position coordinate system:
const VectorD<int64_t, 3> x_pos =
next_pos + (cn_dot_pn * pn) / pn_norm2_squared;
const uint64_t cx_norm2_squared = (tip_pos - x_pos).SquaredNorm();
// Compute vector CX_UV in the uv space by rotating vector PN_UV by 90
// degrees and scaling it with factor CX.Norm2() / PN.Norm2():
//
//     CX_UV = (CX.Norm2() / PN.Norm2()) * Rot(PN_UV)
//
// To preserve precision, we perform all operations in scaled space as
// explained above, so we want the final vector to be:
//
//     cx_uv = CX_UV * PN.Norm2Squared()
//
// We can then rewrite the formula as:
//
//     cx_uv = CX.Norm2() * PN.Norm2() * Rot(PN_UV)
//
VectorD<int64_t, 2> cx_uv(pn_uv[1], -pn_uv[0]);  // Rotated PN_UV.
// Compute CX.Norm2() * PN.Norm2()
const uint64_t norm_squared =
IntSqrt(cx_norm2_squared * pn_norm2_squared);
// Final cx_uv in the scaled coordinate space.
cx_uv = cx_uv * norm_squared;
// Predicted uv coordinate is then computed by either adding or
// subtracting CX_UV to/from X_UV.
VectorD<int64_t, 2> predicted_uv;
if (is_encoder_t) {
// When encoding, compute both possible vectors and determine which one
// results in a better prediction.
// Both vectors need to be transformed back from the scaled space to
// the real UV coordinate space.
const VectorD<int64_t, 2> predicted_uv_0((x_uv + cx_uv) /
pn_norm2_squared);
const VectorD<int64_t, 2> predicted_uv_1((x_uv - cx_uv) /
pn_norm2_squared);
const VectorD<int64_t, 2> c_uv = GetTexCoordForEntryId(data_id, data);
if ((c_uv - predicted_uv_0).SquaredNorm() <
(c_uv - predicted_uv_1).SquaredNorm()) {
predicted_uv = predicted_uv_0;
orientations_.push_back(true);
} else {
predicted_uv = predicted_uv_1;
orientations_.push_back(false);
}
} else {
// When decoding the data, we already know which orientation to use.
// An empty stack here means the stream is corrupted.
if (orientations_.empty())
return false;
const bool orientation = orientations_.back();
orientations_.pop_back();
if (orientation)
predicted_uv = (x_uv + cx_uv) / pn_norm2_squared;
else
predicted_uv = (x_uv - cx_uv) / pn_norm2_squared;
}
predicted_value_[0] = static_cast<int>(predicted_uv[0]);
predicted_value_[1] = static_cast<int>(predicted_uv[1]);
return true;
}
}
// Else we don't have available textures on both corners or the position data
// is invalid. For such cases we can't use positions for predicting the uv
// value and we resort to delta coding.
// NOTE(review): the two |if| statements below are intentionally NOT chained
// with else-if. When only |prev_data_id| is valid, the |else| branch of the
// second |if| still runs and overwrites the prev-corner offset with the last
// encoded entry. Encoder and decoder share this exact logic, so "fixing" the
// chaining would break bitstream compatibility — leave as is.
int data_offset = 0;
if (prev_data_id < data_id) {
// Use the value on the previous corner as the prediction.
data_offset = prev_data_id * kNumComponents;
}
if (next_data_id < data_id) {
// Use the value on the next corner as the prediction.
data_offset = next_data_id * kNumComponents;
} else {
// None of the other corners have a valid value. Use the last encoded value
// as the prediction if possible.
if (data_id > 0) {
data_offset = (data_id - 1) * kNumComponents;
} else {
// We are encoding the first value. Predict 0.
for (int i = 0; i < kNumComponents; ++i) {
predicted_value_[i] = 0;
}
return true;
}
}
for (int i = 0; i < kNumComponents; ++i) {
predicted_value_[i] = data[data_offset + i];
}
return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_MESH_PREDICTION_SCHEME_TEX_COORDS_PORTABLE_PREDICTOR_H_

View File

@ -0,0 +1,89 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_
#include <type_traits>
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder_interface.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoding_transform.h"
// Prediction schemes can be used during encoding and decoding of vertex
// attributes to predict attribute values based on the previously
// encoded/decoded data. The differences between the original and predicted
// attribute values are used to compute correction values that can be usually
// encoded with fewer bits compared to the original data.
namespace draco {
// Abstract base class for typed prediction scheme decoders. It provides basic
// access to the decoded attribute and to the supplied prediction transform.
// DataTypeT is the type of the attribute values; TransformT converts decoded
// corrections back into original values (defaults to the plain additive
// decoding transform).
template <typename DataTypeT,
          class TransformT =
              PredictionSchemeDecodingTransform<DataTypeT, DataTypeT>>
class PredictionSchemeDecoder : public PredictionSchemeTypedDecoderInterface<
                                    DataTypeT, typename TransformT::CorrType> {
 public:
  typedef DataTypeT DataType;
  typedef TransformT Transform;
  // Correction type needs to be defined in the prediction transform class.
  typedef typename Transform::CorrType CorrType;

  // Constructs the decoder with a default-constructed transform.
  explicit PredictionSchemeDecoder(const PointAttribute *attribute)
      : PredictionSchemeDecoder(attribute, Transform()) {}
  PredictionSchemeDecoder(const PointAttribute *attribute,
                          const Transform &transform)
      : attribute_(attribute), transform_(transform) {}

  // Decodes any transform-specific data from |buffer|. Returns false when the
  // transform fails to parse its data.
  bool DecodePredictionData(DecoderBuffer *buffer) override {
    return transform_.DecodeTransformData(buffer);
  }

  const PointAttribute *GetAttribute() const override { return attribute(); }

  // Returns the number of parent attributes that are needed for the
  // prediction. The base decoder needs none; subclasses may override.
  int GetNumParentAttributes() const override { return 0; }

  // Returns the type of each of the parent attribute.
  GeometryAttribute::Type GetParentAttributeType(int /* i */) const override {
    return GeometryAttribute::INVALID;
  }

  // Sets the required parent attribute. Not supported by the base decoder.
  bool SetParentAttribute(const PointAttribute * /* att */) override {
    return false;
  }

  // Forwarded to the transform: whether all corrections are guaranteed
  // non-negative.
  bool AreCorrectionsPositive() override {
    return transform_.AreCorrectionsPositive();
  }

  PredictionSchemeTransformType GetTransformType() const override {
    return transform_.GetType();
  }

 protected:
  // Accessors used by derived prediction schemes.
  inline const PointAttribute *attribute() const { return attribute_; }
  inline const Transform &transform() const { return transform_; }
  inline Transform &transform() { return transform_; }

 private:
  const PointAttribute *attribute_;
  Transform transform_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_H_

View File

@ -0,0 +1,186 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Functions for creating prediction schemes for decoders using the provided
// prediction method id.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_
#include "draco/draco_features.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_decoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_decoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_decoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
#include "draco/compression/mesh/mesh_decoder.h"
namespace draco {
// Factory class for creating mesh prediction schemes. The factory implements
// operator() that is used to create an appropriate mesh prediction scheme in
// CreateMeshPredictionScheme() function in prediction_scheme_factory.h
template <typename DataTypeT>
struct MeshPredictionSchemeDecoderFactory {
// Operator () specialized for the wrap transform. Wrap transform can be used
// for all mesh prediction schemes. The specialization is done in compile time
// to prevent instantiations of unneeded combinations of prediction schemes +
// prediction transforms.
template <class TransformT, class MeshDataT,
PredictionSchemeTransformType Method>
struct DispatchFunctor {
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
PredictionSchemeMethod method, const PointAttribute *attribute,
const TransformT &transform, const MeshDataT &mesh_data,
uint16_t bitstream_version) {
// Dispatch on |method|; returns nullptr for unsupported methods.
// NOTE: the #ifdef blocks below splice optional |else if| branches into
// the chain — keep the brace/preprocessor placement intact when editing.
if (method == MESH_PREDICTION_PARALLELOGRAM) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeParallelogramDecoder<DataTypeT, TransformT,
MeshDataT>(
attribute, transform, mesh_data));
}
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
else if (method == MESH_PREDICTION_MULTI_PARALLELOGRAM) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeMultiParallelogramDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
}
#endif
else if (method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeConstrainedMultiParallelogramDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
}
#ifdef DRACO_BACKWARDS_COMPATIBILITY_SUPPORTED
else if (method == MESH_PREDICTION_TEX_COORDS_DEPRECATED) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeTexCoordsDecoder<DataTypeT, TransformT,
MeshDataT>(
attribute, transform, mesh_data, bitstream_version));
}
#endif
else if (method == MESH_PREDICTION_TEX_COORDS_PORTABLE) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeTexCoordsPortableDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
} else if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeGeometricNormalDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
}
return nullptr;
}
};
// Operator () specialized for normal octahedron transforms. These transforms
// are currently used only by the geometric normal prediction scheme (the
// transform is also used by delta coding, but delta predictor is not
// constructed in this function).
template <class TransformT, class MeshDataT>
struct DispatchFunctor<TransformT, MeshDataT,
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON_CANONICALIZED> {
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
PredictionSchemeMethod method, const PointAttribute *attribute,
const TransformT &transform, const MeshDataT &mesh_data,
uint16_t bitstream_version) {
if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeGeometricNormalDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
}
return nullptr;
}
};
// Same as above for the non-canonicalized octahedron transform: only the
// geometric normal prediction scheme is supported.
template <class TransformT, class MeshDataT>
struct DispatchFunctor<TransformT, MeshDataT,
PREDICTION_TRANSFORM_NORMAL_OCTAHEDRON> {
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
PredictionSchemeMethod method, const PointAttribute *attribute,
const TransformT &transform, const MeshDataT &mesh_data,
uint16_t bitstream_version) {
if (method == MESH_PREDICTION_GEOMETRIC_NORMAL) {
return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
new MeshPredictionSchemeGeometricNormalDecoder<
DataTypeT, TransformT, MeshDataT>(attribute, transform,
mesh_data));
}
return nullptr;
}
};
// Entry point called by CreateMeshPredictionScheme(); selects the
// DispatchFunctor specialization matching the transform's compile-time type.
template <class TransformT, class MeshDataT>
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>> operator()(
PredictionSchemeMethod method, const PointAttribute *attribute,
const TransformT &transform, const MeshDataT &mesh_data,
uint16_t bitstream_version) {
return DispatchFunctor<TransformT, MeshDataT, TransformT::GetType()>()(
method, attribute, transform, mesh_data, bitstream_version);
}
};
// Creates a prediction scheme for a given decoder and given prediction method.
// The prediction schemes are automatically initialized with decoder specific
// data if needed. When no mesh-specific scheme applies, a generic delta
// decoder is returned; PREDICTION_NONE yields nullptr.
template <typename DataTypeT, class TransformT>
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>
CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
                                 const PointCloudDecoder *decoder,
                                 const TransformT &transform) {
  if (method == PREDICTION_NONE)
    return nullptr;
  const PointAttribute *const attribute =
      decoder->point_cloud()->attribute(att_id);
  if (decoder->GetGeometryType() == TRIANGULAR_MESH) {
    // Cast the decoder to a mesh decoder. This is not necessarily safe if
    // some other decoder type also reports TRIANGULAR_MESH, but there is no
    // nice workaround without RTTI (double dispatch and similar concepts do
    // not work because of the template nature of the prediction schemes).
    const MeshDecoder *const mesh_decoder =
        static_cast<const MeshDecoder *>(decoder);
    auto mesh_scheme = CreateMeshPredictionScheme<
        MeshDecoder, PredictionSchemeDecoder<DataTypeT, TransformT>,
        MeshPredictionSchemeDecoderFactory<DataTypeT>>(
        mesh_decoder, method, att_id, transform, decoder->bitstream_version());
    if (mesh_scheme)
      return mesh_scheme;
    // No mesh scheme was built; fall through to the generic delta decoder.
  }
  return std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>(
      new PredictionSchemeDeltaDecoder<DataTypeT, TransformT>(attribute,
                                                              transform));
}
// Create a prediction scheme using a default transform constructor.
// Convenience overload that forwards to the four-argument factory above with
// a default-constructed TransformT.
template <typename DataTypeT, class TransformT>
std::unique_ptr<PredictionSchemeDecoder<DataTypeT, TransformT>>
CreatePredictionSchemeForDecoder(PredictionSchemeMethod method, int att_id,
const PointCloudDecoder *decoder) {
return CreatePredictionSchemeForDecoder<DataTypeT, TransformT>(
method, att_id, decoder, TransformT());
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_FACTORY_H_

View File

@ -0,0 +1,53 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
#include "draco/core/decoder_buffer.h"
// Prediction schemes can be used during encoding and decoding of attributes
// to predict attribute values based on the previously encoded/decoded data.
// See prediction_scheme.h for more details.
namespace draco {
// Abstract interface for all prediction schemes used during attribute
// decoding.
class PredictionSchemeDecoderInterface : public PredictionSchemeInterface {
public:
// Method that can be used to decode any prediction scheme specific data
// from the input buffer. Returns false on failure.
virtual bool DecodePredictionData(DecoderBuffer *buffer) = 0;
};
// A specialized version of the prediction scheme interface for specific
// input and output data types.
// |entry_to_point_id_map| is the mapping between value entries to point ids
// of the associated point cloud, where one entry is defined as |num_components|
// values of the |in_data|.
// DataTypeT is the data type of input and predicted values.
// CorrTypeT is the data type used for storing corrected values.
template <typename DataTypeT, typename CorrTypeT = DataTypeT>
class PredictionSchemeTypedDecoderInterface
: public PredictionSchemeDecoderInterface {
public:
// Reverts changes made by the prediction scheme during encoding: consumes
// |size| correction values (|size| / |num_components| entries) from
// |in_corr| and writes the reconstructed values to |out_data|.
virtual bool ComputeOriginalValues(
const CorrTypeT *in_corr, DataTypeT *out_data, int size,
int num_components, const PointIndex *entry_to_point_id_map) = 0;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODER_INTERFACE_H_

View File

@ -0,0 +1,65 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_
#include "draco/compression/config/compression_shared.h"
#include "draco/core/decoder_buffer.h"
namespace draco {
// PredictionSchemeDecodingTransform is used to transform predicted values and
// correction values into the final original attribute values.
// DataTypeT is the data type of predicted values.
// CorrTypeT is the data type used for storing corrected values. It allows
// transforms to store corrections into a different type or format compared to
// the predicted data.
template <typename DataTypeT, typename CorrTypeT>
class PredictionSchemeDecodingTransform {
 public:
  typedef CorrTypeT CorrType;

  PredictionSchemeDecodingTransform() : num_components_(0) {}

  // Must be called with the component count before any value is transformed.
  void Init(int num_components) { num_components_ = num_components; }

  // Reconstructs one original entry as |predicted + correction|, component by
  // component (the default transform is plain addition, i.e. std::plus).
  inline void ComputeOriginalValue(const DataTypeT *predicted_vals,
                                   const CorrTypeT *corr_vals,
                                   DataTypeT *out_original_vals) const {
    static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
                  "For the default prediction transform, correction and input "
                  "data must be of the same type.");
    for (int c = 0; c < num_components_; ++c) {
      out_original_vals[c] = predicted_vals[c] + corr_vals[c];
    }
  }

  // Decodes any transform specific data. Called before Init() method. The
  // default transform carries no extra data.
  bool DecodeTransformData(DecoderBuffer * /* buffer */) { return true; }

  // Should return true if all corrected values are guaranteed to be positive;
  // the default transform makes no such guarantee.
  bool AreCorrectionsPositive() const { return false; }

 protected:
  int num_components() const { return num_components_; }

 private:
  int num_components_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DECODING_TRANSFORM_H_

View File

@ -0,0 +1,65 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_decoder.h"
namespace draco {
// Decoder for values encoded with delta coding. See the corresponding encoder
// for more details.
template <typename DataTypeT, class TransformT>
class PredictionSchemeDeltaDecoder
: public PredictionSchemeDecoder<DataTypeT, TransformT> {
public:
using CorrType =
typename PredictionSchemeDecoder<DataTypeT, TransformT>::CorrType;
// Initializes the prediction scheme with a default-constructed transform.
explicit PredictionSchemeDeltaDecoder(const PointAttribute *attribute)
: PredictionSchemeDecoder<DataTypeT, TransformT>(attribute) {}
PredictionSchemeDeltaDecoder(const PointAttribute *attribute,
const TransformT &transform)
: PredictionSchemeDecoder<DataTypeT, TransformT>(attribute, transform) {}
// Reconstructs original values from corrections; defined below.
bool ComputeOriginalValues(const CorrType *in_corr, DataTypeT *out_data,
int size, int num_components,
const PointIndex *entry_to_point_id_map) override;
PredictionSchemeMethod GetPredictionMethod() const override {
return PREDICTION_DIFFERENCE;
}
// Delta coding needs no extra setup beyond construction.
bool IsInitialized() const override { return true; }
};
// Reconstructs the attribute values from the decoded corrections: the first
// entry is recovered against an all-zero prediction and every following entry
// against the previously reconstructed one, i.e. D(i) = D(i - 1) + C(i).
template <typename DataTypeT, class TransformT>
bool PredictionSchemeDeltaDecoder<DataTypeT, TransformT>::ComputeOriginalValues(
    const CorrType *in_corr, DataTypeT *out_data, int size, int num_components,
    const PointIndex *) {
  this->transform().Init(num_components);
  // The very first entry has no predecessor; predict it with zeros.
  std::unique_ptr<DataTypeT[]> zeros(new DataTypeT[num_components]());
  this->transform().ComputeOriginalValue(zeros.get(), in_corr, out_data);
  // Walk forward; each entry is predicted by the one reconstructed just
  // before it.
  for (int offset = num_components; offset < size; offset += num_components) {
    this->transform().ComputeOriginalValue(out_data + offset - num_components,
                                           in_corr + offset,
                                           out_data + offset);
  }
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_DECODER_H_

View File

@ -0,0 +1,69 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
namespace draco {
// Basic prediction scheme based on computing backward differences between
// stored attribute values (also known as delta-coding). Usually works better
// than the reference point prediction scheme, because nearby values are often
// encoded next to each other.
template <typename DataTypeT, class TransformT>
class PredictionSchemeDeltaEncoder
: public PredictionSchemeEncoder<DataTypeT, TransformT> {
public:
using CorrType =
typename PredictionSchemeEncoder<DataTypeT, TransformT>::CorrType;
// Initializes the prediction scheme with a default-constructed transform.
explicit PredictionSchemeDeltaEncoder(const PointAttribute *attribute)
: PredictionSchemeEncoder<DataTypeT, TransformT>(attribute) {}
PredictionSchemeDeltaEncoder(const PointAttribute *attribute,
const TransformT &transform)
: PredictionSchemeEncoder<DataTypeT, TransformT>(attribute, transform) {}
// Computes delta corrections for |in_data|; defined below.
bool ComputeCorrectionValues(
const DataTypeT *in_data, CorrType *out_corr, int size,
int num_components, const PointIndex *entry_to_point_id_map) override;
PredictionSchemeMethod GetPredictionMethod() const override {
return PREDICTION_DIFFERENCE;
}
// Delta coding needs no extra setup beyond construction.
bool IsInitialized() const override { return true; }
};
// Produces delta corrections for |in_data|. Entries are processed back to
// front so that each correction C(i) = D(i) - D(i - 1) can be emitted without
// clobbering values still needed, and the first entry is corrected against an
// all-zero prediction.
template <typename DataTypeT, class TransformT>
bool PredictionSchemeDeltaEncoder<
    DataTypeT, TransformT>::ComputeCorrectionValues(const DataTypeT *in_data,
                                                    CorrType *out_corr,
                                                    int size,
                                                    int num_components,
                                                    const PointIndex *) {
  this->transform().Init(in_data, size, num_components);
  // Back-to-front pass: predict every entry from its predecessor.
  for (int offset = size - num_components; offset > 0;
       offset -= num_components) {
    this->transform().ComputeCorrection(in_data + offset,
                                        in_data + offset - num_components,
                                        out_corr + offset);
  }
  // The first entry has no predecessor; correct it against zeros.
  std::unique_ptr<DataTypeT[]> zeros(new DataTypeT[num_components]());
  this->transform().ComputeCorrection(in_data, zeros.get(), out_corr);
  return true;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_DELTA_ENCODER_H_

View File

@ -0,0 +1,89 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_
#include <type_traits>
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_interface.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoding_transform.h"
// Prediction schemes can be used during encoding and decoding of vertex
// attributes to predict attribute values based on the previously
// encoded/decoded data. The differences between the original and predicted
// attribute values are used to compute correction values that can be usually
// encoded with fewer bits compared to the original data.
namespace draco {
// Abstract base class for typed prediction scheme encoders. It provides basic
// access to the encoded attribute and to the supplied prediction transform.
// DataTypeT is the type of the attribute values; TransformT converts the
// prediction differences into corrections (defaults to the plain subtractive
// encoding transform).
template <typename DataTypeT,
          class TransformT =
              PredictionSchemeEncodingTransform<DataTypeT, DataTypeT>>
class PredictionSchemeEncoder : public PredictionSchemeTypedEncoderInterface<
                                    DataTypeT, typename TransformT::CorrType> {
 public:
  typedef DataTypeT DataType;
  typedef TransformT Transform;
  // Correction type needs to be defined in the prediction transform class.
  typedef typename Transform::CorrType CorrType;

  // Constructs the encoder with a default-constructed transform.
  explicit PredictionSchemeEncoder(const PointAttribute *attribute)
      : PredictionSchemeEncoder(attribute, Transform()) {}
  PredictionSchemeEncoder(const PointAttribute *attribute,
                          const Transform &transform)
      : attribute_(attribute), transform_(transform) {}

  // Encodes any transform-specific data into |buffer|. Returns false when the
  // transform fails to serialize its data.
  bool EncodePredictionData(EncoderBuffer *buffer) override {
    return transform_.EncodeTransformData(buffer);
  }

  const PointAttribute *GetAttribute() const override { return attribute(); }

  // Returns the number of parent attributes that are needed for the
  // prediction. The base encoder needs none; subclasses may override.
  int GetNumParentAttributes() const override { return 0; }

  // Returns the type of each of the parent attribute.
  GeometryAttribute::Type GetParentAttributeType(int /* i */) const override {
    return GeometryAttribute::INVALID;
  }

  // Sets the required parent attribute. Not supported by the base encoder.
  bool SetParentAttribute(const PointAttribute * /* att */) override {
    return false;
  }

  // Forwarded to the transform: whether all corrections are guaranteed
  // non-negative.
  bool AreCorrectionsPositive() override {
    return transform_.AreCorrectionsPositive();
  }

  PredictionSchemeTransformType GetTransformType() const override {
    return transform_.GetType();
  }

 protected:
  // Accessors used by derived prediction schemes.
  inline const PointAttribute *attribute() const { return attribute_; }
  inline const Transform &transform() const { return transform_; }
  inline Transform &transform() { return transform_; }

 private:
  const PointAttribute *attribute_;
  Transform transform_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_H_

View File

@ -0,0 +1,70 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder_factory.h"
namespace draco {
// Chooses a prediction scheme for attribute |att_id| from the encoder's
// speed setting and geometry type. Higher speeds favor cheaper schemes.
PredictionSchemeMethod SelectPredictionMethod(
    int att_id, const PointCloudEncoder *encoder) {
  // At maximum speed only the cheapest prediction is considered.
  if (encoder->options()->GetSpeed() >= 10) {
    return PREDICTION_DIFFERENCE;
  }
  if (encoder->GetGeometryType() != TRIANGULAR_MESH) {
    // Non-mesh geometry (point clouds) always uses delta coding.
    return PREDICTION_DIFFERENCE;
  }
  // Triangular mesh: select based on the attribute type and speed setting.
  const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
  const GeometryAttribute::Type att_type = att->attribute_type();
  if (att_type == GeometryAttribute::TEX_COORD &&
      encoder->options()->GetSpeed() < 4) {
    // Texture coordinate prediction for speeds 0, 1, 2, 3.
    return MESH_PREDICTION_TEX_COORDS_PORTABLE;
  }
  if (att_type == GeometryAttribute::NORMAL) {
    // Geometric normal prediction for speeds 0, 1, 2, 3; deltas otherwise.
    return encoder->options()->GetSpeed() < 4
               ? MESH_PREDICTION_GEOMETRIC_NORMAL
               : PREDICTION_DIFFERENCE;
  }
  // Remaining attribute types (and fast texture coordinates).
  if (encoder->options()->GetSpeed() >= 8) {
    return PREDICTION_DIFFERENCE;
  }
  if (encoder->options()->GetSpeed() >= 2 ||
      encoder->point_cloud()->num_points() < 40) {
    // Parallelogram prediction for speeds 2 - 7, or when the overhead of
    // constrained multi-parallelogram would be too high for a tiny mesh.
    return MESH_PREDICTION_PARALLELOGRAM;
  }
  // Constrained multi-parallelogram for speeds 0, 1.
  return MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM;
}
// Reads the user-requested prediction scheme for attribute |att_id| from the
// encoder |options|. Returns PREDICTION_UNDEFINED when no scheme was
// requested and PREDICTION_NONE when the requested value is out of range.
PredictionSchemeMethod GetPredictionMethodFromOptions(
    int att_id, const EncoderOptions &options) {
  const int requested =
      options.GetAttributeInt(att_id, "prediction_scheme", -1);
  // -1 is the sentinel default meaning "not set by the user".
  if (requested == -1)
    return PREDICTION_UNDEFINED;
  // Any other out-of-range value disables prediction entirely.
  const bool in_range = requested >= 0 && requested < NUM_PREDICTION_SCHEMES;
  if (!in_range)
    return PREDICTION_NONE;
  return static_cast<PredictionSchemeMethod>(requested);
}
} // namespace draco

View File

@ -0,0 +1,132 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Functions for creating prediction schemes for encoders using the provided
// prediction method id.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_constrained_multi_parallelogram_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_geometric_normal_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_multi_parallelogram_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_parallelogram_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_encoder.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_tex_coords_portable_encoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_delta_encoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_encoder.h"
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_factory.h"
#include "draco/compression/mesh/mesh_encoder.h"
namespace draco {
// Selects a prediction method based on the input geometry type and based on the
// encoder options.
PredictionSchemeMethod SelectPredictionMethod(int att_id,
const PointCloudEncoder *encoder);
// Factory functor that instantiates the mesh prediction scheme encoder
// matching |method|, parameterized by the attribute data type, the prediction
// transform, and the mesh connectivity data. Returns nullptr for methods this
// factory does not handle so the caller can fall back to another scheme.
template <typename DataTypeT>
struct MeshPredictionSchemeEncoderFactory {
  template <class TransformT, class MeshDataT>
  std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>> operator()(
      PredictionSchemeMethod method, const PointAttribute *attribute,
      const TransformT &transform, const MeshDataT &mesh_data,
      uint16_t bitstream_version) {
    // |bitstream_version| is accepted for interface symmetry with the decoder
    // factory but is not needed when encoding.
    typedef std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>
        SchemePtr;
    switch (method) {
      case MESH_PREDICTION_PARALLELOGRAM:
        return SchemePtr(
            new MeshPredictionSchemeParallelogramEncoder<DataTypeT, TransformT,
                                                         MeshDataT>(
                attribute, transform, mesh_data));
      case MESH_PREDICTION_MULTI_PARALLELOGRAM:
        return SchemePtr(new MeshPredictionSchemeMultiParallelogramEncoder<
                         DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data));
      case MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM:
        return SchemePtr(
            new MeshPredictionSchemeConstrainedMultiParallelogramEncoder<
                DataTypeT, TransformT, MeshDataT>(attribute, transform,
                                                  mesh_data));
      case MESH_PREDICTION_TEX_COORDS_DEPRECATED:
        return SchemePtr(
            new MeshPredictionSchemeTexCoordsEncoder<DataTypeT, TransformT,
                                                     MeshDataT>(
                attribute, transform, mesh_data));
      case MESH_PREDICTION_TEX_COORDS_PORTABLE:
        return SchemePtr(new MeshPredictionSchemeTexCoordsPortableEncoder<
                         DataTypeT, TransformT, MeshDataT>(
            attribute, transform, mesh_data));
      case MESH_PREDICTION_GEOMETRIC_NORMAL:
        return SchemePtr(
            new MeshPredictionSchemeGeometricNormalEncoder<DataTypeT,
                                                           TransformT,
                                                           MeshDataT>(
                attribute, transform, mesh_data));
      default:
        // Not a mesh prediction method handled by this factory.
        return nullptr;
    }
  }
};
// Creates a prediction scheme of the requested |method| for the given encoder
// and attribute |att_id|. Mesh-specific schemes are initialized with the
// encoder's connectivity data when available; when no mesh scheme can be
// created the function falls back to a simple delta encoder. Returns nullptr
// only when the (possibly auto-selected) method is PREDICTION_NONE.
template <typename DataTypeT, class TransformT>
std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>
CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
                                 const PointCloudEncoder *encoder,
                                 const TransformT &transform) {
  const PointAttribute *const att = encoder->point_cloud()->attribute(att_id);
  if (method == PREDICTION_UNDEFINED) {
    // No explicit method requested; choose one from the encoder's speed
    // setting and geometry type.
    method = SelectPredictionMethod(att_id, encoder);
  }
  if (method == PREDICTION_NONE)
    return nullptr;  // No prediction is used.
  if (encoder->GetGeometryType() == TRIANGULAR_MESH) {
    // Cast the encoder to a mesh encoder. This is not guaranteed to be safe
    // if some other encoder type were to report TRIANGULAR_MESH, but there is
    // no nice workaround for this without RTTI (double dispatch and similar
    // concepts will not work because of the template nature of the prediction
    // schemes).
    const MeshEncoder *const mesh_encoder =
        static_cast<const MeshEncoder *>(encoder);
    auto ret = CreateMeshPredictionScheme<
        MeshEncoder, PredictionSchemeEncoder<DataTypeT, TransformT>,
        MeshPredictionSchemeEncoderFactory<DataTypeT>>(
        mesh_encoder, method, att_id, transform, kDracoMeshBitstreamVersion);
    if (ret)
      return ret;
    // The mesh scheme could not be created (e.g. missing connectivity data);
    // fall through to the generic delta encoder below.
  }
  // Fallback: delta (difference) encoder, usable for any geometry type.
  return std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>(
      new PredictionSchemeDeltaEncoder<DataTypeT, TransformT>(att, transform));
}
// Convenience overload of CreatePredictionSchemeForEncoder() that
// default-constructs the TransformT instead of taking one from the caller.
template <typename DataTypeT, class TransformT>
std::unique_ptr<PredictionSchemeEncoder<DataTypeT, TransformT>>
CreatePredictionSchemeForEncoder(PredictionSchemeMethod method, int att_id,
                                 const PointCloudEncoder *encoder) {
  return CreatePredictionSchemeForEncoder<DataTypeT, TransformT>(
      method, att_id, encoder, TransformT());
}
// Returns the preferred prediction scheme based on the encoder options.
PredictionSchemeMethod GetPredictionMethodFromOptions(
int att_id, const EncoderOptions &options);
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODER_FACTORY_H_

View File

@ -0,0 +1,55 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_
#include "draco/compression/attributes/prediction_schemes/prediction_scheme_interface.h"
#include "draco/core/encoder_buffer.h"
// Prediction schemes can be used during encoding and decoding of attributes
// to predict attribute values based on the previously encoded/decoded data.
// See prediction_scheme.h for more details.
namespace draco {
// Abstract interface for all prediction schemes used during attribute
// encoding.
class PredictionSchemeEncoderInterface : public PredictionSchemeInterface {
 public:
  // Encodes any prediction scheme specific data (e.g. transform parameters)
  // into the output |buffer|. Returns false on failure.
  virtual bool EncodePredictionData(EncoderBuffer *buffer) = 0;
};
// A specialized version of the prediction scheme interface for specific
// input and output data types.
// |entry_to_point_id_map| is the mapping between value entries to point ids
// of the associated point cloud, where one entry is defined as
// |num_components| values of the |in_data|.
// DataTypeT is the data type of input and predicted values.
// CorrTypeT is the data type used for storing corrected values (defaults to
// the input data type).
template <typename DataTypeT, typename CorrTypeT = DataTypeT>
class PredictionSchemeTypedEncoderInterface
    : public PredictionSchemeEncoderInterface {
 public:
  // Applies the prediction scheme when encoding the attribute.
  // |in_data| contains the value entries to be encoded (|size| total values,
  // |num_components| values per entry).
  // |out_corr| is an output array receiving the corrections to be encoded.
  virtual bool ComputeCorrectionValues(
      const DataTypeT *in_data, CorrTypeT *out_corr, int size,
      int num_components, const PointIndex *entry_to_point_id_map) = 0;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_INTERFACE_H_

View File

@ -0,0 +1,77 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_
#include "draco/compression/config/compression_shared.h"
#include "draco/core/encoder_buffer.h"
namespace draco {
// Encoder-side transform that converts predicted values into correction
// values (the values that actually get entropy coded). This default ("delta")
// transform stores the plain component-wise difference between the original
// and the predicted value. CorrTypeT allows derived transforms to store the
// corrections in a different type or format than the predicted data.
template <typename DataTypeT, typename CorrTypeT>
class PredictionSchemeEncodingTransform {
 public:
  typedef CorrTypeT CorrType;

  PredictionSchemeEncodingTransform() : num_components_(0) {}

  // Identifies this transform in the bitstream.
  PredictionSchemeTransformType GetType() const {
    return PREDICTION_TRANSFORM_DELTA;
  }

  // Encoder-side initialization hook. |size| is the total number of values
  // in |orig_data| (number of entries * number of components); the default
  // transform only needs to remember the component count.
  void Init(const DataTypeT * /* orig_data */, int /* size */,
            int num_components) {
    num_components_ = num_components;
  }

  // Computes the correction for one element as the component-wise difference
  // original - predicted (i.e., std::minus). |original_vals| and
  // |predicted_vals| point at the element's components; results are written
  // to |out_corr_vals|.
  inline void ComputeCorrection(const DataTypeT *original_vals,
                                const DataTypeT *predicted_vals,
                                CorrTypeT *out_corr_vals) {
    static_assert(std::is_same<DataTypeT, CorrTypeT>::value,
                  "For the default prediction transform, correction and input "
                  "data must be of the same type.");
    for (int c = 0; c < num_components_; ++c) {
      out_corr_vals[c] = original_vals[c] - predicted_vals[c];
    }
  }

  // The default transform has no extra data to serialize.
  bool EncodeTransformData(EncoderBuffer * /* buffer */) { return true; }

  // Plain differences can be negative, so corrections are not guaranteed to
  // be positive.
  bool AreCorrectionsPositive() const { return false; }

 protected:
  int num_components() const { return num_components_; }

 private:
  int num_components_;
};
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_ENCODING_TRANSFORM_H_

View File

@ -0,0 +1,83 @@
// Copyright 2016 The Draco Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Functions for creating prediction schemes from a provided prediction method
// name. The functions in this file can create only basic prediction schemes
// that don't require any encoder or decoder specific data. To create more
// sophisticated prediction schemes, use functions from either
// prediction_scheme_encoder_factory.h or,
// prediction_scheme_decoder_factory.h.
#ifndef DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
#define DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_
#include "draco/compression/attributes/mesh_attribute_indices_encoding_data.h"
#include "draco/compression/attributes/prediction_schemes/mesh_prediction_scheme_data.h"
#include "draco/compression/config/compression_shared.h"
#include "draco/mesh/mesh_attribute_corner_table.h"
namespace draco {
// Creates a mesh prediction scheme of the given |method| for attribute
// |att_id|, pulling connectivity data from |source| (an encoder or decoder
// exposing point_cloud()/mesh()/corner-table accessors). Returns nullptr when
// |method| is not a mesh method, when connectivity data is missing, or when
// the factory does not support the method.
template <class EncodingDataSourceT, class PredictionSchemeT,
          class MeshPredictionSchemeFactoryT>
std::unique_ptr<PredictionSchemeT> CreateMeshPredictionScheme(
    const EncodingDataSourceT *source, PredictionSchemeMethod method,
    int att_id, const typename PredictionSchemeT::Transform &transform,
    uint16_t bitstream_version) {
  const PointAttribute *const att = source->point_cloud()->attribute(att_id);
  if (source->GetGeometryType() == TRIANGULAR_MESH &&
      (method == MESH_PREDICTION_PARALLELOGRAM ||
       method == MESH_PREDICTION_MULTI_PARALLELOGRAM ||
       method == MESH_PREDICTION_CONSTRAINED_MULTI_PARALLELOGRAM ||
       method == MESH_PREDICTION_TEX_COORDS_PORTABLE ||
       method == MESH_PREDICTION_GEOMETRIC_NORMAL ||
       method == MESH_PREDICTION_TEX_COORDS_DEPRECATED)) {
    const CornerTable *const ct = source->GetCornerTable();
    const MeshAttributeIndicesEncodingData *const encoding_data =
        source->GetAttributeEncodingData(att_id);
    if (ct == nullptr || encoding_data == nullptr) {
      // No connectivity data found.
      return nullptr;
    }
    // Connectivity data exists. Prefer the attribute-specific corner table
    // (present when the attribute has seams) over the mesh's corner table.
    const MeshAttributeCornerTable *const att_ct =
        source->GetAttributeCornerTable(att_id);
    if (att_ct != nullptr) {
      // Build the scheme on top of the attribute corner table.
      typedef MeshPredictionSchemeData<MeshAttributeCornerTable> MeshData;
      MeshData md;
      md.Set(source->mesh(), att_ct,
             &encoding_data->encoded_attribute_value_index_to_corner_map,
             &encoding_data->vertex_to_encoded_attribute_value_index_map);
      MeshPredictionSchemeFactoryT factory;
      auto ret = factory(method, att, transform, md, bitstream_version);
      if (ret)
        return ret;
    } else {
      // Fall back to the mesh-wide corner table.
      typedef MeshPredictionSchemeData<CornerTable> MeshData;
      MeshData md;
      md.Set(source->mesh(), ct,
             &encoding_data->encoded_attribute_value_index_to_corner_map,
             &encoding_data->vertex_to_encoded_attribute_value_index_map);
      MeshPredictionSchemeFactoryT factory;
      auto ret = factory(method, att, transform, md, bitstream_version);
      if (ret)
        return ret;
    }
  }
  // Not a supported mesh prediction configuration.
  return nullptr;
}
} // namespace draco
#endif // DRACO_COMPRESSION_ATTRIBUTES_PREDICTION_SCHEMES_PREDICTION_SCHEME_FACTORY_H_

Some files were not shown because too many files have changed in this diff Show More