Update Ceres to latest upstream version 2.1.0

This release deprecates the Parameterization API in favor of the new
Manifold API. Libmv has been switched over to the Manifold API as part
of this change.
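
As an illustration of the API change (a minimal, hypothetical sketch;
the helper function below is not taken from the Libmv code), a
parameter block that used to be constrained with
ceres::QuaternionParameterization is now constrained with
ceres::QuaternionManifold:

```cpp
#include <ceres/ceres.h>
#include <ceres/manifold.h>

// Hypothetical helper: keeps a 4-component quaternion block on the unit
// sphere while the solver steps in its 3-dimensional tangent space.
void AddRotationBlock(ceres::Problem& problem, double* quaternion) {
  problem.AddParameterBlock(quaternion, 4);

  // Ceres <= 2.0 (deprecated Parameterization API):
  //   problem.SetParameterization(quaternion,
  //                               new ceres::QuaternionParameterization());

  // Ceres 2.1 Manifold API (Problem takes ownership by default):
  problem.SetManifold(quaternion, new ceres::QuaternionManifold());
}
```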

Additionally, remove the bundling scripts. Nowadays they only lead to
duplicated maintenance work.

No measurable changes are expected on the user side.
Sergey Sharybin 2022-05-10 16:36:22 +02:00
parent b30cb05c14
commit be9800e8da
333 changed files with 9006 additions and 5523 deletions

CMakeLists.txt

@@ -1457,14 +1457,6 @@ if(WITH_LIBMV OR WITH_GTESTS OR (WITH_CYCLES AND WITH_CYCLES_LOGGING))
endif()
endif()
#-----------------------------------------------------------------------------
# Configure Ceres
if(WITH_LIBMV)
# We always have C++11 which includes unordered_map.
set(CERES_DEFINES "-DCERES_STD_UNORDERED_MAP;-DCERES_USE_CXX_THREADS")
endif()
#-----------------------------------------------------------------------------
# Extra limits to number of jobs running in parallel for some kind os tasks.
# Only supported by Ninja build system currently.

extern/ceres/CMakeLists.txt

@@ -1,16 +1,11 @@
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright 2012 Blender Foundation. All rights reserved.
# NOTE: This file is automatically generated by bundle.sh script
# If you're doing changes in this file, please update template
# in that script too
set(INC
.
include
internal
config
../gflags/src
)
set(INC_SYS
@@ -20,279 +15,296 @@ set(INC_SYS
)
set(SRC
internal/ceres/accelerate_sparse.cc
internal/ceres/array_utils.cc
internal/ceres/blas.cc
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_matrix.cc
internal/ceres/block_random_access_sparse_matrix.cc
internal/ceres/block_sparse_matrix.cc
internal/ceres/block_structure.cc
internal/ceres/callbacks.cc
internal/ceres/canonical_views_clustering.cc
internal/ceres/c_api.cc
internal/ceres/cgnr_solver.cc
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/context.cc
internal/ceres/context_impl.cc
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/corrector.cc
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/cxsparse.cc
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
internal/ceres/dogleg_strategy.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
internal/ceres/eigensparse.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/float_cxsparse.cc
internal/ceres/float_suitesparse.cc
internal/ceres/function_sample.cc
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_solver.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/inner_product_computer.cc
internal/ceres/is_close.cc
internal/ceres/iterative_refiner.cc
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/lapack.cc
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_preprocessor.cc
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/minimizer.cc
internal/ceres/normal_prior.cc
internal/ceres/parallel_for_cxx.cc
internal/ceres/parallel_for_nothreads.cc
internal/ceres/parallel_for_openmp.cc
internal/ceres/parallel_utils.cc
internal/ceres/parameter_block_ordering.cc
internal/ceres/partitioned_matrix_view.cc
internal/ceres/polynomial.cc
internal/ceres/preconditioner.cc
internal/ceres/preprocessor.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
internal/ceres/reorder_program.cc
internal/ceres/residual_block.cc
internal/ceres/residual_block_utils.cc
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_eliminator.cc
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_templates.cc
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/single_linkage_clustering.cc
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/sparse_cholesky.cc
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/split.cc
internal/ceres/stringprintf.cc
internal/ceres/subset_preconditioner.cc
internal/ceres/suitesparse.cc
internal/ceres/thread_pool.cc
internal/ceres/thread_token_provider.cc
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_preprocessor.cc
internal/ceres/trust_region_step_evaluator.cc
internal/ceres/trust_region_strategy.cc
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility.cc
internal/ceres/wall_time.cc
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/autodiff_manifold.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/evaluation_callback.h
include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/jet_fwd.h
include/ceres/line_manifold.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/manifold.h
include/ceres/manifold_test_utils.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_first_order_function.h
include/ceres/numeric_diff_options.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/product_manifold.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/sphere_manifold.h
include/ceres/tiny_solver.h
include/ceres/tiny_solver_autodiff_function.h
include/ceres/tiny_solver_cost_function_adapter.h
include/ceres/types.h
include/ceres/version.h
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/evaluation_callback.h
include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/householder_vector.h
include/ceres/internal/integer_sequence_algorithm.h
include/ceres/internal/line_parameterization.h
include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_options.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/tiny_solver_autodiff_function.h
include/ceres/tiny_solver_cost_function_adapter.h
include/ceres/tiny_solver.h
include/ceres/types.h
include/ceres/version.h
internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.h
internal/ceres/blas.h
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.h
internal/ceres/block_random_access_sparse_matrix.h
internal/ceres/block_sparse_matrix.h
internal/ceres/block_structure.h
internal/ceres/callbacks.h
internal/ceres/canonical_views_clustering.h
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.h
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/concurrent_queue.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.h
internal/ceres/covariance_impl.h
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/dynamic_sparse_normal_cholesky_solver.h
internal/ceres/eigensparse.h
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.h
internal/ceres/float_cxsparse.h
internal/ceres/float_suitesparse.h
internal/ceres/function_sample.h
internal/ceres/gradient_checking_cost_function.h
internal/ceres/gradient_problem_evaluator.h
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/implicit_schur_complement.h
internal/ceres/inner_product_computer.h
internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.h
internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.h
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.h
internal/ceres/line_search_preprocessor.h
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.h
internal/ceres/pair_hash.h
internal/ceres/parallel_for.h
internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/polynomial.h
internal/ceres/preconditioner.h
internal/ceres/preprocessor.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
internal/ceres/random.h
internal/ceres/reorder_program.h
internal/ceres/residual_block.h
internal/ceres/residual_block_utils.h
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/schur_templates.h
internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/single_linkage_clustering.h
internal/ceres/small_blas_generic.h
internal/ceres/small_blas.h
internal/ceres/solver_utils.h
internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.h
internal/ceres/subset_preconditioner.h
internal/ceres/suitesparse.h
internal/ceres/thread_pool.h
internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_preprocessor.h
internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.h
internal/ceres/wall_time.h
include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/householder_vector.h
include/ceres/internal/integer_sequence_algorithm.h
include/ceres/internal/jet_traits.h
include/ceres/internal/line_parameterization.h
include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/sphere_manifold_functions.h
include/ceres/internal/variadic_evaluate.h
internal/ceres/accelerate_sparse.cc
internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.cc
internal/ceres/array_utils.h
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobian_writer.h
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.cc
internal/ceres/block_random_access_matrix.h
internal/ceres/block_random_access_sparse_matrix.cc
internal/ceres/block_random_access_sparse_matrix.h
internal/ceres/block_sparse_matrix.cc
internal/ceres/block_sparse_matrix.h
internal/ceres/block_structure.cc
internal/ceres/block_structure.h
internal/ceres/c_api.cc
internal/ceres/callbacks.cc
internal/ceres/callbacks.h
internal/ceres/canonical_views_clustering.cc
internal/ceres/canonical_views_clustering.h
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.cc
internal/ceres/cgnr_solver.h
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/concurrent_queue.h
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/context.cc
internal/ceres/context_impl.cc
internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.cc
internal/ceres/corrector.h
internal/ceres/cost_function.cc
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/covariance_impl.h
internal/ceres/cuda_buffer.h
internal/ceres/cxsparse.cc
internal/ceres/cxsparse.h
internal/ceres/dense_cholesky.cc
internal/ceres/dense_cholesky.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr.cc
internal/ceres/dense_qr.h
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.cc
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.cc
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.cc
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
internal/ceres/dynamic_sparse_normal_cholesky_solver.h
internal/ceres/eigensparse.cc
internal/ceres/eigensparse.h
internal/ceres/evaluation_callback.cc
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.cc
internal/ceres/file.h
internal/ceres/first_order_function.cc
internal/ceres/float_cxsparse.cc
internal/ceres/float_cxsparse.h
internal/ceres/float_suitesparse.cc
internal/ceres/float_suitesparse.h
internal/ceres/function_sample.cc
internal/ceres/function_sample.h
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_evaluator.h
internal/ceres/gradient_problem_solver.cc
internal/ceres/graph.h
internal/ceres/graph_algorithms.h
internal/ceres/implicit_schur_complement.cc
internal/ceres/implicit_schur_complement.h
internal/ceres/inner_product_computer.cc
internal/ceres/inner_product_computer.h
internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.cc
internal/ceres/is_close.h
internal/ceres/iteration_callback.cc
internal/ceres/iterative_refiner.cc
internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/line_search.cc
internal/ceres/line_search.h
internal/ceres/line_search_direction.cc
internal/ceres/line_search_direction.h
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_minimizer.h
internal/ceres/line_search_preprocessor.cc
internal/ceres/line_search_preprocessor.h
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.cc
internal/ceres/linear_operator.h
internal/ceres/linear_solver.cc
internal/ceres/linear_solver.h
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/manifold.cc
internal/ceres/manifold_adapter.h
internal/ceres/map_util.h
internal/ceres/minimizer.cc
internal/ceres/minimizer.h
internal/ceres/normal_prior.cc
internal/ceres/pair_hash.h
internal/ceres/parallel_for.h
internal/ceres/parallel_for_cxx.cc
internal/ceres/parallel_for_nothreads.cc
internal/ceres/parallel_for_openmp.cc
internal/ceres/parallel_utils.cc
internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.cc
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/polynomial.cc
internal/ceres/polynomial.h
internal/ceres/preconditioner.cc
internal/ceres/preconditioner.h
internal/ceres/preprocessor.cc
internal/ceres/preprocessor.h
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/problem_impl.h
internal/ceres/program.cc
internal/ceres/program.h
internal/ceres/program_evaluator.h
internal/ceres/random.h
internal/ceres/reorder_program.cc
internal/ceres/reorder_program.h
internal/ceres/residual_block.cc
internal/ceres/residual_block.h
internal/ceres/residual_block_utils.cc
internal/ceres/residual_block_utils.h
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.cc
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/schur_templates.cc
internal/ceres/schur_templates.h
internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/single_linkage_clustering.cc
internal/ceres/single_linkage_clustering.h
internal/ceres/small_blas.h
internal/ceres/small_blas_generic.h
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/solver_utils.h
internal/ceres/sparse_cholesky.cc
internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
internal/ceres/subset_preconditioner.cc
internal/ceres/subset_preconditioner.h
internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
internal/ceres/thread_pool.cc
internal/ceres/thread_pool.h
internal/ceres/thread_token_provider.cc
internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_preprocessor.cc
internal/ceres/trust_region_preprocessor.h
internal/ceres/trust_region_step_evaluator.cc
internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.cc
internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
internal/ceres/visibility.cc
internal/ceres/visibility.h
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
)
set(LIB
@@ -302,48 +314,48 @@ set(LIB
if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
list(APPEND SRC
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
internal/ceres/generated/schur_eliminator_2_2_2.cc
internal/ceres/generated/schur_eliminator_2_2_3.cc
internal/ceres/generated/schur_eliminator_2_2_4.cc
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
internal/ceres/generated/schur_eliminator_2_2_2.cc
internal/ceres/generated/schur_eliminator_2_2_3.cc
internal/ceres/generated/schur_eliminator_2_2_4.cc
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
)
else()
add_definitions(-DCERES_RESTRICT_SCHUR_SPECIALIZATION)
@@ -351,16 +363,5 @@ endif()
add_definitions(${GFLAGS_DEFINES})
add_definitions(${GLOG_DEFINES})
add_definitions(${CERES_DEFINES})
add_definitions(
-DCERES_HAVE_PTHREAD
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
-DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
-DCERES_USE_CXX_THREADS
)
blender_add_lib(extern_ceres "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")

extern/ceres/ChangeLog (596 lines changed)

@@ -1,596 +0,0 @@
commit 399cda773035d99eaf1f4a129a666b3c4df9d1b1
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Fri Oct 23 19:36:08 2020 +0100
Update build documentation to reflect detection of Eigen via config mode
Change-Id: I18d5f0fc1eb51ea630164c911d935e9bffea35ce
commit bb127272f9b57672bca48424f2d83bc430a46eb8
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Oct 19 09:28:34 2020 -0700
Fix typos.
Contributed by Ishamis@, IanBoyanZhang@, gkrobner@ & mithunjacob@.
Change-Id: Iab3c19a07a6f3db2486e3557dcb55bfe5de2aee5
commit a0ec5c32af5c5f5a52168dc2748be910dba14810
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Sun Oct 18 15:20:36 2020 -0700
Update version history for 2.0.0RC2
Change-Id: I75b7515fbf9880bd8eaea6ecd5e72ce1ae4a3a86
commit 3f6d2736769044e7c08c873c41a184849eea73ab
Author: Taylor Braun-Jones <taylor@braun-jones.org>
Date: Tue Jan 28 12:09:30 2020 -0500
Unify symbol visibility configuration for all compilers
This makes it possible to build unit tests with shared libraries on MSVC.
Change-Id: I1db66a80b2c78c4f3d354e35235244d17bac9809
commit 29c2912ee635c77f3ddf2e382a5d6a9cf9805a3d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Oct 13 12:07:06 2020 -0700
Unbreak the bazel build some more
Change-Id: I6bbf3df977a473b9b5e16a9e59da5f535f8cdc24
commit bf47e1a36829f62697b930241d0a353932f34090
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Oct 13 10:00:22 2020 -0700
Fix the Bazel build.
1. Fix the path to eigen, now that it uses gitlab instead of bitbucket.
2. Remove an unrecognized compiler option.
3. Remove an obsolete benchmark.
This CL only unbreaks the build, it is likely that it is still not
at par with the cmake build.
https://github.com/ceres-solver/ceres-solver/issues/628
Change-Id: I470209cbb48b6a4f499564a86b52436e0c8d98ef
commit 600e8c529ebbb4bb89d5baefa3d5ab6ad923706a
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Mon Oct 12 23:00:39 2020 +0200
fix minor typos
all timing values in the summary are initialized to -1, so the one
+1 is likely an oversight.
Change-Id: Ie355f3b7da08a56d49d19ca9a5bc48fe5581dee3
commit bdcdcc78af61a0cb85317ebee52dc804bf4ea975
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Mon Sep 7 01:48:50 2020 +0200
update docs for changed cmake usage
- update links to cmake docs to version 3.5
- highlight difference between dependencies with and without custom
find modules
- point out removal of CERES_INCLUDE_DIRS
- point out that TBB might be linked if SuiteSparseQR is found
- added 'Migration' section
- fixed typos
Change-Id: Icbcc0e723d11f12246fb3cf09b9d7c6206195a82
commit 3f69e5b36a49b44344e96a26b39693a914ba80c6
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Oct 12 11:46:40 2020 -0700
Corrections from William Rucklidge
Change-Id: I0b5d4808be48f68df7829c70ec93ffa67d81315d
commit 8bfdb02fb18551bbd5f222c5472e45eddecd42b9
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Oct 12 10:07:13 2020 -0700
Rewrite uses of VLOG_IF and LOG_IF.
VLOG_IF's evaluation order is ambiguous - does it mean
`if (cond) VLOG(lvl)` or `if (VLOG_IS_ON(lvl) && cond) LOG(INFO)`?
In particular, the way it works now is inconsistent with the way the
rest of the LOG macros evaluate their arguments.
Fixing this would be hard, and the macro's behavior would still surprise
some people. Replacing it with an if statement is simple, clear, and unambiguous.
Change-Id: I97a92d17a932c0a5344a1bf98d676308793ba877
commit d1b35ffc161fd857c7c433574ca82aa9b2db7f98
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Oct 12 10:58:05 2020 -0700
Corrections from William Rucklidge
Change-Id: Ifb50e87aa915d00f9861fe1a6da0acee11bc0a94
commit f34e80e91f600014a3030915cf9ea28bcbc576e7
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Oct 8 12:34:53 2020 -0700
Add dividers between licenses.
Change-Id: I4e4aaa15e0621c5648550cfa622fe0a79f1f4f9f
commit 65c397daeca77da53d16e73720b9a17edd6757ab
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Oct 7 14:34:52 2020 -0700
Fix formatting
Change-Id: Ib4ca8a097059dbb8d2f3a6a888222c0188cb126e
commit f63b1fea9cfa48ae4530c327b10efa4985e69631
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Oct 7 14:30:54 2020 -0700
Add the MIT license text corresponding to the libmv derived files.
Change-Id: Ie72fb45ae96a7892c00411eee6873db7f0e365a8
commit 542613c13d8b7469822aff5eec076f2cad4507ec
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Tue Oct 6 22:48:59 2020 +0200
minor formatting fix for trust_region_minimizer.cc
Change-Id: I18ba27825fc23dd0e9e3e15dc13fc0833db01b5b
commit 6d9e9843d8c61cfb04cc55b9def9518f823a592a
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Sep 28 11:35:37 2020 -0700
Remove inclusion of ceres/eigen.h
The initial reason for this is because of a previous reformatting CL
triggered a macro redefinition warning in the schur eliminator. But
actually it was worse because the reordering had caused the macro
definition to be ignored and caused a performance regression.
This simplifies the generated files, fixes some formatting errors
and recovers the performance.
Change-Id: I9dbeffc38743b3f24b25843feec2e26a73188413
commit eafeca5dcb7af8688d40a9c14b0d2fcb856c96fc
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Sep 28 11:12:59 2020 -0700
Fix a logging bug in TrustRegionMinimizer.
Upon encountering an unsuccessful step (one where the cost goes up)
the the trust region minimizer failed to populate the gradient norm
in the IterationSummary. This would cause the gradient norm to be
logged as zero which is incorrect. Instead it should be the gradient
norm at the current point.
This CL fixes this issue.
Before:
iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
0 1.115206e+07 0.00e+00 1.90e+07 0.00e+00 0.00e+00 1.00e+04 0 2.72e-01 1.33e+00
1 3.687552e+06 7.46e+06 1.84e+08 2.86e+03 6.91e-01 1.06e+04 1 1.32e+00 2.65e+00
2 3.670266e+10 -3.67e+10 0.00e+00 3.27e+03 -1.07e+04 5.30e+03 1 7.52e-01 3.40e+00
3 4.335397e+07 -3.97e+07 0.00e+00 2.74e+03 -1.16e+01 1.32e+03 1 7.28e-01 4.13e+00
4 1.345488e+06 2.34e+06 4.12e+07 1.55e+03 6.87e-01 1.40e+03 1 9.31e-01 5.06e+00
5 5.376653e+05 8.08e+05 9.99e+06 6.64e+02 7.46e-01 1.59e+03 1 9.64e-01 6.03e+00
After:
iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
0 1.115206e+07 0.00e+00 1.90e+07 0.00e+00 0.00e+00 1.00e+04 0 2.37e-01 1.13e+00
1 3.687552e+06 7.46e+06 1.84e+08 2.86e+03 6.91e-01 1.06e+04 1 1.08e+00 2.21e+00
2 3.670266e+10 -3.67e+10 1.84e+08 3.27e+03 -1.07e+04 5.30e+03 1 7.50e-01 2.96e+00
3 4.335397e+07 -3.97e+07 1.84e+08 2.74e+03 -1.16e+01 1.32e+03 1 7.13e-01 3.67e+00
4 1.345488e+06 2.34e+06 4.12e+07 1.55e+03 6.87e-01 1.40e+03 1 9.01e-01 4.57e+00
5 5.376653e+05 8.08e+05 9.99e+06 6.64e+02 7.46e-01 1.59e+03 1 9.36e-01 5.51e+00
Change-Id: Iae538fe089be07c7bb219337a6f1392f7213acfe
commit 1fd0be916dd4ff4241bd52264b9e9170bc7e4339
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Mon Sep 28 18:54:33 2020 +0100
Fix default initialisation of IterationCallback::cost
Change-Id: I9f529093fc09424c90dbff8e9648b90b16990623
commit 137bbe845577929a87f8eef979196df6a8b30ee4
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Mon Sep 28 02:17:32 2020 +0200
add info about clang-format to contributing docs
Change-Id: I2f4dcbda2e4f36096df217d76de370103ffaa43e
commit d3f66d77f45482b90d01af47938289c32dd2cc08
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Mon Sep 28 02:01:43 2020 +0200
fix formatting generated files (best effort)
- update file generator scripts / templates so generated files adhere
to clang-format
- A few exceptions are not fixed, where the file generation results in
lines of different width. To properly fix this would make the code
more complicated and it's not that important for generated files
anyway.
- note that generated files are excluded in ./scripts/format_all.sh
Change-Id: I4f42c83d1fec01242eada5e7ce6c1a5192234d37
commit a9c7361c8dc1d37e78d216754a4c03e8a8f1e74f
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Mon Sep 28 02:14:29 2020 +0200
minor formatting fix (wrongly updated in earlier commit)
Change-Id: I544635fd936cb5b7f7bd9255876641cd5a9590c6
commit 7b8f675bfdb1d924af6a2dcc1f79bda5ace7e886
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Sun Sep 20 21:45:24 2020 +0200
fix formatting for (non-generated) internal source files
- Change formatting standard to Cpp11. Main difference is not having
the space between two closing >> for nested templates. We don't
choose c++14, because older versions of clang-format (version 9
and earlier) don't know this value yet, and it doesn't make a
difference in the formatting.
- Apply clang-format to all (non generated) internal source files.
- Manually fix some code sections (clang-format on/off) and c-strings
- Exclude some embedded external files with very different formatting
(gtest/gmock)
- Add script to format all source files
Change-Id: Ic6cea41575ad6e37c9e136dbce176b0d505dc44d
commit 921368ce31c42ee793cf131860abba291a7e39ad
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Wed Sep 9 09:15:37 2020 -0700
Fix a number of typos in covariance.h
Also some minor cleanups in covariance_impl.h
Thanks to Lorenzo Lamia for pointing these out.
Change-Id: Icb4012a367fdd1f249bc1e7019e0114c868e45b6
commit 7b6b2491cc1be0b3abb67338366d8d69bef3a402
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Tue Sep 8 17:51:32 2020 +0200
fix formatting for examples
This is mostly just applying the existing clang format config, except:
- Use NOLINT on overlong comment lines.
- Wrap some sections in 'clang-format off' / 'clang format on'.
- Manually split or join some multi-line strings.
Change-Id: Ia1a40eeb92112e12c3a169309afe087af55b2f4f
commit 82275d8a4eac4fc0bd07e17c3a41a6e429e72bfb
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Tue Sep 8 02:00:21 2020 +0200
some fixes for Linux and macOS install docs
Linux:
- Remove workaround for Ubuntu 14.04, which is EOL. libsuitesparse-dev
seems to come with a shared library on 16.04 and later, so linking
to a shared build of ceres doesn't seem to be an issue any more.
- Add missing libgflags-dev.
macOS:
- OS X is now called macOS.
- Update homebrew link.
- Mac homebrew the preferred method of installation.
- Fix OpenMP instructions.
- Remove reference to homebrew/science. Everything is in core.
- Add missing gflags.
Change-Id: I633b3c7ea84a87886bfd823f8187fdd0a84737c9
commit 9d762d74f06b946bbd2f098de7216032d0e7b51d
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Sun Sep 6 21:04:24 2020 +0200
fix formatting for public header files
- ensure all public headers files adhere to clang-format
- preserve one-per-line for enums by adding trailing comma
- preserve include order for en/disable_warning.h
Change-Id: I78dbd0527a294ab2ec5f074fb426e48b20c393e6
commit c76478c4898f3af11a6a826ac89c261205f4dd96
Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
Date: Sun Sep 6 23:29:56 2020 +0200
gitignore *.pyc
Change-Id: Ic6238a617a3c7ce92df7dcefcc44bae20c32b30b
commit 4e69a475cd7d7cbed983f5aebf79ae13a46e5415
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Tue Sep 1 10:15:23 2020 +0100
Fix potential for mismatched release/debug TBB libraries
- Protect against the case when the user has multiple installs of TBB
in their search paths and the first install does not contain debug
libraries. In this case it is possible to get mismatched versions
of TBB inserted into TBB_LIBRARIES.
- Also suppresses warning about use of TBB_ROOT on modern versions of
CMake due to CMP0074.
Change-Id: I2eaafdde4a028cbf6c500c63771973d85bc4723d
commit 8e1d8e32ad0d28c0d4d1d7b2b1ce7fc01d90b7b0
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Thu Sep 3 10:49:20 2020 -0700
A number of small changes.
1. Add a move constructor to NumericDiffCostFunction, DynamicAutoDiffCostfunction
and DynamicNumericDiffCostFunction.
2. Add optional ownership of the underlying functor.
3. Update docs to reflect this as well as the variadic templates that allow an
arbitrary number of parameter blocks.
Change-Id: I57bbb51fb9e75f36ec2a661b603beda270f30a19
commit 368a738e5281039f19587545806b7bc6f35e78f9
Author: Julian Kent <jkflying@gmail.com>
Date: Thu May 7 12:54:35 2020 +0200
AutoDiffCostFunction: optional ownership
Add Ownership semantics to the AutoDiffCostFunction
This allows several benefits, such as pointer ordering always being the
same for numerical repeatability (due to blocks being ordered by
pointer address), memory adjacency for better cache performance, and
reduced allocator pressure / overhead.
This is then made use of in libmv by preallocating the errors and
cost functions into vectors
Change-Id: Ia5b97e7249b55a463264b6e26f7a02291927c9f2
commit 8cbd721c199c69f127af6ef7c187ddf7e8f116f9
Author: Morten Hannemose <morten@hannemose.dk>
Date: Thu Sep 3 17:54:20 2020 +0200
Add erf and erfc to jet.h, including tests in jet_test.cc
erf is necessary for evaluating Gaussian functions.
erfc was added because it is so similar to erf.
Change-Id: I5e470dbe013cc938fabb87cde3b0ebf26a90fff4
commit 31366cff299cf2a8d97b43a7533d953ff28fdc29
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Sep 1 09:23:34 2020 -0700
Benchmarks for dynamic autodiff.
This patch is from Clement Courbet. courbet@google.com
Change-Id: I886390663644733bfa5b7b52b0c883079e793726
commit 29fb08aeae1ce691851724af7209fea6127523a9
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Tue Sep 1 10:23:31 2020 +0100
Use CMAKE_PREFIX_PATH to pass Homebrew install location
- Passing HINTS disables the MODULE mode of find_package() which
precludes users from creating their own find modules to provide
Ceres' dependencies.
Change-Id: I6f2edf429331d13fe67bf61ac4b79d17579d9a57
commit 242c703b501ffd64d645f4016d63c8b41c381038
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 4 21:02:11 2020 -0700
Minor fixes to the documentation
Change-Id: I65e6f648d963b8aa640078684ce02dcde6acb87d
commit 79bbf95103672fa4b5485e055ff7692ee4a1f9da
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 4 18:26:02 2020 -0700
Add changelog for 2.0.0
Change-Id: I8acad62bfe629454ae5032732693e43fe37b97ff
commit 41d05f13d0ffb230d7a5a9d67ed31b0cfb35d669
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 4 14:56:33 2020 -0700
Fix lint errors in evaluation_callback_test.cc
Change-Id: I63eb069544ad0d8f495490fe4caa07b9f04f7ec2
commit 4b67903c1f96037048c83a723028c5d0991c09cf
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 4 14:40:50 2020 -0700
Remove unused variables from problem_test.cc
Change-Id: Ia1a13cfc6e462f6d249dcbf169ad34831dd93ec2
commit 10449fc3664c96d4b5454c092195432df79412f8
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue Aug 4 14:30:25 2020 -0700
Add Apache license to the LICENSE file for FixedArray
FixedArray implementation comes from ABSL which is Apache
licensed.
Change-Id: I566dbe9d236814c95945732c6347d3bf7b508283
commit 8c3ecec6db26d7a66f5de8dc654475ec7aa0df14
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Tue May 26 04:44:11 2020 -0700
Fix some minor errors in IterationCallback docs
Change-Id: Id3d7f21a523ff8466868cdec542921c566bbbfa9
commit 7d3ffcb4234632dc51ee84c8a509d9428263070b
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sun Jul 26 19:42:16 2020 +0100
Remove forced CONFIG from find_package(Eigen3)
- Ceres will fail to configure if Eigen3::Eigen target is not found, and
the minimum required Eigen version specified (3.3) exports Eigen as
a CMake package and this is reflected in the default Ubuntu 18.04
packages.
- This permits users to specify their own Eigen3 detection should they
choose to do so, but they must do so via an imported target.
Change-Id: I5edff117c8001770004f49012ac1ae63b66ec9c1
commit a029fc0f93817f20b387b707bc578dc1f1a269ae
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sun Jul 26 18:44:59 2020 +0100
Use latest FindTBB.cmake from VTK project
- Retrieved from [1], SHA: 0d9bbf9beb97f8f696c43a9edf1e52c082b3639b on
2020-07-26
- [1]: https://gitlab.kitware.com/vtk/vtk/blob/master/CMake/FindTBB.cmake
Change-Id: I953a8c87802a974d30ccc7c80f5229683826efbd
commit aa1abbc578797c6b17ee7221db31535dc249ae66
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sun Jul 26 19:57:31 2020 +0100
Replace use of GFLAGS_LIBRARIES with export gflags target
- As our minimum required version of gflags (2.2) exports itself as
a CMake package and this is the case for the default 18.04 package
we can use the gflags target directly.
- Replaces forced use of CONFIG in find_package(gflags) with a check
that the gflags imported target exists to avoid ambiguity with
libgflags if installed in a default location. This permits users to
override the gflags detection should they so choose, provided that
they do so via an imported target.
- Also removes some previously removed legacy GLAGS_ vars from the
installation docs.
Change-Id: I015f5a751e5b22f956bbf9df692e63a6825c9f0d
commit db2af1be8780bbe88944775400baa2dbd3592b7d
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Aug 3 04:57:08 2020 -0700
Add Problem::EvaluateResidualBlockAssumingParametersUnchanged
Simplify the semantics for Problem::EvaluateResidualBlock to
not ignore the presence of EvaluationCallback and add another method
EvaluateResidualBlockAssumingParametersUnchanged to handle the case
where the user has an EvaluationCallback but knows that the parameter
blocks do not change between calls.
Updated the documentation for the methods and EvaluationCallback to
reflect these semantics.
Also added tests for Evaluation related methods calling i
EvaluationCallback when its present.
https://github.com/ceres-solver/ceres-solver/issues/483
Change-Id: If0a0c95c2f1f92e9183a90df240104a69a71c46d
commit ab4ed32cda004befd29a0b4b02f1d907e0c4dab7
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Mon Aug 3 04:17:33 2020 -0700
Replace NULL with nullptr in the documentation.
Change-Id: I995f68770e2a4b6027c0a1d3edf5eb5132b081d7
commit ee280e27a6140295ef6258d24c92305628f3d508
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Jul 31 16:48:06 2020 -0700
Allow SubsetParameterization to accept an empty vector of constant parameters.
Thanks to Frédéric Devernay for reporting this and providing an initial fix.
Change-Id: Id86a2051ab7841ecafdcfb00f4634b353a7ef3b4
commit 4b8c731d8a4f3fda53c642ff14a25fab6c233918
Author: Sameer Agarwal <sameeragarwal@google.com>
Date: Fri Jul 31 10:05:52 2020 -0700
Fix a bug in DynamicAutoDiffCostFunction
DynamicAutoDiffCostFunction::Evaluate when provided with a jacobians
array that was non-empty but all its entries are nullptr, would
compute num_active_parameters = 0, and then skip over all the loops
that evaluated the CostFunctor.
The fix is to check if num_active_parameters == 0, and then treat
it as the case where jacobians array is null.
Thanks to Ky Waegel for reporting and providing a reproduction for this.
Change-Id: Ib86930c2c3f722724d249f662bf88238679bbf98
commit 5cb5b35a930c1702278083c75769dbb4e5801045
Author: Alex Stewart <alexs.mac@gmail.com>
Date: Sun Jul 26 20:42:12 2020 +0100
Fixed incorrect argument name in RotationMatrixToQuaternion()
- Raised as: https://github.com/ceres-solver/ceres-solver/pull/607 by
Frank Dellaert
Change-Id: Id3e9f190e814cf18206e2f8c3b1b67b995c21dd5
commit e39d9ed1d60dfeb58dd2a0df4622c683f87b28e3
Author: Carl Dehlin <carl@dehlin.com>
Date: Tue Jun 16 09:02:05 2020 +0200
Add a missing term and remove a superfluous word
Change-Id: I25f40f0bf241302b975e6fc14690aa863c0728b0
commit 27cab77b699a1a2b5354820c57a91c92eaeb21e3
Author: Carl Dehlin <carl@dehlin.com>
Date: Mon Jun 15 20:01:18 2020 +0200
Reformulate some sentences
Change-Id: I4841aa8e8522008dd816261d9ad98e5fb8ad1758
commit 8ac6655ce85a4462f2882fcb9e9118a7057ebe09
Author: Carl Dehlin <carl@dehlin.com>
Date: Mon Jun 15 19:10:12 2020 +0200
Fix documentation formatting issues
Change-Id: Iea3a6e75dc3a7376eda866ab24e535a6df84f8ea

extern/ceres/LICENSE (234 lines changed)

@@ -1,6 +1,6 @@
Ceres Solver - A fast non-linear least squares minimizer
Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
http://code.google.com/p/ceres-solver/
Copyright 2015 Google Inc. All rights reserved.
http://ceres-solver.org/
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
@@ -25,3 +25,233 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
----------------------------------------------------------------------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------------------------------------------------------
Some of the code in the examples directory derives from libmv, which is
distributed under the MIT license as described below
Copyright (c) 2007-2011 libmv authors.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to
deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.

extern/ceres/README vendored (3 lines deleted)

@ -1,3 +0,0 @@
Ceres Solver - A non-linear least squares minimizer
==================================================
Please see ceres.pdf in docs/ for a tutorial and reference.

View File

@ -1,5 +1,5 @@
Project: Ceres Solver
URL: http://ceres-solver.org/
License: BSD 3-Clause
Upstream version 2.0.0
Upstream version 2.1.0
Local modifications: None

extern/ceres/README.md vendored (new file, 18 lines)

@ -0,0 +1,18 @@
[![Android](https://github.com/ceres-solver/ceres-solver/actions/workflows/android.yml/badge.svg)](https://github.com/ceres-solver/ceres-solver/actions/workflows/android.yml)
[![Linux](https://github.com/ceres-solver/ceres-solver/actions/workflows/linux.yml/badge.svg)](https://github.com/ceres-solver/ceres-solver/actions/workflows/linux.yml)
[![macOS](https://github.com/ceres-solver/ceres-solver/actions/workflows/macos.yml/badge.svg)](https://github.com/ceres-solver/ceres-solver/actions/workflows/macos.yml)
[![Windows](https://github.com/ceres-solver/ceres-solver/actions/workflows/windows.yml/badge.svg)](https://github.com/ceres-solver/ceres-solver/actions/workflows/windows.yml)
Ceres Solver
============
Ceres Solver is an open source C++ library for modeling and solving
large, complicated optimization problems. It is a feature rich, mature
and performant library which has been used in production at Google
since 2010. Ceres Solver can solve two kinds of problems.
1. Non-linear Least Squares problems with bounds constraints.
2. General unconstrained optimization problems.
Please see [ceres-solver.org](http://ceres-solver.org/) for more
information.

extern/ceres/bundle.sh vendored (165 lines deleted)

@ -1,165 +0,0 @@
#!/bin/sh
if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
echo Proceeding as requested by command line ...
else
echo "*** Please run again with --i-really-know-what-im-doing ..."
exit 1
fi
repo="https://ceres-solver.googlesource.com/ceres-solver"
#branch="master"
tag="2.0.0"
tmp=`mktemp -d`
checkout="$tmp/ceres"
GIT="git --git-dir $tmp/ceres/.git --work-tree $checkout"
git clone $repo $checkout
if [ $branch != "master" ]; then
$GIT checkout -t remotes/origin/$branch
else
if [ "x$tag" != "x" ]; then
$GIT checkout $tag
fi
fi
$GIT log -n 50 > ChangeLog
for p in `cat ./patches/series`; do
echo "Applying patch $p..."
cat ./patches/$p | patch -d $tmp/ceres -p1
done
find include -type f -not -iwholename '*.svn*' -exec rm -rf {} \;
find internal -type f -not -iwholename '*.svn*' -exec rm -rf {} \;
cat "files.txt" | while read f; do
mkdir -p `dirname $f`
cp $tmp/ceres/$f $f
done
rm -rf $tmp
sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/' | \
grep -v -E 'schur_eliminator_[0-9]_[0-9d]_[0-9d].cc' | \
grep -v -E 'partitioned_matrix_view_[0-9]_[0-9d]_[0-9d].cc' | sort -d`
generated_sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t\t/' | \
grep -E 'schur_eliminator_[0-9]_[0-9d]_[0-9d].cc|partitioned_matrix_view_[0-9]_[0-9d]_[0-9d].cc' | sort -d`
headers=`find ./include ./internal -type f -iname '*.h' | sed -r 's/^\.\//\t/' | sort -d`
src_dir=`find ./internal -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t/' | sort -d | uniq`
src=""
for x in $src_dir $src_third_dir; do
t=""
if test `echo "$x" | grep -c glog ` -eq 1; then
continue;
fi
if test `echo "$x" | grep -c generated` -eq 1; then
continue;
fi
if stat $x/*.cpp > /dev/null 2>&1; then
t="src += env.Glob('`echo $x'/*.cpp'`')"
fi
if stat $x/*.c > /dev/null 2>&1; then
if [ -z "$t" ]; then
t="src += env.Glob('`echo $x'/*.c'`')"
else
t="$t + env.Glob('`echo $x'/*.c'`')"
fi
fi
if stat $x/*.cc > /dev/null 2>&1; then
if [ -z "$t" ]; then
t="src += env.Glob('`echo $x'/*.cc'`')"
else
t="$t + env.Glob('`echo $x'/*.cc'`')"
fi
fi
if [ -z "$src" ]; then
src=$t
else
src=`echo "$src\n$t"`
fi
done
cat > CMakeLists.txt << EOF
# ***** BEGIN GPL LICENSE BLOCK *****
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# The Original Code is Copyright (C) 2012, Blender Foundation
# All rights reserved.
# ***** END GPL LICENSE BLOCK *****
# NOTE: This file is automatically generated by bundle.sh script
# If you're doing changes in this file, please update template
# in that script too
set(INC
.
include
internal
config
../gflags/src
)
set(INC_SYS
\${EIGEN3_INCLUDE_DIRS}
\${GFLAGS_INCLUDE_DIRS}
\${GLOG_INCLUDE_DIRS}
)
set(SRC
${sources}
${headers}
)
set(LIB
\${GLOG_LIBRARIES}
\${GFLAGS_LIBRARIES}
)
if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
list(APPEND SRC
${generated_sources}
)
else()
add_definitions(-DCERES_RESTRICT_SCHUR_SPECIALIZATION)
endif()
add_definitions(\${GFLAGS_DEFINES})
add_definitions(\${GLOG_DEFINES})
add_definitions(\${CERES_DEFINES})
add_definitions(
-DCERES_HAVE_PTHREAD
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
-DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
-DCERES_USE_CXX_THREADS
)
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}" "\${LIB}")
EOF

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -28,21 +28,98 @@
//
// Author: alexs.mac@gmail.com (Alex Stewart)
// Default (empty) configuration options for Ceres.
// Configuration options for Ceres.
//
// IMPORTANT: Most users of Ceres will not use this file, when
// compiling Ceres with CMake, CMake will configure a new
// config.h with the currently selected Ceres compile
// options in <BUILD_DIR>/config, which will be added to
// the include path for compilation, and installed with the
// public Ceres headers. However, for some users of Ceres
// who compile without CMake (Android), this file ensures
// that Ceres will compile, with the user either specifying
// manually the Ceres compile options, or passing them
// directly through the compiler.
// Do not edit this file, it was automatically configured by CMake when
// Ceres was compiled with the relevant configuration for the machine
// on which Ceres was compiled.
//
// Ceres Developers: All options should have the same name as their mapped
// CMake options, in the preconfigured version of this file
// all options should be enclosed in '@'.
#ifndef CERES_PUBLIC_INTERNAL_CONFIG_H_
#define CERES_PUBLIC_INTERNAL_CONFIG_H_
// If defined, use the LGPL code in Eigen.
#define CERES_USE_EIGEN_SPARSE
// If defined, Ceres was compiled without LAPACK.
#define CERES_NO_LAPACK
// If defined, Ceres was compiled without SuiteSparse.
#define CERES_NO_SUITESPARSE
// If defined, Ceres was compiled without CXSparse.
#define CERES_NO_CXSPARSE
// If defined, Ceres was compiled without CUDA.
#define CERES_NO_CUDA
// If defined, Ceres was compiled without Apple's Accelerate framework solvers.
#define CERES_NO_ACCELERATE_SPARSE
#if defined(CERES_NO_SUITESPARSE) && \
defined(CERES_NO_ACCELERATE_SPARSE) && \
defined(CERES_NO_CXSPARSE) && \
!defined(CERES_USE_EIGEN_SPARSE) // NOLINT
// If defined Ceres was compiled without any sparse linear algebra support.
#define CERES_NO_SPARSE
#endif
// If defined, Ceres was compiled without Schur specializations.
// #define CERES_RESTRICT_SCHUR_SPECIALIZATION
// If defined, Ceres was compiled to use Eigen instead of hardcoded BLAS
// routines.
// #define CERES_NO_CUSTOM_BLAS
// If defined, Ceres was compiled without multithreading support.
// #define CERES_NO_THREADS
// If defined Ceres was compiled with OpenMP multithreading.
// #define CERES_USE_OPENMP
// If defined Ceres was compiled with modern C++ multithreading.
#define CERES_USE_CXX_THREADS
// If defined, Ceres was compiled with a version MSVC >= 2005 which
// deprecated the standard POSIX names for bessel functions, replacing them
// with underscore prefixed versions (e.g. j0() -> _j0()).
#ifdef _MSC_VER
#define CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS
#endif
#if defined(CERES_USE_OPENMP)
#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
#endif
#elif defined(CERES_USE_CXX_THREADS)
#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
#endif
#elif defined(CERES_NO_THREADS)
#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
#endif
#else
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
#endif
// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was
// compiled without any sparse back-end. Verify that it has not subsequently
// been inconsistently redefined.
#if defined(CERES_NO_SPARSE)
#if !defined(CERES_NO_SUITESPARSE)
#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
#endif
#if !defined(CERES_NO_CXSPARSE)
#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
#endif
#if !defined(CERES_NO_ACCELERATE_SPARSE)
#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
#endif
#if defined(CERES_USE_EIGEN_SPARSE)
#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
#endif
#endif
#endif // CERES_PUBLIC_INTERNAL_CONFIG_H_
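For illustration only (a sketch, not part of this commit): the defines above pin the bundled build to Eigen-only sparse linear algebra and std::thread based threading. Downstream code could branch on this configuration roughly as follows, assuming the bundled ceres/internal/config.h is on the include path.

#include <iostream>

#include "ceres/internal/config.h"

int main() {
  // CERES_NO_SPARSE is only defined when every sparse backend is disabled;
  // with CERES_USE_EIGEN_SPARSE set above it stays undefined.
#if defined(CERES_NO_SPARSE)
  std::cout << "No sparse linear algebra backend available\n";
#elif defined(CERES_USE_EIGEN_SPARSE)
  std::cout << "Sparse linear algebra backed by Eigen\n";
#endif
#if defined(CERES_USE_CXX_THREADS)
  std::cout << "Threading: std::thread\n";
#endif
  return 0;
}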

View File

@ -0,0 +1,42 @@
#ifndef CERES_EXPORT_H
#define CERES_EXPORT_H
#ifdef CERES_STATIC_DEFINE
# define CERES_EXPORT
# define CERES_NO_EXPORT
#else
# ifndef CERES_EXPORT
# ifdef ceres_EXPORTS
/* We are building this library */
# define CERES_EXPORT
# else
/* We are using this library */
# define CERES_EXPORT
# endif
# endif
# ifndef CERES_NO_EXPORT
# define CERES_NO_EXPORT
# endif
#endif
#ifndef CERES_DEPRECATED
# define CERES_DEPRECATED __attribute__ ((__deprecated__))
#endif
#ifndef CERES_DEPRECATED_EXPORT
# define CERES_DEPRECATED_EXPORT CERES_EXPORT CERES_DEPRECATED
#endif
#ifndef CERES_DEPRECATED_NO_EXPORT
# define CERES_DEPRECATED_NO_EXPORT CERES_NO_EXPORT CERES_DEPRECATED
#endif
#if 0 /* DEFINE_NO_DEPRECATED */
# ifndef CERES_NO_DEPRECATED
# define CERES_NO_DEPRECATED
# endif
#endif
#endif /* CERES_EXPORT_H */

extern/ceres/files.txt vendored (318 lines deleted)

@ -1,318 +0,0 @@
include/ceres/autodiff_cost_function.h
include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
include/ceres/evaluation_callback.h
include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
include/ceres/internal/householder_vector.h
include/ceres/internal/integer_sequence_algorithm.h
include/ceres/internal/line_parameterization.h
include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
include/ceres/local_parameterization.h
include/ceres/loss_function.h
include/ceres/normal_prior.h
include/ceres/numeric_diff_cost_function.h
include/ceres/numeric_diff_options.h
include/ceres/ordered_groups.h
include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/tiny_solver_autodiff_function.h
include/ceres/tiny_solver_cost_function_adapter.h
include/ceres/tiny_solver.h
include/ceres/types.h
include/ceres/version.h
internal/ceres/accelerate_sparse.cc
internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.cc
internal/ceres/array_utils.h
internal/ceres/blas.cc
internal/ceres/blas.h
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.cc
internal/ceres/block_jacobi_preconditioner.h
internal/ceres/block_random_access_dense_matrix.cc
internal/ceres/block_random_access_dense_matrix.h
internal/ceres/block_random_access_diagonal_matrix.cc
internal/ceres/block_random_access_diagonal_matrix.h
internal/ceres/block_random_access_matrix.cc
internal/ceres/block_random_access_matrix.h
internal/ceres/block_random_access_sparse_matrix.cc
internal/ceres/block_random_access_sparse_matrix.h
internal/ceres/block_sparse_matrix.cc
internal/ceres/block_sparse_matrix.h
internal/ceres/block_structure.cc
internal/ceres/block_structure.h
internal/ceres/callbacks.cc
internal/ceres/callbacks.h
internal/ceres/canonical_views_clustering.cc
internal/ceres/canonical_views_clustering.h
internal/ceres/c_api.cc
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.cc
internal/ceres/cgnr_solver.h
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/concurrent_queue.h
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/context.cc
internal/ceres/context_impl.cc
internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.cc
internal/ceres/corrector.h
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/covariance_impl.h
internal/ceres/cxsparse.cc
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.cc
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.cc
internal/ceres/detect_structure.h
internal/ceres/dogleg_strategy.cc
internal/ceres/dogleg_strategy.h
internal/ceres/dynamic_compressed_row_finalizer.h
internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.h
internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
internal/ceres/dynamic_sparse_normal_cholesky_solver.h
internal/ceres/eigensparse.cc
internal/ceres/eigensparse.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.cc
internal/ceres/file.h
internal/ceres/float_cxsparse.cc
internal/ceres/float_cxsparse.h
internal/ceres/float_suitesparse.cc
internal/ceres/float_suitesparse.h
internal/ceres/function_sample.cc
internal/ceres/function_sample.h
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
internal/ceres/generated/schur_eliminator_2_2_2.cc
internal/ceres/generated/schur_eliminator_2_2_3.cc
internal/ceres/generated/schur_eliminator_2_2_4.cc
internal/ceres/generated/schur_eliminator_2_2_d.cc
internal/ceres/generated/schur_eliminator_2_3_3.cc
internal/ceres/generated/schur_eliminator_2_3_4.cc
internal/ceres/generated/schur_eliminator_2_3_6.cc
internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/generate_template_specializations.py
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
internal/ceres/gradient_problem.cc
internal/ceres/gradient_problem_evaluator.h
internal/ceres/gradient_problem_solver.cc
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
internal/ceres/implicit_schur_complement.cc
internal/ceres/implicit_schur_complement.h
internal/ceres/inner_product_computer.cc
internal/ceres/inner_product_computer.h
internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.cc
internal/ceres/is_close.h
internal/ceres/iterative_refiner.cc
internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.cc
internal/ceres/lapack.h
internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.cc
internal/ceres/linear_operator.h
internal/ceres/linear_solver.cc
internal/ceres/linear_solver.h
internal/ceres/line_search.cc
internal/ceres/line_search_direction.cc
internal/ceres/line_search_direction.h
internal/ceres/line_search.h
internal/ceres/line_search_minimizer.cc
internal/ceres/line_search_minimizer.h
internal/ceres/line_search_preprocessor.cc
internal/ceres/line_search_preprocessor.h
internal/ceres/local_parameterization.cc
internal/ceres/loss_function.cc
internal/ceres/low_rank_inverse_hessian.cc
internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.cc
internal/ceres/minimizer.h
internal/ceres/normal_prior.cc
internal/ceres/pair_hash.h
internal/ceres/parallel_for_cxx.cc
internal/ceres/parallel_for.h
internal/ceres/parallel_for_nothreads.cc
internal/ceres/parallel_for_openmp.cc
internal/ceres/parallel_utils.cc
internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.cc
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
internal/ceres/partitioned_matrix_view_template.py
internal/ceres/polynomial.cc
internal/ceres/polynomial.h
internal/ceres/preconditioner.cc
internal/ceres/preconditioner.h
internal/ceres/preprocessor.cc
internal/ceres/preprocessor.h
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/problem_impl.h
internal/ceres/program.cc
internal/ceres/program_evaluator.h
internal/ceres/program.h
internal/ceres/random.h
internal/ceres/reorder_program.cc
internal/ceres/reorder_program.h
internal/ceres/residual_block.cc
internal/ceres/residual_block.h
internal/ceres/residual_block_utils.cc
internal/ceres/residual_block_utils.h
internal/ceres/schur_complement_solver.cc
internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.cc
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
internal/ceres/schur_eliminator_template.py
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_jacobi_preconditioner.h
internal/ceres/schur_templates.cc
internal/ceres/schur_templates.h
internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/scratch_evaluate_preparer.h
internal/ceres/single_linkage_clustering.cc
internal/ceres/single_linkage_clustering.h
internal/ceres/small_blas_generic.h
internal/ceres/small_blas.h
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/solver_utils.h
internal/ceres/sparse_cholesky.cc
internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.cc
internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
internal/ceres/subset_preconditioner.cc
internal/ceres/subset_preconditioner.h
internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
internal/ceres/thread_pool.cc
internal/ceres/thread_pool.h
internal/ceres/thread_token_provider.cc
internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.cc
internal/ceres/trust_region_minimizer.h
internal/ceres/trust_region_preprocessor.cc
internal/ceres/trust_region_preprocessor.h
internal/ceres/trust_region_step_evaluator.cc
internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.cc
internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.cc
internal/ceres/visibility.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h
config/ceres/internal/config.h

View File

@ -151,7 +151,8 @@ namespace ceres {
template <typename CostFunctor,
int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
int... Ns> // Number of parameters in each parameter block.
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
class AutoDiffCostFunction final
: public SizedCostFunction<kNumResiduals, Ns...> {
public:
// Takes ownership of functor by default. Uses the template-provided
// value for the number of residuals ("kNumResiduals").
@ -178,7 +179,7 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
explicit AutoDiffCostFunction(AutoDiffCostFunction&& other)
AutoDiffCostFunction(AutoDiffCostFunction&& other)
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
virtual ~AutoDiffCostFunction() {
@ -215,6 +216,8 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
jacobians);
};
const CostFunctor& functor() const { return *functor_; }
private:
std::unique_ptr<CostFunctor> functor_;
Ownership ownership_;
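For illustration only (a sketch, not part of this commit): typical use of AutoDiffCostFunction is unchanged by the new final qualifier. ExampleFunctor is a made-up residual, and the Problem/Solver calls are the standard Ceres API assumed to be available alongside this header.

#include "ceres/ceres.h"

// f(x) = 10 - x, so the minimizer should drive x towards 10.
struct ExampleFunctor {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = 10.0 - x[0];
    return true;
  }
};

int main() {
  double x = 0.5;
  ceres::Problem problem;
  // One residual, one parameter block of size one; the cost function takes
  // ownership of the functor by default.
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ExampleFunctor, 1, 1>(new ExampleFunctor),
      nullptr,
      &x);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}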

View File

@ -102,7 +102,7 @@ namespace ceres {
// seen where instead of using a_ directly, a_ is wrapped with T(a_).
template <typename FirstOrderFunctor, int kNumParameters>
class AutoDiffFirstOrderFunction : public FirstOrderFunction {
class AutoDiffFirstOrderFunction final : public FirstOrderFunction {
public:
// Takes ownership of functor.
explicit AutoDiffFirstOrderFunction(FirstOrderFunctor* functor)
@ -110,8 +110,6 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction {
static_assert(kNumParameters > 0, "kNumParameters must be positive");
}
virtual ~AutoDiffFirstOrderFunction() {}
bool Evaluate(const double* const parameters,
double* cost,
double* gradient) const override {
@ -119,7 +117,7 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction {
return (*functor_)(parameters, cost);
}
typedef Jet<double, kNumParameters> JetT;
using JetT = Jet<double, kNumParameters>;
internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(kNumParameters);
for (int i = 0; i < kNumParameters; ++i) {
x[i].a = parameters[i];
@ -142,6 +140,8 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction {
int NumParameters() const override { return kNumParameters; }
const FirstOrderFunctor& functor() const { return *functor_; }
private:
std::unique_ptr<FirstOrderFunctor> functor_;
};
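For illustration only (a sketch, not part of this commit): AutoDiffFirstOrderFunction plugs into the gradient-problem side of the API. The Rosenbrock functor below is illustrative, and the GradientProblem/GradientProblemSolver interface is assumed from the same release.

#include "ceres/ceres.h"

// Rosenbrock function; the functor only produces the cost, the gradient is
// obtained via automatic differentiation.
struct Rosenbrock {
  template <typename T>
  bool operator()(const T* parameters, T* cost) const {
    const T x = parameters[0];
    const T y = parameters[1];
    cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
    return true;
  }
};

int main() {
  double parameters[2] = {-1.2, 1.0};
  ceres::GradientProblem problem(
      new ceres::AutoDiffFirstOrderFunction<Rosenbrock, 2>(new Rosenbrock));

  ceres::GradientProblemSolver::Options options;
  ceres::GradientProblemSolver::Summary summary;
  ceres::Solve(options, problem, parameters, &summary);
  return 0;
}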

View File

@ -40,6 +40,10 @@
namespace ceres {
// WARNING: LocalParameterizations are deprecated, so is
// AutoDiffLocalParameterization. They will be removed from Ceres Solver in
// version 2.2.0. Please use Manifolds and AutoDiffManifold instead.
// Create local parameterization with Jacobians computed via automatic
// differentiation. For more information on local parameterizations,
// see include/ceres/local_parameterization.h
@ -106,7 +110,8 @@ namespace ceres {
// seen where instead of using k_ directly, k_ is wrapped with T(k_).
template <typename Functor, int kGlobalSize, int kLocalSize>
class AutoDiffLocalParameterization : public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use AutoDiffManifold instead.")
AutoDiffLocalParameterization : public LocalParameterization {
public:
AutoDiffLocalParameterization() : functor_(new Functor()) {}
@ -114,7 +119,6 @@ class AutoDiffLocalParameterization : public LocalParameterization {
explicit AutoDiffLocalParameterization(Functor* functor)
: functor_(functor) {}
virtual ~AutoDiffLocalParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override {
@ -133,7 +137,7 @@ class AutoDiffLocalParameterization : public LocalParameterization {
}
const double* parameter_ptrs[2] = {x, zero_delta};
double* jacobian_ptrs[2] = {NULL, jacobian};
double* jacobian_ptrs[2] = {nullptr, jacobian};
return internal::AutoDifferentiate<
kGlobalSize,
internal::StaticParameterDims<kGlobalSize, kLocalSize>>(
@ -143,6 +147,8 @@ class AutoDiffLocalParameterization : public LocalParameterization {
int GlobalSize() const override { return kGlobalSize; }
int LocalSize() const override { return kLocalSize; }
const Functor& functor() const { return *functor_; }
private:
std::unique_ptr<Functor> functor_;
};
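For illustration only (a sketch, not part of this commit): the deprecation message above points at the Manifold replacement. A rough before/after for a quaternion block, assuming the Problem::SetManifold call and the built-in QuaternionManifold added in Ceres 2.1:

#include "ceres/ceres.h"

int main() {
  double q[4] = {1.0, 0.0, 0.0, 0.0};  // Quaternion stored as [w, x, y, z].
  ceres::Problem problem;
  problem.AddParameterBlock(q, 4);

  // Deprecated path (scheduled for removal in 2.2):
  //   problem.SetParameterization(q, new ceres::QuaternionParameterization);

  // Manifold-based replacement:
  problem.SetManifold(q, new ceres::QuaternionManifold);
  return 0;
}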

View File

@ -0,0 +1,259 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_AUTODIFF_MANIFOLD_H_
#define CERES_PUBLIC_AUTODIFF_MANIFOLD_H_
#include <memory>
#include "ceres/internal/autodiff.h"
#include "ceres/manifold.h"
namespace ceres {
// Create a Manifold with Jacobians computed via automatic differentiation. For
// more information on manifolds, see include/ceres/manifold.h
//
// To get an auto differentiated manifold, you must define a class/struct with
// templated Plus and Minus functions that compute
//
// x_plus_delta = Plus(x, delta);
// y_minus_x = Minus(y, x);
//
// Where, x, y and x_plus_delta are vectors on the manifold in the ambient space (so
// they are kAmbientSize vectors) and delta, y_minus_x are vectors in the
// tangent space (so they are kTangentSize vectors).
//
// The Functor should have the signature:
//
// struct Functor {
// template <typename T>
// bool Plus(const T* x, const T* delta, T* x_plus_delta) const;
//
// template <typename T>
// bool Minus(const T* y, const T* x, T* y_minus_x) const;
// };
//
// Observe that the Plus and Minus operations are templated on the parameter T.
// The autodiff framework substitutes appropriate "Jet" objects for T in order
// to compute the derivative when necessary. This is the same mechanism that is
// used to compute derivatives when using AutoDiffCostFunction.
//
// Plus and Minus should return true if the computation is successful and false
// otherwise, in which case the result will not be used.
//
// Given this Functor, the corresponding Manifold can be constructed as:
//
// AutoDiffManifold<Functor, kAmbientSize, kTangentSize> manifold;
//
// As a concrete example consider the case of Quaternions. Quaternions form a
// three dimensional manifold embedded in R^4, i.e. they have an ambient
// dimension of 4 and their tangent space has dimension 3. The following Functor
// (taken from autodiff_manifold_test.cc) defines the Plus and Minus operations
// on the Quaternion manifold:
//
// NOTE: The following is only used for illustration purposes. Ceres Solver
// ships with optimized production grade QuaternionManifold implementation. See
// manifold.h.
//
// This functor assumes that the quaternions are laid out as [w,x,y,z] in
// memory, i.e. the real or scalar part is the first coordinate.
//
// struct QuaternionFunctor {
// template <typename T>
// bool Plus(const T* x, const T* delta, T* x_plus_delta) const {
// const T squared_norm_delta =
// delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2];
//
// T q_delta[4];
// if (squared_norm_delta > T(0.0)) {
// T norm_delta = sqrt(squared_norm_delta);
// const T sin_delta_by_delta = sin(norm_delta) / norm_delta;
// q_delta[0] = cos(norm_delta);
// q_delta[1] = sin_delta_by_delta * delta[0];
// q_delta[2] = sin_delta_by_delta * delta[1];
// q_delta[3] = sin_delta_by_delta * delta[2];
// } else {
// // We do not just use q_delta = [1,0,0,0] here because that is a
// // constant and when used for automatic differentiation will
// // lead to a zero derivative. Instead we take a first order
// // approximation and evaluate it at zero.
// q_delta[0] = T(1.0);
// q_delta[1] = delta[0];
// q_delta[2] = delta[1];
// q_delta[3] = delta[2];
// }
//
// QuaternionProduct(q_delta, x, x_plus_delta);
// return true;
// }
//
// template <typename T>
// bool Minus(const T* y, const T* x, T* y_minus_x) const {
// T minus_x[4] = {x[0], -x[1], -x[2], -x[3]};
// T ambient_y_minus_x[4];
// QuaternionProduct(y, minus_x, ambient_y_minus_x);
// T u_norm = sqrt(ambient_y_minus_x[1] * ambient_y_minus_x[1] +
// ambient_y_minus_x[2] * ambient_y_minus_x[2] +
// ambient_y_minus_x[3] * ambient_y_minus_x[3]);
// if (u_norm > 0.0) {
// T theta = atan2(u_norm, ambient_y_minus_x[0]);
// y_minus_x[0] = theta * ambient_y_minus_x[1] / u_norm;
// y_minus_x[1] = theta * ambient_y_minus_x[2] / u_norm;
// y_minus_x[2] = theta * ambient_y_minus_x[3] / u_norm;
// } else {
// // We do not use [0,0,0] here because even though the value part is
// // a constant, the derivative part is not.
// y_minus_x[0] = ambient_y_minus_x[1];
// y_minus_x[1] = ambient_y_minus_x[2];
// y_minus_x[2] = ambient_y_minus_x[3];
// }
// return true;
// }
// };
//
// Then given this struct, the auto differentiated Quaternion Manifold can now
// be constructed as
//
// Manifold* manifold = new AutoDiffManifold<QuaternionFunctor, 4, 3>;
template <typename Functor, int kAmbientSize, int kTangentSize>
class AutoDiffManifold final : public Manifold {
public:
AutoDiffManifold() : functor_(std::make_unique<Functor>()) {}
// Takes ownership of functor.
explicit AutoDiffManifold(Functor* functor) : functor_(functor) {}
int AmbientSize() const override { return kAmbientSize; }
int TangentSize() const override { return kTangentSize; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override {
return functor_->Plus(x, delta, x_plus_delta);
}
bool PlusJacobian(const double* x, double* jacobian) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override {
return functor_->Minus(y, x, y_minus_x);
}
bool MinusJacobian(const double* x, double* jacobian) const override;
const Functor& functor() const { return *functor_; }
private:
std::unique_ptr<Functor> functor_;
};
namespace internal {
// The following two helper structs are needed to interface the Plus and Minus
// methods of the ManifoldFunctor with the automatic differentiation which
// expects a Functor with operator().
template <typename Functor>
struct PlusWrapper {
explicit PlusWrapper(const Functor& functor) : functor(functor) {}
template <typename T>
bool operator()(const T* x, const T* delta, T* x_plus_delta) const {
return functor.Plus(x, delta, x_plus_delta);
}
const Functor& functor;
};
template <typename Functor>
struct MinusWrapper {
explicit MinusWrapper(const Functor& functor) : functor(functor) {}
template <typename T>
bool operator()(const T* y, const T* x, T* y_minus_x) const {
return functor.Minus(y, x, y_minus_x);
}
const Functor& functor;
};
} // namespace internal
template <typename Functor, int kAmbientSize, int kTangentSize>
bool AutoDiffManifold<Functor, kAmbientSize, kTangentSize>::PlusJacobian(
const double* x, double* jacobian) const {
double zero_delta[kTangentSize];
for (int i = 0; i < kTangentSize; ++i) {
zero_delta[i] = 0.0;
}
double x_plus_delta[kAmbientSize];
for (int i = 0; i < kAmbientSize; ++i) {
x_plus_delta[i] = 0.0;
}
const double* parameter_ptrs[2] = {x, zero_delta};
// PlusJacobian is D_2 Plus(x,0) so we only need to compute the Jacobian
// w.r.t. the second argument.
double* jacobian_ptrs[2] = {nullptr, jacobian};
return internal::AutoDifferentiate<
kAmbientSize,
internal::StaticParameterDims<kAmbientSize, kTangentSize>>(
internal::PlusWrapper<Functor>(*functor_),
parameter_ptrs,
kAmbientSize,
x_plus_delta,
jacobian_ptrs);
}
template <typename Functor, int kAmbientSize, int kTangentSize>
bool AutoDiffManifold<Functor, kAmbientSize, kTangentSize>::MinusJacobian(
const double* x, double* jacobian) const {
double y_minus_x[kTangentSize];
for (int i = 0; i < kTangentSize; ++i) {
y_minus_x[i] = 0.0;
}
const double* parameter_ptrs[2] = {x, x};
// MinusJacobian is D_1 Minus(x,x), so we only need to compute the Jacobian
// w.r.t. the first argument.
double* jacobian_ptrs[2] = {jacobian, nullptr};
return internal::AutoDifferentiate<
kTangentSize,
internal::StaticParameterDims<kAmbientSize, kAmbientSize>>(
internal::MinusWrapper<Functor>(*functor_),
parameter_ptrs,
kTangentSize,
y_minus_x,
jacobian_ptrs);
}
} // namespace ceres
#endif // CERES_PUBLIC_AUTODIFF_MANIFOLD_H_
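For illustration only (a sketch, not part of this commit): the made-up CircleFunctor below shows the shape of a user-defined functor for AutoDiffManifold (ambient size 2, tangent size 1) and how the resulting manifold would typically be attached to a parameter block. Problem::SetManifold is assumed from the same release.

#include <cmath>

#include "ceres/ceres.h"

// Unit circle in R^2: points are 2-vectors, tangent vectors are angles.
struct CircleFunctor {
  template <typename T>
  bool Plus(const T* x, const T* delta, T* x_plus_delta) const {
    using std::cos;
    using std::sin;
    const T c = cos(delta[0]);
    const T s = sin(delta[0]);
    x_plus_delta[0] = c * x[0] - s * x[1];  // Rotate x by the angle delta.
    x_plus_delta[1] = s * x[0] + c * x[1];
    return true;
  }

  template <typename T>
  bool Minus(const T* y, const T* x, T* y_minus_x) const {
    using std::atan2;
    // Signed angle that rotates x onto y.
    y_minus_x[0] = atan2(y[1] * x[0] - y[0] * x[1], y[0] * x[0] + y[1] * x[1]);
    return true;
  }
};

int main() {
  double point[2] = {1.0, 0.0};
  ceres::Problem problem;
  problem.AddParameterBlock(point, 2);
  problem.SetManifold(point,
                      new ceres::AutoDiffManifold<CircleFunctor, 2, 1>);
  return 0;
}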

View File

@ -39,7 +39,7 @@
#define CERES_PUBLIC_C_API_H_
// clang-format off
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/internal/disable_warnings.h"
// clang-format on

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -35,7 +35,9 @@
#define CERES_PUBLIC_CERES_H_
#include "ceres/autodiff_cost_function.h"
#include "ceres/autodiff_first_order_function.h"
#include "ceres/autodiff_local_parameterization.h"
#include "ceres/autodiff_manifold.h"
#include "ceres/conditioned_cost_function.h"
#include "ceres/context.h"
#include "ceres/cost_function.h"
@ -47,19 +49,25 @@
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/evaluation_callback.h"
#include "ceres/first_order_function.h"
#include "ceres/gradient_checker.h"
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_solver.h"
#include "ceres/iteration_callback.h"
#include "ceres/jet.h"
#include "ceres/line_manifold.h"
#include "ceres/local_parameterization.h"
#include "ceres/loss_function.h"
#include "ceres/manifold.h"
#include "ceres/numeric_diff_cost_function.h"
#include "ceres/numeric_diff_first_order_function.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
#include "ceres/product_manifold.h"
#include "ceres/sized_cost_function.h"
#include "ceres/solver.h"
#include "ceres/sphere_manifold.h"
#include "ceres/types.h"
#include "ceres/version.h"

View File

@ -71,18 +71,18 @@ namespace ceres {
// ccf_residual[i] = f_i(my_cost_function_residual[i])
//
// and the Jacobian will be affected appropriately.
class CERES_EXPORT ConditionedCostFunction : public CostFunction {
class CERES_EXPORT ConditionedCostFunction final : public CostFunction {
public:
// Builds a cost function based on a wrapped cost function, and a
// per-residual conditioner. Takes ownership of all of the wrapped cost
// functions, or not, depending on the ownership parameter. Conditioners
// may be NULL, in which case the corresponding residual is not modified.
// may be nullptr, in which case the corresponding residual is not modified.
//
// The conditioners can repeat.
ConditionedCostFunction(CostFunction* wrapped_cost_function,
const std::vector<CostFunction*>& conditioners,
Ownership ownership);
virtual ~ConditionedCostFunction();
~ConditionedCostFunction() override;
bool Evaluate(double const* const* parameters,
double* residuals,

View File

@ -31,6 +31,8 @@
#ifndef CERES_PUBLIC_CONTEXT_H_
#define CERES_PUBLIC_CONTEXT_H_
#include "ceres/internal/export.h"
namespace ceres {
// A global context for processing data in Ceres. This provides a mechanism to
@ -39,13 +41,13 @@ namespace ceres {
// Problems, either serially or in parallel. When using it with multiple
// Problems at the same time, they may end up contending for resources
// (e.g. threads) managed by the Context.
class Context {
class CERES_EXPORT Context {
public:
Context() {}
Context();
Context(const Context&) = delete;
void operator=(const Context&) = delete;
virtual ~Context() {}
virtual ~Context();
// Creates a context object and the caller takes ownership.
static Context* Create();

View File

@ -48,7 +48,7 @@
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
@ -63,11 +63,11 @@ namespace ceres {
// when added with AddResidualBlock().
class CERES_EXPORT CostFunction {
public:
CostFunction() : num_residuals_(0) {}
CostFunction();
CostFunction(const CostFunction&) = delete;
void operator=(const CostFunction&) = delete;
virtual ~CostFunction() {}
virtual ~CostFunction();
// Inputs:
//
@ -92,8 +92,8 @@ class CERES_EXPORT CostFunction {
// jacobians[i][r*parameter_block_size_[i] + c] =
// d residual[r] / d parameters[i][c]
//
// If jacobians is NULL, then no derivatives are returned; this is
// the case when computing cost only. If jacobians[i] is NULL, then
// If jacobians is nullptr, then no derivatives are returned; this is
// the case when computing cost only. If jacobians[i] is nullptr, then
// the jacobian block corresponding to the i'th parameter block must
// not be returned.
//
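For illustration only (a sketch, not part of this commit): the row-major Jacobian layout described above, spelled out for a hand-written cost function with one residual and a single parameter block of size two. ProductCost is a made-up example.

#include "ceres/sized_cost_function.h"

// r(x) = x[0] * x[1].
class ProductCost final : public ceres::SizedCostFunction<1, 2> {
 public:
  bool Evaluate(double const* const* parameters,
                double* residuals,
                double** jacobians) const override {
    const double* x = parameters[0];
    residuals[0] = x[0] * x[1];
    // jacobians is nullptr for cost-only evaluation, and jacobians[i] may be
    // nullptr for individual parameter blocks.
    if (jacobians != nullptr && jacobians[0] != nullptr) {
      // jacobians[0][r * 2 + c] = d residual[r] / d x[c].
      jacobians[0][0] = x[1];
      jacobians[0][1] = x[0];
    }
    return true;
  }
};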

View File

@ -94,10 +94,11 @@
#include "ceres/cost_function.h"
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/internal/export.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {

View File

@ -35,8 +35,9 @@
#include <utility>
#include <vector>
#include "ceres/internal/config.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/types.h"
namespace ceres {
@ -145,7 +146,7 @@ class CovarianceImpl;
// a. The rank deficiency arises from overparameterization. e.g., a
// four dimensional quaternion used to parameterize SO(3), which is
// a three dimensional manifold. In cases like this, the user should
// use an appropriate LocalParameterization. Not only will this lead
// use an appropriate LocalParameterization/Manifold. Not only will this lead
// to better numerical behaviour of the Solver, it will also expose
// the rank deficiency to the Covariance object so that it can
// handle it correctly.
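For illustration only (a sketch, not part of this commit): the Covariance interface itself is untouched by this hunk. A typical call sequence looks roughly like the following, assuming an already solved ceres::Problem and a registered parameter block x of size 2.

#include <utility>
#include <vector>

#include "ceres/covariance.h"
#include "ceres/problem.h"

// Fills cov_xx (2x2, row-major) with the covariance of parameter block x.
bool EstimateCovariance(ceres::Problem* problem, const double* x,
                        double cov_xx[4]) {
  ceres::Covariance::Options options;
  ceres::Covariance covariance(options);

  std::vector<std::pair<const double*, const double*>> blocks;
  blocks.emplace_back(x, x);

  if (!covariance.Compute(blocks, problem)) {
    return false;  // e.g. a rank-deficient Jacobian, as discussed above.
  }
  return covariance.GetCovarianceBlock(x, x, cov_xx);
}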

View File

@ -34,17 +34,17 @@
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
// A compressed row sparse matrix used primarily for communicating the
// Jacobian matrix to the user.
struct CERES_EXPORT CRSMatrix {
CRSMatrix() : num_rows(0), num_cols(0) {}
CRSMatrix() = default;
int num_rows;
int num_cols;
int num_rows{0};
int num_cols{0};
// A compressed row matrix stores its contents in three arrays,
// rows, cols and values.
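For illustration only (a sketch, not part of this commit): a small worked example of the rows/cols/values layout mentioned above, filling a CRSMatrix with the 2x3 matrix [[1 0 2], [0 3 0]].

#include "ceres/crs_matrix.h"

ceres::CRSMatrix MakeExampleMatrix() {
  ceres::CRSMatrix m;
  m.num_rows = 2;
  m.num_cols = 3;
  m.rows = {0, 2, 3};          // Row i occupies entries rows[i] to rows[i + 1] - 1.
  m.cols = {0, 2, 1};          // Column index of each stored entry.
  m.values = {1.0, 2.0, 3.0};  // Non-zero values, row by row.
  return m;
}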

View File

@ -32,7 +32,7 @@
#define CERES_PUBLIC_CUBIC_INTERPOLATION_H_
#include "Eigen/Core"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "glog/logging.h"
namespace ceres {
@ -59,8 +59,8 @@ namespace ceres {
// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
// http://en.wikipedia.org/wiki/Bicubic_interpolation
//
// f if not NULL will contain the interpolated function values.
// dfdx if not NULL will contain the interpolated derivative values.
// f if not nullptr will contain the interpolated function values.
// dfdx if not nullptr will contain the interpolated derivative values.
template <int kDataDimension>
void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0,
const Eigen::Matrix<double, kDataDimension, 1>& p1,
@ -69,7 +69,7 @@ void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0,
const double x,
double* f,
double* dfdx) {
typedef Eigen::Matrix<double, kDataDimension, 1> VType;
using VType = Eigen::Matrix<double, kDataDimension, 1>;
const VType a = 0.5 * (-p0 + 3.0 * p1 - 3.0 * p2 + p3);
const VType b = 0.5 * (2.0 * p0 - 5.0 * p1 + 4.0 * p2 - p3);
const VType c = 0.5 * (-p0 + p2);
@ -79,12 +79,12 @@ void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0,
// derivative.
// f = ax^3 + bx^2 + cx + d
if (f != NULL) {
if (f != nullptr) {
Eigen::Map<VType>(f, kDataDimension) = d + x * (c + x * (b + x * a));
}
// dfdx = 3ax^2 + 2bx + c
if (dfdx != NULL) {
if (dfdx != nullptr) {
Eigen::Map<VType>(dfdx, kDataDimension) = c + x * (2.0 * b + 3.0 * a * x);
}
}
@ -143,7 +143,7 @@ class CubicInterpolator {
// The following two Evaluate overloads are needed for interfacing
// with automatic differentiation. The first is for when a scalar
// evaluation is done, and the second one is for when Jets are used.
void Evaluate(const double& x, double* f) const { Evaluate(x, f, NULL); }
void Evaluate(const double& x, double* f) const { Evaluate(x, f, nullptr); }
template <typename JetT>
void Evaluate(const JetT& x, JetT* f) const {
@ -191,7 +191,7 @@ struct Grid1D {
}
EIGEN_STRONG_INLINE void GetValue(const int n, double* f) const {
const int idx = std::min(std::max(begin_, n), end_ - 1) - begin_;
const int idx = (std::min)((std::max)(begin_, n), end_ - 1) - begin_;
if (kInterleaved) {
for (int i = 0; i < kDataDimension; ++i) {
f[i] = static_cast<double>(data_[kDataDimension * idx + i]);
@ -317,10 +317,10 @@ class BiCubicInterpolator {
// Interpolate vertically the interpolated value from each row and
// compute the derivative along the columns.
CubicHermiteSpline<Grid::DATA_DIMENSION>(f0, f1, f2, f3, r - row, f, dfdr);
if (dfdc != NULL) {
if (dfdc != nullptr) {
// Interpolate vertically the derivative along the columns.
CubicHermiteSpline<Grid::DATA_DIMENSION>(
df0dc, df1dc, df2dc, df3dc, r - row, dfdc, NULL);
df0dc, df1dc, df2dc, df3dc, r - row, dfdc, nullptr);
}
}
@ -328,7 +328,7 @@ class BiCubicInterpolator {
// with automatic differentiation. The first is for when a scalar
// evaluation is done, and the second one is for when Jets are used.
void Evaluate(const double& r, const double& c, double* f) const {
Evaluate(r, c, f, NULL, NULL);
Evaluate(r, c, f, nullptr, nullptr);
}
template <typename JetT>
@ -402,9 +402,9 @@ struct Grid2D {
EIGEN_STRONG_INLINE void GetValue(const int r, const int c, double* f) const {
const int row_idx =
std::min(std::max(row_begin_, r), row_end_ - 1) - row_begin_;
(std::min)((std::max)(row_begin_, r), row_end_ - 1) - row_begin_;
const int col_idx =
std::min(std::max(col_begin_, c), col_end_ - 1) - col_begin_;
(std::min)((std::max)(col_begin_, c), col_end_ - 1) - col_begin_;
const int n = (kRowMajor) ? num_cols_ * row_idx + col_idx
: num_rows_ * col_idx + row_idx;
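For illustration only (a sketch, not part of this commit): usage of the one-dimensional interpolator is unchanged by the nullptr/min-max cleanups above. A rough example following the Grid1D and CubicInterpolator interfaces declared in this header:

#include <iostream>

#include "ceres/cubic_interpolation.h"

int main() {
  const double data[] = {1.0, 2.0, 5.0, 6.0};
  // One-dimensional grid over data[0..3].
  ceres::Grid1D<double, 1> grid(data, 0, 4);
  ceres::CubicInterpolator<ceres::Grid1D<double, 1>> interpolator(grid);

  double f = 0.0, dfdx = 0.0;
  interpolator.Evaluate(1.5, &f, &dfdx);
  std::cout << "f(1.5) = " << f << ", f'(1.5) = " << dfdx << "\n";
  return 0;
}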

View File

@ -77,17 +77,17 @@ namespace ceres {
// pass. There is a tradeoff with the size of the passes; you may want
// to experiment with the stride.
template <typename CostFunctor, int Stride = 4>
class DynamicAutoDiffCostFunction : public DynamicCostFunction {
class DynamicAutoDiffCostFunction final : public DynamicCostFunction {
public:
// Takes ownership by default.
DynamicAutoDiffCostFunction(CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP)
explicit DynamicAutoDiffCostFunction(CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP)
: functor_(functor), ownership_(ownership) {}
explicit DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other)
DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other)
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
virtual ~DynamicAutoDiffCostFunction() {
~DynamicAutoDiffCostFunction() override {
// Manually release pointer if configured to not take ownership
// rather than deleting only if ownership is taken. This is to
// stay maximally compatible to old user code which may have
@ -105,7 +105,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
<< "You must call DynamicAutoDiffCostFunction::SetNumResiduals() "
<< "before DynamicAutoDiffCostFunction::Evaluate().";
if (jacobians == NULL) {
if (jacobians == nullptr) {
return (*functor_)(parameters, residuals);
}
@ -150,7 +150,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
jet_parameters[i] = &input_jets[parameter_cursor];
const int parameter_block_size = parameter_block_sizes()[i];
if (jacobians[i] != NULL) {
if (jacobians[i] != nullptr) {
if (!in_derivative_section) {
start_derivative_section.push_back(parameter_cursor);
in_derivative_section = true;
@ -209,7 +209,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
parameter_cursor >=
(start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
if (jacobians[i] != nullptr) {
input_jets[parameter_cursor].v[active_parameter_count] = 1.0;
++active_parameter_count;
++current_derivative_section_cursor;
@ -238,7 +238,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
parameter_cursor >=
(start_derivative_section[current_derivative_section] +
current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
if (jacobians[i] != nullptr) {
for (int k = 0; k < num_residuals(); ++k) {
jacobians[i][k * parameter_block_sizes()[i] + j] =
output_jets[k].v[active_parameter_count];
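For illustration only (a sketch, not part of this commit): the dynamic variant is configured at run time via AddParameterBlock and SetNumResiduals. DynamicResidual is a made-up functor with a single, dynamically sized parameter block; the Problem/Solver calls are the standard Ceres API.

#include "ceres/ceres.h"

// One residual over a single parameter block whose size is chosen at run time.
struct DynamicResidual {
  template <typename T>
  bool operator()(T const* const* parameters, T* residuals) const {
    const T* x = parameters[0];
    residuals[0] = T(1.0) - x[0] * x[1];
    return true;
  }
};

int main() {
  double x[2] = {0.5, 0.5};
  ceres::Problem problem;

  auto* cost_function =
      new ceres::DynamicAutoDiffCostFunction<DynamicResidual, 4>(
          new DynamicResidual);
  cost_function->AddParameterBlock(2);  // One block with two parameters.
  cost_function->SetNumResiduals(1);
  problem.AddResidualBlock(cost_function, nullptr, x);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}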

View File

@ -32,6 +32,7 @@
#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
#include "ceres/cost_function.h"
#include "ceres/internal/disable_warnings.h"
namespace ceres {
@ -40,8 +41,6 @@ namespace ceres {
// parameter blocks and set the number of residuals at run time.
class CERES_EXPORT DynamicCostFunction : public CostFunction {
public:
~DynamicCostFunction() {}
virtual void AddParameterBlock(int size) {
mutable_parameter_block_sizes()->push_back(size);
}
@ -53,4 +52,6 @@ class CERES_EXPORT DynamicCostFunction : public CostFunction {
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_

View File

@ -37,8 +37,10 @@
#include <vector>
#include "ceres/dynamic_cost_function.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "glog/logging.h"
namespace ceres {
@ -100,7 +102,7 @@ namespace ceres {
// private:
// DynamicCostFunctionToFunctor intrinsic_projection_;
// };
class DynamicCostFunctionToFunctor {
class CERES_EXPORT DynamicCostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit DynamicCostFunctionToFunctor(CostFunction* cost_function)
@ -109,7 +111,7 @@ class DynamicCostFunctionToFunctor {
}
bool operator()(double const* const* parameters, double* residuals) const {
return cost_function_->Evaluate(parameters, residuals, NULL);
return cost_function_->Evaluate(parameters, residuals, nullptr);
}
template <typename JetT>
@ -187,4 +189,6 @@ class DynamicCostFunctionToFunctor {
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_

View File

@ -77,7 +77,7 @@ namespace ceres {
// cost_function.AddParameterBlock(10);
// cost_function.SetNumResiduals(21);
template <typename CostFunctor, NumericDiffMethodType method = CENTRAL>
class DynamicNumericDiffCostFunction : public DynamicCostFunction {
class DynamicNumericDiffCostFunction final : public DynamicCostFunction {
public:
explicit DynamicNumericDiffCostFunction(
const CostFunctor* functor,
@ -85,11 +85,10 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor), ownership_(ownership), options_(options) {}
explicit DynamicNumericDiffCostFunction(
DynamicNumericDiffCostFunction&& other)
DynamicNumericDiffCostFunction(DynamicNumericDiffCostFunction&& other)
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
virtual ~DynamicNumericDiffCostFunction() {
~DynamicNumericDiffCostFunction() override {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
}
@ -111,7 +110,7 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
const bool status =
internal::VariadicEvaluate<internal::DynamicParameterDims>(
*functor_.get(), parameters, residuals);
if (jacobians == NULL || !status) {
if (jacobians == nullptr || !status) {
return status;
}
@ -133,7 +132,7 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
}
for (size_t block = 0; block < block_sizes.size(); ++block) {
if (jacobians[block] != NULL &&
if (jacobians[block] != nullptr &&
!NumericDiff<CostFunctor,
method,
ceres::DYNAMIC,

View File

@ -31,7 +31,7 @@
#ifndef CERES_PUBLIC_EVALUATION_CALLBACK_H_
#define CERES_PUBLIC_EVALUATION_CALLBACK_H_
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
@ -62,7 +62,7 @@ namespace ceres {
// execute faster.
class CERES_EXPORT EvaluationCallback {
public:
virtual ~EvaluationCallback() {}
virtual ~EvaluationCallback();
// Called before Ceres requests residuals or jacobians for a given setting of
// the parameters. User parameters (the double* values provided to the cost

View File

@ -31,7 +31,7 @@
#ifndef CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
#define CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
@ -39,7 +39,7 @@ namespace ceres {
// and its gradient.
class CERES_EXPORT FirstOrderFunction {
public:
virtual ~FirstOrderFunction() {}
virtual ~FirstOrderFunction();
// cost is never null. gradient may be null. The return value
// indicates whether the evaluation was successful or not.

View File

@ -40,9 +40,12 @@
#include "ceres/cost_function.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/export.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/local_parameterization.h"
#include "ceres/manifold.h"
#include "glog/logging.h"
namespace ceres {
@ -65,19 +68,42 @@ namespace ceres {
// CostFunction, and then call Probe(). Check that the return value is 'true'.
class CERES_EXPORT GradientChecker {
public:
// This will not take ownership of the cost function or local
// This constructor will not take ownership of the cost function or local
// parameterizations.
//
// function: The cost function to probe.
// local_parameterizations: A vector of local parameterizations for each
// parameter. May be NULL or contain NULL pointers to indicate that the
//
// local_parameterizations: A vector of local parameterizations, one for each
// parameter block. May be nullptr or contain nullptrs to indicate that the
// respective parameter does not have a local parameterization.
//
// options: Options to use for numerical differentiation.
//
// NOTE: This constructor is deprecated and will be removed in the next public
// release of Ceres Solver. Please transition to using the Manifold based
// version.
CERES_DEPRECATED_WITH_MSG(
"Local Parameterizations are deprecated. Use the constructor that uses "
"Manifolds instead.")
GradientChecker(
const CostFunction* function,
const std::vector<const LocalParameterization*>* local_parameterizations,
const NumericDiffOptions& options);
// This will not take ownership of the cost function or manifolds.
//
// function: The cost function to probe.
//
// manifolds: A vector of manifolds for each parameter. May be nullptr or
// contain nullptrs to indicate that the respective parameter blocks are
// Euclidean.
//
// options: Options to use for numerical differentiation.
GradientChecker(const CostFunction* function,
const std::vector<const Manifold*>* manifolds,
const NumericDiffOptions& options);
~GradientChecker();
// Contains results from a call to Probe for later inspection.
struct CERES_EXPORT ProbeResults {
// The return value of the cost function.
@ -87,11 +113,11 @@ class CERES_EXPORT GradientChecker {
Vector residuals;
// The sizes of the Jacobians below are dictated by the cost function's
// parameter block size and residual block sizes. If a parameter block
// has a local parameterization associated with it, the size of the "local"
// Jacobian will be determined by the local parameterization dimension and
// residual block size, otherwise it will be identical to the regular
// Jacobian.
// parameter block size and residual block sizes. If a parameter block has a
// manifold associated with it, the size of the "local" Jacobian will be
// determined by the dimension of the manifold (which is the same as the
// dimension of the tangent space) and residual block size, otherwise it
// will be identical to the regular Jacobian.
// Derivatives as computed by the cost function.
std::vector<Matrix> jacobians;
@ -114,20 +140,20 @@ class CERES_EXPORT GradientChecker {
};
// Call the cost function, compute alternative Jacobians using finite
// differencing and compare results. If local parameterizations are given,
// the Jacobians will be multiplied by the local parameterization Jacobians
// before performing the check, which effectively means that all errors along
// the null space of the local parameterization will be ignored.
// Returns false if the Jacobians don't match, the cost function return false,
// or if the cost function returns different residual when called with a
// Jacobian output argument vs. calling it without. Otherwise returns true.
// differencing and compare results. If manifolds are given, the Jacobians
// will be multiplied by the manifold Jacobians before performing the check,
// which effectively means that all errors along the null space of the
// manifold will be ignored. Returns false if the Jacobians don't match, the
// cost function returns false, or if the cost function returns a different
// residual when called with a Jacobian output argument vs. calling it
// without. Otherwise returns true.
//
// parameters: The parameter values at which to probe.
// relative_precision: A threshold for the relative difference between the
// Jacobians. If the Jacobians differ by more than this amount, then the
// probe fails.
// results: On return, the Jacobians (and other information) will be stored
// here. May be NULL.
// here. May be nullptr.
//
// Returns true if no problems are detected and the difference between the
// Jacobians is less than error_tolerance.
@ -140,11 +166,24 @@ class CERES_EXPORT GradientChecker {
GradientChecker(const GradientChecker&) = delete;
void operator=(const GradientChecker&) = delete;
std::vector<const LocalParameterization*> local_parameterizations_;
// This bool records whether the constructor taking LocalParameterizations or
// the one taking Manifolds was called. If the former, the vector of manifolds
// holds ManifoldAdapter objects which we own and must delete. If the latter,
// they are real Manifold objects owned by the caller and will not be deleted.
//
// This bool is only needed during the LocalParameterization to Manifold
// transition; once the transition is complete, the LocalParameterization
// based constructor and this bool will be removed.
const bool delete_manifolds_ = false;
std::vector<const Manifold*> manifolds_;
const CostFunction* function_;
std::unique_ptr<CostFunction> finite_diff_cost_function_;
};
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_GRADIENT_CHECKER_H_
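As a usage illustration of the Manifold based constructor and Probe() described above, a minimal sketch (my_cost_function, quaternion_manifold and the parameter values are hypothetical; the nullptr entry marks the second parameter block as Euclidean):
std::vector<const ceres::Manifold*> manifolds = {&quaternion_manifold, nullptr};
ceres::NumericDiffOptions numeric_diff_options;
ceres::GradientChecker checker(&my_cost_function, &manifolds, numeric_diff_options);
double q[4] = {1.0, 0.0, 0.0, 0.0};  // block probed through the manifold
double t[3] = {0.0, 0.0, 0.0};       // Euclidean block
double* parameter_blocks[] = {q, t};
ceres::GradientChecker::ProbeResults results;
if (!checker.Probe(parameter_blocks, 1e-9, &results)) {
  LOG(ERROR) << "Gradient check failed:\n" << results.error_log;
}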

View File

@ -34,8 +34,10 @@
#include <memory>
#include "ceres/first_order_function.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/local_parameterization.h"
#include "ceres/manifold.h"
namespace ceres {
@ -43,23 +45,22 @@ class FirstOrderFunction;
// Instances of GradientProblem represent general non-linear
// optimization problems that must be solved using just the value of
// the objective function and its gradient. Unlike the Problem class,
// which can only be used to model non-linear least squares problems,
// instances of GradientProblem not restricted in the form of the
// objective function.
// the objective function and its gradient.
// Unlike the Problem class, which can only be used to model non-linear least
// squares problems, instances of GradientProblem are not restricted in the form
// of the objective function.
//
// Structurally GradientProblem is a composition of a
// FirstOrderFunction and optionally a LocalParameterization.
// Structurally GradientProblem is a composition of a FirstOrderFunction and
// optionally a Manifold.
//
// The FirstOrderFunction is responsible for evaluating the cost and
// gradient of the objective function.
// The FirstOrderFunction is responsible for evaluating the cost and gradient of
// the objective function.
//
// The LocalParameterization is responsible for going back and forth
// between the ambient space and the local tangent space. (See
// local_parameterization.h for more details). When a
// LocalParameterization is not provided, then the tangent space is
// assumed to coincide with the ambient Euclidean space that the
// gradient vector lives in.
// The Manifold is responsible for going back and forth between the ambient
// space and the local tangent space. (See manifold.h for more details). When a
// Manifold is not provided, then the tangent space is assumed to coincide with
// the ambient Euclidean space that the gradient vector lives in.
//
// Example usage:
//
@ -78,7 +79,7 @@ class FirstOrderFunction;
// const double y = parameters[1];
//
// cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x);
// if (gradient != NULL) {
// if (gradient != nullptr) {
// gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x;
// gradient[1] = 200.0 * (y - x * x);
// }
@ -89,28 +90,96 @@ class FirstOrderFunction;
// };
//
// ceres::GradientProblem problem(new Rosenbrock());
//
// NOTE: We are currently in the process of transitioning from
// LocalParameterization to Manifolds in the Ceres API. During this period,
// GradientProblem will support using both Manifold and LocalParameterization
// objects interchangeably. For methods in the API affected by this change, see
// their documentation below.
class CERES_EXPORT GradientProblem {
public:
// Takes ownership of the function.
explicit GradientProblem(FirstOrderFunction* function);
// Takes ownership of the function and the parameterization.
//
// NOTE: This constructor is deprecated and will be removed in the next public
// release of Ceres Solver. Please move to using the Manifold based
// constructor.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Please use the constructor that "
"uses Manifold instead.")
GradientProblem(FirstOrderFunction* function,
LocalParameterization* parameterization);
// Takes ownership of the function and the manifold.
GradientProblem(FirstOrderFunction* function, Manifold* manifold);
int NumParameters() const;
int NumLocalParameters() const;
// Dimension of the manifold (and its tangent space).
//
// During the transition from LocalParameterization to Manifold, this method
// reports the LocalSize of the LocalParameterization or the TangentSize of
// the Manifold object associated with this problem.
int NumTangentParameters() const;
// Dimension of the manifold (and its tangent space).
//
// NOTE: This method is deprecated and will be removed in the next public
// release of Ceres Solver. Please move to using NumTangentParameters()
// instead.
int NumLocalParameters() const { return NumTangentParameters(); }
// This call is not thread safe.
bool Evaluate(const double* parameters, double* cost, double* gradient) const;
bool Plus(const double* x, const double* delta, double* x_plus_delta) const;
const FirstOrderFunction* function() const { return function_.get(); }
FirstOrderFunction* mutable_function() { return function_.get(); }
// NOTE: During the transition from LocalParameterization to Manifold we need
// to support both the LocalParameterization and Manifold based constructors.
//
// When the user uses the LocalParameterization, internally the solver will
// wrap it in a ManifoldAdapter object and return it when manifold or
// mutable_manifold are called.
//
// As a result this method will return a non-nullptr result if a Manifold or a
// LocalParameterization was used when constructing the GradientProblem.
const Manifold* manifold() const { return manifold_.get(); }
Manifold* mutable_manifold() { return manifold_.get(); }
// If the problem is constructed without a LocalParameterization or with a
// Manifold this method will return a nullptr.
//
// NOTE: This method is deprecated and will be removed in the next public
// release of Ceres Solver.
CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.")
const LocalParameterization* parameterization() const {
return parameterization_.get();
}
// If the problem is constructed without a LocalParameterization or with a
// Manifold this method will return a nullptr.
//
// NOTE: This method is deprecated and will be removed in the next public
// release of Ceres Solver.
CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.")
LocalParameterization* mutable_parameterization() {
return parameterization_.get();
}
private:
std::unique_ptr<FirstOrderFunction> function_;
CERES_DEPRECATED_WITH_MSG("")
std::unique_ptr<LocalParameterization> parameterization_;
std::unique_ptr<Manifold> manifold_;
std::unique_ptr<double[]> scratch_;
};
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_GRADIENT_PROBLEM_H_
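For context, the Rosenbrock example above is typically driven like this (the starting point is arbitrary; a Manifold could additionally be passed to the second constructor):
double parameters[2] = {-1.2, 1.0};
ceres::GradientProblem problem(new Rosenbrock());
ceres::GradientProblemSolver::Options options;
options.minimizer_progress_to_stdout = true;
ceres::GradientProblemSolver::Summary summary;
ceres::Solve(options, problem, parameters, &summary);
std::cout << summary.FullReport() << "\n";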

View File

@ -36,6 +36,7 @@
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/types.h"
@ -305,8 +306,12 @@ class CERES_EXPORT GradientProblemSolver {
int num_parameters = -1;
// Dimension of the tangent space of the problem.
CERES_DEPRECATED_WITH_MSG("Use num_tangent_parameters.")
int num_local_parameters = -1;
// Dimension of the tangent space of the problem.
int num_tangent_parameters = -1;
// Type of line search direction used.
LineSearchDirectionType line_search_direction_type = LBFGS;

View File

@ -73,20 +73,22 @@ struct ArraySelector<T,
true,
fits_on_stack>
: ceres::internal::FixedArray<T, max_num_elements_on_stack> {
ArraySelector(int s)
explicit ArraySelector(int s)
: ceres::internal::FixedArray<T, max_num_elements_on_stack>(s) {}
};
template <typename T, int num_elements, int max_num_elements_on_stack>
struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, true>
: std::array<T, num_elements> {
ArraySelector(int s) { CHECK_EQ(s, num_elements); }
explicit ArraySelector(int s) { CHECK_EQ(s, num_elements); }
};
template <typename T, int num_elements, int max_num_elements_on_stack>
struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, false>
: std::vector<T> {
ArraySelector(int s) : std::vector<T>(s) { CHECK_EQ(s, num_elements); }
explicit ArraySelector(int s) : std::vector<T>(s) {
CHECK_EQ(s, num_elements);
}
};
} // namespace internal

View File

@ -132,17 +132,16 @@
// respectively. This is how autodiff works for functors taking multiple vector
// valued arguments (up to 6).
//
// Jacobian NULL pointers
// ----------------------
// In general, the functions below will accept NULL pointers for all or some of
// the Jacobian parameters, meaning that those Jacobians will not be computed.
// Jacobian null pointers (nullptr)
// --------------------------------
// In general, the functions below will accept nullptr for all or some of the
// Jacobian parameters, meaning that those Jacobians will not be computed.
#ifndef CERES_PUBLIC_INTERNAL_AUTODIFF_H_
#define CERES_PUBLIC_INTERNAL_AUTODIFF_H_
#include <stddef.h>
#include <array>
#include <cstddef>
#include <utility>
#include "ceres/internal/array_selector.h"
@ -198,7 +197,7 @@ struct Make1stOrderPerturbation {
template <int N, int Offset, typename T, typename JetT>
struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
public:
static void Apply(const T* src, JetT* dst) {}
static void Apply(const T* /* NOT USED */, JetT* /* NOT USED */) {}
};
// Calls Make1stOrderPerturbation for every parameter block.
@ -311,7 +310,7 @@ inline bool AutoDifferentiate(const Functor& functor,
int dynamic_num_outputs,
T* function_value,
T** jacobians) {
typedef Jet<T, ParameterDims::kNumParameters> JetT;
using JetT = Jet<T, ParameterDims::kNumParameters>;
using Parameters = typename ParameterDims::Parameters;
if (kNumResiduals != DYNAMIC) {

View File

@ -35,39 +35,39 @@
namespace ceres {
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vector;
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
Matrix;
typedef Eigen::Map<Vector> VectorRef;
typedef Eigen::Map<Matrix> MatrixRef;
typedef Eigen::Map<const Vector> ConstVectorRef;
typedef Eigen::Map<const Matrix> ConstMatrixRef;
using Vector = Eigen::Matrix<double, Eigen::Dynamic, 1>;
using Matrix =
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>;
using VectorRef = Eigen::Map<Vector>;
using MatrixRef = Eigen::Map<Matrix>;
using ConstVectorRef = Eigen::Map<const Vector>;
using ConstMatrixRef = Eigen::Map<const Matrix>;
// Column major matrices for DenseSparseMatrix/DenseQRSolver
typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>
ColMajorMatrix;
using ColMajorMatrix =
Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>;
typedef Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
ColMajorMatrixRef;
using ColMajorMatrixRef =
Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>;
typedef Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
ConstColMajorMatrixRef;
using ConstColMajorMatrixRef =
Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>;
// C++ does not support templated typedefs, thus the need for this
// struct so that we can support statically sized Matrix and Maps.
template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
struct EigenTypes {
typedef Eigen::Matrix<double,
num_rows,
num_cols,
num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor>
Matrix;
using Matrix =
Eigen::Matrix<double,
num_rows,
num_cols,
num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor>;
typedef Eigen::Map<Matrix> MatrixRef;
typedef Eigen::Map<const Matrix> ConstMatrixRef;
typedef Eigen::Matrix<double, num_rows, 1> Vector;
typedef Eigen::Map<Eigen::Matrix<double, num_rows, 1>> VectorRef;
typedef Eigen::Map<const Eigen::Matrix<double, num_rows, 1>> ConstVectorRef;
using MatrixRef = Eigen::Map<Matrix>;
using ConstMatrixRef = Eigen::Map<const Matrix>;
using Vector = Eigen::Matrix<double, num_rows, 1>;
using VectorRef = Eigen::Map<Eigen::Matrix<double, num_rows, 1>>;
using ConstVectorRef = Eigen::Map<const Eigen::Matrix<double, num_rows, 1>>;
};
} // namespace ceres
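As a small sketch of the statically sized aliases above (the raw buffer is hypothetical):
double buffer[3] = {1.0, 2.0, 3.0};
ceres::EigenTypes<3, 1>::ConstVectorRef x(buffer);  // Map<const Matrix<double, 3, 1>>
ceres::Vector dynamic_copy = x;                     // dynamically sized copy
ceres::EigenTypes<2, 3>::Matrix J;                  // row-major 2x3 matrix
J.setZero();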

View File

@ -82,6 +82,14 @@ void ComputeHouseholderVector(const XVectorType& x,
v->head(v->rows() - 1) /= v_pivot;
}
template <typename XVectorType, typename Derived>
typename Derived::PlainObject ApplyHouseholderVector(
const XVectorType& y,
const Eigen::MatrixBase<Derived>& v,
const typename Derived::Scalar& beta) {
return (y - v * (beta * (v.transpose() * y)));
}
} // namespace internal
} // namespace ceres
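A sketch of how the new ApplyHouseholderVector pairs with ComputeHouseholderVector, following the call pattern used by the sphere manifold helpers below (the vectors are arbitrary example values):
Eigen::Vector3d x(0.0, 0.0, 2.0);
Eigen::Vector3d v;
double beta;
// v and beta define the Householder reflection H = I - beta * v * v' of x.
ceres::internal::ComputeHouseholderVector<Eigen::Vector3d, double, 3>(x, &v, &beta);
// Apply H to another vector without forming H explicitly.
Eigen::Vector3d y(1.0, 2.0, 3.0);
Eigen::Vector3d Hy = ceres::internal::ApplyHouseholderVector(y, v, beta);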

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2018 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -27,6 +27,7 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jodebo_beck@gmx.de (Johannes Beck)
// sergiu.deitsch@gmail.com (Sergiu Deitsch)
//
// Algorithms to be used together with integer_sequence, like computing the sum
// or the exclusive scan (sometimes called exclusive prefix sum) at compile
@ -37,6 +38,8 @@
#include <utility>
#include "ceres/jet_fwd.h"
namespace ceres {
namespace internal {
@ -164,6 +167,124 @@ class ExclusiveScanT {
template <typename Seq>
using ExclusiveScan = typename ExclusiveScanT<Seq>::Type;
// Removes all elements from an integer sequence that correspond to the
// specified ValueToRemove.
//
// This type should not be used directly but instead RemoveValue.
template <typename T, T ValueToRemove, typename... Sequence>
struct RemoveValueImpl;
// Final filtered sequence
template <typename T, T ValueToRemove, T... Values>
struct RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T, Values...>,
std::integer_sequence<T>> {
using type = std::integer_sequence<T, Values...>;
};
// Found a matching value
template <typename T, T ValueToRemove, T... Head, T... Tail>
struct RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T, Head...>,
std::integer_sequence<T, ValueToRemove, Tail...>>
: RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T, Head...>,
std::integer_sequence<T, Tail...>> {};
// Move one element from the tail to the head
template <typename T, T ValueToRemove, T... Head, T MiddleValue, T... Tail>
struct RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T, Head...>,
std::integer_sequence<T, MiddleValue, Tail...>>
: RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T, Head..., MiddleValue>,
std::integer_sequence<T, Tail...>> {};
// Start recursion by splitting the integer sequence into two separate ones
template <typename T, T ValueToRemove, T... Tail>
struct RemoveValueImpl<T, ValueToRemove, std::integer_sequence<T, Tail...>>
: RemoveValueImpl<T,
ValueToRemove,
std::integer_sequence<T>,
std::integer_sequence<T, Tail...>> {};
// RemoveValue takes an integer Sequence of arbitrary type and removes all
// elements matching ValueToRemove.
//
// In contrast to RemoveValueImpl, this implementation deduces the value type
// eliminating the need to specify it explicitly.
//
// As an example, RemoveValue<std::integer_sequence<int, 1, 2, 3>, 4>::type will
// not transform the type of the original sequence. However,
// RemoveValue<std::integer_sequence<int, 0, 0, 2>, 2>::type will generate a new
// sequence of type std::integer_sequence<int, 0, 0> by removing the value 2.
template <typename Sequence, typename Sequence::value_type ValueToRemove>
struct RemoveValue
: RemoveValueImpl<typename Sequence::value_type, ValueToRemove, Sequence> {
};
// Convenience template alias for RemoveValue.
template <typename Sequence, typename Sequence::value_type ValueToRemove>
using RemoveValue_t = typename RemoveValue<Sequence, ValueToRemove>::type;
// Determines whether the values of an integer sequence are all the same.
//
// The integer sequence must contain at least one value. The predicate is
// undefined for empty sequences. The evaluation result of the predicate for a
// sequence containing only one value is defined to be true.
template <typename... Sequence>
struct AreAllEqual;
// The predicate result for a sequence containing one element is defined to be
// true.
template <typename T, T Value>
struct AreAllEqual<std::integer_sequence<T, Value>> : std::true_type {};
// Recursion end.
template <typename T, T Value1, T Value2>
struct AreAllEqual<std::integer_sequence<T, Value1, Value2>>
: std::integral_constant<bool, Value1 == Value2> {};
// Recursion for sequences containing at least two elements.
template <typename T, T Value1, T Value2, T... Values>
// clang-format off
struct AreAllEqual<std::integer_sequence<T, Value1, Value2, Values...> >
: std::integral_constant
<
bool,
AreAllEqual<std::integer_sequence<T, Value1, Value2> >::value &&
AreAllEqual<std::integer_sequence<T, Value2, Values...> >::value
>
// clang-format on
{};
// Convenience variable template for AreAllEqual.
template <class Sequence>
constexpr bool AreAllEqual_v = AreAllEqual<Sequence>::value;
// Predicate determining whether an integer sequence is either empty or all
// values are equal.
template <typename Sequence>
struct IsEmptyOrAreAllEqual;
// Empty case.
template <typename T>
struct IsEmptyOrAreAllEqual<std::integer_sequence<T>> : std::true_type {};
// General case for sequences containing at least one value.
template <typename T, T HeadValue, T... Values>
struct IsEmptyOrAreAllEqual<std::integer_sequence<T, HeadValue, Values...>>
: AreAllEqual<std::integer_sequence<T, HeadValue, Values...>> {};
// Convenience variable template for IsEmptyOrAreAllEqual.
template <class Sequence>
constexpr bool IsEmptyOrAreAllEqual_v = IsEmptyOrAreAllEqual<Sequence>::value;
} // namespace internal
} // namespace ceres
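A few compile-time checks, mirroring the examples in the comments above (a sketch):
using ceres::internal::AreAllEqual_v;
using ceres::internal::IsEmptyOrAreAllEqual_v;
using ceres::internal::RemoveValue_t;
static_assert(std::is_same<RemoveValue_t<std::integer_sequence<int, 0, 0, 2>, 2>,
                           std::integer_sequence<int, 0, 0>>::value,
              "all occurrences of the value are removed");
static_assert(AreAllEqual_v<std::integer_sequence<int, 3, 3, 3>>,
              "a constant sequence is all-equal");
static_assert(IsEmptyOrAreAllEqual_v<std::integer_sequence<int>>,
              "an empty sequence is accepted");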

View File

@ -0,0 +1,223 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch)
//
#ifndef CERES_PUBLIC_INTERNAL_JET_TRAITS_H_
#define CERES_PUBLIC_INTERNAL_JET_TRAITS_H_
#include <tuple>
#include <type_traits>
#include <utility>
#include "ceres/internal/integer_sequence_algorithm.h"
#include "ceres/jet_fwd.h"
namespace ceres {
namespace internal {
// Predicate that determines whether T is a Jet.
template <typename T, typename E = void>
struct IsJet : std::false_type {};
template <typename T, int N>
struct IsJet<Jet<T, N>> : std::true_type {};
// Convenience variable template for IsJet.
template <typename T>
constexpr bool IsJet_v = IsJet<T>::value;
// Predicate that determines whether any of the Types is a Jet.
template <typename... Types>
struct AreAnyJet : std::false_type {};
template <typename T, typename... Types>
struct AreAnyJet<T, Types...> : AreAnyJet<Types...> {};
template <typename T, int N, typename... Types>
struct AreAnyJet<Jet<T, N>, Types...> : std::true_type {};
// Convenience variable template for AreAnyJet.
template <typename... Types>
constexpr bool AreAnyJet_v = AreAnyJet<Types...>::value;
// Extracts the underlying floating-point from a type T.
template <typename T, typename E = void>
struct UnderlyingScalar {
using type = T;
};
template <typename T, int N>
struct UnderlyingScalar<Jet<T, N>> : UnderlyingScalar<T> {};
// Convenience template alias for UnderlyingScalar type trait.
template <typename T>
using UnderlyingScalar_t = typename UnderlyingScalar<T>::type;
// Predicate determining whether all Types in the pack are the same.
//
// Specifically, the predicate applies std::is_same recursively to pairs of
// Types in the pack.
//
// The predicate is defined only for template packs containing at least two
// types.
template <typename T1, typename T2, typename... Types>
// clang-format off
struct AreAllSame : std::integral_constant
<
bool,
AreAllSame<T1, T2>::value &&
AreAllSame<T2, Types...>::value
>
// clang-format on
{};
// AreAllSame pairwise test.
template <typename T1, typename T2>
struct AreAllSame<T1, T2> : std::is_same<T1, T2> {};
// Convenience variable template for AreAllSame.
template <typename... Types>
constexpr bool AreAllSame_v = AreAllSame<Types...>::value;
// Determines the rank of a type. This makes it possible to ensure that types
// passed as arguments are compatible with each other. The rank of a Jet is
// determined by the dimension of its dual part. The rank of a scalar is always
// 0. Non-specialized types default to a rank of -1.
template <typename T, typename E = void>
struct Rank : std::integral_constant<int, -1> {};
// The rank of a scalar is 0.
template <typename T>
struct Rank<T, std::enable_if_t<std::is_scalar<T>::value>>
: std::integral_constant<int, 0> {};
// The rank of a Jet is given by its dimensionality.
template <typename T, int N>
struct Rank<Jet<T, N>> : std::integral_constant<int, N> {};
// Convenience variable template for Rank.
template <typename T>
constexpr int Rank_v = Rank<T>::value;
// Constructs an integer sequence of ranks for each of the Types in the pack.
template <typename... Types>
using Ranks_t = std::integer_sequence<int, Rank_v<Types>...>;
// Returns the scalar part of a type. This overload acts as an identity.
template <typename T>
constexpr decltype(auto) AsScalar(T&& value) noexcept {
return std::forward<T>(value);
}
// Recursively unwraps the scalar part of a Jet until a non-Jet scalar type is
// encountered.
template <typename T, int N>
constexpr decltype(auto) AsScalar(const Jet<T, N>& value) noexcept(
noexcept(AsScalar(value.a))) {
return AsScalar(value.a);
}
} // namespace internal
// Type trait ensuring at least one of the types is a Jet,
// the underlying scalar types are the same and Jet dimensions match.
//
// The type trait can be further specialized if necessary.
//
// This trait is a candidate for a concept definition once C++20 features can
// be used.
template <typename... Types>
// clang-format off
struct CompatibleJetOperands : std::integral_constant
<
bool,
// At least one of the types is a Jet
internal::AreAnyJet_v<Types...> &&
// The underlying floating-point types are exactly the same
internal::AreAllSame_v<internal::UnderlyingScalar_t<Types>...> &&
// Non-zero ranks of types are equal
internal::IsEmptyOrAreAllEqual_v<internal::RemoveValue_t<internal::Ranks_t<Types...>, 0>>
>
// clang-format on
{};
// Single Jet operand is always compatible.
template <typename T, int N>
struct CompatibleJetOperands<Jet<T, N>> : std::true_type {};
// Single non-Jet operand is always incompatible.
template <typename T>
struct CompatibleJetOperands<T> : std::false_type {};
// Empty operands are always incompatible.
template <>
struct CompatibleJetOperands<> : std::false_type {};
// Convenience variable template ensuring at least one of the types is a Jet,
// the underlying scalar types are the same and Jet dimensions match.
//
// This trait is a candidate for a concept definition once C++20 features can
// be used.
template <typename... Types>
constexpr bool CompatibleJetOperands_v = CompatibleJetOperands<Types...>::value;
// Type trait ensuring at least one of the types is a Jet,
// the underlying scalar types are compatible among each other and Jet
// dimensions match.
//
// The type trait can be further specialized if necessary.
//
// This trait is a candidate for a concept definition once C++20 features can
// be used.
template <typename... Types>
// clang-format off
struct PromotableJetOperands : std::integral_constant
<
bool,
// Types can be compatible among each other
internal::AreAnyJet_v<Types...> &&
// Non-zero ranks of types are equal
internal::IsEmptyOrAreAllEqual_v<internal::RemoveValue_t<internal::Ranks_t<Types...>, 0>>
>
// clang-format on
{};
// Convenience variable template ensuring at least one of the types is a Jet,
// the underlying scalar types are compatible among each other and Jet
// dimensions match.
//
// This trait is a candidate for a concept definition once C++20 features can
// be used.
template <typename... Types>
constexpr bool PromotableJetOperands_v = PromotableJetOperands<Types...>::value;
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_JET_TRAITS_H_
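Compile-time sketches of what the traits above accept and reject:
static_assert(ceres::internal::Rank_v<double> == 0,
              "scalars have rank 0");
static_assert(ceres::internal::Rank_v<ceres::Jet<double, 3>> == 3,
              "the rank of a Jet is its dual dimension");
static_assert(ceres::CompatibleJetOperands_v<ceres::Jet<double, 3>, double>,
              "a Jet and its underlying scalar are compatible");
static_assert(!ceres::CompatibleJetOperands_v<ceres::Jet<double, 3>,
                                              ceres::Jet<double, 4>>,
              "mismatched dual dimensions are rejected");
static_assert(ceres::PromotableJetOperands_v<ceres::Jet<double, 3>, int>,
              "promotable scalars such as int are accepted");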

View File

@ -86,18 +86,18 @@ struct NumericDiff {
(kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize
: parameter_block_size);
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
using ResidualVector = Matrix<double, kNumResiduals, 1>;
using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
// The convoluted reasoning for choosing the Row/Column major
// ordering of the matrix is an artifact of the restrictions in
// Eigen that prevent it from creating RowMajor matrices with a
// single column. In these cases, we ask for a ColMajor matrix.
typedef Matrix<double,
kNumResiduals,
kParameterBlockSize,
(kParameterBlockSize == 1) ? ColMajor : RowMajor>
JacobianMatrix;
using JacobianMatrix =
Matrix<double,
kNumResiduals,
kParameterBlockSize,
(kParameterBlockSize == 1) ? ColMajor : RowMajor>;
Map<JacobianMatrix> parameter_jacobian(
jacobian, num_residuals_internal, parameter_block_size_internal);
@ -121,7 +121,7 @@ struct NumericDiff {
// thus ridders_relative_initial_step_size is used.
if (kMethod == RIDDERS) {
min_step_size =
std::max(min_step_size, options.ridders_relative_initial_step_size);
(std::max)(min_step_size, options.ridders_relative_initial_step_size);
}
// For each parameter in the parameter block, use finite differences to
@ -132,7 +132,7 @@ struct NumericDiff {
num_residuals_internal);
for (int j = 0; j < parameter_block_size_internal; ++j) {
const double delta = std::max(min_step_size, step_size(j));
const double delta = (std::max)(min_step_size, step_size(j));
if (kMethod == RIDDERS) {
if (!EvaluateRiddersJacobianColumn(functor,
@ -184,8 +184,8 @@ struct NumericDiff {
using Eigen::Map;
using Eigen::Matrix;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
using ResidualVector = Matrix<double, kNumResiduals, 1>;
using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
@ -260,10 +260,10 @@ struct NumericDiff {
using Eigen::Map;
using Eigen::Matrix;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kNumResiduals, Eigen::Dynamic>
ResidualCandidateMatrix;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
using ResidualVector = Matrix<double, kNumResiduals, 1>;
using ResidualCandidateMatrix =
Matrix<double, kNumResiduals, Eigen::Dynamic>;
using ParameterVector = Matrix<double, kParameterBlockSize, 1>;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
@ -296,7 +296,7 @@ struct NumericDiff {
// norm_error is supposed to decrease as the finite difference tableau
// generation progresses, serving both as an estimate for differentiation
// error and as a measure of differentiation numerical stability.
double norm_error = std::numeric_limits<double>::max();
double norm_error = (std::numeric_limits<double>::max)();
// Loop over decreasing step sizes until:
// 1. Error is smaller than a given value (ridders_epsilon),
@ -342,7 +342,7 @@ struct NumericDiff {
options.ridders_step_shrink_factor;
// Compute the difference between the previous value and the current.
double candidate_error = std::max(
double candidate_error = (std::max)(
(current_candidates->col(k) - current_candidates->col(k - 1))
.norm(),
(current_candidates->col(k) - previous_candidates->col(k - 1))

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -31,80 +31,58 @@
#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
#define CERES_PUBLIC_INTERNAL_PORT_H_
// This file needs to compile as c code.
#include "ceres/internal/config.h"
#if defined(CERES_USE_OPENMP)
#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
#endif
#elif defined(CERES_USE_CXX_THREADS)
#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_NO_THREADS
#endif
#elif defined(CERES_NO_THREADS)
#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
#endif
#else
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
#endif
// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was
// compiled without any sparse back-end. Verify that it has not subsequently
// been inconsistently redefined.
#if defined(CERES_NO_SPARSE)
#if !defined(CERES_NO_SUITESPARSE)
#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
#endif
#if !defined(CERES_NO_CXSPARSE)
#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
#endif
#if !defined(CERES_NO_ACCELERATE_SPARSE)
#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
#endif
#if defined(CERES_USE_EIGEN_SPARSE)
#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
#endif
#endif
// A macro to signal which functions and classes are exported when
// building a shared library.
// A macro to mark a function/variable/class as deprecated.
// We use compiler specific attributes rather than the c++
// attribute because they do not mix well with each other.
#if defined(_MSC_VER)
#define CERES_API_SHARED_IMPORT __declspec(dllimport)
#define CERES_API_SHARED_EXPORT __declspec(dllexport)
#define CERES_DEPRECATED_WITH_MSG(message) __declspec(deprecated(message))
#elif defined(__GNUC__)
#define CERES_API_SHARED_IMPORT __attribute__((visibility("default")))
#define CERES_API_SHARED_EXPORT __attribute__((visibility("default")))
#define CERES_DEPRECATED_WITH_MSG(message) __attribute__((deprecated(message)))
#else
#define CERES_API_SHARED_IMPORT
#define CERES_API_SHARED_EXPORT
// In the worst case fall back to c++ attribute.
#define CERES_DEPRECATED_WITH_MSG(message) [[deprecated(message)]]
#endif
// CERES_BUILDING_SHARED_LIBRARY is only defined locally when Ceres itself is
// compiled as a shared library, it is never exported to users. In order that
// we do not have to configure config.h separately when building Ceres as either
// a static or dynamic library, we define both CERES_USING_SHARED_LIBRARY and
// CERES_BUILDING_SHARED_LIBRARY when building as a shared library.
#if defined(CERES_USING_SHARED_LIBRARY)
#if defined(CERES_BUILDING_SHARED_LIBRARY)
// Compiling Ceres itself as a shared library.
#define CERES_EXPORT CERES_API_SHARED_EXPORT
#else
// Using Ceres as a shared library.
#define CERES_EXPORT CERES_API_SHARED_IMPORT
#endif
#else
// Ceres was compiled as a static library, export everything.
#define CERES_EXPORT
#ifndef CERES_GET_FLAG
#define CERES_GET_FLAG(X) X
#endif
// Unit tests reach in and test internal functionality so we need a way to make
// those symbols visible
#ifdef CERES_EXPORT_INTERNAL_SYMBOLS
#define CERES_EXPORT_INTERNAL CERES_EXPORT
#else
#define CERES_EXPORT_INTERNAL
#endif
// Indicates whether C++17 is currently active
#ifndef CERES_HAS_CPP17
#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
#define CERES_HAS_CPP17
#endif // __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >=
// 201703L)
#endif // !defined(CERES_HAS_CPP17)
// Indicates whether C++20 is currently active
#ifndef CERES_HAS_CPP20
#if __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L)
#define CERES_HAS_CPP20
#endif // __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >=
// 202002L)
#endif // !defined(CERES_HAS_CPP20)
// Prevents symbols from being substituted by the corresponding macro definition
// under the same name. For instance, min and max are defined as macros on
// Windows (unless NOMINMAX is defined) which causes compilation errors when
// defining or referencing symbols under the same name.
//
// To be robust in all cases particularly when NOMINMAX cannot be used, use this
// macro to annotate min/max declarations/definitions. Examples:
//
// int max CERES_PREVENT_MACRO_SUBSTITUTION();
// min CERES_PREVENT_MACRO_SUBSTITUTION(a, b);
// max CERES_PREVENT_MACRO_SUBSTITUTION(a, b);
//
// NOTE: In case the symbols for which the substitution must be prevented are
// used within another macro, the substitution must be inhibited using parens as
//
// (std::numeric_limits<double>::max)()
//
// since the helper macro will not work here. Do not use this technique in the
// general case, because it will prevent argument-dependent lookup (ADL).
//
#define CERES_PREVENT_MACRO_SUBSTITUTION // Yes, it's empty
#endif // CERES_PUBLIC_INTERNAL_PORT_H_
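For reference, a sketch of how CERES_DEPRECATED_WITH_MSG defined above is attached to a declaration (OldFunction and NewFunction are placeholders):
CERES_DEPRECATED_WITH_MSG("OldFunction will be removed; use NewFunction instead.")
void OldFunction();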

View File

@ -0,0 +1,162 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Mike Vitus)
// jodebo_beck@gmx.de (Johannes Beck)
#ifndef CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_
#define CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_
#include "ceres/internal/householder_vector.h"
// This module contains functions to compute the SphereManifold plus and minus
// operator and their Jacobians.
//
// As the parameters to these functions are shared between them, they are
// described here: The following variable names are used:
// Plus(x, delta) = x + delta = x_plus_delta,
// Minus(y, x) = y - x = y_minus_x.
//
// The remaining ones are v and beta which describe the Householder
// transformation of x, and norm_delta which is the norm of delta.
//
// The types of x, y, x_plus_delta and y_minus_x need to be equivalent to
// Eigen::Matrix<double, AmbientSpaceDimension, 1> and the type of delta needs
// to be equivalent to Eigen::Matrix<double, TangentSpaceDimension, 1>.
//
// The type of Jacobian plus needs to be equivalent to Eigen::Matrix<double,
// AmbientSpaceDimension, TangentSpaceDimension, Eigen::RowMajor> and for
// Jacobian minus Eigen::Matrix<double, TangentSpaceDimension,
// AmbientSpaceDimension, Eigen::RowMajor>.
//
// For all vector / matrix inputs and outputs, template parameters are used so
// that Eigen::Ref and Eigen block expressions can also be passed to the
// functions.
namespace ceres {
namespace internal {
template <typename VT, typename XT, typename DeltaT, typename XPlusDeltaT>
inline void ComputeSphereManifoldPlus(const VT& v,
double beta,
const XT& x,
const DeltaT& delta,
double norm_delta,
XPlusDeltaT* x_plus_delta) {
constexpr int AmbientDim = VT::RowsAtCompileTime;
// Map the delta from the minimum representation to the over parameterized
// homogeneous vector. See B.2 p.25 equation (106) - (107) for more details.
const double norm_delta_div_2 = 0.5 * norm_delta;
const double sin_delta_by_delta =
std::sin(norm_delta_div_2) / norm_delta_div_2;
Eigen::Matrix<double, AmbientDim, 1> y(v.size());
y << 0.5 * sin_delta_by_delta * delta, std::cos(norm_delta_div_2);
// Apply the delta update to remain on the sphere.
*x_plus_delta = x.norm() * ApplyHouseholderVector(y, v, beta);
}
template <typename VT, typename JacobianT>
inline void ComputeSphereManifoldPlusJacobian(const VT& x,
JacobianT* jacobian) {
constexpr int AmbientSpaceDim = VT::RowsAtCompileTime;
using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>;
const int ambient_size = x.size();
const int tangent_size = x.size() - 1;
AmbientVector v(ambient_size);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
ComputeHouseholderVector<VT, double, AmbientSpaceDim>(x, &v, &beta);
// The Jacobian is equal to J = 0.5 * H.leftCols(size_ - 1) where H is the
// Householder matrix (H = I - beta * v * v').
for (int i = 0; i < tangent_size; ++i) {
(*jacobian).col(i) = -0.5 * beta * v(i) * v;
(*jacobian)(i, i) += 0.5;
}
(*jacobian) *= x.norm();
}
template <typename VT, typename XT, typename YT, typename YMinusXT>
inline void ComputeSphereManifoldMinus(
const VT& v, double beta, const XT& x, const YT& y, YMinusXT* y_minus_x) {
constexpr int AmbientSpaceDim = VT::RowsAtCompileTime;
constexpr int TangentSpaceDim =
AmbientSpaceDim == Eigen::Dynamic ? Eigen::Dynamic : AmbientSpaceDim - 1;
using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>;
const int tangent_size = v.size() - 1;
const AmbientVector hy = ApplyHouseholderVector(y, v, beta) / x.norm();
// Calculate y - x. See B.2 p.25 equation (108).
double y_last = hy[tangent_size];
double hy_norm = hy.template head<TangentSpaceDim>(tangent_size).norm();
if (hy_norm == 0.0) {
y_minus_x->setZero();
} else {
*y_minus_x = 2.0 * std::atan2(hy_norm, y_last) / hy_norm *
hy.template head<TangentSpaceDim>(tangent_size);
}
}
template <typename VT, typename JacobianT>
inline void ComputeSphereManifoldMinusJacobian(const VT& x,
JacobianT* jacobian) {
constexpr int AmbientSpaceDim = VT::RowsAtCompileTime;
using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>;
const int ambient_size = x.size();
const int tangent_size = x.size() - 1;
AmbientVector v(ambient_size);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
ComputeHouseholderVector<VT, double, AmbientSpaceDim>(x, &v, &beta);
// The Jacobian is equal to J = 2.0 * H.leftCols(size_ - 1) where H is the
// Householder matrix (H = I - beta * v * v').
for (int i = 0; i < tangent_size; ++i) {
(*jacobian).row(i) = -2.0 * beta * v(i) * v;
(*jacobian)(i, i) += 2.0;
}
(*jacobian) /= x.norm();
}
} // namespace internal
} // namespace ceres
#endif
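A usage sketch on the unit sphere in R^3 (tangent dimension 2), combining the helpers above with ComputeHouseholderVector as this module expects:
Eigen::Vector3d x(0.0, 0.0, 1.0);  // point on the unit sphere
Eigen::Vector3d v;
double beta;
ceres::internal::ComputeHouseholderVector<Eigen::Vector3d, double, 3>(x, &v, &beta);
Eigen::Vector2d delta(0.1, 0.0);   // tangent-space update
Eigen::Vector3d x_plus_delta;
ceres::internal::ComputeSphereManifoldPlus(
    v, beta, x, delta, delta.norm(), &x_plus_delta);
// x_plus_delta remains on the sphere of radius ||x|| = 1.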

View File

@ -33,8 +33,7 @@
#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#include <stddef.h>
#include <cstddef>
#include <type_traits>
#include <utility>

View File

@ -36,6 +36,7 @@
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/types.h"
namespace ceres {
@ -164,8 +165,6 @@ struct CERES_EXPORT IterationSummary {
// explicit LoggingCallback(bool log_to_stdout)
// : log_to_stdout_(log_to_stdout) {}
//
// ~LoggingCallback() {}
//
// CallbackReturnType operator()(const IterationSummary& summary) {
// const char* kReportRowFormat =
// "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
@ -194,7 +193,7 @@ struct CERES_EXPORT IterationSummary {
//
class CERES_EXPORT IterationCallback {
public:
virtual ~IterationCallback() {}
virtual ~IterationCallback();
virtual CallbackReturnType operator()(const IterationSummary& summary) = 0;
};

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -158,20 +158,59 @@
#define CERES_PUBLIC_JET_H_
#include <cmath>
#include <complex>
#include <iosfwd>
#include <iostream> // NOLINT
#include <limits>
#include <numeric>
#include <string>
#include <type_traits>
#include "Eigen/Core"
#include "ceres/internal/jet_traits.h"
#include "ceres/internal/port.h"
#include "ceres/jet_fwd.h"
// Here we provide partial specializations of std::common_type for the Jet class
// to allow determining a Jet type with a common underlying arithmetic type.
// Such an arithmetic type can be either a scalar or another Jet. An example
// for a common type, say, between a float and a Jet<double, N> is a Jet<double,
// N> (i.e., std::common_type_t<float, ceres::Jet<double, N>> and
// ceres::Jet<double, N> refer to the same type.)
//
// The partial specializations are also used for determining compatible types by
// means of SFINAE and thus allow such types to be expressed as operands of
// logical comparison operators. Missing (partial) specialization of
// std::common_type for a particular (custom) type will therefore disable the
// use of comparison operators defined by Ceres.
//
// Since these partial specializations are used as SFINAE constraints, they
// enable standard promotion rules between various scalar types and consequently
// their use in comparison against a Jet without providing implicit
// conversions from a scalar, such as an int, to a Jet (see the implementation
// of logical comparison operators below).
template <typename T, int N, typename U>
struct std::common_type<T, ceres::Jet<U, N>> {
using type = ceres::Jet<common_type_t<T, U>, N>;
};
template <typename T, int N, typename U>
struct std::common_type<ceres::Jet<T, N>, U> {
using type = ceres::Jet<common_type_t<T, U>, N>;
};
template <typename T, int N, typename U>
struct std::common_type<ceres::Jet<T, N>, ceres::Jet<U, N>> {
using type = ceres::Jet<common_type_t<T, U>, N>;
};
namespace ceres {
template <typename T, int N>
struct Jet {
enum { DIMENSION = N };
typedef T Scalar;
using Scalar = T;
// Default-construct "a" because otherwise this can lead to false errors about
// uninitialized uses when other classes relying on default constructed T
@ -352,19 +391,21 @@ inline Jet<T, N> operator/(const Jet<T, N>& f, T s) {
return Jet<T, N>(f.a * s_inverse, f.v * s_inverse);
}
// Binary comparison operators for both scalars and jets.
#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \
template <typename T, int N> \
inline bool operator op(const Jet<T, N>& f, const Jet<T, N>& g) { \
return f.a op g.a; \
} \
template <typename T, int N> \
inline bool operator op(const T& s, const Jet<T, N>& g) { \
return s op g.a; \
} \
template <typename T, int N> \
inline bool operator op(const Jet<T, N>& f, const T& s) { \
return f.a op s; \
// Binary comparison operators for both scalars and jets. At least one of the
// operands must be a Jet. Promotable scalars (e.g., int, float, double etc.)
// can appear on either side of the operator. std::common_type_t is used as an
// SFINAE constraint to selectively enable compatible operand types. This allows
// comparison, for instance, against int literals without implicit conversion.
// In case the Jet arithmetic type is a Jet itself, a recursive expansion of Jet
// value is performed.
#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \
template <typename Lhs, \
typename Rhs, \
std::enable_if_t<PromotableJetOperands_v<Lhs, Rhs>>* = nullptr> \
constexpr bool operator op(const Lhs& f, const Rhs& g) noexcept( \
noexcept(internal::AsScalar(f) op internal::AsScalar(g))) { \
using internal::AsScalar; \
return AsScalar(f) op AsScalar(g); \
}
CERES_DEFINE_JET_COMPARISON_OPERATOR(<) // NOLINT
CERES_DEFINE_JET_COMPARISON_OPERATOR(<=) // NOLINT
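A sketch of the mixed comparisons this enables (values are arbitrary):
ceres::Jet<double, 2> x(3.0);
bool a = x < 4;      // int literal is accepted; the scalar parts are compared
bool b = 2.5f <= x;  // a promotable scalar may appear on either side
static_assert(std::is_same<std::common_type_t<float, ceres::Jet<double, 2>>,
                           ceres::Jet<double, 2>>::value,
              "the common type of float and Jet<double, 2> is Jet<double, 2>");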
@ -386,43 +427,138 @@ using std::atan;
using std::atan2;
using std::cbrt;
using std::ceil;
using std::copysign;
using std::cos;
using std::cosh;
using std::erf;
using std::erfc;
using std::exp;
using std::exp2;
using std::expm1;
using std::fdim;
using std::floor;
using std::fma;
using std::fmax;
using std::fmin;
using std::fpclassify;
using std::hypot;
using std::isfinite;
using std::isinf;
using std::isnan;
using std::isnormal;
using std::log;
using std::log10;
using std::log1p;
using std::log2;
using std::norm;
using std::pow;
using std::signbit;
using std::sin;
using std::sinh;
using std::sqrt;
using std::tan;
using std::tanh;
// MSVC (up to 1930) defines quiet comparison functions as template functions
// which causes compilation errors due to ambiguity in the template parameter
// type resolution for using declarations in the ceres namespace. Work around
// the issue by defining specific overloads that bypass the MSVC standard
// library definitions.
#if defined(_MSC_VER)
inline bool isgreater(double lhs,
double rhs) noexcept(noexcept(std::isgreater(lhs, rhs))) {
return std::isgreater(lhs, rhs);
}
inline bool isless(double lhs,
double rhs) noexcept(noexcept(std::isless(lhs, rhs))) {
return std::isless(lhs, rhs);
}
inline bool islessequal(double lhs,
double rhs) noexcept(noexcept(std::islessequal(lhs,
rhs))) {
return std::islessequal(lhs, rhs);
}
inline bool isgreaterequal(double lhs, double rhs) noexcept(
noexcept(std::isgreaterequal(lhs, rhs))) {
return std::isgreaterequal(lhs, rhs);
}
inline bool islessgreater(double lhs, double rhs) noexcept(
noexcept(std::islessgreater(lhs, rhs))) {
return std::islessgreater(lhs, rhs);
}
inline bool isunordered(double lhs,
double rhs) noexcept(noexcept(std::isunordered(lhs,
rhs))) {
return std::isunordered(lhs, rhs);
}
#else
using std::isgreater;
using std::isgreaterequal;
using std::isless;
using std::islessequal;
using std::islessgreater;
using std::isunordered;
#endif
#ifdef CERES_HAS_CPP20
using std::lerp;
using std::midpoint;
#endif // defined(CERES_HAS_CPP20)
// Legacy names from pre-C++11 days.
// clang-format off
CERES_DEPRECATED_WITH_MSG("ceres::IsFinite will be removed in a future Ceres Solver release. Please use ceres::isfinite.")
inline bool IsFinite(double x) { return std::isfinite(x); }
CERES_DEPRECATED_WITH_MSG("ceres::IsInfinite will be removed in a future Ceres Solver release. Please use ceres::isinf.")
inline bool IsInfinite(double x) { return std::isinf(x); }
CERES_DEPRECATED_WITH_MSG("ceres::IsNaN will be removed in a future Ceres Solver release. Please use ceres::isnan.")
inline bool IsNaN(double x) { return std::isnan(x); }
CERES_DEPRECATED_WITH_MSG("ceres::IsNormal will be removed in a future Ceres Solver release. Please use ceres::isnormal.")
inline bool IsNormal(double x) { return std::isnormal(x); }
// clang-format on
// In general, f(a + h) ~= f(a) + f'(a) h, via the chain rule.
// abs(x + h) ~= x + h or -(x + h)
// abs(x + h) ~= abs(x) + sgn(x)h
template <typename T, int N>
inline Jet<T, N> abs(const Jet<T, N>& f) {
return (f.a < T(0.0) ? -f : f);
return Jet<T, N>(abs(f.a), copysign(T(1), f.a) * f.v);
}
// copysign(a, b) composes a float with the magnitude of a and the sign of b.
// Therefore, the function can be formally defined as
//
// copysign(a, b) = sgn(b)|a|
//
// where
//
// d/dx |x| = sgn(x)
// d/dx sgn(x) = 2δ(x)
//
// sgn(x) being the signum function. Differentiating copysign(a, b) with respect
// to a and b gives:
//
// d/da sgn(b)|a| = sgn(a) sgn(b)
// d/db sgn(b)|a| = 2|a|δ(b)
//
// with the dual representation given by
//
// copysign(a + da, b + db) ~= sgn(b)|a| + (sgn(a)sgn(b) da + 2|a|δ(b) db)
//
// where δ(b) is the Dirac delta function.
template <typename T, int N>
inline Jet<T, N> copysign(const Jet<T, N>& f, const Jet<T, N> g) {
// The Dirac delta function δ(b) is undefined at b=0 (here it's
// infinite) and 0 everywhere else.
T d = fpclassify(g) == FP_ZERO ? std::numeric_limits<T>::infinity() : T(0);
T sa = copysign(T(1), f.a); // sgn(a)
T sb = copysign(T(1), g.a); // sgn(b)
// The second part of the infinitesimal is 2|a|δ(b) which is either infinity
// or 0 unless a or any of the values of the b infinitesimal are 0. In the
// latter case, the corresponding values become NaNs (multiplying 0 by
// infinity gives NaN). We drop the constant factor 2 since it does not change
// the result (its values will still be either 0, infinity or NaN).
return Jet<T, N>(copysign(f.a, g.a), sa * sb * f.v + abs(f.a) * d * g.v);
}
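A small numeric sketch of the rule derived above:
ceres::Jet<double, 1> f(2.0, 0);           // a = 2 with da/dx = 1
ceres::Jet<double, 1> g(-3.0);             // b = -3 with zero derivative
ceres::Jet<double, 1> h = copysign(f, g);  // found by ADL
// h.a == -2 and h.v[0] == -1: the value takes b's sign, the derivative is
// scaled by sgn(a) * sgn(b), and the delta term vanishes because b != 0.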
// log(a + h) ~= log(a) + h / a
@ -432,6 +568,21 @@ inline Jet<T, N> log(const Jet<T, N>& f) {
return Jet<T, N>(log(f.a), f.v * a_inverse);
}
// log10(a + h) ~= log10(a) + h / (a log(10))
template <typename T, int N>
inline Jet<T, N> log10(const Jet<T, N>& f) {
// Most compilers will expand log(10) to a constant.
const T a_inverse = T(1.0) / (f.a * log(T(10.0)));
return Jet<T, N>(log10(f.a), f.v * a_inverse);
}
// log1p(a + h) ~= log1p(a) + h / (1 + a)
template <typename T, int N>
inline Jet<T, N> log1p(const Jet<T, N>& f) {
const T a_inverse = T(1.0) / (T(1.0) + f.a);
return Jet<T, N>(log1p(f.a), f.v * a_inverse);
}
// exp(a + h) ~= exp(a) + exp(a) h
template <typename T, int N>
inline Jet<T, N> exp(const Jet<T, N>& f) {
@ -439,6 +590,14 @@ inline Jet<T, N> exp(const Jet<T, N>& f) {
return Jet<T, N>(tmp, tmp * f.v);
}
// expm1(a + h) ~= expm1(a) + exp(a) h
template <typename T, int N>
inline Jet<T, N> expm1(const Jet<T, N>& f) {
const T tmp = expm1(f.a);
const T expa = tmp + T(1.0); // exp(a) = expm1(a) + 1
return Jet<T, N>(tmp, expa * f.v);
}
// sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a))
template <typename T, int N>
inline Jet<T, N> sqrt(const Jet<T, N>& f) {
@ -565,29 +724,101 @@ inline Jet<T, N> hypot(const Jet<T, N>& x, const Jet<T, N>& y) {
return Jet<T, N>(tmp, x.a / tmp * x.v + y.a / tmp * y.v);
}
#ifdef CERES_HAS_CPP17
// Like sqrt(x^2 + y^2 + z^2),
// but acts to prevent underflow/overflow for small/large x/y/z.
// Note that the function is non-smooth at x=y=z=0,
// so the derivative is undefined there.
template <typename T, int N>
inline Jet<T, N> fmax(const Jet<T, N>& x, const Jet<T, N>& y) {
return x < y ? y : x;
inline Jet<T, N> hypot(const Jet<T, N>& x,
const Jet<T, N>& y,
const Jet<T, N>& z) {
// d/da sqrt(a) = 0.5 / sqrt(a)
// d/dx x^2 + y^2 + z^2 = 2x
// So by the chain rule:
// d/dx sqrt(x^2 + y^2 + z^2)
// = 0.5 / sqrt(x^2 + y^2 + z^2) * 2x
// = x / sqrt(x^2 + y^2 + z^2)
// d/dy sqrt(x^2 + y^2 + z^2) = y / sqrt(x^2 + y^2 + z^2)
// d/dz sqrt(x^2 + y^2 + z^2) = z / sqrt(x^2 + y^2 + z^2)
const T tmp = hypot(x.a, y.a, z.a);
return Jet<T, N>(tmp, x.a / tmp * x.v + y.a / tmp * y.v + z.a / tmp * z.v);
}
#endif // defined(CERES_HAS_CPP17)
// Like x * y + z but rounded only once.
template <typename T, int N>
inline Jet<T, N> fma(const Jet<T, N>& x,
const Jet<T, N>& y,
const Jet<T, N>& z) {
// d/dx fma(x, y, z) = y
// d/dy fma(x, y, z) = x
// d/dz fma(x, y, z) = 1
return Jet<T, N>(fma(x.a, y.a, z.a), y.a * x.v + x.a * y.v + z.v);
}
template <typename T, int N>
inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) {
return y < x ? y : x;
// Returns the larger of the two arguments. NaNs are treated as missing data.
//
// NOTE: This function is NOT subject to any of the error conditions specified
// in `math_errhandling`.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline decltype(auto) fmax(const Lhs& f, const Rhs& g) {
using J = std::common_type_t<Lhs, Rhs>;
return (isnan(g) || isgreater(f, g)) ? J{f} : J{g};
}
// erf is defined as an integral that cannot be expressed analyticaly
// Returns the smaller of the two arguments. NaNs are treated as missing data.
//
// NOTE: This function is NOT subject to any of the error conditions specified
// in `math_errhandling`.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline decltype(auto) fmin(const Lhs& f, const Rhs& g) {
using J = std::common_type_t<Lhs, Rhs>;
return (isnan(f) || isless(g, f)) ? J{g} : J{f};
}
// Returns the positive difference (f - g) of two arguments and zero if f <= g.
// If at least one argument is NaN, a NaN is returned.
//
// NOTE At least one of the argument types must be a Jet, the other one can be a
// scalar. In case both arguments are Jets, their dimensionality must match.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline decltype(auto) fdim(const Lhs& f, const Rhs& g) {
using J = std::common_type_t<Lhs, Rhs>;
if (isnan(f) || isnan(g)) {
return std::numeric_limits<J>::quiet_NaN();
}
return isgreater(f, g) ? J{f - g} : J{};
}
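//
// Illustrative sketch of the NaN-as-missing-data semantics of fmax/fmin and
// of fdim, mixing a Jet with a plain scalar operand as permitted above:
//
//   Jet<double, 1> x(2.0, 0);
//   Jet<double, 1> missing(std::numeric_limits<double>::quiet_NaN());
//   Jet<double, 1> a = fmax(x, 1.0);      // a == x, derivative preserved
//   Jet<double, 1> b = fmax(x, missing);  // NaN treated as missing: b == x
//   Jet<double, 1> c = fdim(x, 3.0);      // x <= 3, so c is the zero Jet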
// erf is defined as an integral that cannot be expressed analytically
// however, the derivative is trivial to compute
// erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi)
template <typename T, int N>
inline Jet<T, N> erf(const Jet<T, N>& x) {
return Jet<T, N>(erf(x.a), x.v * M_2_SQRTPI * exp(-x.a * x.a));
// We evaluate the constant as follows:
// 2 / sqrt(pi) = 1 / sqrt(atan(1.))
// On POSIX systems it is defined as M_2_SQRTPI, but this is not
// portable and the type may not be T. The above expression
// evaluates to full precision with IEEE arithmetic and, since it's
// constant, the compiler can generate exactly the same code. gcc
// does so even at -O0.
return Jet<T, N>(erf(x.a), x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1)))));
}
// erfc(x) = 1-erf(x)
// erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi))
template <typename T, int N>
inline Jet<T, N> erfc(const Jet<T, N>& x) {
return Jet<T, N>(erfc(x.a), -x.v * M_2_SQRTPI * exp(-x.a * x.a));
// See in erf() above for the evaluation of the constant in the derivative.
return Jet<T, N>(erfc(x.a),
-x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1)))));
}
// Bessel functions of the first kind with integer order equal to 0, 1, n.
@ -648,80 +879,210 @@ inline Jet<T, N> BesselJn(int n, const Jet<T, N>& f) {
T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v);
}
// Jet Classification. It is not clear what the appropriate semantics are for
// these classifications. This picks that std::isfinite and std::isnormal are
// "all" operations, i.e. all elements of the jet must be finite for the jet
// itself to be finite (or normal). For IsNaN and IsInfinite, the answer is less
// clear. This takes a "any" approach for IsNaN and IsInfinite such that if any
// part of a jet is nan or inf, then the entire jet is nan or inf. This leads
// to strange situations like a jet can be both IsInfinite and IsNaN, but in
// practice the "any" semantics are the most useful for e.g. checking that
// derivatives are sane.
// Classification and comparison functionality referencing only the scalar part
// of a Jet. To classify the derivatives (e.g., for sanity checks), the dual
// part should be referenced explicitly. For instance, to check whether the
// derivatives of a Jet 'f' are reasonable, one can use
//
// isfinite(f.v.array()).all()
// !isnan(f.v.array()).any()
//
// etc., depending on the desired semantics.
//
// NOTE: Floating-point classification and comparison functions and operators
// should be used with care as no derivatives can be propagated by such
// functions directly but only by expressions resulting from corresponding
// conditional statements. At the same time, conditional statements can possibly
// introduce a discontinuity in the cost function making it impossible to
// evaluate its derivative and thus the optimization problem intractable.
// The jet is finite if all parts of the jet are finite.
// Determines whether the scalar part of the Jet is finite.
template <typename T, int N>
inline bool isfinite(const Jet<T, N>& f) {
// Branchless implementation. This is more efficient for the false-case and
// works with the codegen system.
auto result = isfinite(f.a);
for (int i = 0; i < N; ++i) {
result = result & isfinite(f.v[i]);
}
return result;
return isfinite(f.a);
}
// The jet is infinite if any part of the Jet is infinite.
// Determines whether the scalar part of the Jet is infinite.
template <typename T, int N>
inline bool isinf(const Jet<T, N>& f) {
auto result = isinf(f.a);
for (int i = 0; i < N; ++i) {
result = result | isinf(f.v[i]);
}
return result;
return isinf(f.a);
}
// The jet is NaN if any part of the jet is NaN.
// Determines whether the scalar part of the Jet is NaN.
template <typename T, int N>
inline bool isnan(const Jet<T, N>& f) {
auto result = isnan(f.a);
for (int i = 0; i < N; ++i) {
result = result | isnan(f.v[i]);
}
return result;
return isnan(f.a);
}
// The jet is normal if all parts of the jet are normal.
// Determines whether the scalar part of the Jet is neither zero, subnormal,
// infinite, nor NaN.
template <typename T, int N>
inline bool isnormal(const Jet<T, N>& f) {
auto result = isnormal(f.a);
for (int i = 0; i < N; ++i) {
result = result & isnormal(f.v[i]);
}
return result;
return isnormal(f.a);
}
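// Illustrative sketch of the explicit dual-part checks suggested above, using
// Eigen's array API on the derivative vector (the isfinite/isnan overloads
// here intentionally look only at the scalar part):
//
//   Jet<double, 3> f(2.0, 0);
//   Jet<double, 3> g = log(f);                            // some computation
//   const bool value_ok     = isfinite(g);                // scalar part only
//   const bool partials_ok  = g.v.array().isFinite().all();
//   const bool partials_nan = g.v.array().isNaN().any();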
// Determines whether the scalar part of the Jet f is less than the scalar
// part of g.
//
// NOTE: This function does NOT set any floating-point exceptions.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool isless(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return isless(AsScalar(f), AsScalar(g));
}
// Determines whether the scalar part of the Jet f is greater than the scalar
// part of g.
//
// NOTE: This function does NOT set any floating-point exceptions.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool isgreater(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return isgreater(AsScalar(f), AsScalar(g));
}
// Determines whether the scalar part of the Jet f is less than or equal to the
// scalar part of g.
//
// NOTE: This function does NOT set any floating-point exceptions.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool islessequal(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return islessequal(AsScalar(f), AsScalar(g));
}
// Determines whether the scalar part of the Jet f is less than or greater than
// (f < g || f > g) the scalar part of g.
//
// NOTE: This function does NOT set any floating-point exceptions.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool islessgreater(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return islessgreater(AsScalar(f), AsScalar(g));
}
// Determines whether the scalar part of the Jet f is greater than or equal to
// the scalar part of g.
//
// NOTE: This function does NOT set any floating-point exceptions.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool isgreaterequal(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return isgreaterequal(AsScalar(f), AsScalar(g));
}
// Determines if either of the scalar parts of the arguments are NaN and
// thus cannot be ordered with respect to each other.
template <typename Lhs,
typename Rhs,
std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr>
inline bool isunordered(const Lhs& f, const Rhs& g) {
using internal::AsScalar;
return isunordered(AsScalar(f), AsScalar(g));
}
// Categorize scalar part as zero, subnormal, normal, infinite, NaN, or
// implementation-defined.
template <typename T, int N>
inline int fpclassify(const Jet<T, N>& f) {
return fpclassify(f.a);
}
// Determines whether the scalar part of the argument is negative.
template <typename T, int N>
inline bool signbit(const Jet<T, N>& f) {
return signbit(f.a);
}
// Legacy functions from the pre-C++11 days.
template <typename T, int N>
CERES_DEPRECATED_WITH_MSG(
"ceres::IsFinite will be removed in a future Ceres Solver release. Please "
"use ceres::isfinite.")
inline bool IsFinite(const Jet<T, N>& f) {
return isfinite(f);
}
template <typename T, int N>
CERES_DEPRECATED_WITH_MSG(
"ceres::IsNaN will be removed in a future Ceres Solver release. Please use "
"ceres::isnan.")
inline bool IsNaN(const Jet<T, N>& f) {
return isnan(f);
}
template <typename T, int N>
CERES_DEPRECATED_WITH_MSG(
"ceres::IsNormal will be removed in a future Ceres Solver release. Please "
"use ceres::isnormal.")
inline bool IsNormal(const Jet<T, N>& f) {
return isnormal(f);
}
// The jet is infinite if any part of the jet is infinite.
template <typename T, int N>
CERES_DEPRECATED_WITH_MSG(
"ceres::IsInfinite will be removed in a future Ceres Solver release. "
"Please use ceres::isinf.")
inline bool IsInfinite(const Jet<T, N>& f) {
return isinf(f);
}
#ifdef CERES_HAS_CPP20
// Computes the linear interpolation a + t(b - a) between a and b at the value
// t. For arguments outside of the range 0 <= t <= 1, the values are
// extrapolated.
//
// Differentiating lerp(a, b, t) with respect to a, b, and t gives:
//
// d/da lerp(a, b, t) = 1 - t
// d/db lerp(a, b, t) = t
// d/dt lerp(a, b, t) = b - a
//
// with the dual representation given by
//
// lerp(a + da, b + db, t + dt)
// ~= lerp(a, b, t) + (1 - t) da + t db + (b - a) dt .
template <typename T, int N>
inline Jet<T, N> lerp(const Jet<T, N>& a,
const Jet<T, N>& b,
const Jet<T, N>& t) {
return Jet<T, N>{lerp(a.a, b.a, t.a),
(T(1) - t.a) * a.v + t.a * b.v + (b.a - a.a) * t.v};
}
// Computes the midpoint a + (b - a) / 2.
//
// Differentiating midpoint(a, b) with respect to a and b gives:
//
// d/da midpoint(a, b) = 1/2
// d/db midpoint(a, b) = 1/2
//
// with the dual representation given by
//
// midpoint(a + da, b + db) ~= midpoint(a, b) + (da + db) / 2 .
template <typename T, int N>
inline Jet<T, N> midpoint(const Jet<T, N>& a, const Jet<T, N>& b) {
Jet<T, N> result{midpoint(a.a, b.a)};
// To avoid overflow in the differential, compute
// (da + db) / 2 using midpoint.
for (int i = 0; i < N; ++i) {
result.v[i] = midpoint(a.v[i], b.v[i]);
}
return result;
}
#endif // defined(CERES_HAS_CPP20)
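//
// Illustrative sketch (requires C++20, like the block above) of the dual
// parts produced by lerp and midpoint:
//
//   Jet<double, 1> a(1.0);             // constant endpoints
//   Jet<double, 1> b(5.0);
//   Jet<double, 1> t(0.25, 0);         // differentiate with respect to t
//   Jet<double, 1> l = lerp(a, b, t);  // l.a == 2.0, l.v[0] == b.a - a.a == 4.0
//   Jet<double, 1> m = midpoint(a, b); // m.a == 3.0, m.v[0] == 0.0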
// atan2(b + db, a + da) ~= atan2(b, a) + (- b da + a db) / (a^2 + b^2)
//
// In words: the rate of change of theta is 1/r times the rate of
@ -737,6 +1098,22 @@ inline Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) {
return Jet<T, N>(atan2(g.a, f.a), tmp * (-g.a * f.v + f.a * g.v));
}
// Computes the square x^2 of a real number x (not the Euclidean L^2 norm as
// the name might suggest).
//
// NOTE: While std::norm is primarily intended for computing the squared
// magnitude of a std::complex<> number, the current Jet implementation does not
// support mixing a scalar T in its real part and std::complex<T> in the
// infinitesimal. Mixed Jet support is necessary for the type decay from
// std::complex<T> to T (the squared magnitude of a complex number is always
// real) performed by std::norm.
//
// norm(x + h) ~= norm(x) + 2x h
template <typename T, int N>
inline Jet<T, N> norm(const Jet<T, N>& f) {
return Jet<T, N>(norm(f.a), T(2) * f.a * f.v);
}
// pow -- base is a differentiable function, exponent is a constant.
// (a+da)^p ~= a^p + p*a^(p-1) da
template <typename T, int N>
@ -760,14 +1137,14 @@ template <typename T, int N>
inline Jet<T, N> pow(T f, const Jet<T, N>& g) {
Jet<T, N> result;
if (f == T(0) && g.a > T(0)) {
if (fpclassify(f) == FP_ZERO && g > 0) {
// Handle case 2.
result = Jet<T, N>(T(0.0));
} else {
if (f < 0 && g.a == floor(g.a)) { // Handle case 3.
if (f < 0 && g == floor(g.a)) { // Handle case 3.
result = Jet<T, N>(pow(f, g.a));
for (int i = 0; i < N; i++) {
if (g.v[i] != T(0.0)) {
if (fpclassify(g.v[i]) != FP_ZERO) {
// Return a NaN when g.v != 0.
result.v[i] = std::numeric_limits<T>::quiet_NaN();
}
@ -822,21 +1199,21 @@ template <typename T, int N>
inline Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
Jet<T, N> result;
if (f.a == T(0) && g.a >= T(1)) {
if (fpclassify(f) == FP_ZERO && g >= 1) {
// Handle cases 2 and 3.
if (g.a > T(1)) {
if (g > 1) {
result = Jet<T, N>(T(0.0));
} else {
result = f;
}
} else {
if (f.a < T(0) && g.a == floor(g.a)) {
if (f < 0 && g == floor(g.a)) {
// Handle cases 7 and 8.
T const tmp = g.a * pow(f.a, g.a - T(1.0));
result = Jet<T, N>(pow(f.a, g.a), tmp * f.v);
for (int i = 0; i < N; i++) {
if (g.v[i] != T(0.0)) {
if (fpclassify(g.v[i]) != FP_ZERO) {
// Return a NaN when g.v != 0.
result.v[i] = T(std::numeric_limits<double>::quiet_NaN());
}
@ -904,8 +1281,9 @@ struct numeric_limits<ceres::Jet<T, N>> {
static constexpr bool tinyness_before =
std::numeric_limits<T>::tinyness_before;
static constexpr ceres::Jet<T, N> min() noexcept {
return ceres::Jet<T, N>(std::numeric_limits<T>::min());
static constexpr ceres::Jet<T, N> min
CERES_PREVENT_MACRO_SUBSTITUTION() noexcept {
return ceres::Jet<T, N>((std::numeric_limits<T>::min)());
}
static constexpr ceres::Jet<T, N> lowest() noexcept {
return ceres::Jet<T, N>(std::numeric_limits<T>::lowest());
@ -929,8 +1307,9 @@ struct numeric_limits<ceres::Jet<T, N>> {
return ceres::Jet<T, N>(std::numeric_limits<T>::denorm_min());
}
static constexpr ceres::Jet<T, N> max() noexcept {
return ceres::Jet<T, N>(std::numeric_limits<T>::max());
static constexpr ceres::Jet<T, N> max
CERES_PREVENT_MACRO_SUBSTITUTION() noexcept {
return ceres::Jet<T, N>((std::numeric_limits<T>::max)());
}
};
@ -942,10 +1321,10 @@ namespace Eigen {
// Eigen arrays, getting all the goodness of Eigen combined with autodiff.
template <typename T, int N>
struct NumTraits<ceres::Jet<T, N>> {
typedef ceres::Jet<T, N> Real;
typedef ceres::Jet<T, N> NonInteger;
typedef ceres::Jet<T, N> Nested;
typedef ceres::Jet<T, N> Literal;
using Real = ceres::Jet<T, N>;
using NonInteger = ceres::Jet<T, N>;
using Nested = ceres::Jet<T, N>;
using Literal = ceres::Jet<T, N>;
static typename ceres::Jet<T, N> dummy_precision() {
return ceres::Jet<T, N>(1e-12);
@ -984,8 +1363,8 @@ struct NumTraits<ceres::Jet<T, N>> {
};
};
static inline Real highest() { return Real(std::numeric_limits<T>::max()); }
static inline Real lowest() { return Real(-std::numeric_limits<T>::max()); }
static inline Real highest() { return Real((std::numeric_limits<T>::max)()); }
static inline Real lowest() { return Real(-(std::numeric_limits<T>::max)()); }
};
// Specifying the return type of binary operations between Jets and scalar types
@ -996,11 +1375,11 @@ struct NumTraits<ceres::Jet<T, N>> {
// is only available on Eigen versions >= 3.3
template <typename BinaryOp, typename T, int N>
struct ScalarBinaryOpTraits<ceres::Jet<T, N>, T, BinaryOp> {
typedef ceres::Jet<T, N> ReturnType;
using ReturnType = ceres::Jet<T, N>;
};
template <typename BinaryOp, typename T, int N>
struct ScalarBinaryOpTraits<T, ceres::Jet<T, N>, BinaryOp> {
typedef ceres::Jet<T, N> ReturnType;
using ReturnType = ceres::Jet<T, N>;
};
} // namespace Eigen

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -26,27 +26,19 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch)
//
#ifndef CERES_INTERNAL_SPLIT_H_
#define CERES_INTERNAL_SPLIT_H_
#include <string>
#include <vector>
#include "ceres/internal/port.h"
#ifndef CERES_PUBLIC_JET_FWD_H_
#define CERES_PUBLIC_JET_FWD_H_
namespace ceres {
namespace internal {
// Split a string using one or more character delimiters, presented as a
// nul-terminated c string. Append the components to 'result'. If there are
// consecutive delimiters, this function skips over all of them.
void SplitStringUsing(const std::string& full,
const char* delim,
std::vector<std::string>* res);
// Jet forward declaration necessary for the following partial specialization of
// std::common_type and type traits.
template <typename T, int N>
struct Jet;
} // namespace internal
} // namespace ceres
#endif // CERES_INTERNAL_SPLIT_H_
#endif // CERES_PUBLIC_JET_FWD_H_

View File

@ -0,0 +1,304 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: jodebo_beck@gmx.de (Johannes Beck)
//
#ifndef CERES_PUBLIC_LINE_MANIFOLD_H_
#define CERES_PUBLIC_LINE_MANIFOLD_H_
#include <Eigen/Core>
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/householder_vector.h"
#include "ceres/internal/sphere_manifold_functions.h"
#include "ceres/manifold.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
// This provides a manifold for lines, where the line is
// over-parameterized by an origin point and a direction vector. So the
// parameter vector size needs to be two times the ambient space dimension,
// where the first half is interpreted as the origin point and the second half
// as the direction.
//
// The plus operator for the line direction is the same as for the
// SphereManifold. The update of the origin point is
// perpendicular to the line direction before the update.
//
// This manifold is a special case of the affine Grassmannian
// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold))
// for the case Graff_1(R^n).
//
// The class works with dynamic and static ambient space dimensions. If the
// ambient space dimension is known at compile time, use
//
// LineManifold<3> manifold;
//
// If the ambient space dimension is not known at compile time, the template
// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs
// to be provided as a constructor argument:
//
// LineManifold<ceres::DYNAMIC> manifold(ambient_dim);
//
template <int AmbientSpaceDimension>
class LineManifold final : public Manifold {
public:
static_assert(AmbientSpaceDimension == DYNAMIC || AmbientSpaceDimension >= 2,
"The ambient space must be at least 2.");
static_assert(ceres::DYNAMIC == Eigen::Dynamic,
"ceres::DYNAMIC needs to be the same as Eigen::Dynamic.");
LineManifold();
explicit LineManifold(int size);
int AmbientSize() const override { return 2 * size_; }
int TangentSize() const override { return 2 * (size_ - 1); }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool PlusJacobian(const double* x, double* jacobian) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override;
bool MinusJacobian(const double* x, double* jacobian) const override;
private:
static constexpr bool IsDynamic = (AmbientSpaceDimension == ceres::DYNAMIC);
static constexpr int TangentSpaceDimension =
IsDynamic ? ceres::DYNAMIC : AmbientSpaceDimension - 1;
static constexpr int DAmbientSpaceDimension =
IsDynamic ? ceres::DYNAMIC : 2 * AmbientSpaceDimension;
static constexpr int DTangentSpaceDimension =
IsDynamic ? ceres::DYNAMIC : 2 * TangentSpaceDimension;
using AmbientVector = Eigen::Matrix<double, AmbientSpaceDimension, 1>;
using TangentVector = Eigen::Matrix<double, TangentSpaceDimension, 1>;
using MatrixPlusJacobian = Eigen::Matrix<double,
DAmbientSpaceDimension,
DTangentSpaceDimension,
Eigen::RowMajor>;
using MatrixMinusJacobian = Eigen::Matrix<double,
DTangentSpaceDimension,
DAmbientSpaceDimension,
Eigen::RowMajor>;
const int size_{AmbientSpaceDimension};
};
template <int AmbientSpaceDimension>
LineManifold<AmbientSpaceDimension>::LineManifold()
: size_{AmbientSpaceDimension} {
static_assert(
AmbientSpaceDimension != Eigen::Dynamic,
"The size is set to dynamic. Please call the constructor with a size.");
}
template <int AmbientSpaceDimension>
LineManifold<AmbientSpaceDimension>::LineManifold(int size) : size_{size} {
if (AmbientSpaceDimension != Eigen::Dynamic) {
CHECK_EQ(AmbientSpaceDimension, size)
<< "Specified size by template parameter differs from the supplied "
"one.";
} else {
CHECK_GT(size_, 1)
<< "The size of the manifold needs to be greater than 1.";
}
}
template <int AmbientSpaceDimension>
bool LineManifold<AmbientSpaceDimension>::Plus(const double* x_ptr,
const double* delta_ptr,
double* x_plus_delta_ptr) const {
// We seek a box plus operator of the form
//
// [o*, d*] = Plus([o, d], [delta_o, delta_d])
//
// where o is the origin point, d is the direction vector, delta_o is
// the delta of the origin point and delta_d the delta of the direction and
// o* and d* is the updated origin point and direction.
//
// We separate the Plus operator into the origin point and directional part
// d* = Plus_d(d, delta_d)
// o* = Plus_o(o, d, delta_o)
//
// The direction update function Plus_d is the same as for the SphereManifold:
//
// d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T
//
// where H is the householder matrix
// H_{v} = I - (2 / |v|^2) v v^T
// and
// v(d) = d - sign(d_n) |d| e_n.
//
// The origin point update function Plus_o is defined as
//
// o* = o + H_{v(d)} [0.5 delta_o, 0]^T.
Eigen::Map<const AmbientVector> o(x_ptr, size_);
Eigen::Map<const AmbientVector> d(x_ptr + size_, size_);
Eigen::Map<const TangentVector> delta_o(delta_ptr, size_ - 1);
Eigen::Map<const TangentVector> delta_d(delta_ptr + size_ - 1, size_ - 1);
Eigen::Map<AmbientVector> o_plus_delta(x_plus_delta_ptr, size_);
Eigen::Map<AmbientVector> d_plus_delta(x_plus_delta_ptr + size_, size_);
const double norm_delta_d = delta_d.norm();
o_plus_delta = o;
// Shortcut for zero delta direction.
if (norm_delta_d == 0.0) {
d_plus_delta = d;
if (delta_o.isZero(0.0)) {
return true;
}
}
// Calculate the householder transformation which is needed for f_d and f_o.
AmbientVector v(size_);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>,
double,
AmbientSpaceDimension>(d, &v, &beta);
if (norm_delta_d != 0.0) {
internal::ComputeSphereManifoldPlus(
v, beta, d, delta_d, norm_delta_d, &d_plus_delta);
}
// The null space is in the direction of the line, so the tangent space is
// perpendicular to the line direction. This is achieved by using the
// householder matrix of the direction and allowing only movements
// perpendicular to e_n.
//
// The factor of 0.5 is used to be consistent with the line direction
// update.
AmbientVector y(size_);
y << 0.5 * delta_o, 0;
o_plus_delta += internal::ApplyHouseholderVector(y, v, beta);
return true;
}
template <int AmbientSpaceDimension>
bool LineManifold<AmbientSpaceDimension>::PlusJacobian(
const double* x_ptr, double* jacobian_ptr) const {
Eigen::Map<const AmbientVector> d(x_ptr + size_, size_);
Eigen::Map<MatrixPlusJacobian> jacobian(
jacobian_ptr, 2 * size_, 2 * (size_ - 1));
// Clear the Jacobian as only half of the matrix is not zero.
jacobian.setZero();
auto jacobian_d =
jacobian
.template topLeftCorner<AmbientSpaceDimension, TangentSpaceDimension>(
size_, size_ - 1);
auto jacobian_o = jacobian.template bottomRightCorner<AmbientSpaceDimension,
TangentSpaceDimension>(
size_, size_ - 1);
internal::ComputeSphereManifoldPlusJacobian(d, &jacobian_d);
jacobian_o = jacobian_d;
return true;
}
template <int AmbientSpaceDimension>
bool LineManifold<AmbientSpaceDimension>::Minus(const double* y_ptr,
const double* x_ptr,
double* y_minus_x) const {
Eigen::Map<const AmbientVector> y_o(y_ptr, size_);
Eigen::Map<const AmbientVector> y_d(y_ptr + size_, size_);
Eigen::Map<const AmbientVector> x_o(x_ptr, size_);
Eigen::Map<const AmbientVector> x_d(x_ptr + size_, size_);
Eigen::Map<TangentVector> y_minus_x_o(y_minus_x, size_ - 1);
Eigen::Map<TangentVector> y_minus_x_d(y_minus_x + size_ - 1, size_ - 1);
AmbientVector v(size_);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>,
double,
AmbientSpaceDimension>(x_d, &v, &beta);
internal::ComputeSphereManifoldMinus(v, beta, x_d, y_d, &y_minus_x_d);
AmbientVector delta_o = y_o - x_o;
const AmbientVector h_delta_o =
2.0 * internal::ApplyHouseholderVector(delta_o, v, beta);
y_minus_x_o = h_delta_o.template head<TangentSpaceDimension>(size_ - 1);
return true;
}
template <int AmbientSpaceDimension>
bool LineManifold<AmbientSpaceDimension>::MinusJacobian(
const double* x_ptr, double* jacobian_ptr) const {
Eigen::Map<const AmbientVector> d(x_ptr + size_, size_);
Eigen::Map<MatrixMinusJacobian> jacobian(
jacobian_ptr, 2 * (size_ - 1), 2 * size_);
// Clear the Jacobian as only half of the matrix is not zero.
jacobian.setZero();
auto jacobian_d =
jacobian
.template topLeftCorner<TangentSpaceDimension, AmbientSpaceDimension>(
size_ - 1, size_);
auto jacobian_o = jacobian.template bottomRightCorner<TangentSpaceDimension,
AmbientSpaceDimension>(
size_ - 1, size_);
internal::ComputeSphereManifoldMinusJacobian(d, &jacobian_d);
jacobian_o = jacobian_d;
return true;
}
} // namespace ceres
// clang-format off
#include "ceres/internal/reenable_warnings.h"
// clang-format on
#endif // CERES_PUBLIC_LINE_MANIFOLD_H_
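// Illustrative usage sketch: a 3D line is stored as 6 doubles (origin
// followed by direction) and the manifold constrains how that block is
// updated. The Problem::AddParameterBlock/SetManifold calls are assumed from
// the Ceres 2.1 Problem API (declared in problem.h, not in this header):
//
//   double line[6] = {0.0, 0.0, 0.0,    // origin
//                     0.0, 0.0, 1.0};   // direction (must be non-zero)
//   ceres::Problem problem;
//   problem.AddParameterBlock(line, 6);
//   // Tangent size is 2 * (3 - 1) = 4. With the default Problem options
//   // assumed here, the problem takes ownership of the heap-allocated
//   // manifold.
//   problem.SetManifold(line, new ceres::LineManifold<3>());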

View File

@ -37,10 +37,14 @@
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/port.h"
namespace ceres {
// WARNING: LocalParameterizations are deprecated. They will be removed from
// Ceres Solver in version 2.2.0. Please use Manifolds instead.
// Purpose: Sometimes parameter blocks x can overparameterize a problem
//
// min f(x)
@ -111,7 +115,10 @@ namespace ceres {
//
// The class LocalParameterization defines the function Plus and its
// Jacobian which is needed to compute the Jacobian of f w.r.t delta.
class CERES_EXPORT LocalParameterization {
class CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations will be removed from the Ceres Solver API in "
"version 2.2.0. Use Manifolds instead.")
CERES_EXPORT LocalParameterization {
public:
virtual ~LocalParameterization();
@ -120,6 +127,7 @@ class CERES_EXPORT LocalParameterization {
// x_plus_delta = Plus(x, delta)
//
// with the condition that Plus(x, 0) = x.
//
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const = 0;
@ -152,10 +160,10 @@ class CERES_EXPORT LocalParameterization {
// Some basic parameterizations
// Identity Parameterization: Plus(x, delta) = x + delta
class CERES_EXPORT IdentityParameterization : public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use EuclideanManifold instead.")
CERES_EXPORT IdentityParameterization : public LocalParameterization {
public:
explicit IdentityParameterization(int size);
virtual ~IdentityParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
@ -172,11 +180,11 @@ class CERES_EXPORT IdentityParameterization : public LocalParameterization {
};
// Hold a subset of the parameters inside a parameter block constant.
class CERES_EXPORT SubsetParameterization : public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use SubsetManifold instead.")
CERES_EXPORT SubsetParameterization : public LocalParameterization {
public:
explicit SubsetParameterization(int size,
const std::vector<int>& constant_parameters);
virtual ~SubsetParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
@ -199,9 +207,9 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
// with * being the quaternion multiplication operator. Here we assume
// that the first element of the quaternion vector is the real (cos
// theta) part.
class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use QuaternionManifold instead.")
CERES_EXPORT QuaternionParameterization : public LocalParameterization {
public:
virtual ~QuaternionParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
@ -221,10 +229,10 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
//
// Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x
// with * being the quaternion multiplication operator.
class CERES_EXPORT EigenQuaternionParameterization
class CERES_DEPRECATED_WITH_MSG("Use EigenQuaternionManifold instead.")
CERES_EXPORT EigenQuaternionParameterization
: public ceres::LocalParameterization {
public:
virtual ~EigenQuaternionParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
@ -234,23 +242,23 @@ class CERES_EXPORT EigenQuaternionParameterization
};
// This provides a parameterization for homogeneous vectors which are commonly
// used in Structure for Motion problems. One example where they are used is
// in representing points whose triangulation is ill-conditioned. Here
// it is advantageous to use an over-parameterization since homogeneous vectors
// can represent points at infinity.
// used in Structure from Motion problems. One example where they are used is
// in representing points whose triangulation is ill-conditioned. Here it is
// advantageous to use an over-parameterization since homogeneous vectors can
// represent points at infinity.
//
// The plus operator is defined as
// Plus(x, delta) =
// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x
//
// with * defined as an operator which applies the update orthogonal to x to
// remain on the sphere. We assume that the last element of x is the scalar
// component. The size of the homogeneous vector is required to be greater than
// 1.
class CERES_EXPORT HomogeneousVectorParameterization
: public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use SphereManifold instead.") CERES_EXPORT
HomogeneousVectorParameterization : public LocalParameterization {
public:
explicit HomogeneousVectorParameterization(int size);
virtual ~HomogeneousVectorParameterization() {}
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
@ -276,7 +284,8 @@ class CERES_EXPORT HomogeneousVectorParameterization
// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold))
// for the case Graff_1(R^n).
template <int AmbientSpaceDimension>
class LineParameterization : public LocalParameterization {
class CERES_DEPRECATED_WITH_MSG("Use LineManifold instead.")
LineParameterization : public LocalParameterization {
public:
static_assert(AmbientSpaceDimension >= 2,
"The ambient space must be at least 2");
@ -302,21 +311,19 @@ class LineParameterization : public LocalParameterization {
//
// is the local parameterization for a rigid transformation, where the
// rotation is represented using a quaternion.
class CERES_EXPORT ProductParameterization : public LocalParameterization {
//
class CERES_DEPRECATED_WITH_MSG("Use ProductManifold instead.")
CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
ProductParameterization(const ProductParameterization&) = delete;
ProductParameterization& operator=(const ProductParameterization&) = delete;
virtual ~ProductParameterization() {}
//
// NOTE: The constructor takes ownership of the input local
// parameterizations.
//
template <typename... LocalParams>
ProductParameterization(LocalParams*... local_params)
: local_params_(sizeof...(LocalParams)),
local_size_{0},
global_size_{0},
buffer_size_{0} {
explicit ProductParameterization(LocalParams*... local_params)
: local_params_(sizeof...(LocalParams)) {
constexpr int kNumLocalParams = sizeof...(LocalParams);
static_assert(kNumLocalParams >= 2,
"At least two local parameterizations must be specified.");
@ -342,22 +349,23 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool ComputeJacobian(const double* x,
double* jacobian) const override;
bool ComputeJacobian(const double* x, double* jacobian) const override;
int GlobalSize() const override { return global_size_; }
int LocalSize() const override { return local_size_; }
private:
std::vector<std::unique_ptr<LocalParameterization>> local_params_;
int local_size_;
int global_size_;
int buffer_size_;
int local_size_{0};
int global_size_{0};
int buffer_size_{0};
};
} // namespace ceres
// clang-format off
#include "ceres/internal/reenable_warnings.h"
// clang-format on
#include "ceres/internal/line_parameterization.h"
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
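// Illustrative migration sketch matching the deprecation messages above: each
// parameterization has a manifold counterpart (QuaternionParameterization ->
// QuaternionManifold, HomogeneousVectorParameterization -> SphereManifold,
// SubsetParameterization -> SubsetManifold, and so on), and parameter blocks
// are attached through Problem::SetManifold instead of
// Problem::SetParameterization. The Problem method names are assumed from the
// Ceres 2.1 API; they are declared in problem.h, not here.
//
//   double q[4] = {1.0, 0.0, 0.0, 0.0};
//
//   // Before (deprecated, scheduled for removal in 2.2.0):
//   problem.SetParameterization(q, new ceres::QuaternionParameterization);
//
//   // After:
//   problem.SetManifold(q, new ceres::QuaternionManifold);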

View File

@ -35,7 +35,7 @@
//
// For least squares problem where there are no outliers and standard
// squared loss is expected, it is not necessary to create a loss
// function; instead passing a NULL to the problem when adding
// function; instead passing a nullptr to the problem when adding
// residuals implies a standard squared loss.
//
// For least squares problems where the minimization may encounter
@ -78,6 +78,7 @@
#include <memory>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/types.h"
#include "glog/logging.h"
@ -85,7 +86,7 @@ namespace ceres {
class CERES_EXPORT LossFunction {
public:
virtual ~LossFunction() {}
virtual ~LossFunction();
// For a residual vector with squared 2-norm 'sq_norm', this method
// is required to fill in the value and derivatives of the loss
@ -125,10 +126,10 @@ class CERES_EXPORT LossFunction {
//
// At s = 0: rho = [0, 1, 0].
//
// It is not normally necessary to use this, as passing NULL for the
// It is not normally necessary to use this, as passing nullptr for the
// loss function when building the problem accomplishes the same
// thing.
class CERES_EXPORT TrivialLoss : public LossFunction {
class CERES_EXPORT TrivialLoss final : public LossFunction {
public:
void Evaluate(double, double*) const override;
};
@ -171,7 +172,7 @@ class CERES_EXPORT TrivialLoss : public LossFunction {
//
// The scaling parameter 'a' corresponds to 'delta' on this page:
// http://en.wikipedia.org/wiki/Huber_Loss_Function
class CERES_EXPORT HuberLoss : public LossFunction {
class CERES_EXPORT HuberLoss final : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) {}
void Evaluate(double, double*) const override;
@ -187,7 +188,7 @@ class CERES_EXPORT HuberLoss : public LossFunction {
// rho(s) = 2 (sqrt(1 + s) - 1).
//
// At s = 0: rho = [0, 1, -1 / (2 * a^2)].
class CERES_EXPORT SoftLOneLoss : public LossFunction {
class CERES_EXPORT SoftLOneLoss final : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
@ -204,7 +205,7 @@ class CERES_EXPORT SoftLOneLoss : public LossFunction {
// rho(s) = log(1 + s).
//
// At s = 0: rho = [0, 1, -1 / a^2].
class CERES_EXPORT CauchyLoss : public LossFunction {
class CERES_EXPORT CauchyLoss final : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
@ -225,7 +226,7 @@ class CERES_EXPORT CauchyLoss : public LossFunction {
// rho(s) = a atan(s / a).
//
// At s = 0: rho = [0, 1, 0].
class CERES_EXPORT ArctanLoss : public LossFunction {
class CERES_EXPORT ArctanLoss final : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {}
void Evaluate(double, double*) const override;
@ -264,7 +265,7 @@ class CERES_EXPORT ArctanLoss : public LossFunction {
// concentrated in the range a - b to a + b.
//
// At s = 0: rho = [0, ~0, ~0].
class CERES_EXPORT TolerantLoss : public LossFunction {
class CERES_EXPORT TolerantLoss final : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
void Evaluate(double, double*) const override;
@ -283,7 +284,7 @@ class CERES_EXPORT TolerantLoss : public LossFunction {
// rho(s) = a^2 / 3 for s > a^2.
//
// At s = 0: rho = [0, 1, -2 / a^2]
class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
class CERES_EXPORT TukeyLoss final : public ceres::LossFunction {
public:
explicit TukeyLoss(double a) : a_squared_(a * a) {}
void Evaluate(double, double*) const override;
@ -294,14 +295,14 @@ class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
// Composition of two loss functions. The error is the result of first
// evaluating g followed by f to yield the composition f(g(s)).
// The loss functions must not be NULL.
class CERES_EXPORT ComposedLoss : public LossFunction {
// The loss functions must not be nullptr.
class CERES_EXPORT ComposedLoss final : public LossFunction {
public:
explicit ComposedLoss(const LossFunction* f,
Ownership ownership_f,
const LossFunction* g,
Ownership ownership_g);
virtual ~ComposedLoss();
~ComposedLoss() override;
void Evaluate(double, double*) const override;
private:
@ -322,11 +323,11 @@ class CERES_EXPORT ComposedLoss : public LossFunction {
// s -> a * rho'(s)
// s -> a * rho''(s)
//
// Since we treat the a NULL Loss function as the Identity loss
// function, rho = NULL is a valid input and will result in the input
// Since we treat a nullptr Loss function as the Identity loss
// function, rho = nullptr is a valid input and will result in the input
// being scaled by a. This provides a simple way of implementing a
// scaled ResidualBlock.
class CERES_EXPORT ScaledLoss : public LossFunction {
class CERES_EXPORT ScaledLoss final : public LossFunction {
public:
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
@ -336,7 +337,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
ScaledLoss(const ScaledLoss&) = delete;
void operator=(const ScaledLoss&) = delete;
virtual ~ScaledLoss() {
~ScaledLoss() override {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
@ -361,8 +362,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// whose scale can be mutated after an optimization problem has been
// constructed.
//
// Since we treat the a NULL Loss function as the Identity loss
// function, rho = NULL is a valid input.
// Since we treat a nullptr Loss function as the Identity loss
// function, rho = nullptr is a valid input.
//
// Example usage
//
@ -374,7 +375,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// new AutoDiffCostFunction < UW_Camera_Mapper, 2, 9, 3>(
// new UW_Camera_Mapper(feature_x, feature_y));
//
// LossFunctionWrapper* loss_function(new HuberLoss(1.0), TAKE_OWNERSHIP);
// LossFunctionWrapper* loss_function = new LossFunctionWrapper(
// new HuberLoss(1.0), TAKE_OWNERSHIP);
//
// problem.AddResidualBlock(cost_function, loss_function, parameters);
//
@ -387,7 +389,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
//
// Solve(options, &problem, &summary)
//
class CERES_EXPORT LossFunctionWrapper : public LossFunction {
class CERES_EXPORT LossFunctionWrapper final : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
: rho_(rho), ownership_(ownership) {}
@ -395,14 +397,14 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
LossFunctionWrapper(const LossFunctionWrapper&) = delete;
void operator=(const LossFunctionWrapper&) = delete;
virtual ~LossFunctionWrapper() {
~LossFunctionWrapper() override {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
}
void Evaluate(double sq_norm, double out[3]) const override {
if (rho_.get() == NULL) {
if (rho_.get() == nullptr) {
out[0] = sq_norm;
out[1] = 1.0;
out[2] = 0.0;

411
extern/ceres/include/ceres/manifold.h vendored Normal file
View File

@ -0,0 +1,411 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_MANIFOLD_H_
#define CERES_PUBLIC_MANIFOLD_H_
#include <Eigen/Core>
#include <algorithm>
#include <array>
#include <memory>
#include <utility>
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
// In sensor fusion problems, often we have to model quantities that live in
// spaces known as Manifolds, for example the rotation/orientation of a sensor
// that is represented by a quaternion.
//
// Manifolds are spaces which locally look like Euclidean spaces. More
// precisely, at each point on the manifold there is a linear space that is
// tangent to the manifold. It has dimension equal to the intrinsic dimension of
// the manifold itself, which is less than or equal to the dimension of the
// ambient space in which the manifold is embedded.
//
// For example, the tangent space to a point on a sphere in three dimensions is
// the two dimensional plane that is tangent to the sphere at that point. There
// are two reasons tangent spaces are interesting:
//
// 1. They are Euclidean spaces so the usual vector space operations apply there,
// which makes numerical operations easy.
// 2. Movements in the tangent space translate into movements along the manifold.
// Movements perpendicular to the tangent space do not translate into
// movements on the manifold.
//
// Returning to our sphere example, moving in the 2 dimensional plane
// tangent to the sphere and projecting back onto the sphere will move you away
// from the point you started from, but moving along the normal at the same point
// and then projecting back onto the sphere brings you back to the point.
//
// The Manifold interface defines two operations (and their derivatives)
// involving the tangent space, allowing filtering and optimization to be
// performed on said manifold:
//
// 1. x_plus_delta = Plus(x, delta)
// 2. delta = Minus(x_plus_delta, x)
//
// "Plus" computes the result of moving along delta in the tangent space at x,
// and then projecting back onto the manifold that x belongs to. In Differential
// Geometry this is known as a "Retraction". It is a generalization of vector
// addition in Euclidean spaces.
//
// Given two points on the manifold, "Minus" computes the change delta to x in
// the tangent space at x, that will take it to x_plus_delta.
//
// Let us now consider two examples.
//
// The Euclidean space R^n is the simplest example of a manifold. It has
// dimension n (and so does its tangent space) and Plus and Minus are the
// familiar vector sum and difference operations.
//
// Plus(x, delta) = x + delta = y,
// Minus(y, x) = y - x = delta.
//
// A more interesting case is SO(3), the special orthogonal group in three
// dimensions - the space of 3x3 rotation matrices. SO(3) is a three dimensional
// manifold embedded in R^9 or R^(3x3). So points on SO(3) are represented using
// 9 dimensional vectors or 3x3 matrices, and points in its tangent spaces are
// represented by 3 dimensional vectors.
//
// Plus and Minus are defined in terms of the matrix Exp and Log
// operations as follows:
//
// Let Exp(p, q, r) = [cos(theta) + cp^2, -sr + cpq , sq + cpr ]
// [sr + cpq , cos(theta) + cq^2, -sp + cqr ]
// [-sq + cpr , sp + cqr , cos(theta) + cr^2]
//
// where: theta = sqrt(p^2 + q^2 + r^2)
// s = sinc(theta)
// c = (1 - cos(theta))/theta^2
//
// and Log(x) = 1/(2 sinc(theta))[x_32 - x_23, x_13 - x_31, x_21 - x_12]
//
// where: theta = acos((Trace(x) - 1)/2)
//
// Then,
//
// Plus(x, delta) = x Exp(delta)
// Minus(y, x) = Log(x^T y)
//
// For Plus and Minus to be mathematically consistent, the following identities
// must be satisfied at all points x on the manifold:
//
// 1. Plus(x, 0) = x.
// 2. For all y, Plus(x, Minus(y, x)) = y.
// 3. For all delta, Minus(Plus(x, delta), x) = delta.
// 4. For all delta_1, delta_2
// |Minus(Plus(x, delta_1), Plus(x, delta_2))| <= |delta_1 - delta_2|
//
// Briefly:
// (1) Ensures that the tangent space is "centered" at x, and the zero vector is
// the identity element.
// (2) Ensures that any y can be reached from x.
// (3) Ensures that Plus is an injective (one-to-one) map.
// (4) Allows us to define a metric on the manifold.
//
// Additionally we require that Plus and Minus be sufficiently smooth. In
// particular they need to be differentiable everywhere on the manifold.
//
// For more details, please see
//
// "Integrating Generic Sensor Fusion Algorithms with Sound State
// Representations through Encapsulation of Manifolds"
// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder
// https://arxiv.org/pdf/1107.1119.pdf
class CERES_EXPORT Manifold {
public:
virtual ~Manifold();
// Dimension of the ambient space in which the manifold is embedded.
virtual int AmbientSize() const = 0;
// Dimension of the manifold/tangent space.
virtual int TangentSize() const = 0;
// x_plus_delta = Plus(x, delta),
//
// A generalization of vector addition in Euclidean space, Plus computes the
// result of moving along delta in the tangent space at x, and then projecting
// back onto the manifold that x belongs to.
//
// x and x_plus_delta are AmbientSize() vectors.
// delta is a TangentSize() vector.
//
// Return value indicates if the operation was successful or not.
virtual bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const = 0;
// Compute the derivative of Plus(x, delta) w.r.t delta at delta = 0, i.e.
//
// (D_2 Plus)(x, 0)
//
// jacobian is a row-major AmbientSize() x TangentSize() matrix.
//
// Return value indicates whether the operation was successful or not.
virtual bool PlusJacobian(const double* x, double* jacobian) const = 0;
// tangent_matrix = ambient_matrix * (D_2 Plus)(x, 0)
//
// ambient_matrix is a row-major num_rows x AmbientSize() matrix.
// tangent_matrix is a row-major num_rows x TangentSize() matrix.
//
// Return value indicates whether the operation was successful or not.
//
// This function is only used by the GradientProblemSolver, where the
// dimension of the parameter block can be large and it may be more efficient
// to compute this product directly rather than first evaluating the Jacobian
// into a matrix and then doing a matrix vector product.
//
// Because this is not an often used function, we provide a default
// implementation for convenience. If performance becomes an issue then the
// user should consider implementing a specialization.
virtual bool RightMultiplyByPlusJacobian(const double* x,
const int num_rows,
const double* ambient_matrix,
double* tangent_matrix) const;
// y_minus_x = Minus(y, x)
//
// Given two points on the manifold, Minus computes the change to x in the
// tangent space at x, that will take it to y.
//
// x and y are AmbientSize() vectors.
// y_minus_x is a TangentSize() vector.
//
// Return value indicates if the operation was successful or not.
virtual bool Minus(const double* y,
const double* x,
double* y_minus_x) const = 0;
// Compute the derivative of Minus(y, x) w.r.t y at y = x, i.e
//
// (D_1 Minus) (x, x)
//
// Jacobian is a row-major TangentSize() x AmbientSize() matrix.
//
// Return value indicates whether the operation was successful or not.
virtual bool MinusJacobian(const double* x, double* jacobian) const = 0;
};
// The Euclidean manifold is another name for the ordinary vector space R^size,
// where the plus and minus operations are the usual vector addition and
// subtraction:
// Plus(x, delta) = x + delta
// Minus(y, x) = y - x.
//
// The class works with dynamic and static ambient space dimensions. If the
// ambient space dimension is known at compile time, use
//
// EuclideanManifold<3> manifold;
//
// If the ambient space dimension is not known at compile time, the template
// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs
// to be provided as a constructor argument:
//
// EuclideanManifold<ceres::DYNAMIC> manifold(ambient_dim);
template <int Size>
class EuclideanManifold final : public Manifold {
public:
static_assert(Size == ceres::DYNAMIC || Size >= 0,
"The size of the manifold needs to be non-negative.");
static_assert(ceres::DYNAMIC == Eigen::Dynamic,
"ceres::DYNAMIC needs to be the same as Eigen::Dynamic.");
EuclideanManifold() : size_{Size} {
static_assert(
Size != ceres::DYNAMIC,
"The size is set to dynamic. Please call the constructor with a size.");
}
explicit EuclideanManifold(int size) : size_(size) {
if (Size != ceres::DYNAMIC) {
CHECK_EQ(Size, size)
<< "Specified size by template parameter differs from the supplied "
"one.";
} else {
CHECK_GE(size_, 0)
<< "The size of the manifold needs to be non-negative.";
}
}
int AmbientSize() const override { return size_; }
int TangentSize() const override { return size_; }
bool Plus(const double* x_ptr,
const double* delta_ptr,
double* x_plus_delta_ptr) const override {
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<const AmbientVector> delta(delta_ptr, size_);
Eigen::Map<AmbientVector> x_plus_delta(x_plus_delta_ptr, size_);
x_plus_delta = x + delta;
return true;
}
bool PlusJacobian(const double* x_ptr, double* jacobian_ptr) const override {
Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_);
jacobian.setIdentity();
return true;
}
bool RightMultiplyByPlusJacobian(const double* x,
const int num_rows,
const double* ambient_matrix,
double* tangent_matrix) const override {
std::copy_n(ambient_matrix, num_rows * size_, tangent_matrix);
return true;
}
bool Minus(const double* y_ptr,
const double* x_ptr,
double* y_minus_x_ptr) const override {
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<const AmbientVector> y(y_ptr, size_);
Eigen::Map<AmbientVector> y_minus_x(y_minus_x_ptr, size_);
y_minus_x = y - x;
return true;
}
bool MinusJacobian(const double* x_ptr, double* jacobian_ptr) const override {
Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_);
jacobian.setIdentity();
return true;
}
private:
static constexpr bool IsDynamic = (Size == ceres::DYNAMIC);
using AmbientVector = Eigen::Matrix<double, Size, 1>;
using MatrixJacobian = Eigen::Matrix<double, Size, Size, Eigen::RowMajor>;
int size_{};
};
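// Illustrative sketch of the Euclidean Plus/Minus defined above (plain vector
// addition and subtraction), called directly on the manifold:
//
//   ceres::EuclideanManifold<3> manifold;
//   const double x[3]     = {1.0, 2.0, 3.0};
//   const double delta[3] = {0.5, -0.5, 0.0};
//   double y[3];
//   manifold.Plus(x, delta, y);   // y == {1.5, 1.5, 3.0}
//   double d[3];
//   manifold.Minus(y, x, d);      // recovers delta: d == {0.5, -0.5, 0.0}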
// Hold a subset of the parameters inside a parameter block constant.
class CERES_EXPORT SubsetManifold final : public Manifold {
public:
SubsetManifold(int size, const std::vector<int>& constant_parameters);
int AmbientSize() const override;
int TangentSize() const override;
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool PlusJacobian(const double* x, double* jacobian) const override;
bool RightMultiplyByPlusJacobian(const double* x,
const int num_rows,
const double* ambient_matrix,
double* tangent_matrix) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override;
bool MinusJacobian(const double* x, double* jacobian) const override;
private:
const int tangent_size_ = 0;
std::vector<bool> constancy_mask_;
};
// Implements the manifold for a Hamilton quaternion as defined in
// https://en.wikipedia.org/wiki/Quaternion. Quaternions are represented as
// unit norm 4-vectors, i.e.
//
// q = [q0; q1; q2; q3], |q| = 1
//
// is the ambient space representation.
//
// q0 scalar part.
// q1 coefficient of i.
// q2 coefficient of j.
// q3 coefficient of k.
//
// where: i*i = j*j = k*k = -1 and i*j = k, j*k = i, k*i = j.
//
// The tangent space is R^3, which relates to the ambient space through the
// Plus and Minus operations defined as:
//
// Plus(x, delta) = [cos(|delta|); sin(|delta|) * delta / |delta|] * x
// Minus(y, x) = to_delta(y * x^{-1})
//
// where "*" is the quaternion product and because q is a unit quaternion
// (|q|=1), q^-1 = [q0; -q1; -q2; -q3]
//
// and to_delta( [q0; u_{3x1}] ) = u / |u| * atan2(|u|, q0)
class CERES_EXPORT QuaternionManifold final : public Manifold {
public:
int AmbientSize() const override { return 4; }
int TangentSize() const override { return 3; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool PlusJacobian(const double* x, double* jacobian) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override;
bool MinusJacobian(const double* x, double* jacobian) const override;
};
// Implements the quaternion manifold for Eigen's representation of the
// Hamilton quaternion. Geometrically it is exactly the same as the
// QuaternionManifold defined above. However, Eigen uses a different internal
// memory layout for the elements of the quaternion than what is commonly
// used. It stores the quaternion in memory as [q1, q2, q3, q0] or
// [x, y, z, w] where the real (scalar) part is last.
//
// Since Ceres operates on parameter blocks which are raw double pointers this
// difference is important and requires a different manifold.
class CERES_EXPORT EigenQuaternionManifold final : public Manifold {
public:
int AmbientSize() const override { return 4; }
int TangentSize() const override { return 3; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool PlusJacobian(const double* x, double* jacobian) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override;
bool MinusJacobian(const double* x, double* jacobian) const override;
};
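// For example (an illustrative sketch; assumes an existing Problem named
// `problem`), the coefficient array of an Eigen::Quaterniond is used directly
// as the parameter block and paired with this manifold rather than
// QuaternionManifold:
//
//   Eigen::Quaterniond q = Eigen::Quaterniond::Identity();
//   problem.AddParameterBlock(q.coeffs().data(), 4,
//                             new EigenQuaternionManifold);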
} // namespace ceres
// clang-format off
#include "ceres/internal/reenable_warnings.h"
// clang-format on
#endif // CERES_PUBLIC_MANIFOLD_H_

View File

@ -0,0 +1,328 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include <cmath>
#include <limits>
#include <memory>
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/manifold.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace ceres {
// Matchers and macros for help with testing Manifold objects.
//
// Testing a Manifold has two parts.
//
// 1. Checking that Manifold::Plus is correctly defined. This requires per
// manifold tests.
//
// 2. The other methods of the manifold have mathematical properties that make
// it compatible with Plus, as described in:
//
// "Integrating Generic Sensor Fusion Algorithms with Sound State
// Representations through Encapsulation of Manifolds"
// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder
// https://arxiv.org/pdf/1107.1119.pdf
//
// These tests are implemented using generic matchers defined below which can
// all be called by the macro EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x,
// delta, y, tolerance). See manifold_test.cc for example usage.
// Checks that the invariant Plus(x, 0) == x holds.
MATCHER_P2(XPlusZeroIsXAt, x, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
Vector actual = Vector::Zero(ambient_size);
Vector zero = Vector::Zero(tangent_size);
EXPECT_TRUE(arg.Plus(x.data(), zero.data(), actual.data()));
const double n = (actual - x).norm();
const double d = x.norm();
const double diffnorm = (d == 0.0) ? n : (n / d);
if (diffnorm > tolerance) {
*result_listener << "\nexpected (x): " << x.transpose()
<< "\nactual: " << actual.transpose()
<< "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Checks that the invariant Minus(x, x) == 0 holds.
MATCHER_P2(XMinusXIsZeroAt, x, tolerance, "") {
const int tangent_size = arg.TangentSize();
Vector actual = Vector::Zero(tangent_size);
EXPECT_TRUE(arg.Minus(x.data(), x.data(), actual.data()));
const double diffnorm = actual.norm();
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose() //
<< "\nexpected: 0 0 0"
<< "\nactual: " << actual.transpose()
<< "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Helper struct to curry Plus(x, .) so that it can be numerically
// differentiated.
struct PlusFunctor {
PlusFunctor(const Manifold& manifold, const double* x)
: manifold(manifold), x(x) {}
bool operator()(double const* const* parameters, double* x_plus_delta) const {
return manifold.Plus(x, parameters[0], x_plus_delta);
}
const Manifold& manifold;
const double* x;
};
// Checks that the output of PlusJacobian matches the one obtained by
// numerically evaluating D_2 Plus(x,0).
MATCHER_P2(HasCorrectPlusJacobianAt, x, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
NumericDiffOptions options;
options.ridders_relative_initial_step_size = 1e-4;
DynamicNumericDiffCostFunction<PlusFunctor, RIDDERS> cost_function(
new PlusFunctor(arg, x.data()), TAKE_OWNERSHIP, options);
cost_function.AddParameterBlock(tangent_size);
cost_function.SetNumResiduals(ambient_size);
Vector zero = Vector::Zero(tangent_size);
double* parameters[1] = {zero.data()};
Vector x_plus_zero = Vector::Zero(ambient_size);
Matrix expected = Matrix::Zero(ambient_size, tangent_size);
double* jacobians[1] = {expected.data()};
EXPECT_TRUE(
cost_function.Evaluate(parameters, x_plus_zero.data(), jacobians));
Matrix actual = Matrix::Random(ambient_size, tangent_size);
EXPECT_TRUE(arg.PlusJacobian(x.data(), actual.data()));
const double n = (actual - expected).norm();
const double d = expected.norm();
const double diffnorm = (d == 0.0) ? n : n / d;
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose() << "\nexpected: \n"
<< expected << "\nactual:\n"
<< actual << "\ndiff:\n"
<< expected - actual << "\ndiffnorm : " << diffnorm;
return false;
}
return true;
}
// Checks that the invariant Minus(Plus(x, delta), x) == delta holds.
MATCHER_P3(MinusPlusIsIdentityAt, x, delta, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
Vector x_plus_delta = Vector::Zero(ambient_size);
EXPECT_TRUE(arg.Plus(x.data(), delta.data(), x_plus_delta.data()));
Vector actual = Vector::Zero(tangent_size);
EXPECT_TRUE(arg.Minus(x_plus_delta.data(), x.data(), actual.data()));
const double n = (actual - delta).norm();
const double d = delta.norm();
const double diffnorm = (d == 0.0) ? n : (n / d);
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose()
<< "\nexpected: " << delta.transpose()
<< "\nactual:" << actual.transpose()
<< "\ndiff:" << (delta - actual).transpose()
<< "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Checks that the invariant Plus(Minus(y, x), x) == y holds.
MATCHER_P3(PlusMinusIsIdentityAt, x, y, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
Vector y_minus_x = Vector::Zero(tangent_size);
EXPECT_TRUE(arg.Minus(y.data(), x.data(), y_minus_x.data()));
Vector actual = Vector::Zero(ambient_size);
EXPECT_TRUE(arg.Plus(x.data(), y_minus_x.data(), actual.data()));
const double n = (actual - y).norm();
const double d = y.norm();
const double diffnorm = (d == 0.0) ? n : (n / d);
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose()
<< "\nexpected: " << y.transpose()
<< "\nactual:" << actual.transpose()
<< "\ndiff:" << (y - actual).transpose()
<< "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Helper struct to curry Minus(., x) so that it can be numerically
// differentiated.
struct MinusFunctor {
MinusFunctor(const Manifold& manifold, const double* x)
: manifold(manifold), x(x) {}
bool operator()(double const* const* parameters, double* y_minus_x) const {
return manifold.Minus(parameters[0], x, y_minus_x);
}
const Manifold& manifold;
const double* x;
};
// Checks that the output of MinusJacobian matches the one obtained by
// numerically evaluating D_1 Minus(x,x).
MATCHER_P2(HasCorrectMinusJacobianAt, x, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
Vector y = x;
Vector y_minus_x = Vector::Zero(tangent_size);
NumericDiffOptions options;
options.ridders_relative_initial_step_size = 1e-4;
DynamicNumericDiffCostFunction<MinusFunctor, RIDDERS> cost_function(
new MinusFunctor(arg, x.data()), TAKE_OWNERSHIP, options);
cost_function.AddParameterBlock(ambient_size);
cost_function.SetNumResiduals(tangent_size);
double* parameters[1] = {y.data()};
Matrix expected = Matrix::Zero(tangent_size, ambient_size);
double* jacobians[1] = {expected.data()};
EXPECT_TRUE(cost_function.Evaluate(parameters, y_minus_x.data(), jacobians));
Matrix actual = Matrix::Random(tangent_size, ambient_size);
EXPECT_TRUE(arg.MinusJacobian(x.data(), actual.data()));
const double n = (actual - expected).norm();
const double d = expected.norm();
const double diffnorm = (d == 0.0) ? n : (n / d);
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose() << "\nexpected: \n"
<< expected << "\nactual:\n"
<< actual << "\ndiff:\n"
<< expected - actual << "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Checks that D_delta Minus(Plus(x, delta), x) at delta = 0 is an identity
// matrix.
MATCHER_P2(MinusPlusJacobianIsIdentityAt, x, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
Matrix plus_jacobian(ambient_size, tangent_size);
EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data()));
Matrix minus_jacobian(tangent_size, ambient_size);
EXPECT_TRUE(arg.MinusJacobian(x.data(), minus_jacobian.data()));
const Matrix actual = minus_jacobian * plus_jacobian;
const Matrix expected = Matrix::Identity(tangent_size, tangent_size);
const double n = (actual - expected).norm();
const double d = expected.norm();
const double diffnorm = n / d;
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose() << "\nexpected: \n"
<< expected << "\nactual:\n"
<< actual << "\ndiff:\n"
<< expected - actual << "\ndiffnorm: " << diffnorm;
return false;
}
return true;
}
// Verify that the output of RightMultiplyByPlusJacobian is ambient_matrix *
// plus_jacobian.
MATCHER_P2(HasCorrectRightMultiplyByPlusJacobianAt, x, tolerance, "") {
const int ambient_size = arg.AmbientSize();
const int tangent_size = arg.TangentSize();
constexpr int kMinNumRows = 0;
constexpr int kMaxNumRows = 3;
for (int num_rows = kMinNumRows; num_rows <= kMaxNumRows; ++num_rows) {
Matrix plus_jacobian = Matrix::Random(ambient_size, tangent_size);
EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data()));
Matrix ambient_matrix = Matrix::Random(num_rows, ambient_size);
Matrix expected = ambient_matrix * plus_jacobian;
Matrix actual = Matrix::Random(num_rows, tangent_size);
EXPECT_TRUE(arg.RightMultiplyByPlusJacobian(
x.data(), num_rows, ambient_matrix.data(), actual.data()));
const double n = (actual - expected).norm();
const double d = expected.norm();
const double diffnorm = (d == 0.0) ? n : (n / d);
if (diffnorm > tolerance) {
*result_listener << "\nx: " << x.transpose() << "\nambient_matrix : \n"
<< ambient_matrix << "\nplus_jacobian : \n"
<< plus_jacobian << "\nexpected: \n"
<< expected << "\nactual:\n"
<< actual << "\ndiff:\n"
<< expected - actual << "\ndiffnorm : " << diffnorm;
return false;
}
}
return true;
}
#define EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, tolerance) \
Vector zero_tangent = Vector::Zero(manifold.TangentSize()); \
EXPECT_THAT(manifold, XPlusZeroIsXAt(x, tolerance)); \
EXPECT_THAT(manifold, XMinusXIsZeroAt(x, tolerance)); \
EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, delta, tolerance)); \
EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, zero_tangent, tolerance)); \
EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, x, tolerance)); \
EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, y, tolerance)); \
EXPECT_THAT(manifold, HasCorrectPlusJacobianAt(x, tolerance)); \
EXPECT_THAT(manifold, HasCorrectMinusJacobianAt(x, tolerance)); \
EXPECT_THAT(manifold, MinusPlusJacobianIsIdentityAt(x, tolerance)); \
EXPECT_THAT(manifold, HasCorrectRightMultiplyByPlusJacobianAt(x, tolerance));
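// For example, an illustrative sketch of exercising these invariants for a
// concrete manifold inside a TEST body (values chosen arbitrarily):
//
//   QuaternionManifold manifold;
//   Vector x = Vector::Random(4).normalized();
//   Vector delta = Vector::Random(3);
//   Vector y = Vector::Random(4).normalized();
//   EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, 1e-9);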
} // namespace ceres

View File

@ -57,7 +57,7 @@ namespace ceres {
// which would be the case if the covariance matrix S is rank
// deficient.
class CERES_EXPORT NormalPrior : public CostFunction {
class CERES_EXPORT NormalPrior final : public CostFunction {
public:
// Check that the number of rows in the vector b are the same as the
// number of columns in the matrix A, crash otherwise.

View File

@ -179,9 +179,10 @@ template <typename CostFunctor,
NumericDiffMethodType method = CENTRAL,
int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC
int... Ns> // Parameters dimensions for each block.
class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
class NumericDiffCostFunction final
: public SizedCostFunction<kNumResiduals, Ns...> {
public:
NumericDiffCostFunction(
explicit NumericDiffCostFunction(
CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
int num_residuals = kNumResiduals,
@ -192,7 +193,7 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
}
}
explicit NumericDiffCostFunction(NumericDiffCostFunction&& other)
NumericDiffCostFunction(NumericDiffCostFunction&& other)
: functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
virtual ~NumericDiffCostFunction() {
@ -219,7 +220,7 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
return false;
}
if (jacobians == NULL) {
if (jacobians == nullptr) {
return true;
}
@ -246,6 +247,8 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
return true;
}
const CostFunctor& functor() const { return *functor_; }
private:
std::unique_ptr<CostFunctor> functor_;
Ownership ownership_;

View File

@ -0,0 +1,163 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_
#define CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_
#include <algorithm>
#include <memory>
#include "ceres/first_order_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
namespace ceres {
// Creates FirstOrderFunctions as needed by the GradientProblem
// framework, with gradients computed via numeric differentiation. For
// more information on numeric differentiation, see the wikipedia
// article at https://en.wikipedia.org/wiki/Numerical_differentiation
//
// To get a numerically differentiated cost function, you must define
// a class with an operator() (a functor) that computes the cost.
//
// The function must write the computed value in the last argument
// (the only non-const one) and return true to indicate success.
//
// For example, consider a scalar error e = x'y - a, where both x and y are
// two-dimensional column vector parameters, the prime sign indicates
// transposition, and a is a constant.
//
// To write a numerically-differentiable cost function for the above model,
// first define the object
//
// class QuadraticCostFunctor {
// public:
// explicit QuadraticCostFunctor(double a) : a_(a) {}
// bool operator()(const double* const xy, double* cost) const {
// constexpr int kInputVectorLength = 2;
// const double* const x = xy;
// const double* const y = xy + kInputVectorLength;
// *cost = x[0] * y[0] + x[1] * y[1] - a_;
// return true;
// }
//
// private:
// double a_;
// };
//
//
// Note that in the declaration of operator() the input parameters xy
// come first, and are passed as const pointers to array of
// doubles. The output cost is the last parameter.
//
// Then given this class definition, the numerically differentiated
// first order function with central differences used for computing the
// derivative can be constructed as follows.
//
// FirstOrderFunction* function
//       = new NumericDiffFirstOrderFunction<QuadraticCostFunctor, CENTRAL, 4>(
//             new QuadraticCostFunctor(1.0));
//
//
// In the instantiation above, the template parameters following
// "QuadraticCostFunctor", "CENTRAL, 4", describe the finite
// differencing scheme as "central differencing" and the functor as
// computing its cost from a 4 dimensional input.
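//
// A minimal sketch (illustrative only; assumes the QuadraticCostFunctor above
// and the GradientProblem/GradientProblemSolver headers) of plugging the
// resulting FirstOrderFunction into the gradient-problem machinery:
//
//   GradientProblem gradient_problem(
//       new NumericDiffFirstOrderFunction<QuadraticCostFunctor, CENTRAL, 4>(
//           new QuadraticCostFunctor(1.0)));
//   double xy[4] = {1.0, 1.0, 1.0, 1.0};
//   GradientProblemSolver::Summary summary;
//   Solve(GradientProblemSolver::Options(), gradient_problem, xy, &summary);
//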
template <typename FirstOrderFunctor,
NumericDiffMethodType method,
int kNumParameters>
class NumericDiffFirstOrderFunction final : public FirstOrderFunction {
public:
explicit NumericDiffFirstOrderFunction(
FirstOrderFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor), ownership_(ownership), options_(options) {
static_assert(kNumParameters > 0, "kNumParameters must be positive");
}
~NumericDiffFirstOrderFunction() override {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
}
}
bool Evaluate(const double* const parameters,
double* cost,
double* gradient) const override {
using ParameterDims = internal::StaticParameterDims<kNumParameters>;
constexpr int kNumResiduals = 1;
// Get the function value (cost) at the point to evaluate.
if (!internal::VariadicEvaluate<ParameterDims>(
*functor_, &parameters, cost)) {
return false;
}
if (gradient == nullptr) {
return true;
}
// Create a copy of the parameters which will get mutated.
internal::FixedArray<double, 32> parameters_copy(kNumParameters);
std::copy_n(parameters, kNumParameters, parameters_copy.data());
double* parameters_ptr = parameters_copy.data();
internal::EvaluateJacobianForParameterBlocks<
ParameterDims>::template Apply<method, kNumResiduals>(functor_.get(),
cost,
options_,
kNumResiduals,
&parameters_ptr,
&gradient);
return true;
}
int NumParameters() const override { return kNumParameters; }
const FirstOrderFunctor& functor() const { return *functor_; }
private:
std::unique_ptr<FirstOrderFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};
} // namespace ceres
#endif // CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_

View File

@ -32,7 +32,8 @@
#ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
namespace ceres {
@ -70,4 +71,6 @@ struct CERES_EXPORT NumericDiffOptions {
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_

View File

@ -36,7 +36,7 @@
#include <unordered_map>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "glog/logging.h"
namespace ceres {
@ -190,7 +190,7 @@ class OrderedGroups {
};
// Typedef for the most commonly used version of OrderedGroups.
typedef OrderedGroups<double*> ParameterBlockOrdering;
using ParameterBlockOrdering = OrderedGroups<double*>;
} // namespace ceres

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2021 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -43,6 +43,7 @@
#include "ceres/context.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
#include "glog/logging.h"
@ -53,6 +54,7 @@ class CostFunction;
class EvaluationCallback;
class LossFunction;
class LocalParameterization;
class Manifold;
class Solver;
struct CRSMatrix;
@ -65,7 +67,7 @@ class ResidualBlock;
// A ResidualBlockId is an opaque handle clients can use to remove residual
// blocks from a Problem after adding them.
typedef internal::ResidualBlock* ResidualBlockId;
using ResidualBlockId = internal::ResidualBlock*;
// A class to represent non-linear least squares problems. Such
// problems have a cost function that is a sum of error terms (known
@ -78,31 +80,28 @@ typedef internal::ResidualBlock* ResidualBlockId;
//
// where
//
// r_ij is residual number i, component j; the residual is a
// function of some subset of the parameters x1...xk. For
// example, in a structure from motion problem a residual
// might be the difference between a measured point in an
// image and the reprojected position for the matching
// camera, point pair. The residual would have two
// components, error in x and error in y.
// r_ij is residual number i, component j; the residual is a function of some
// subset of the parameters x1...xk. For example, in a structure from
// motion problem a residual might be the difference between a measured
// point in an image and the reprojected position for the matching
// camera, point pair. The residual would have two components, error in x
// and error in y.
//
// loss(y) is the loss function; for example, squared error or
// Huber L1 loss. If loss(y) = y, then the cost function is
// non-robustified least squares.
// loss(y) is the loss function; for example, squared error or Huber L1
// loss. If loss(y) = y, then the cost function is non-robustified
// least squares.
//
// This class is specifically designed to address the important subset
// of "sparse" least squares problems, where each component of the
// residual depends only on a small number of parameters, even
// though the total number of residuals and parameters may be very
// large. This property affords tremendous gains in scale, allowing
// efficient solving of large problems that are otherwise
// inaccessible.
// This class is specifically designed to address the important subset of
// "sparse" least squares problems, where each component of the residual depends
// only on a small number of parameters, even though the total number of
// residuals and parameters may be very large. This property affords tremendous
// gains in scale, allowing efficient solving of large problems that are
// otherwise inaccessible.
//
// The canonical example of a sparse least squares problem is
// "structure-from-motion" (SFM), where the parameters are points and
// cameras, and residuals are reprojection errors. Typically a single
// residual will depend only on 9 parameters (3 for the point, 6 for
// the camera).
// "structure-from-motion" (SFM), where the parameters are points and cameras,
// and residuals are reprojection errors. Typically a single residual will
// depend only on 9 parameters (3 for the point, 6 for the camera).
//
// To create a least squares problem, use the AddResidualBlock() and
// AddParameterBlock() methods, documented below. Here is an example least
@ -119,41 +118,52 @@ typedef internal::ResidualBlock* ResidualBlockId;
// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3);
//
// Please see cost_function.h for details of the CostFunction object.
//
// NOTE: We are currently in the process of transitioning from
// LocalParameterization to Manifolds in the Ceres API. During this period,
// Problem will support using both Manifold and LocalParameterization objects
// interchangeably. In particular, adding a LocalParameterization to a parameter
// block is the same as adding a Manifold to that parameter block. For methods
// in the API affected by this change, see their documentation below.
class CERES_EXPORT Problem {
public:
struct CERES_EXPORT Options {
// These flags control whether the Problem object owns the cost
// functions, loss functions, and parameterizations passed into
// the Problem. If set to TAKE_OWNERSHIP, then the problem object
// will delete the corresponding cost or loss functions on
// destruction. The destructor is careful to delete the pointers
// only once, since sharing cost/loss/parameterizations is
// allowed.
// These flags control whether the Problem object owns the CostFunctions,
// LossFunctions, LocalParameterizations, and Manifolds passed into the
// Problem.
//
// If set to TAKE_OWNERSHIP, then the problem object will delete the
// corresponding object on destruction. The destructor is careful to delete
// the pointers only once, since sharing objects is allowed.
Ownership cost_function_ownership = TAKE_OWNERSHIP;
Ownership loss_function_ownership = TAKE_OWNERSHIP;
CERES_DEPRECATED_WITH_MSG(
"Local Parameterizations are deprecated. Use Manifold and "
"manifold_ownership instead.")
Ownership local_parameterization_ownership = TAKE_OWNERSHIP;
Ownership manifold_ownership = TAKE_OWNERSHIP;
// If true, trades memory for faster RemoveResidualBlock() and
// RemoveParameterBlock() operations.
//
// By default, RemoveParameterBlock() and RemoveResidualBlock() take time
// proportional to the size of the entire problem. If you only ever remove
// proportional to the size of the entire problem. If you only ever remove
// parameters or residuals from the problem occasionally, this might be
// acceptable. However, if you have memory to spare, enable this option to
// acceptable. However, if you have memory to spare, enable this option to
// make RemoveParameterBlock() take time proportional to the number of
// residual blocks that depend on it, and RemoveResidualBlock() take (on
// average) constant time.
//
// The increase in memory usage is twofold: an additional hash set per
// The increase in memory usage is two-fold: an additional hash set per
// parameter block containing all the residuals that depend on the parameter
// block; and a hash set in the problem containing all residuals.
bool enable_fast_removal = false;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
// these checks, typically around 5% of construction time. If you are sure
// your problem construction is correct, and 5% of the problem construction
// time is truly an overhead you want to avoid, then you can set
// the problem. There is a small but measurable performance penalty to these
// checks, typically around 5% of construction time. If you are sure your
// problem construction is correct, and 5% of the problem construction time
// is truly an overhead you want to avoid, then you can set
// disable_all_safety_checks to true.
//
// WARNING: Do not set this to true, unless you are absolutely sure of what
@ -167,26 +177,23 @@ class CERES_EXPORT Problem {
// Ceres does NOT take ownership of the pointer.
Context* context = nullptr;
// Using this callback interface, Ceres can notify you when it is
// about to evaluate the residuals or jacobians. With the
// callback, you can share computation between residual blocks by
// doing the shared computation in
// Using this callback interface, Ceres can notify you when it is about to
// evaluate the residuals or jacobians. With the callback, you can share
// computation between residual blocks by doing the shared computation in
// EvaluationCallback::PrepareForEvaluation() before Ceres calls
// CostFunction::Evaluate(). It also enables caching results
// between a pure residual evaluation and a residual & jacobian
// evaluation.
// CostFunction::Evaluate(). It also enables caching results between a pure
// residual evaluation and a residual & jacobian evaluation.
//
// Problem DOES NOT take ownership of the callback.
//
// NOTE: Evaluation callbacks are incompatible with inner
// iterations. So calling Solve with
// Solver::Options::use_inner_iterations = true on a Problem with
// a non-null evaluation callback is an error.
// NOTE: Evaluation callbacks are incompatible with inner iterations. So
// calling Solve with Solver::Options::use_inner_iterations = true on a
// Problem with a non-null evaluation callback is an error.
EvaluationCallback* evaluation_callback = nullptr;
};
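// For example, an illustrative sketch of constructing a Problem that does not
// take ownership of its cost functions (the caller must then keep them alive
// for the lifetime of the Problem and delete them afterwards):
//
//   Problem::Options options;
//   options.cost_function_ownership = DO_NOT_TAKE_OWNERSHIP;
//   Problem problem(options);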
// The default constructor is equivalent to the
// invocation Problem(Problem::Options()).
// The default constructor is equivalent to the invocation
// Problem(Problem::Options()).
Problem();
explicit Problem(const Options& options);
Problem(Problem&&);
@ -197,31 +204,29 @@ class CERES_EXPORT Problem {
~Problem();
// Add a residual block to the overall cost function. The cost
// function carries with its information about the sizes of the
// parameter blocks it expects. The function checks that these match
// the sizes of the parameter blocks listed in parameter_blocks. The
// program aborts if a mismatch is detected. loss_function can be
// nullptr, in which case the cost of the term is just the squared norm
// of the residuals.
// Add a residual block to the overall cost function. The cost function
// carries with its information about the sizes of the parameter blocks it
// expects. The function checks that these match the sizes of the parameter
// blocks listed in parameter_blocks. The program aborts if a mismatch is
// detected. loss_function can be nullptr, in which case the cost of the term
// is just the squared norm of the residuals.
//
// The user has the option of explicitly adding the parameter blocks
// using AddParameterBlock. This causes additional correctness
// checking; however, AddResidualBlock implicitly adds the parameter
// blocks if they are not present, so calling AddParameterBlock
// explicitly is not required.
// The user has the option of explicitly adding the parameter blocks using
// AddParameterBlock. This causes additional correctness checking; however,
// AddResidualBlock implicitly adds the parameter blocks if they are not
// present, so calling AddParameterBlock explicitly is not required.
//
// The Problem object by default takes ownership of the
// cost_function and loss_function pointers. These objects remain
// live for the life of the Problem object. If the user wishes to
// keep control over the destruction of these objects, then they can
// The Problem object by default takes ownership of the cost_function and
// loss_function pointers (See Problem::Options to override this behaviour).
// These objects remain live for the life of the Problem object. If the user
// wishes to keep control over the destruction of these objects, then they can
// do this by setting the corresponding enums in the Options struct.
//
// Note: Even though the Problem takes ownership of cost_function
// and loss_function, it does not preclude the user from re-using
// them in another residual block. The destructor takes care to call
// delete on each cost_function or loss_function pointer only once,
// regardless of how many residual blocks refer to them.
// Note: Even though the Problem takes ownership of cost_function and
// loss_function, it does not preclude the user from re-using them in another
// residual block. The destructor takes care to call delete on each
// cost_function or loss_function pointer only once, regardless of how many
// residual blocks refer to them.
//
// Example usage:
//
@ -234,8 +239,8 @@ class CERES_EXPORT Problem {
// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1);
// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1);
//
// Add a residual block by listing the parameter block pointers
// directly instead of wrapping them in a container.
// Add a residual block by listing the parameter block pointers directly
// instead of wrapping them in a container.
template <typename... Ts>
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
@ -261,29 +266,75 @@ class CERES_EXPORT Problem {
double* const* const parameter_blocks,
int num_parameter_blocks);
// Add a parameter block with appropriate size to the problem.
// Repeated calls with the same arguments are ignored. Repeated
// calls with the same double pointer but a different size results
// in undefined behaviour.
// Add a parameter block with appropriate size to the problem. Repeated calls
// with the same arguments are ignored. Repeated calls with the same double
// pointer but a different size will result in a crash.
void AddParameterBlock(double* values, int size);
// Add a parameter block with appropriate size and parameterization
// to the problem. Repeated calls with the same arguments are
// ignored. Repeated calls with the same double pointer but a
// different size results in undefined behaviour.
// Add a parameter block with appropriate size and parameterization to the
// problem. It is okay for local_parameterization to be nullptr.
//
// Repeated calls with the same arguments are ignored. Repeated calls
// with the same double pointer but a different size result in a crash
// (unless Problem::Options::disable_all_safety_checks is set to true).
//
// Repeated calls with the same double pointer and size but a different
// LocalParameterization are equivalent to calling
// SetParameterization(local_parameterization), i.e., any previously
// associated LocalParameterization or Manifold object will be replaced with
// the local_parameterization.
//
// NOTE:
// ----
//
// This method is deprecated and will be removed in the next public
// release of Ceres Solver. Please move to using the Manifold based version of
// AddParameterBlock.
//
// During the transition from LocalParameterization to Manifold, internally
// the LocalParameterization is treated as a Manifold by wrapping it using a
// ManifoldAdapter object. So HasManifold() will return true, GetManifold()
// will return the wrapped object and ParameterBlockTangentSize() will return
// the LocalSize of the LocalParameterization.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Use the version with Manifolds "
"instead.")
void AddParameterBlock(double* values,
int size,
LocalParameterization* local_parameterization);
// Remove a parameter block from the problem. The parameterization of the
// parameter block, if it exists, will persist until the deletion of the
// problem (similar to cost/loss functions in residual block removal). Any
// residual blocks that depend on the parameter are also removed, as
// described above in RemoveResidualBlock().
// Add a parameter block with appropriate size and Manifold to the
// problem. It is okay for manifold to be nullptr.
//
// If Problem::Options::enable_fast_removal is true, then the
// removal is fast (almost constant time). Otherwise, removing a parameter
// block will incur a scan of the entire Problem object.
// Repeated calls with the same arguments are ignored. Repeated calls
// with the same double pointer but a different size result in a crash
// (unless Problem::Options::disable_all_safety_checks is set to true).
//
// Repeated calls with the same double pointer and size but a different
// Manifold are equivalent to calling SetManifold(manifold), i.e., any previously
// associated LocalParameterization or Manifold object will be replaced with
// the manifold.
//
// Note:
// ----
//
// During the transition from LocalParameterization to Manifold, calling
// AddParameterBlock with a Manifold when a LocalParameterization is already
// associated with the parameter block is okay. It is equivalent to calling
// SetManifold(manifold), i.e., any previously associated
// LocalParameterization or Manifold object will be replaced with the
// manifold.
void AddParameterBlock(double* values, int size, Manifold* manifold);
// Remove a parameter block from the problem. The LocalParameterization or
// Manifold of the parameter block, if it exists, will persist until the
// deletion of the problem (similar to cost/loss functions in residual block
// removal). Any residual blocks that depend on the parameter are also
// removed, as described above in RemoveResidualBlock().
//
// If Problem::Options::enable_fast_removal is true, then the removal is fast
// (almost constant time). Otherwise, removing a parameter block will incur a
// scan of the entire Problem object.
//
// WARNING: Removing a residual or parameter block will destroy the implicit
// ordering, rendering the jacobian or residuals returned from the solver
@ -308,35 +359,109 @@ class CERES_EXPORT Problem {
// Allow the indicated parameter block to vary during optimization.
void SetParameterBlockVariable(double* values);
// Returns true if a parameter block is set constant, and false
// otherwise. A parameter block may be set constant in two ways:
// either by calling SetParameterBlockConstant or by associating a
// LocalParameterization with a zero dimensional tangent space with
// it.
// Returns true if a parameter block is set constant, and false otherwise. A
// parameter block may be set constant in two ways: either by calling
// SetParameterBlockConstant or by associating a LocalParameterization or
// Manifold with a zero dimensional tangent space with it.
bool IsParameterBlockConstant(const double* values) const;
// Set the local parameterization for one of the parameter blocks.
// The local_parameterization is owned by the Problem by default. It
// is acceptable to set the same parameterization for multiple
// parameters; the destructor is careful to delete local
// parameterizations only once. Calling SetParameterization with
// nullptr will clear any previously set parameterization.
// Set the LocalParameterization for the parameter block. Calling
// SetParameterization with nullptr will clear any previously set
// LocalParameterization or Manifold for the parameter block.
//
// Repeated calls will cause any previously associated LocalParameterization
// or Manifold object to be replaced with the local_parameterization.
//
// The local_parameterization is owned by the Problem by default (See
// Problem::Options to override this behaviour).
//
// It is acceptable to set the same LocalParameterization for multiple
// parameter blocks; the destructor is careful to delete
// LocalParameterizations only once.
//
// NOTE:
// ----
//
// This method is deprecated and will be removed in the next public
// release of Ceres Solver. Please move to using SetManifold instead.
//
// During the transition from LocalParameterization to Manifold, internally
// the LocalParameterization is treated as a Manifold by wrapping it using a
// ManifoldAdapter object. So HasManifold() will return true, GetManifold()
// will return the wrapped object and ParameterBlockTangentSize will return
// the same value of ParameterBlockLocalSize.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Use SetManifold instead.")
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
// Get the local parameterization object associated with this
// parameter block. If there is no parameterization object
// associated then nullptr is returned.
// Get the LocalParameterization object associated with this parameter block.
// If there is no LocalParameterization associated then nullptr is returned.
//
// NOTE: This method is deprecated and will be removed in the next public
// release of Ceres Solver. Use GetManifold instead.
//
// Note also that if a LocalParameterization is associated with a parameter
// block, HasManifold will return true and GetManifold will return the
// LocalParameterization wrapped in a ManifoldAdapter.
//
// The converse is NOT true, i.e., if a Manifold is associated with a
// parameter block, HasParameterization will return false and
// GetParameterization will return a nullptr.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Use GetManifold "
"instead.")
const LocalParameterization* GetParameterization(const double* values) const;
// Returns true if a LocalParameterization is associated with this parameter
// block, false otherwise.
//
// NOTE: This method is deprecated and will be removed in the next public
// release of Ceres Solver. Use HasManifold instead.
//
// Note also that if a Manifold is associated with the parameter block, this
// method will return false.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Use HasManifold instead.")
bool HasParameterization(const double* values) const;
// Set the Manifold for the parameter block. Calling SetManifold with nullptr
// will clear any previously set LocalParameterization or Manifold for the
// parameter block.
//
// Repeated calls will cause any previously associated
// LocalParameterization or Manifold object to be replaced with the manifold.
//
// The manifold is owned by the Problem by default (See Problem::Options to
// override this behaviour).
//
// It is acceptable to set the same Manifold for multiple parameter blocks.
void SetManifold(double* values, Manifold* manifold);
// Get the Manifold object associated with this parameter block.
//
// If there is no Manifold or LocalParameterization object associated, then
// nullptr is returned.
//
// NOTE: During the transition from LocalParameterization to Manifold,
// internally the LocalParameterization is treated as a Manifold by wrapping
// it using a ManifoldAdapter object. So calling GetManifold on a parameter
// block with a LocalParameterization associated with it will return the
// LocalParameterization wrapped in a ManifoldAdapter.
const Manifold* GetManifold(const double* values) const;
// Returns true if a Manifold or a LocalParameterization is associated with
// this parameter block, false otherwise.
bool HasManifold(const double* values) const;
// Set the lower/upper bound for the parameter at position "index".
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
// Get the lower/upper bound for the parameter at position
// "index". If the parameter is not bounded by the user, then its
// lower bound is -std::numeric_limits<double>::max() and upper
// bound is std::numeric_limits<double>::max().
// Get the lower/upper bound for the parameter at position "index". If the
// parameter is not bounded by the user, then its lower bound is
// -std::numeric_limits<double>::max() and upper bound is
// std::numeric_limits<double>::max().
double GetParameterLowerBound(const double* values, int index) const;
double GetParameterUpperBound(const double* values, int index) const;
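// For example (an illustrative sketch; assumes `x` is a parameter block
// already present in the problem), constraining its first component to the
// interval [0, 1]:
//
//   problem.SetParameterLowerBound(x, 0, 0.0);
//   problem.SetParameterUpperBound(x, 0, 1.0);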
@ -344,37 +469,47 @@ class CERES_EXPORT Problem {
// parameter_blocks().size() and parameter_block_sizes().size().
int NumParameterBlocks() const;
// The size of the parameter vector obtained by summing over the
// sizes of all the parameter blocks.
// The size of the parameter vector obtained by summing over the sizes of all
// the parameter blocks.
int NumParameters() const;
// Number of residual blocks in the problem. Always equals
// residual_blocks().size().
int NumResidualBlocks() const;
// The size of the residual vector obtained by summing over the
// sizes of all of the residual blocks.
// The size of the residual vector obtained by summing over the sizes of all
// of the residual blocks.
int NumResiduals() const;
// The size of the parameter block.
int ParameterBlockSize(const double* values) const;
// The size of local parameterization for the parameter block. If
// there is no local parameterization associated with this parameter
// block, then ParameterBlockLocalSize = ParameterBlockSize.
// The dimension of the tangent space of the LocalParameterization or Manifold
// for the parameter block. If there is no LocalParameterization or Manifold
// associated with this parameter block, then ParameterBlockLocalSize =
// ParameterBlockSize.
CERES_DEPRECATED_WITH_MSG(
"LocalParameterizations are deprecated. Use ParameterBlockTangentSize "
"instead.")
int ParameterBlockLocalSize(const double* values) const;
// The dimension of the tangent space of the LocalParameterization or Manifold
// for the parameter block. If there is no LocalParameterization or Manifold
// associated with this parameter block, then ParameterBlockTangentSize =
// ParameterBlockSize.
int ParameterBlockTangentSize(const double* values) const;
// Is the given parameter block present in this problem or not?
bool HasParameterBlock(const double* values) const;
// Fills the passed parameter_blocks vector with pointers to the
// parameter blocks currently in the problem. After this call,
// parameter_block.size() == NumParameterBlocks.
// Fills the passed parameter_blocks vector with pointers to the parameter
// blocks currently in the problem. After this call, parameter_block.size() ==
// NumParameterBlocks.
void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
// Fills the passed residual_blocks vector with pointers to the
// residual blocks currently in the problem. After this call,
// residual_blocks.size() == NumResidualBlocks.
// Fills the passed residual_blocks vector with pointers to the residual
// blocks currently in the problem. After this call, residual_blocks.size() ==
// NumResidualBlocks.
void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
// Get all the parameter blocks that depend on the given residual block.
@ -393,10 +528,10 @@ class CERES_EXPORT Problem {
// Get all the residual blocks that depend on the given parameter block.
//
// If Problem::Options::enable_fast_removal is true, then
// getting the residual blocks is fast and depends only on the number of
// residual blocks. Otherwise, getting the residual blocks for a parameter
// block will incur a scan of the entire Problem object.
// If Problem::Options::enable_fast_removal is true, then getting the residual
// blocks is fast and depends only on the number of residual
// blocks. Otherwise, getting the residual blocks for a parameter block will
// incur a scan of the entire Problem object.
void GetResidualBlocksForParameterBlock(
const double* values,
std::vector<ResidualBlockId>* residual_blocks) const;
@ -404,49 +539,45 @@ class CERES_EXPORT Problem {
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
// The set of parameter blocks for which evaluation should be
// performed. This vector determines the order that parameter
// blocks occur in the gradient vector and in the columns of the
// jacobian matrix. If parameter_blocks is empty, then it is
// assumed to be equal to vector containing ALL the parameter
// blocks. Generally speaking the parameter blocks will occur in
// the order in which they were added to the problem. But, this
// may change if the user removes any parameter blocks from the
// problem.
// performed. This vector determines the order that parameter blocks occur
// in the gradient vector and in the columns of the jacobian matrix. If
// parameter_blocks is empty, then it is assumed to be equal to vector
// containing ALL the parameter blocks. Generally speaking the parameter
// blocks will occur in the order in which they were added to the
// problem. But, this may change if the user removes any parameter blocks
// from the problem.
//
// NOTE: This vector should contain the same pointers as the ones
// used to add parameter blocks to the Problem. These parameter
// block should NOT point to new memory locations. Bad things will
// happen otherwise.
// NOTE: This vector should contain the same pointers as the ones used to
// add parameter blocks to the Problem. These parameter block should NOT
// point to new memory locations. Bad things will happen otherwise.
std::vector<double*> parameter_blocks;
// The set of residual blocks to evaluate. This vector determines
// the order in which the residuals occur, and how the rows of the
// jacobian are ordered. If residual_blocks is empty, then it is
// assumed to be equal to the vector containing ALL the residual
// blocks. Generally speaking the residual blocks will occur in
// the order in which they were added to the problem. But, this
// may change if the user removes any residual blocks from the
// problem.
// The set of residual blocks to evaluate. This vector determines the order
// in which the residuals occur, and how the rows of the jacobian are
// ordered. If residual_blocks is empty, then it is assumed to be equal to
// the vector containing ALL the residual blocks. Generally speaking the
// residual blocks will occur in the order in which they were added to the
// problem. But, this may change if the user removes any residual blocks
// from the problem.
std::vector<ResidualBlockId> residual_blocks;
// Even though the residual blocks in the problem may contain loss
// functions, setting apply_loss_function to false will turn off
// the application of the loss function to the output of the cost
// function. This is of use for example if the user wishes to
// analyse the solution quality by studying the distribution of
// residuals before and after the solve.
// functions, setting apply_loss_function to false will turn off the
// application of the loss function to the output of the cost function. This
// is of use for example if the user wishes to analyse the solution quality
// by studying the distribution of residuals before and after the solve.
bool apply_loss_function = true;
int num_threads = 1;
};
// Evaluate Problem. Any of the output pointers can be nullptr. Which
// residual blocks and parameter blocks are used is controlled by
// the EvaluateOptions struct above.
// Evaluate Problem. Any of the output pointers can be nullptr. Which residual
// blocks and parameter blocks are used is controlled by the EvaluateOptions
// struct above.
//
// Note 1: The evaluation will use the values stored in the memory
// locations pointed to by the parameter block pointers used at the
// time of the construction of the problem. i.e.,
// Note 1: The evaluation will use the values stored in the memory locations
// pointed to by the parameter block pointers used at the time of the
// construction of the problem. i.e.,
//
// Problem problem;
// double x = 1;
@ -456,8 +587,8 @@ class CERES_EXPORT Problem {
// problem.Evaluate(Problem::EvaluateOptions(), &cost,
// nullptr, nullptr, nullptr);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
// The cost is evaluated at x = 1. If you wish to evaluate the problem at x =
// 2, then
//
// x = 2;
// problem.Evaluate(Problem::EvaluateOptions(), &cost,
@ -465,80 +596,75 @@ class CERES_EXPORT Problem {
//
// is the way to do so.
//
// Note 2: If no local parameterizations are used, then the size of
// the gradient vector (and the number of columns in the jacobian)
// is the sum of the sizes of all the parameter blocks. If a
// parameter block has a local parameterization, then it contributes
// "LocalSize" entries to the gradient vector (and the number of
// columns in the jacobian).
// Note 2: If no LocalParameterizations or Manifolds are used, then the size
// of the gradient vector (and the number of columns in the jacobian) is the
// sum of the sizes of all the parameter blocks. If a parameter block has a
// LocalParameterization or Manifold, then it contributes "TangentSize"
// entries to the gradient vector (and the number of columns in the jacobian).
//
// Note 3: This function cannot be called while the problem is being
// solved, for example it cannot be called from an IterationCallback
// at the end of an iteration during a solve.
// Note 3: This function cannot be called while the problem is being solved,
// for example it cannot be called from an IterationCallback at the end of an
// iteration during a solve.
//
// Note 4: If an EvaluationCallback is associated with the problem,
// then its PrepareForEvaluation method will be called every time
// this method is called with new_point = true.
// Note 4: If an EvaluationCallback is associated with the problem, then its
// PrepareForEvaluation method will be called every time this method is called
// with new_point = true.
bool Evaluate(const EvaluateOptions& options,
double* cost,
std::vector<double>* residuals,
std::vector<double>* gradient,
CRSMatrix* jacobian);
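// For example, an illustrative sketch of evaluating just the total cost and
// the residual vector at the current parameter values:
//
//   double total_cost = 0.0;
//   std::vector<double> residuals;
//   problem.Evaluate(Problem::EvaluateOptions(), &total_cost, &residuals,
//                    nullptr, nullptr);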
// Evaluates the residual block, storing the scalar cost in *cost,
// the residual components in *residuals, and the jacobians between
// the parameters and residuals in jacobians[i], in row-major order.
// Evaluates the residual block, storing the scalar cost in *cost, the
// residual components in *residuals, and the jacobians between the parameters
// and residuals in jacobians[i], in row-major order.
//
// If residuals is nullptr, the residuals are not computed.
//
// If jacobians is nullptr, no Jacobians are computed. If
// jacobians[i] is nullptr, then the Jacobian for that parameter
// block is not computed.
// If jacobians is nullptr, no Jacobians are computed. If jacobians[i] is
// nullptr, then the Jacobian for that parameter block is not computed.
//
// It is not okay to request the Jacobian w.r.t a parameter block
// that is constant.
// It is not okay to request the Jacobian w.r.t a parameter block that is
// constant.
//
// The return value indicates the success or failure. Even if the
// function returns false, the caller should expect the output
// memory locations to have been modified.
// The return value indicates the success or failure. Even if the function
// returns false, the caller should expect the output memory locations to have
// been modified.
//
// The returned cost and jacobians have had robustification and
// local parameterizations applied already; for example, the
// jacobian for a 4-dimensional quaternion parameter using the
// "QuaternionParameterization" is num_residuals by 3 instead of
// num_residuals by 4.
// LocalParameterization/Manifold applied already; for example, the jacobian
// for a 4-dimensional quaternion parameter using the
// "QuaternionParameterization" is num_residuals by 3 instead of num_residuals
// by 4.
//
// apply_loss_function as the name implies allows the user to switch
// the application of the loss function on and off.
// apply_loss_function as the name implies allows the user to switch the
// application of the loss function on and off.
//
// If an EvaluationCallback is associated with the problem, then its
// PrepareForEvaluation method will be called every time this method
// is called with new_point = true. This conservatively assumes that
// the user may have changed the parameter values since the previous
// call to evaluate / solve. For improved efficiency, and only if
// you know that the parameter values have not changed between
// calls, see EvaluateResidualBlockAssumingParametersUnchanged().
// PrepareForEvaluation method will be called every time this method is called
// with new_point = true. This conservatively assumes that the user may have
// changed the parameter values since the previous call to evaluate / solve.
// For improved efficiency, and only if you know that the parameter values
// have not changed between calls, see
// EvaluateResidualBlockAssumingParametersUnchanged().
bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
bool apply_loss_function,
double* cost,
double* residuals,
double** jacobians) const;
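// For example (an illustrative sketch; `id` is a ResidualBlockId previously
// returned by AddResidualBlock for a residual block with two residuals):
//
//   double cost = 0.0;
//   double residuals[2];
//   problem.EvaluateResidualBlock(id, /*apply_loss_function=*/true, &cost,
//                                 residuals, /*jacobians=*/nullptr);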
// Same as EvaluateResidualBlock except that if an
// EvaluationCallback is associated with the problem, then its
// PrepareForEvaluation method will be called every time this method
// is called with new_point = false.
// Same as EvaluateResidualBlock except that if an EvaluationCallback is
// associated with the problem, then its PrepareForEvaluation method will be
// called every time this method is called with new_point = false.
//
// This means, if an EvaluationCallback is associated with the
// problem then it is the user's responsibility to call
// PrepareForEvaluation before calling this method if necessary,
// i.e. iff the parameter values have been changed since the last
// call to evaluate / solve.
// This means, if an EvaluationCallback is associated with the problem then it
// is the user's responsibility to call PrepareForEvaluation before calling
// this method if necessary, i.e. iff the parameter values have been changed
// since the last call to evaluate / solve.
//
// This is because, as the name implies, we assume that the
// parameter blocks did not change since the last time
// PrepareForEvaluation was called (via Solve, Evaluate or
// EvaluateResidualBlock).
// This is because, as the name implies, we assume that the parameter blocks
// did not change since the last time PrepareForEvaluation was called (via
// Solve, Evaluate or EvaluateResidualBlock).
bool EvaluateResidualBlockAssumingParametersUnchanged(
ResidualBlockId residual_block_id,
bool apply_loss_function,

View File

@ -0,0 +1,328 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// sergiu.deitsch@gmail.com (Sergiu Deitsch)
//
#ifndef CERES_PUBLIC_PRODUCT_MANIFOLD_H_
#define CERES_PUBLIC_PRODUCT_MANIFOLD_H_
#include <algorithm>
#include <array>
#include <cassert>
#include <cstddef>
#include <numeric>
#include <tuple>
#include <type_traits>
#include <utility>
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/internal/port.h"
#include "ceres/manifold.h"
namespace ceres {
// Construct a manifold by taking the Cartesian product of a number of other
// manifolds. This is useful, when a parameter block is the Cartesian product
// of two or more manifolds. For example the parameters of a camera consist of
// a rotation and a translation, i.e., SO(3) x R^3.
//
// Example usage:
//
// ProductManifold<QuaternionManifold, EuclideanManifold<3>> se3;
//
// is the manifold for a rigid transformation, where the rotation is
// represented using a quaternion.
//
// Manifolds can be copied and moved to ProductManifold:
//
// SubsetManifold manifold1(5, {2});
// SubsetManifold manifold2(3, {0, 1});
// ProductManifold<SubsetManifold, SubsetManifold> manifold(manifold1,
// manifold2);
//
// In advanced use cases, manifolds can be dynamically allocated and passed as
// (smart) pointers:
//
// ProductManifold<std::unique_ptr<QuaternionManifold>, EuclideanManifold<3>>
// se3{std::make_unique<QuaternionManifold>(), EuclideanManifold<3>{}};
//
// In C++17, the template parameters can be left out as they are automatically
// deduced making the initialization much simpler:
//
// ProductManifold se3{QuaternionManifold{}, EuclideanManifold<3>{}};
//
// The manifold implementations must be either default constructible, copyable
// or moveable to be usable in a ProductManifold.
template <typename Manifold0, typename Manifold1, typename... ManifoldN>
class ProductManifold final : public Manifold {
public:
// ProductManifold constructor perfect forwards arguments to store manifolds.
//
// Either use default construction or if you need to copy or move-construct a
// manifold instance, you need to pass an instance as an argument for all
// types given as class template parameters.
template <typename... Args,
std::enable_if_t<std::is_constructible<
std::tuple<Manifold0, Manifold1, ManifoldN...>,
Args...>::value>* = nullptr>
explicit ProductManifold(Args&&... manifolds)
: ProductManifold{std::make_index_sequence<kNumManifolds>{},
std::forward<Args>(manifolds)...} {}
int AmbientSize() const override { return ambient_size_; }
int TangentSize() const override { return tangent_size_; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override {
return PlusImpl(
x, delta, x_plus_delta, std::make_index_sequence<kNumManifolds>{});
}
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override {
return MinusImpl(
y, x, y_minus_x, std::make_index_sequence<kNumManifolds>{});
}
bool PlusJacobian(const double* x, double* jacobian_ptr) const override {
MatrixRef jacobian(jacobian_ptr, AmbientSize(), TangentSize());
jacobian.setZero();
internal::FixedArray<double> buffer(buffer_size_);
return PlusJacobianImpl(
x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{});
}
bool MinusJacobian(const double* x, double* jacobian_ptr) const override {
MatrixRef jacobian(jacobian_ptr, TangentSize(), AmbientSize());
jacobian.setZero();
internal::FixedArray<double> buffer(buffer_size_);
return MinusJacobianImpl(
x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{});
}
private:
static constexpr std::size_t kNumManifolds = 2 + sizeof...(ManifoldN);
template <std::size_t... Indices, typename... Args>
explicit ProductManifold(std::index_sequence<Indices...>, Args&&... manifolds)
: manifolds_{std::forward<Args>(manifolds)...},
buffer_size_{(std::max)(
{(Dereference(std::get<Indices>(manifolds_)).TangentSize() *
Dereference(std::get<Indices>(manifolds_)).AmbientSize())...})},
ambient_sizes_{
Dereference(std::get<Indices>(manifolds_)).AmbientSize()...},
tangent_sizes_{
Dereference(std::get<Indices>(manifolds_)).TangentSize()...},
ambient_offsets_{ExclusiveScan(ambient_sizes_)},
tangent_offsets_{ExclusiveScan(tangent_sizes_)},
ambient_size_{
std::accumulate(ambient_sizes_.begin(), ambient_sizes_.end(), 0)},
tangent_size_{
std::accumulate(tangent_sizes_.begin(), tangent_sizes_.end(), 0)} {}
template <std::size_t Index0, std::size_t... Indices>
bool PlusImpl(const double* x,
const double* delta,
double* x_plus_delta,
std::index_sequence<Index0, Indices...>) const {
if (!Dereference(std::get<Index0>(manifolds_))
.Plus(x + ambient_offsets_[Index0],
delta + tangent_offsets_[Index0],
x_plus_delta + ambient_offsets_[Index0])) {
return false;
}
return PlusImpl(x, delta, x_plus_delta, std::index_sequence<Indices...>{});
}
static constexpr bool PlusImpl(const double* /*x*/,
const double* /*delta*/,
double* /*x_plus_delta*/,
std::index_sequence<>) noexcept {
return true;
}
template <std::size_t Index0, std::size_t... Indices>
bool MinusImpl(const double* y,
const double* x,
double* y_minus_x,
std::index_sequence<Index0, Indices...>) const {
if (!Dereference(std::get<Index0>(manifolds_))
.Minus(y + ambient_offsets_[Index0],
x + ambient_offsets_[Index0],
y_minus_x + tangent_offsets_[Index0])) {
return false;
}
return MinusImpl(y, x, y_minus_x, std::index_sequence<Indices...>{});
}
static constexpr bool MinusImpl(const double* /*y*/,
const double* /*x*/,
double* /*y_minus_x*/,
std::index_sequence<>) noexcept {
return true;
}
template <std::size_t Index0, std::size_t... Indices>
bool PlusJacobianImpl(const double* x,
MatrixRef& jacobian,
internal::FixedArray<double>& buffer,
std::index_sequence<Index0, Indices...>) const {
if (!Dereference(std::get<Index0>(manifolds_))
.PlusJacobian(x + ambient_offsets_[Index0], buffer.data())) {
return false;
}
jacobian.block(ambient_offsets_[Index0],
tangent_offsets_[Index0],
ambient_sizes_[Index0],
tangent_sizes_[Index0]) =
MatrixRef(
buffer.data(), ambient_sizes_[Index0], tangent_sizes_[Index0]);
return PlusJacobianImpl(
x, jacobian, buffer, std::index_sequence<Indices...>{});
}
static constexpr bool PlusJacobianImpl(
const double* /*x*/,
MatrixRef& /*jacobian*/,
internal::FixedArray<double>& /*buffer*/,
std::index_sequence<>) noexcept {
return true;
}
template <std::size_t Index0, std::size_t... Indices>
bool MinusJacobianImpl(const double* x,
MatrixRef& jacobian,
internal::FixedArray<double>& buffer,
std::index_sequence<Index0, Indices...>) const {
if (!Dereference(std::get<Index0>(manifolds_))
.MinusJacobian(x + ambient_offsets_[Index0], buffer.data())) {
return false;
}
jacobian.block(tangent_offsets_[Index0],
ambient_offsets_[Index0],
tangent_sizes_[Index0],
ambient_sizes_[Index0]) =
MatrixRef(
buffer.data(), tangent_sizes_[Index0], ambient_sizes_[Index0]);
return MinusJacobianImpl(
x, jacobian, buffer, std::index_sequence<Indices...>{});
}
static constexpr bool MinusJacobianImpl(
const double* /*x*/,
MatrixRef& /*jacobian*/,
internal::FixedArray<double>& /*buffer*/,
std::index_sequence<>) noexcept {
return true;
}
template <typename T, std::size_t N>
static std::array<T, N> ExclusiveScan(const std::array<T, N>& values) {
std::array<T, N> result;
T init = 0;
// TODO Replace by std::exclusive_scan once C++17 is available
for (std::size_t i = 0; i != N; ++i) {
result[i] = init;
init += values[i];
}
return result;
}
// TODO Replace by std::void_t once C++17 is available
template <typename... Types>
struct Void {
using type = void;
};
template <typename T, typename E = void>
struct IsDereferenceable : std::false_type {};
template <typename T>
struct IsDereferenceable<T, typename Void<decltype(*std::declval<T>())>::type>
: std::true_type {};
template <typename T,
std::enable_if_t<!IsDereferenceable<T>::value>* = nullptr>
static constexpr decltype(auto) Dereference(T& value) {
return value;
}
// Support dereferenceable types such as std::unique_ptr, std::shared_ptr, raw
// pointers etc.
template <typename T,
std::enable_if_t<IsDereferenceable<T>::value>* = nullptr>
static constexpr decltype(auto) Dereference(T& value) {
return *value;
}
template <typename T>
static constexpr decltype(auto) Dereference(T* p) {
assert(p != nullptr);
return *p;
}
std::tuple<Manifold0, Manifold1, ManifoldN...> manifolds_;
int buffer_size_;
std::array<int, kNumManifolds> ambient_sizes_;
std::array<int, kNumManifolds> tangent_sizes_;
std::array<int, kNumManifolds> ambient_offsets_;
std::array<int, kNumManifolds> tangent_offsets_;
int ambient_size_;
int tangent_size_;
};
#ifdef CERES_HAS_CPP17
// C++17 deduction guide that allows the user to avoid explicitly specifying
// the template parameters of ProductManifold. The class can instead be
// instantiated as follows:
//
// ProductManifold manifold{QuaternionManifold{}, EuclideanManifold<3>{}};
//
template <typename Manifold0, typename Manifold1, typename... Manifolds>
ProductManifold(Manifold0&&, Manifold1&&, Manifolds&&...)
-> ProductManifold<Manifold0, Manifold1, Manifolds...>;
#endif
} // namespace ceres
#endif // CERES_PUBLIC_PRODUCT_MANIFOLD_H_
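As a usage sketch (not part of the upstream diff), this is roughly how the product manifold above can be attached to a seven-double camera pose block through the Manifold API this update migrates to; the pose array and the ownership remark are assumptions on my part.

#include "ceres/manifold.h"
#include "ceres/problem.h"
#include "ceres/product_manifold.h"

void AttachPoseManifold(ceres::Problem& problem, double* camera_pose) {
  // SE(3)-style block [qw qx qy qz tx ty tz]: unit quaternion (ambient 4,
  // tangent 3) x R^3, so the block has 7 ambient and 6 tangent dimensions.
  auto* se3 = new ceres::ProductManifold<ceres::QuaternionManifold,
                                         ceres::EuclideanManifold<3>>();
  problem.AddParameterBlock(camera_pose, 7);
  problem.SetManifold(camera_pose, se3);  // default options: problem owns se3
}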

View File

@ -521,18 +521,18 @@ inline void UnitQuaternionRotatePoint(const T q[4],
DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
// clang-format off
const T t2 = q[0] * q[1];
const T t3 = q[0] * q[2];
const T t4 = q[0] * q[3];
const T t5 = -q[1] * q[1];
const T t6 = q[1] * q[2];
const T t7 = q[1] * q[3];
const T t8 = -q[2] * q[2];
const T t9 = q[2] * q[3];
const T t1 = -q[3] * q[3];
result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]; // NOLINT
result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]; // NOLINT
result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT
T uv0 = q[2] * pt[2] - q[3] * pt[1];
T uv1 = q[3] * pt[0] - q[1] * pt[2];
T uv2 = q[1] * pt[1] - q[2] * pt[0];
uv0 += uv0;
uv1 += uv1;
uv2 += uv2;
result[0] = pt[0] + q[0] * uv0;
result[1] = pt[1] + q[0] * uv1;
result[2] = pt[2] + q[0] * uv2;
result[0] += q[2] * uv2 - q[3] * uv1;
result[1] += q[3] * uv0 - q[1] * uv2;
result[2] += q[1] * uv1 - q[2] * uv0;
// clang-format on
}
@ -624,16 +624,16 @@ inline void AngleAxisRotatePoint(const T angle_axis[3],
result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp;
} else {
// Near zero, the first order Taylor approximation of the rotation
// matrix R corresponding to a vector w and angle w is
// matrix R corresponding to a vector w and angle theta is
//
// R = I + hat(w) * sin(theta)
//
// But sintheta ~ theta and theta * w = angle_axis, which gives us
//
// R = I + hat(w)
// R = I + hat(angle_axis)
//
// and actually performing multiplication with the point pt, gives us
// R * pt = pt + w x pt.
// R * pt = pt + angle_axis x pt.
//
// Switching to the Taylor expansion near zero provides meaningful
// derivatives when evaluated using Jets.
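A short sketch, not part of the upstream diff, exercising the two rotation routines touched above with made-up inputs; the expected results follow from the quaternion convention [w, x, y, z] and the small-angle comment.

#include <cmath>
#include "ceres/rotation.h"

void RotationSketch() {
  // 90 degree rotation about Z as a unit quaternion [w, x, y, z].
  const double s = std::sqrt(0.5);
  const double q[4] = {s, 0.0, 0.0, s};
  const double pt[3] = {1.0, 0.0, 0.0};
  double rotated[3];
  ceres::UnitQuaternionRotatePoint(q, pt, rotated);  // rotated ~ (0, 1, 0)

  // For a tiny angle_axis the Taylor branch gives R * pt ~ pt + angle_axis x pt.
  const double angle_axis[3] = {0.0, 0.0, 1e-10};
  double approx[3];
  ceres::AngleAxisRotatePoint(angle_axis, pt, approx);  // ~ (1.0, 1e-10, 0.0)
}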

View File

@ -61,8 +61,6 @@ class SizedCostFunction : public CostFunction {
*mutable_parameter_block_sizes() = std::vector<int32_t>{Ns...};
}
virtual ~SizedCostFunction() {}
// Subclasses must implement Evaluate().
};

View File

@ -38,8 +38,9 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/config.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
@ -363,23 +364,23 @@ class CERES_EXPORT Solver {
std::unordered_set<ResidualBlockId>
residual_blocks_for_subset_preconditioner;
// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
// the valid choices. EIGEN is always available, LAPACK refers to
// the system BLAS + LAPACK library which may or may not be
// Ceres supports using multiple dense linear algebra libraries for dense
// matrix factorizations. Currently EIGEN, LAPACK and CUDA are the valid
// choices. EIGEN is always available, LAPACK refers to the system BLAS +
// LAPACK library which may or may not be available. CUDA refers to Nvidia's
// GPU based dense linear algebra library, which may or may not be
// available.
//
// This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and
// DENSE_SCHUR solvers. For small to moderate sized problem EIGEN
// is a fine choice but for large problems, an optimized LAPACK +
// BLAS implementation can make a substantial difference in
// performance.
// This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and DENSE_SCHUR
// solvers. For small to moderate sized problems EIGEN is a fine choice, but
// for large problems, an optimized LAPACK + BLAS or CUDA implementation can
// make a substantial difference in performance.
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
// Ceres supports using multiple sparse linear algebra libraries
// for sparse matrix ordering and factorizations. Currently,
// SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
// whether they are linked into Ceres at build time.
// Ceres supports using multiple sparse linear algebra libraries for sparse
// matrix ordering and factorizations. Currently, SUITE_SPARSE and CX_SPARSE
// are the valid choices, depending on whether they are linked into Ceres at
// build time.
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
#if !defined(CERES_NO_SUITESPARSE)
SUITE_SPARSE;
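A hedged configuration sketch (not part of the upstream diff) of the backend choices described above; CUDA and LAPACK are usable only when Ceres was built with them, and the commented sparse lines are just an alternative.

#include "ceres/solver.h"

ceres::Solver::Options MakeSolverOptions() {
  ceres::Solver::Options options;
  // Dense path: DENSE_QR / DENSE_NORMAL_CHOLESKY / DENSE_SCHUR honour this.
  options.linear_solver_type = ceres::DENSE_QR;
  options.dense_linear_algebra_library_type = ceres::CUDA;  // or LAPACK, EIGEN
  // Sparse path, if the problem is large and sparse:
  //   options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
  //   options.sparse_linear_algebra_library_type = ceres::SUITE_SPARSE;
  return options;
}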
@ -423,7 +424,7 @@ class CERES_EXPORT Solver {
// each group, Ceres is free to order the parameter blocks as it
// chooses.
//
// If NULL, then all parameter blocks are assumed to be in the
// If nullptr, then all parameter blocks are assumed to be in the
// same group and the solver is free to decide the best
// ordering.
//
@ -536,8 +537,9 @@ class CERES_EXPORT Solver {
// max_num_refinement_iterations to 2-3.
//
// NOTE2: The following two options are currently only applicable
// if sparse_linear_algebra_library_type is EIGEN_SPARSE and
// linear_solver_type is SPARSE_NORMAL_CHOLESKY, or SPARSE_SCHUR.
// if sparse_linear_algebra_library_type is EIGEN_SPARSE or
// ACCELERATE_SPARSE, and linear_solver_type is SPARSE_NORMAL_CHOLESKY
// or SPARSE_SCHUR.
bool use_mixed_precision_solves = false;
// Number steps of the iterative refinement process to run when
@ -882,7 +884,7 @@ class CERES_EXPORT Solver {
// Dimension of the tangent space of the problem (or the number of
// columns in the Jacobian for the problem). This is different
// from num_parameters if a parameter block is associated with a
// LocalParameterization
// LocalParameterization/Manifold.
int num_effective_parameters = -1;
// Number of residual blocks in the problem.
@ -903,7 +905,7 @@ class CERES_EXPORT Solver {
// number of columns in the Jacobian for the reduced
// problem). This is different from num_parameters_reduced if a
// parameter block in the reduced problem is associated with a
// LocalParameterization.
// LocalParameterization/Manifold.
int num_effective_parameters_reduced = -1;
// Number of residual blocks in the reduced problem.

View File

@ -0,0 +1,231 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: vitus@google.com (Mike Vitus)
// jodebo_beck@gmx.de (Johannes Beck)
#ifndef CERES_PUBLIC_SPHERE_MANIFOLD_H_
#define CERES_PUBLIC_SPHERE_MANIFOLD_H_
#include <Eigen/Core>
#include <algorithm>
#include <array>
#include <memory>
#include <vector>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/internal/householder_vector.h"
#include "ceres/internal/sphere_manifold_functions.h"
#include "ceres/manifold.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
// This provides a manifold on a sphere meaning that the norm of the vector
// stays the same. Such cases often arise in Structure from Motion
// problems. One example where they are used is in representing points whose
// triangulation is ill-conditioned. Here it is advantageous to use an
// over-parameterization since homogeneous vectors can represent points at
// infinity.
//
// The plus operator is defined as
// Plus(x, delta) =
// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x
//
// The minus operator is defined as
// Minus(x, y) = 2 atan2(nhy, y[-1]) / nhy * hy[0 : size_ - 1]
// with nhy = norm(hy[0 : size_ - 1])
//
// with * defined as an operator which applies the update orthogonal to x to
// remain on the sphere. The ambient space dimension is required to be greater
// than 1.
//
// The class works with dynamic and static ambient space dimensions. If the
// ambient space dimensions is known at compile time use
//
// SphereManifold<3> manifold;
//
// If the ambient space dimensions is not known at compile time the template
// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs
// to be provided as a constructor argument:
//
// SphereManifold<ceres::DYNAMIC> manifold(ambient_dim);
//
// See section B.2 (p.25) in "Integrating Generic Sensor Fusion Algorithms
// with Sound State Representations through Encapsulation of Manifolds" by C.
// Hertzberg, R. Wagner, U. Frese and L. Schroder for more details
// (https://arxiv.org/pdf/1107.1119.pdf)
template <int AmbientSpaceDimension>
class SphereManifold final : public Manifold {
public:
static_assert(
AmbientSpaceDimension == ceres::DYNAMIC || AmbientSpaceDimension > 1,
"The size of the homogeneous vector needs to be greater than 1.");
static_assert(ceres::DYNAMIC == Eigen::Dynamic,
"ceres::DYNAMIC needs to be the same as Eigen::Dynamic.");
SphereManifold();
explicit SphereManifold(int size);
int AmbientSize() const override {
return AmbientSpaceDimension == ceres::DYNAMIC ? size_
: AmbientSpaceDimension;
}
int TangentSize() const override { return AmbientSize() - 1; }
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
bool PlusJacobian(const double* x, double* jacobian) const override;
bool Minus(const double* y,
const double* x,
double* y_minus_x) const override;
bool MinusJacobian(const double* x, double* jacobian) const override;
private:
static constexpr int TangentSpaceDimension =
AmbientSpaceDimension > 0 ? AmbientSpaceDimension - 1 : Eigen::Dynamic;
using AmbientVector = Eigen::Matrix<double, AmbientSpaceDimension, 1>;
using TangentVector = Eigen::Matrix<double, TangentSpaceDimension, 1>;
using MatrixPlusJacobian = Eigen::Matrix<double,
AmbientSpaceDimension,
TangentSpaceDimension,
Eigen::RowMajor>;
using MatrixMinusJacobian = Eigen::Matrix<double,
TangentSpaceDimension,
AmbientSpaceDimension,
Eigen::RowMajor>;
const int size_{};
};
template <int AmbientSpaceDimension>
SphereManifold<AmbientSpaceDimension>::SphereManifold()
: size_{AmbientSpaceDimension} {
static_assert(
AmbientSpaceDimension != Eigen::Dynamic,
"The size is set to dynamic. Please call the constructor with a size.");
}
template <int AmbientSpaceDimension>
SphereManifold<AmbientSpaceDimension>::SphereManifold(int size) : size_{size} {
if (AmbientSpaceDimension != Eigen::Dynamic) {
CHECK_EQ(AmbientSpaceDimension, size)
<< "Specified size by template parameter differs from the supplied "
"one.";
} else {
CHECK_GT(size_, 1)
<< "The size of the manifold needs to be greater than 1.";
}
}
template <int AmbientSpaceDimension>
bool SphereManifold<AmbientSpaceDimension>::Plus(
const double* x_ptr,
const double* delta_ptr,
double* x_plus_delta_ptr) const {
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<const TangentVector> delta(delta_ptr, size_ - 1);
Eigen::Map<AmbientVector> x_plus_delta(x_plus_delta_ptr, size_);
const double norm_delta = delta.norm();
if (norm_delta == 0.0) {
x_plus_delta = x;
return true;
}
AmbientVector v(size_);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>,
double,
AmbientSpaceDimension>(x, &v, &beta);
internal::ComputeSphereManifoldPlus(
v, beta, x, delta, norm_delta, &x_plus_delta);
return true;
}
template <int AmbientSpaceDimension>
bool SphereManifold<AmbientSpaceDimension>::PlusJacobian(
const double* x_ptr, double* jacobian_ptr) const {
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<MatrixPlusJacobian> jacobian(jacobian_ptr, size_, size_ - 1);
internal::ComputeSphereManifoldPlusJacobian(x, &jacobian);
return true;
}
template <int AmbientSpaceDimension>
bool SphereManifold<AmbientSpaceDimension>::Minus(const double* y_ptr,
const double* x_ptr,
double* y_minus_x_ptr) const {
AmbientVector y = Eigen::Map<const AmbientVector>(y_ptr, size_);
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<TangentVector> y_minus_x(y_minus_x_ptr, size_ - 1);
// Apply Householder transformation.
AmbientVector v(size_);
double beta;
// NOTE: The explicit template arguments are needed here because
// ComputeHouseholderVector is templated and some versions of MSVC
// have trouble deducing the type of v automatically.
internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>,
double,
AmbientSpaceDimension>(x, &v, &beta);
internal::ComputeSphereManifoldMinus(v, beta, x, y, &y_minus_x);
return true;
}
template <int AmbientSpaceDimension>
bool SphereManifold<AmbientSpaceDimension>::MinusJacobian(
const double* x_ptr, double* jacobian_ptr) const {
Eigen::Map<const AmbientVector> x(x_ptr, size_);
Eigen::Map<MatrixMinusJacobian> jacobian(jacobian_ptr, size_ - 1, size_);
internal::ComputeSphereManifoldMinusJacobian(x, &jacobian);
return true;
}
} // namespace ceres
// clang-format off
#include "ceres/internal/reenable_warnings.h"
// clang-format on
#endif // CERES_PUBLIC_SPHERE_MANIFOLD_H_
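A small usage sketch, not part of the upstream diff, attaching a SphereManifold to an over-parameterized homogeneous point; the parameter block and the AddParameterBlock overload taking a Manifold* are assumptions based on the 2.1 Problem API.

#include "ceres/problem.h"
#include "ceres/sphere_manifold.h"

void AttachSphereManifold(ceres::Problem& problem, double* homogeneous_point) {
  // Ambient dimension 4 (homogeneous 3D point), tangent dimension 3.
  problem.AddParameterBlock(homogeneous_point, 4,
                            new ceres::SphereManifold<4>());
  // Equivalent dynamically sized variant:
  //   new ceres::SphereManifold<ceres::DYNAMIC>(4);
}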

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// Copyright 2021 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -84,7 +84,8 @@ namespace ceres {
// double* parameters -- NUM_PARAMETERS or NumParameters()
// double* residuals -- NUM_RESIDUALS or NumResiduals()
// double* jacobian -- NUM_RESIDUALS * NUM_PARAMETERS in column-major format
// (Eigen's default); or NULL if no jacobian requested.
// (Eigen's default); or nullptr if no jacobian
// requested.
//
// An example (fully statically sized):
//
@ -126,8 +127,8 @@ namespace ceres {
//
template <typename Function,
typename LinearSolver =
Eigen::LDLT<Eigen::Matrix<typename Function::Scalar,
Function::NUM_PARAMETERS,
Eigen::LDLT<Eigen::Matrix<typename Function::Scalar, //
Function::NUM_PARAMETERS, //
Function::NUM_PARAMETERS>>>
class TinySolver {
public:
@ -139,41 +140,59 @@ class TinySolver {
NUM_RESIDUALS = Function::NUM_RESIDUALS,
NUM_PARAMETERS = Function::NUM_PARAMETERS
};
typedef typename Function::Scalar Scalar;
typedef typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1> Parameters;
using Scalar = typename Function::Scalar;
using Parameters = typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1>;
enum Status {
GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / (||x|| + eps)
COST_TOO_SMALL, // eps > ||f(x)||^2 / 2
// max_norm |J'(x) * f(x)| < gradient_tolerance
GRADIENT_TOO_SMALL,
// ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance)
RELATIVE_STEP_SIZE_TOO_SMALL,
// cost_threshold > ||f(x)||^2 / 2
COST_TOO_SMALL,
// num_iterations >= max_num_iterations
HIT_MAX_ITERATIONS,
// (new_cost - old_cost) < function_tolerance * old_cost
COST_CHANGE_TOO_SMALL,
// TODO(sameeragarwal): Deal with numerical failures.
};
struct Options {
Scalar gradient_tolerance = 1e-10; // eps > max(J'*f(x))
Scalar parameter_tolerance = 1e-8; // eps > ||dx|| / ||x||
Scalar cost_threshold = // eps > ||f(x)||
std::numeric_limits<Scalar>::epsilon();
Scalar initial_trust_region_radius = 1e4;
int max_num_iterations = 50;
// max_norm |J'(x) * f(x)| < gradient_tolerance
Scalar gradient_tolerance = 1e-10;
// ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance)
Scalar parameter_tolerance = 1e-8;
// (new_cost - old_cost) < function_tolerance * old_cost
Scalar function_tolerance = 1e-6;
// cost_threshold > ||f(x)||^2 / 2
Scalar cost_threshold = std::numeric_limits<Scalar>::epsilon();
Scalar initial_trust_region_radius = 1e4;
};
struct Summary {
Scalar initial_cost = -1; // 1/2 ||f(x)||^2
Scalar final_cost = -1; // 1/2 ||f(x)||^2
Scalar gradient_max_norm = -1; // max(J'f(x))
// 1/2 ||f(x_0)||^2
Scalar initial_cost = -1;
// 1/2 ||f(x)||^2
Scalar final_cost = -1;
// max_norm(J'f(x))
Scalar gradient_max_norm = -1;
int iterations = -1;
Status status = HIT_MAX_ITERATIONS;
};
bool Update(const Function& function, const Parameters& x) {
if (!function(x.data(), error_.data(), jacobian_.data())) {
if (!function(x.data(), residuals_.data(), jacobian_.data())) {
return false;
}
error_ = -error_;
residuals_ = -residuals_;
// On the first iteration, compute a diagonal (Jacobi) scaling
// matrix, which we store as a vector.
@ -192,9 +211,9 @@ class TinySolver {
// factorization.
jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal();
jtj_ = jacobian_.transpose() * jacobian_;
g_ = jacobian_.transpose() * error_;
g_ = jacobian_.transpose() * residuals_;
summary.gradient_max_norm = g_.array().abs().maxCoeff();
cost_ = error_.squaredNorm() / 2;
cost_ = residuals_.squaredNorm() / 2;
return true;
}
@ -231,7 +250,7 @@ class TinySolver {
const Scalar max_diagonal = 1e32;
for (int i = 0; i < lm_diagonal_.rows(); ++i) {
lm_diagonal_[i] = std::sqrt(
u * std::min(std::max(jtj_(i, i), min_diagonal), max_diagonal));
u * (std::min)((std::max)(jtj_(i, i), min_diagonal), max_diagonal));
jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i];
}
@ -253,10 +272,9 @@ class TinySolver {
// TODO(keir): Add proper handling of errors from user eval of cost
// functions.
function(&x_new_[0], &f_x_new_[0], NULL);
function(&x_new_[0], &f_x_new_[0], nullptr);
const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm());
// TODO(sameeragarwal): Better more numerically stable evaluation.
const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_);
@ -269,6 +287,12 @@ class TinySolver {
// model fits well.
x = x_new_;
if (std::abs(cost_change) < options.function_tolerance) {
cost_ = f_x_new_.squaredNorm() / 2;
summary.status = COST_CHANGE_TOO_SMALL;
break;
}
// TODO(sameeragarwal): Deal with failure.
Update(function, x);
if (summary.gradient_max_norm < options.gradient_tolerance) {
@ -282,16 +306,24 @@ class TinySolver {
}
Scalar tmp = Scalar(2 * rho - 1);
u = u * std::max(1 / 3., 1 - tmp * tmp * tmp);
u = u * (std::max)(Scalar(1 / 3.), Scalar(1) - tmp * tmp * tmp);
v = 2;
continue;
}
// Reject the update because either the normal equations failed to solve
// or the local linear model was not good (rho < 0). Instead, increase u
// to move closer to gradient descent.
u *= v;
v *= 2;
} else {
// Reject the update because either the normal equations failed to solve
// or the local linear model was not good (rho < 0).
// Additionally if the cost change is too small, then terminate.
if (std::abs(cost_change) < options.function_tolerance) {
// Terminate
summary.status = COST_CHANGE_TOO_SMALL;
break;
}
// Reduce the size of the trust region.
u *= v;
v *= 2;
}
}
summary.final_cost = cost_;
@ -307,7 +339,7 @@ class TinySolver {
LinearSolver linear_solver_;
Scalar cost_;
Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_;
Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> error_, f_x_new_;
Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> residuals_, f_x_new_;
Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_;
Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_;
@ -317,7 +349,7 @@ class TinySolver {
template <typename T>
struct enable_if<true, T> {
typedef T type;
using type = T;
};
// The number of parameters and residuals are dynamically sized.
@ -355,7 +387,7 @@ class TinySolver {
jacobi_scaling_.resize(num_parameters);
lm_diagonal_.resize(num_parameters);
lm_step_.resize(num_parameters);
error_.resize(num_residuals);
residuals_.resize(num_residuals);
f_x_new_.resize(num_residuals);
jacobian_.resize(num_residuals, num_parameters);
jtj_.resize(num_parameters, num_parameters);
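For orientation, a minimal sketch (not part of the upstream diff) of a functor driving TinySolver, which exercises the renamed residuals_ storage and the new function_tolerance / COST_CHANGE_TOO_SMALL termination; the f(x) = x^2 - 2 model and the tolerance value are made up.

#include <Eigen/Core>
#include "ceres/tiny_solver.h"

struct SqrtTwoResidual {
  using Scalar = double;
  enum { NUM_RESIDUALS = 1, NUM_PARAMETERS = 1 };
  bool operator()(const double* parameters,
                  double* residuals,
                  double* jacobian) const {
    const double x = parameters[0];
    residuals[0] = x * x - 2.0;
    if (jacobian != nullptr) {
      jacobian[0] = 2.0 * x;  // d(residual)/dx; 1x1, so layout is trivial
    }
    return true;
  }
};

void SolveSqrtTwo() {
  Eigen::Matrix<double, 1, 1> x;
  x(0) = 1.0;
  ceres::TinySolver<SqrtTwoResidual> solver;
  solver.options.function_tolerance = 1e-9;  // drives COST_CHANGE_TOO_SMALL
  solver.Solve(SqrtTwoResidual(), &x);       // x(0) converges towards sqrt(2)
}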

View File

@ -113,12 +113,12 @@ class TinySolverAutoDiffFunction {
// as a member a Jet type, which itself has a fixed-size Eigen type as member.
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
TinySolverAutoDiffFunction(const CostFunctor& cost_functor)
explicit TinySolverAutoDiffFunction(const CostFunctor& cost_functor)
: cost_functor_(cost_functor) {
Initialize<kNumResiduals>(cost_functor);
}
typedef T Scalar;
using Scalar = T;
enum {
NUM_PARAMETERS = kNumParameters,
NUM_RESIDUALS = kNumResiduals,
@ -127,7 +127,7 @@ class TinySolverAutoDiffFunction {
// This is similar to AutoDifferentiate(), but since there is only one
// parameter block it is easier to inline to avoid overhead.
bool operator()(const T* parameters, T* residuals, T* jacobian) const {
if (jacobian == NULL) {
if (jacobian == nullptr) {
// No jacobian requested, so just directly call the cost function with
// doubles, skipping jets and derivatives.
return cost_functor_(parameters, residuals);

View File

@ -75,7 +75,7 @@ template <int kNumResiduals = Eigen::Dynamic,
int kNumParameters = Eigen::Dynamic>
class TinySolverCostFunctionAdapter {
public:
typedef double Scalar;
using Scalar = double;
enum ComponentSizeType {
NUM_PARAMETERS = kNumParameters,
NUM_RESIDUALS = kNumResiduals
@ -85,7 +85,7 @@ class TinySolverCostFunctionAdapter {
// fixed-size Eigen types.
EIGEN_MAKE_ALIGNED_OPERATOR_NEW
TinySolverCostFunctionAdapter(const CostFunction& cost_function)
explicit TinySolverCostFunctionAdapter(const CostFunction& cost_function)
: cost_function_(cost_function) {
CHECK_EQ(cost_function_.parameter_block_sizes().size(), 1)
<< "Only CostFunctions with exactly one parameter blocks are allowed.";
@ -108,7 +108,7 @@ class TinySolverCostFunctionAdapter {
double* residuals,
double* jacobian) const {
if (!jacobian) {
return cost_function_.Evaluate(&parameters, residuals, NULL);
return cost_function_.Evaluate(&parameters, residuals, nullptr);
}
double* jacobians[1] = {row_major_jacobian_.data()};

View File

@ -40,7 +40,7 @@
#include <string>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
@ -186,6 +186,7 @@ enum SparseLinearAlgebraLibraryType {
enum DenseLinearAlgebraLibraryType {
EIGEN,
LAPACK,
CUDA,
};
// Logging options

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2019 Google Inc. All rights reserved.
// Copyright 2021 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -32,7 +32,7 @@
#define CERES_PUBLIC_VERSION_H_
#define CERES_VERSION_MAJOR 2
#define CERES_VERSION_MINOR 0
#define CERES_VERSION_MINOR 1
#define CERES_VERSION_REVISION 0
// Classic CPP stringification; the extra level of indirection allows the

View File

@ -29,11 +29,12 @@
// Author: alexs.mac@gmail.com (Alex Stewart)
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include "ceres/internal/config.h"
#ifndef CERES_NO_ACCELERATE_SPARSE
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
@ -196,17 +197,17 @@ template <typename Scalar>
LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
CompressedRowSparseMatrix* lhs, std::string* message) {
CHECK_EQ(lhs->storage_type(), StorageType());
if (lhs == NULL) {
*message = "Failure: Input lhs is NULL.";
if (lhs == nullptr) {
*message = "Failure: Input lhs is nullptr.";
return LINEAR_SOLVER_FATAL_ERROR;
}
typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
as_.CreateSparseMatrixTransposeView(lhs);
if (!symbolic_factor_) {
symbolic_factor_.reset(
new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
as_.AnalyzeCholesky(&as_lhs)));
symbolic_factor_ = std::make_unique<
typename SparseTypesTrait<Scalar>::SymbolicFactorization>(
as_.AnalyzeCholesky(&as_lhs));
if (symbolic_factor_->status != SparseStatusOK) {
*message = StringPrintf(
"Apple Accelerate Failure : Symbolic factorisation failed: %s",
@ -217,9 +218,9 @@ LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
}
if (!numeric_factor_) {
numeric_factor_.reset(
new typename SparseTypesTrait<Scalar>::NumericFactorization(
as_.Cholesky(&as_lhs, symbolic_factor_.get())));
numeric_factor_ = std::make_unique<
typename SparseTypesTrait<Scalar>::NumericFactorization>(
as_.Cholesky(&as_lhs, symbolic_factor_.get()));
} else {
// Recycle memory from previous numeric factorization.
as_.Cholesky(&as_lhs, numeric_factor_.get());
@ -265,7 +266,7 @@ template <typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
if (symbolic_factor_) {
SparseCleanup(*symbolic_factor_);
symbolic_factor_.reset();
symbolic_factor_ = nullptr;
}
}
@ -273,7 +274,7 @@ template <typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
if (numeric_factor_) {
SparseCleanup(*numeric_factor_);
numeric_factor_.reset();
numeric_factor_ = nullptr;
}
}

View File

@ -32,7 +32,7 @@
#define CERES_INTERNAL_ACCELERATE_SPARSE_H_
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
#include "ceres/internal/config.h"
#ifndef CERES_NO_ACCELERATE_SPARSE
@ -111,7 +111,7 @@ class AccelerateSparse {
// An implementation of SparseCholesky interface using Apple's Accelerate
// framework.
template <typename Scalar>
class AppleAccelerateCholesky : public SparseCholesky {
class AppleAccelerateCholesky final : public SparseCholesky {
public:
// Factory
static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);

View File

@ -44,7 +44,7 @@ namespace internal {
using std::string;
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
if (x != nullptr) {
for (int i = 0; i < size; ++i) {
if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
@ -55,7 +55,7 @@ bool IsArrayValid(const int size, const double* x) {
}
int FindInvalidValue(const int size, const double* x) {
if (x == NULL) {
if (x == nullptr) {
return size;
}
@ -69,7 +69,7 @@ int FindInvalidValue(const int size, const double* x) {
}
void InvalidateArray(const int size, double* x) {
if (x != NULL) {
if (x != nullptr) {
for (int i = 0; i < size; ++i) {
x[i] = kImpossibleValue;
}
@ -78,7 +78,7 @@ void InvalidateArray(const int size, double* x) {
void AppendArrayToString(const int size, const double* x, string* result) {
for (int i = 0; i < size; ++i) {
if (x == NULL) {
if (x == nullptr) {
StringAppendF(result, "Not Computed ");
} else {
if (x[i] == kImpossibleValue) {

View File

@ -45,29 +45,30 @@
#include <string>
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
// Fill the array x with an impossible value that the user code is
// never expected to compute.
CERES_EXPORT_INTERNAL void InvalidateArray(int size, double* x);
CERES_NO_EXPORT void InvalidateArray(int size, double* x);
// Check if all the entries of the array x are valid, i.e. all the
// values in the array should be finite and none of them should be
// equal to the "impossible" value used by InvalidateArray.
CERES_EXPORT_INTERNAL bool IsArrayValid(int size, const double* x);
CERES_NO_EXPORT bool IsArrayValid(int size, const double* x);
// If the array contains an invalid value, return the index for it,
// otherwise return size.
CERES_EXPORT_INTERNAL int FindInvalidValue(const int size, const double* x);
CERES_NO_EXPORT int FindInvalidValue(const int size, const double* x);
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
const double* x,
std::string* result);
// array pointer is nullptr, it is treated as an array of zeros.
CERES_NO_EXPORT void AppendArrayToString(const int size,
const double* x,
std::string* result);
// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
@ -82,9 +83,11 @@ CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
// gets mapped to
//
// [1 0 2 3 0 1 3]
CERES_EXPORT_INTERNAL void MapValuesToContiguousRange(int size, int* array);
CERES_NO_EXPORT void MapValuesToContiguousRange(int size, int* array);
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_ARRAY_UTILS_H_
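A tiny sketch (not part of the upstream diff) of the remapping described above; the input array is made up because the comment's own example is truncated by the hunk.

#include "ceres/array_utils.h"

void RemapExample() {
  int values[] = {9, 4, 4, 9, 7};
  // Sorted+uniqued values are {4, 7, 9}, so the array becomes {2, 0, 0, 2, 1}.
  ceres::internal::MapValuesToContiguousRange(5, values);
}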

View File

@ -53,7 +53,7 @@ void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
SparseMatrix* jacobian,
double** jacobians) {
// If the overall jacobian is not available, use the scratch space.
if (jacobian == NULL) {
if (jacobian == nullptr) {
scratch_evaluate_preparer_.Prepare(
residual_block, residual_block_index, jacobian, jacobians);
return;
@ -73,7 +73,7 @@ void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
// parameters. Instead, bump the pointer for active parameters only.
jacobian_block_offset++;
} else {
jacobians[j] = NULL;
jacobians[j] = nullptr;
}
}
}

View File

@ -36,6 +36,7 @@
#ifndef CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
#define CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
#include "ceres/internal/export.h"
#include "ceres/scratch_evaluate_preparer.h"
namespace ceres {
@ -44,7 +45,7 @@ namespace internal {
class ResidualBlock;
class SparseMatrix;
class BlockEvaluatePreparer {
class CERES_NO_EXPORT BlockEvaluatePreparer {
public:
// Using Init() instead of a constructor allows for allocating this structure
// with new[]. This is because C++ doesn't allow passing arguments to objects

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -47,10 +47,10 @@ BlockJacobiPreconditioner::BlockJacobiPreconditioner(
blocks[i] = bs->cols[i].size;
}
m_.reset(new BlockRandomAccessDiagonalMatrix(blocks));
m_ = std::make_unique<BlockRandomAccessDiagonalMatrix>(blocks);
}
BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {}
BlockJacobiPreconditioner::~BlockJacobiPreconditioner() = default;
bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
const double* D) {
@ -60,21 +60,20 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
for (int i = 0; i < bs->rows.size(); ++i) {
const int row_block_size = bs->rows[i].block.size;
const std::vector<Cell>& cells = bs->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
const int block_id = cells[j].block_id;
for (const auto& cell : cells) {
const int block_id = cell.block_id;
const int col_block_size = bs->cols[block_id].size;
int r, c, row_stride, col_stride;
CellInfo* cell_info =
m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
MatrixRef m(cell_info->values, row_stride, col_stride);
ConstMatrixRef b(
values + cells[j].position, row_block_size, col_block_size);
ConstMatrixRef b(values + cell.position, row_block_size, col_block_size);
m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
}
}
if (D != NULL) {
if (D != nullptr) {
// Add the diagonal.
int position = 0;
for (int i = 0; i < bs->cols.size(); ++i) {

View File

@ -34,7 +34,8 @@
#include <memory>
#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/preconditioner.h"
namespace ceres {
@ -53,7 +54,7 @@ struct CompressedRowBlockStructure;
// update the matrix by running Update(A, D). The values of the matrix A are
// inspected to construct the preconditioner. The vector D is applied as the
// D^TD diagonal term.
class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
class CERES_NO_EXPORT BlockJacobiPreconditioner
: public BlockSparseMatrixPreconditioner {
public:
// A must remain valid while the BlockJacobiPreconditioner is.
@ -61,7 +62,7 @@ class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
BlockJacobiPreconditioner(const BlockJacobiPreconditioner&) = delete;
void operator=(const BlockJacobiPreconditioner&) = delete;
virtual ~BlockJacobiPreconditioner();
~BlockJacobiPreconditioner() override;
// Preconditioner interface
void RightMultiply(const double* x, double* y) const final;
@ -78,4 +79,6 @@ class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -30,10 +30,13 @@
#include "ceres/block_jacobian_writer.h"
#include <algorithm>
#include <memory>
#include "ceres/block_evaluate_preparer.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
@ -66,8 +69,7 @@ void BuildJacobianLayout(const Program& program,
// matrix. Also compute the number of jacobian blocks.
int f_block_pos = 0;
int num_jacobian_blocks = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
ResidualBlock* residual_block = residual_blocks[i];
for (auto* residual_block : residual_blocks) {
const int num_residuals = residual_block->NumResiduals();
const int num_parameter_blocks = residual_block->NumParameterBlocks();
@ -78,7 +80,7 @@ void BuildJacobianLayout(const Program& program,
// Only count blocks for active parameters.
num_jacobian_blocks++;
if (parameter_block->index() < num_eliminate_blocks) {
f_block_pos += num_residuals * parameter_block->LocalSize();
f_block_pos += num_residuals * parameter_block->TangentSize();
}
}
}
@ -107,7 +109,7 @@ void BuildJacobianLayout(const Program& program,
continue;
}
const int jacobian_block_size =
num_residuals * parameter_block->LocalSize();
num_residuals * parameter_block->TangentSize();
if (parameter_block_index < num_eliminate_blocks) {
*jacobian_pos = e_block_pos;
e_block_pos += jacobian_block_size;
@ -136,20 +138,20 @@ BlockJacobianWriter::BlockJacobianWriter(const Evaluator::Options& options,
// Create evaluate preparers that point directly into the final jacobian. This
// makes the final Write() a nop.
BlockEvaluatePreparer* BlockJacobianWriter::CreateEvaluatePreparers(
int num_threads) {
std::unique_ptr<BlockEvaluatePreparer[]>
BlockJacobianWriter::CreateEvaluatePreparers(int num_threads) {
int max_derivatives_per_residual_block =
program_->MaxDerivativesPerResidualBlock();
BlockEvaluatePreparer* preparers = new BlockEvaluatePreparer[num_threads];
auto preparers = std::make_unique<BlockEvaluatePreparer[]>(num_threads);
for (int i = 0; i < num_threads; i++) {
preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
}
return preparers;
}
SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
std::unique_ptr<SparseMatrix> BlockJacobianWriter::CreateJacobian() const {
auto* bs = new CompressedRowBlockStructure;
const vector<ParameterBlock*>& parameter_blocks =
program_->parameter_blocks();
@ -159,7 +161,7 @@ SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
for (int i = 0, cursor = 0; i < parameter_blocks.size(); ++i) {
CHECK_NE(parameter_blocks[i]->index(), -1);
CHECK(!parameter_blocks[i]->IsConstant());
bs->cols[i].size = parameter_blocks[i]->LocalSize();
bs->cols[i].size = parameter_blocks[i]->TangentSize();
bs->cols[i].position = cursor;
cursor += bs->cols[i].size;
}
@ -201,12 +203,10 @@ SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
}
}
sort(row->cells.begin(), row->cells.end(), CellLessThan);
std::sort(row->cells.begin(), row->cells.end(), CellLessThan);
}
BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
CHECK(jacobian != nullptr);
return jacobian;
return std::make_unique<BlockSparseMatrix>(bs);
}
} // namespace internal

View File

@ -38,10 +38,11 @@
#ifndef CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
#define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
#include <memory>
#include <vector>
#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
@ -51,7 +52,7 @@ class Program;
class SparseMatrix;
// TODO(sameeragarwal): This class needs documentation.
class BlockJacobianWriter {
class CERES_NO_EXPORT BlockJacobianWriter {
public:
BlockJacobianWriter(const Evaluator::Options& options, Program* program);
@ -59,9 +60,10 @@ class BlockJacobianWriter {
// Create evaluate preparers that point directly into the final jacobian.
// This makes the final Write() a nop.
BlockEvaluatePreparer* CreateEvaluatePreparers(int num_threads);
std::unique_ptr<BlockEvaluatePreparer[]> CreateEvaluatePreparers(
int num_threads);
SparseMatrix* CreateJacobian() const;
std::unique_ptr<SparseMatrix> CreateJacobian() const;
void Write(int /* residual_id */,
int /* residual_offset */,

View File

@ -48,9 +48,9 @@ BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
num_rows_ += blocks[i];
}
values_.reset(new double[num_rows_ * num_rows_]);
values_ = std::make_unique<double[]>(num_rows_ * num_rows_);
cell_infos_.reset(new CellInfo[num_blocks * num_blocks]);
cell_infos_ = std::make_unique<CellInfo[]>(num_blocks * num_blocks);
for (int i = 0; i < num_blocks * num_blocks; ++i) {
cell_infos_[i].values = values_.get();
}
@ -60,7 +60,7 @@ BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {}
BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() = default;
CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
const int col_block_id,

View File

@ -35,7 +35,8 @@
#include <vector>
#include "ceres/block_random_access_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
@ -46,11 +47,11 @@ namespace internal {
// num_rows x num_cols.
//
// This class is NOT thread safe. Since all n^2 cells are stored,
// GetCell never returns NULL for any (row_block_id, col_block_id)
// GetCell never returns nullptr for any (row_block_id, col_block_id)
// pair.
//
// ReturnCell is a nop.
class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
class CERES_NO_EXPORT BlockRandomAccessDenseMatrix
: public BlockRandomAccessMatrix {
public:
// blocks is a vector of block sizes. The resulting matrix has
@ -61,7 +62,7 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDenseMatrix();
~BlockRandomAccessDenseMatrix() override;
// BlockRandomAccessMatrix interface.
CellInfo* GetCell(int row_block_id,
@ -94,4 +95,6 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -31,12 +31,13 @@
#include "ceres/block_random_access_diagonal_matrix.h"
#include <algorithm>
#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "Eigen/Dense"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/stl_util.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@ -57,16 +58,17 @@ BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
int num_cols = 0;
int num_nonzeros = 0;
vector<int> block_positions;
for (int i = 0; i < blocks_.size(); ++i) {
for (int block_size : blocks_) {
block_positions.push_back(num_cols);
num_cols += blocks_[i];
num_nonzeros += blocks_[i] * blocks_[i];
num_cols += block_size;
num_nonzeros += block_size * block_size;
}
VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
<< num_nonzeros;
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
tsm_ =
std::make_unique<TripletSparseMatrix>(num_cols, num_cols, num_nonzeros);
tsm_->set_num_nonzeros(num_nonzeros);
int* rows = tsm_->mutable_rows();
int* cols = tsm_->mutable_cols();
@ -99,7 +101,7 @@ CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
int* row_stride,
int* col_stride) {
if (row_block_id != col_block_id) {
return NULL;
return nullptr;
}
const int stride = blocks_[row_block_id];
@ -121,8 +123,7 @@ void BlockRandomAccessDiagonalMatrix::SetZero() {
void BlockRandomAccessDiagonalMatrix::Invert() {
double* values = tsm_->mutable_values();
for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];
for (int block_size : blocks_) {
MatrixRef block(values, block_size, block_size);
block = block.selfadjointView<Eigen::Upper>().llt().solve(
Matrix::Identity(block_size, block_size));
@ -135,8 +136,7 @@ void BlockRandomAccessDiagonalMatrix::RightMultiply(const double* x,
CHECK(x != nullptr);
CHECK(y != nullptr);
const double* values = tsm_->values();
for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];
for (int block_size : blocks_) {
ConstMatrixRef block(values, block_size, block_size);
VectorRef(y, block_size).noalias() += block * ConstVectorRef(x, block_size);
x += block_size;

View File

@ -37,7 +37,8 @@
#include <vector>
#include "ceres/block_random_access_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@ -46,7 +47,7 @@ namespace internal {
// A thread safe block diagonal matrix implementation of
// BlockRandomAccessMatrix.
class CERES_EXPORT_INTERNAL BlockRandomAccessDiagonalMatrix
class CERES_NO_EXPORT BlockRandomAccessDiagonalMatrix
: public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes.
@ -57,7 +58,7 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessDiagonalMatrix
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDiagonalMatrix();
~BlockRandomAccessDiagonalMatrix() override;
// BlockRandomAccessMatrix Interface.
CellInfo* GetCell(int row_block_id,
@ -98,4 +99,6 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessDiagonalMatrix
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_

View File

@ -33,7 +33,7 @@
namespace ceres {
namespace internal {
BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {}
BlockRandomAccessMatrix::~BlockRandomAccessMatrix() = default;
} // namespace internal
} // namespace ceres

View File

@ -35,7 +35,7 @@
#include <mutex>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
@ -62,7 +62,7 @@ namespace internal {
//
// There is no requirement that all cells be present, i.e. the matrix
// itself can be block sparse. When a cell is not present, the GetCell
// method will return a NULL pointer.
// method will return a nullptr.
//
// There is no requirement about how the cells are stored beyond that
// form a dense submatrix of a larger dense matrix. Like everywhere
@ -77,7 +77,7 @@ namespace internal {
// &row, &col,
// &row_stride, &col_stride);
//
// if (cell != NULL) {
// if (cell != nullptr) {
// MatrixRef m(cell->values, row_stride, col_stride);
// std::lock_guard<std::mutex> l(&cell->m);
// m.block(row, col, row_block_size, col_block_size) = ...
@ -85,21 +85,21 @@ namespace internal {
// Structure to carry a pointer to the array containing a cell and the
// mutex guarding it.
struct CellInfo {
CellInfo() : values(nullptr) {}
struct CERES_NO_EXPORT CellInfo {
CellInfo() = default;
explicit CellInfo(double* values) : values(values) {}
double* values;
double* values{nullptr};
std::mutex m;
};
class CERES_EXPORT_INTERNAL BlockRandomAccessMatrix {
class CERES_NO_EXPORT BlockRandomAccessMatrix {
public:
virtual ~BlockRandomAccessMatrix();
// If the cell (row_block_id, col_block_id) is present, then return
// a CellInfo with a pointer to the dense matrix containing it,
// otherwise return NULL. The dense matrix containing this cell has
// otherwise return nullptr. The dense matrix containing this cell has
// size row_stride, col_stride and the cell is located at position
// (row, col) within this matrix.
//

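// A usage sketch of the GetCell() contract documented above. Note that
// std::lock_guard is constructed from a mutex reference (cell->m), not a
// pointer. AddToCellTopLeft is an illustrative helper, and the row-major Map
// is an assumption matching the layout ceres::internal::MatrixRef uses.
#include <mutex>
#include <Eigen/Dense>
#include "ceres/block_random_access_matrix.h"

void AddToCellTopLeft(ceres::internal::BlockRandomAccessMatrix* matrix,
                      int row_block_id, int col_block_id, double value) {
  int row, col, row_stride, col_stride;
  ceres::internal::CellInfo* cell = matrix->GetCell(
      row_block_id, col_block_id, &row, &col, &row_stride, &col_stride);
  if (cell == nullptr) {
    return;  // The matrix is block sparse; this cell is not present.
  }
  std::lock_guard<std::mutex> lock(cell->m);
  Eigen::Map<
      Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>>
      m(cell->values, row_stride, col_stride);
  m(row, col) += value;  // Top-left entry of this cell within the dense block.
}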
View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -36,7 +36,7 @@
#include <utility>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
@ -58,9 +58,9 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
// rows/columns.
int num_cols = 0;
block_positions_.reserve(blocks_.size());
for (int i = 0; i < blocks_.size(); ++i) {
for (int block_size : blocks_) {
block_positions_.push_back(num_cols);
num_cols += blocks_[i];
num_cols += block_size;
}
// Count the number of scalar non-zero entries and build the layout
@ -76,7 +76,8 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
<< num_nonzeros;
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
tsm_ =
std::make_unique<TripletSparseMatrix>(num_cols, num_cols, num_nonzeros);
tsm_->set_num_nonzeros(num_nonzeros);
int* rows = tsm_->mutable_rows();
int* cols = tsm_->mutable_cols();
@ -86,7 +87,7 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
for (const auto& block_pair : block_pairs) {
const int row_block_size = blocks_[block_pair.first];
const int col_block_size = blocks_[block_pair.second];
cell_values_.push_back(make_pair(block_pair, values + pos));
cell_values_.emplace_back(block_pair, values + pos);
layout_[IntPairToLong(block_pair.first, block_pair.second)] =
new CellInfo(values + pos);
pos += row_block_size * col_block_size;
@ -129,7 +130,7 @@ CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
const LayoutType::iterator it =
layout_.find(IntPairToLong(row_block_id, col_block_id));
if (it == layout_.end()) {
return NULL;
return nullptr;
}
// Each cell is stored contiguously as its own little dense matrix.

View File

@ -39,7 +39,8 @@
#include <vector>
#include "ceres/block_random_access_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
#include "ceres/small_blas.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@ -51,7 +52,7 @@ namespace internal {
// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
// for doing the actual storage. This class augments this matrix with
// an unordered_map that allows random read/write access.
class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
class CERES_NO_EXPORT BlockRandomAccessSparseMatrix
: public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes. block_pairs is a set of
@ -65,7 +66,7 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessSparseMatrix();
~BlockRandomAccessSparseMatrix() override;
// BlockRandomAccessMatrix Interface.
CellInfo* GetCell(int row_block_id,
@ -111,7 +112,7 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
typedef std::unordered_map<long int, CellInfo*> LayoutType;
using LayoutType = std::unordered_map<long, CellInfo*>;
LayoutType layout_;
// In order traversal of contents of the matrix. This allows us to
@ -127,4 +128,6 @@ class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_

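// The layout_ map above keys each cell by a single integer derived from the
// (row_block_id, col_block_id) pair via IntPairToLong. A generic packing of
// that kind (not necessarily the scheme IntPairToLong actually uses):
#include <cstdint>

inline int64_t PackBlockPair(int row_block_id, int col_block_id) {
  // High 32 bits: row block id, low 32 bits: column block id.
  return (static_cast<int64_t>(row_block_id) << 32) |
         static_cast<uint32_t>(col_block_id);
}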
View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -32,6 +32,7 @@
#include <algorithm>
#include <cstddef>
#include <memory>
#include <vector>
#include "ceres/block_structure.h"
@ -46,8 +47,6 @@ namespace internal {
using std::vector;
BlockSparseMatrix::~BlockSparseMatrix() {}
BlockSparseMatrix::BlockSparseMatrix(
CompressedRowBlockStructure* block_structure)
: num_rows_(0),
@ -57,8 +56,8 @@ BlockSparseMatrix::BlockSparseMatrix(
CHECK(block_structure_ != nullptr);
// Count the number of columns in the matrix.
for (int i = 0; i < block_structure_->cols.size(); ++i) {
num_cols_ += block_structure_->cols[i].size;
for (auto& col : block_structure_->cols) {
num_cols_ += col.size;
}
// Count the number of non-zero entries and the number of rows in
@ -68,8 +67,8 @@ BlockSparseMatrix::BlockSparseMatrix(
num_rows_ += row_block_size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
num_nonzeros_ += col_block_size * row_block_size;
}
@ -80,7 +79,7 @@ BlockSparseMatrix::BlockSparseMatrix(
CHECK_GE(num_nonzeros_, 0);
VLOG(2) << "Allocating values array with " << num_nonzeros_ * sizeof(double)
<< " bytes."; // NOLINT
values_.reset(new double[num_nonzeros_]);
values_ = std::make_unique<double[]>(num_nonzeros_);
max_num_nonzeros_ = num_nonzeros_;
CHECK(values_ != nullptr);
}
@ -97,12 +96,12 @@ void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values_.get() + cells[j].position,
values_.get() + cell.position,
row_block_size,
col_block_size,
x + col_block_pos,
@ -119,12 +118,12 @@ void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values_.get() + cells[j].position,
values_.get() + cell.position,
row_block_size,
col_block_size,
x + row_block_pos,
@ -139,12 +138,12 @@ void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
const MatrixRef m(
values_.get() + cells[j].position, row_block_size, col_block_size);
values_.get() + cell.position, row_block_size, col_block_size);
VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
}
}
@ -156,12 +155,12 @@ void BlockSparseMatrix::ScaleColumns(const double* scale) {
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixRef m(
values_.get() + cells[j].position, row_block_size, col_block_size);
values_.get() + cell.position, row_block_size, col_block_size);
m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
}
}
@ -178,11 +177,11 @@ void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
int jac_pos = cell.position;
m.block(row_block_pos, col_block_pos, row_block_size, col_block_size) +=
MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
}
@ -201,11 +200,11 @@ void BlockSparseMatrix::ToTripletSparseMatrix(
int row_block_pos = block_structure_->rows[i].block.position;
int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
int col_block_id = cell.block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
int jac_pos = cell.position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
matrix->mutable_rows()[jac_pos] = row_block_pos + r;
@ -230,11 +229,11 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
const int row_block_pos = block_structure_->rows[i].block.position;
const int row_block_size = block_structure_->rows[i].block.size;
const vector<Cell>& cells = block_structure_->rows[i].cells;
for (int j = 0; j < cells.size(); ++j) {
const int col_block_id = cells[j].block_id;
for (const auto& cell : cells) {
const int col_block_id = cell.block_id;
const int col_block_size = block_structure_->cols[col_block_id].size;
const int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
int jac_pos = cell.position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c) {
fprintf(file,
@ -248,10 +247,10 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
}
}
BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
std::unique_ptr<BlockSparseMatrix> BlockSparseMatrix::CreateDiagonalMatrix(
const double* diagonal, const std::vector<Block>& column_blocks) {
// Create the block structure for the diagonal matrix.
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
auto* bs = new CompressedRowBlockStructure();
bs->cols = column_blocks;
int position = 0;
bs->rows.resize(column_blocks.size(), CompressedRow(1));
@ -265,13 +264,13 @@ BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
}
// Create the BlockSparseMatrix with the given block structure.
BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
auto matrix = std::make_unique<BlockSparseMatrix>(bs);
matrix->SetZero();
// Fill the values array of the block sparse matrix.
double* values = matrix->mutable_values();
for (int i = 0; i < column_blocks.size(); ++i) {
const int size = column_blocks[i].size;
for (const auto& column_block : column_blocks) {
const int size = column_block.size;
for (int j = 0; j < size; ++j) {
// (j + 1) * size is compact way of accessing the (j,j) entry.
values[j * (size + 1)] = diagonal[j];
@ -308,9 +307,10 @@ void BlockSparseMatrix::AppendRows(const BlockSparseMatrix& m) {
}
if (num_nonzeros_ > max_num_nonzeros_) {
double* new_values = new double[num_nonzeros_];
std::copy(values_.get(), values_.get() + old_num_nonzeros, new_values);
values_.reset(new_values);
std::unique_ptr<double[]> new_values =
std::make_unique<double[]>(num_nonzeros_);
std::copy_n(values_.get(), old_num_nonzeros, new_values.get());
values_ = std::move(new_values);
max_num_nonzeros_ = num_nonzeros_;
}
@ -337,7 +337,7 @@ void BlockSparseMatrix::DeleteRowBlocks(const int delta_row_blocks) {
block_structure_->rows.resize(num_row_blocks - delta_row_blocks);
}
BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
std::unique_ptr<BlockSparseMatrix> BlockSparseMatrix::CreateRandomMatrix(
const BlockSparseMatrix::RandomMatrixOptions& options) {
CHECK_GT(options.num_row_blocks, 0);
CHECK_GT(options.min_row_block_size, 0);
@ -346,7 +346,7 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
CHECK_GT(options.block_density, 0.0);
CHECK_LE(options.block_density, 1.0);
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
auto* bs = new CompressedRowBlockStructure();
if (options.col_blocks.empty()) {
CHECK_GT(options.num_col_blocks, 0);
CHECK_GT(options.min_col_block_size, 0);
@ -360,7 +360,7 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
const int delta_block_size =
Uniform(options.max_col_block_size - options.min_col_block_size);
const int col_block_size = options.min_col_block_size + delta_block_size;
bs->cols.push_back(Block(col_block_size, col_block_position));
bs->cols.emplace_back(col_block_size, col_block_position);
col_block_position += col_block_size;
}
} else {
@ -377,7 +377,7 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
const int delta_block_size =
Uniform(options.max_row_block_size - options.min_row_block_size);
const int row_block_size = options.min_row_block_size + delta_block_size;
bs->rows.push_back(CompressedRow());
bs->rows.emplace_back();
CompressedRow& row = bs->rows.back();
row.block.size = row_block_size;
row.block.position = row_block_position;
@ -385,7 +385,7 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
for (int c = 0; c < bs->cols.size(); ++c) {
if (RandDouble() > options.block_density) continue;
row.cells.push_back(Cell());
row.cells.emplace_back();
Cell& cell = row.cells.back();
cell.block_id = c;
cell.position = value_position;
@ -395,7 +395,7 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
}
}
BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
auto matrix = std::make_unique<BlockSparseMatrix>(bs);
double* values = matrix->mutable_values();
for (int i = 0; i < matrix->num_nonzeros(); ++i) {
values[i] = RandNormal();

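// With CreateDiagonalMatrix() and CreateRandomMatrix() above now returning
// std::unique_ptr, ownership is explicit at the call site and no manual delete
// is needed. A minimal caller sketch; `diagonal` is assumed to hold one entry
// per scalar column, sized correctly by the caller:
#include <memory>
#include <vector>
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"

int DiagonalNonZeros(const double* diagonal,
                     const std::vector<ceres::internal::Block>& column_blocks) {
  std::unique_ptr<ceres::internal::BlockSparseMatrix> d =
      ceres::internal::BlockSparseMatrix::CreateDiagonalMatrix(diagonal,
                                                               column_blocks);
  return d->num_nonzeros();  // `d` is destroyed automatically on return.
}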
View File

@ -37,8 +37,9 @@
#include <memory>
#include "ceres/block_structure.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/sparse_matrix.h"
namespace ceres {
@ -54,7 +55,7 @@ class TripletSparseMatrix;
//
// internal/ceres/block_structure.h
//
class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
class CERES_NO_EXPORT BlockSparseMatrix final : public SparseMatrix {
public:
// Construct a block sparse matrix with a fully initialized
// CompressedRowBlockStructure objected. The matrix takes over
@ -68,8 +69,6 @@ class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
BlockSparseMatrix(const BlockSparseMatrix&) = delete;
void operator=(const BlockSparseMatrix&) = delete;
virtual ~BlockSparseMatrix();
// Implementation of SparseMatrix interface.
void SetZero() final;
void RightMultiply(const double* x, double* y) const final;
@ -97,7 +96,7 @@ class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
// Delete the bottom delta_rows_blocks.
void DeleteRowBlocks(int delta_row_blocks);
static BlockSparseMatrix* CreateDiagonalMatrix(
static std::unique_ptr<BlockSparseMatrix> CreateDiagonalMatrix(
const double* diagonal, const std::vector<Block>& column_blocks);
struct RandomMatrixOptions {
@ -122,9 +121,7 @@ class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
// Create a random BlockSparseMatrix whose entries are normally
// distributed and whose structure is determined by
// RandomMatrixOptions.
//
// Caller owns the result.
static BlockSparseMatrix* CreateRandomMatrix(
static std::unique_ptr<BlockSparseMatrix> CreateRandomMatrix(
const RandomMatrixOptions& options);
private:
@ -142,9 +139,9 @@ class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
//
// BlockSparseDataMatrix a struct that carries these two bits of
// information
class BlockSparseMatrixData {
class CERES_NO_EXPORT BlockSparseMatrixData {
public:
BlockSparseMatrixData(const BlockSparseMatrix& m)
explicit BlockSparseMatrixData(const BlockSparseMatrix& m)
: block_structure_(m.block_structure()), values_(m.values()){};
BlockSparseMatrixData(const CompressedRowBlockStructure* block_structure,
@ -164,4 +161,6 @@ class BlockSparseMatrixData {
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_

View File

@ -41,54 +41,54 @@
#include <cstdint>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
typedef int32_t BlockSize;
using BlockSize = int32_t;
struct Block {
Block() : size(-1), position(-1) {}
struct CERES_NO_EXPORT Block {
Block() = default;
Block(int size_, int position_) : size(size_), position(position_) {}
BlockSize size;
int position; // Position along the row/column.
BlockSize size{-1};
int position{-1}; // Position along the row/column.
};
struct Cell {
Cell() : block_id(-1), position(-1) {}
struct CERES_NO_EXPORT Cell {
Cell() = default;
Cell(int block_id_, int position_)
: block_id(block_id_), position(position_) {}
// Column or row block id as the case maybe.
int block_id;
int block_id{-1};
// Where in the values array of the jacobian is this cell located.
int position;
int position{-1};
};
// Order cell by their block_id;
bool CellLessThan(const Cell& lhs, const Cell& rhs);
CERES_NO_EXPORT bool CellLessThan(const Cell& lhs, const Cell& rhs);
struct CompressedList {
CompressedList() {}
struct CERES_NO_EXPORT CompressedList {
CompressedList() = default;
// Construct a CompressedList with the cells containing num_cells
// entries.
CompressedList(int num_cells) : cells(num_cells) {}
explicit CompressedList(int num_cells) : cells(num_cells) {}
Block block;
std::vector<Cell> cells;
};
typedef CompressedList CompressedRow;
typedef CompressedList CompressedColumn;
using CompressedRow = CompressedList;
using CompressedColumn = CompressedList;
struct CompressedRowBlockStructure {
struct CERES_NO_EXPORT CompressedRowBlockStructure {
std::vector<Block> cols;
std::vector<CompressedRow> rows;
};
struct CompressedColumnBlockStructure {
struct CERES_NO_EXPORT CompressedColumnBlockStructure {
std::vector<Block> rows;
std::vector<CompressedColumn> cols;
};

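// The in-class initializers above give default-constructed blocks and cells the
// same sentinel values the old constructors did, while the two-argument Block
// constructor is what bs->cols.emplace_back(size, position) relies on.
// A standalone illustration (values are arbitrary):
#include "ceres/block_structure.h"

void BuildOneColumnBlock(ceres::internal::CompressedRowBlockStructure* bs) {
  ceres::internal::Block defaulted;  // size == -1, position == -1.
  (void)defaulted;
  bs->cols.emplace_back(/*size=*/3, /*position=*/0);  // Block(int, int).
}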
View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -35,6 +35,7 @@
#include "ceres/c_api.h"
#include <iostream>
#include <memory>
#include <string>
#include <vector>
@ -64,7 +65,7 @@ void ceres_free_problem(ceres_problem_t* problem) {
// This cost function wraps a C-level function pointer from the user, to bridge
// between C and C++.
class CallbackCostFunction : public ceres::CostFunction {
class CERES_NO_EXPORT CallbackCostFunction final : public ceres::CostFunction {
public:
CallbackCostFunction(ceres_cost_function_t cost_function,
void* user_data,
@ -78,8 +79,6 @@ class CallbackCostFunction : public ceres::CostFunction {
}
}
virtual ~CallbackCostFunction() {}
bool Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const final {
@ -94,7 +93,7 @@ class CallbackCostFunction : public ceres::CostFunction {
// This loss function wraps a C-level function pointer from the user, to bridge
// between C and C++.
class CallbackLossFunction : public ceres::LossFunction {
class CallbackLossFunction final : public ceres::LossFunction {
public:
explicit CallbackLossFunction(ceres_loss_function_t loss_function,
void* user_data)
@ -146,30 +145,31 @@ ceres_residual_block_id_t* ceres_problem_add_residual_block(
int num_parameter_blocks,
int* parameter_block_sizes,
double** parameters) {
Problem* ceres_problem = reinterpret_cast<Problem*>(problem);
auto* ceres_problem = reinterpret_cast<Problem*>(problem);
ceres::CostFunction* callback_cost_function =
new CallbackCostFunction(cost_function,
cost_function_data,
num_residuals,
num_parameter_blocks,
parameter_block_sizes);
auto callback_cost_function =
std::make_unique<CallbackCostFunction>(cost_function,
cost_function_data,
num_residuals,
num_parameter_blocks,
parameter_block_sizes);
ceres::LossFunction* callback_loss_function = NULL;
if (loss_function != NULL) {
callback_loss_function =
new CallbackLossFunction(loss_function, loss_function_data);
std::unique_ptr<ceres::LossFunction> callback_loss_function;
if (loss_function != nullptr) {
callback_loss_function = std::make_unique<CallbackLossFunction>(
loss_function, loss_function_data);
}
std::vector<double*> parameter_blocks(parameters,
parameters + num_parameter_blocks);
return reinterpret_cast<ceres_residual_block_id_t*>(
ceres_problem->AddResidualBlock(
callback_cost_function, callback_loss_function, parameter_blocks));
ceres_problem->AddResidualBlock(callback_cost_function.release(),
callback_loss_function.release(),
parameter_blocks));
}
void ceres_solve(ceres_problem_t* c_problem) {
Problem* problem = reinterpret_cast<Problem*>(c_problem);
auto* problem = reinterpret_cast<Problem*>(c_problem);
// TODO(keir): Obviously, this way of setting options won't scale or last.
// Instead, figure out a way to specify some of the options without

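// The C API change above follows a build-with-unique_ptr, release-on-handoff
// pattern: the wrappers are exception-safe while being assembled, and the raw
// pointers are released only when AddResidualBlock takes ownership. A minimal
// standalone sketch of the same pattern (Widget/Registry are illustrative
// stand-ins, not Ceres types):
#include <memory>

struct Widget {};

struct Registry {
  // Takes ownership of the raw pointer, like Problem::AddResidualBlock.
  void Add(Widget* w) { owned_.reset(w); }
  std::unique_ptr<Widget> owned_;
};

void Register(Registry* registry) {
  auto widget = std::make_unique<Widget>();
  registry->Add(widget.release());  // Hand off ownership at the last moment.
}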
View File

@ -30,6 +30,7 @@
#include "ceres/callbacks.h"
#include <algorithm>
#include <iostream> // NO LINT
#include "ceres/program.h"
@ -45,7 +46,7 @@ StateUpdatingCallback::StateUpdatingCallback(Program* program,
double* parameters)
: program_(program), parameters_(parameters) {}
StateUpdatingCallback::~StateUpdatingCallback() {}
StateUpdatingCallback::~StateUpdatingCallback() = default;
CallbackReturnType StateUpdatingCallback::operator()(
const IterationSummary& summary) {
@ -64,14 +65,12 @@ GradientProblemSolverStateUpdatingCallback::
user_parameters_(user_parameters) {}
GradientProblemSolverStateUpdatingCallback::
~GradientProblemSolverStateUpdatingCallback() {}
~GradientProblemSolverStateUpdatingCallback() = default;
CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
const IterationSummary& summary) {
if (summary.step_is_successful) {
std::copy(internal_parameters_,
internal_parameters_ + num_parameters_,
user_parameters_);
std::copy_n(internal_parameters_, num_parameters_, user_parameters_);
}
return SOLVER_CONTINUE;
}
@ -80,44 +79,42 @@ LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
const bool log_to_stdout)
: minimizer_type(minimizer_type), log_to_stdout_(log_to_stdout) {}
LoggingCallback::~LoggingCallback() {}
LoggingCallback::~LoggingCallback() = default;
CallbackReturnType LoggingCallback::operator()(
const IterationSummary& summary) {
string output;
if (minimizer_type == LINE_SEARCH) {
const char* kReportRowFormat =
"% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
"s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e";
output = StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.step_size,
summary.line_search_function_evaluations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
output = StringPrintf(
"% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e s:% 3.2e e:% 3d it:% 3.2e "
"tt:% 3.2e",
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.step_size,
summary.line_search_function_evaluations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
} else if (minimizer_type == TRUST_REGION) {
// clang-format off
if (summary.iteration == 0) {
output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n"; // NOLINT
}
const char* kReportRowFormat =
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e"; // NOLINT
// clang-format on
output += StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.relative_decrease,
summary.trust_region_radius,
summary.linear_solver_iterations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
output += StringPrintf(
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e", // NOLINT
// clang-format on
summary.iteration,
summary.cost,
summary.cost_change,
summary.gradient_max_norm,
summary.step_norm,
summary.relative_decrease,
summary.trust_region_radius,
summary.linear_solver_iterations,
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
} else {
LOG(FATAL) << "Unknown minimizer type.";
}

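// std::copy_n(first, n, dest), used in the callback above, is the counted form
// of std::copy; the two calls below have the same effect (the helper function
// is an illustrative stand-in, not Ceres code):
#include <algorithm>

void CopyParameters(const double* internal_parameters, int num_parameters,
                    double* user_parameters) {
  std::copy_n(internal_parameters, num_parameters, user_parameters);
  // Equivalent: std::copy(internal_parameters,
  //                       internal_parameters + num_parameters,
  //                       user_parameters);
}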
View File

@ -33,7 +33,7 @@
#include <string>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "ceres/iteration_callback.h"
namespace ceres {
@ -43,10 +43,10 @@ class Program;
// Callback for updating the externally visible state of parameter
// blocks.
class StateUpdatingCallback : public IterationCallback {
class CERES_NO_EXPORT StateUpdatingCallback final : public IterationCallback {
public:
StateUpdatingCallback(Program* program, double* parameters);
virtual ~StateUpdatingCallback();
~StateUpdatingCallback() override;
CallbackReturnType operator()(const IterationSummary& summary) final;
private:
@ -56,12 +56,13 @@ class StateUpdatingCallback : public IterationCallback {
// Callback for updating the externally visible state of the
// parameters vector for GradientProblemSolver.
class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
class CERES_NO_EXPORT GradientProblemSolverStateUpdatingCallback final
: public IterationCallback {
public:
GradientProblemSolverStateUpdatingCallback(int num_parameters,
const double* internal_parameters,
double* user_parameters);
virtual ~GradientProblemSolverStateUpdatingCallback();
~GradientProblemSolverStateUpdatingCallback() override;
CallbackReturnType operator()(const IterationSummary& summary) final;
private:
@ -72,10 +73,10 @@ class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
// Callback for logging the state of the minimizer to STDERR or
// STDOUT depending on the user's preferences and logging level.
class LoggingCallback : public IterationCallback {
class CERES_NO_EXPORT LoggingCallback final : public IterationCallback {
public:
LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
virtual ~LoggingCallback();
~LoggingCallback() override;
CallbackReturnType operator()(const IterationSummary& summary) final;
private:

View File

@ -35,6 +35,7 @@
#include <unordered_set>
#include "ceres/graph.h"
#include "ceres/internal/export.h"
#include "ceres/map_util.h"
#include "glog/logging.h"
@ -43,13 +44,11 @@ namespace internal {
using std::vector;
typedef std::unordered_map<int, int> IntMap;
typedef std::unordered_set<int> IntSet;
using IntMap = std::unordered_map<int, int>;
using IntSet = std::unordered_set<int>;
class CanonicalViewsClustering {
class CERES_NO_EXPORT CanonicalViewsClustering {
public:
CanonicalViewsClustering() {}
// Compute the canonical views clustering of the vertices of the
// graph. centers will contain the vertices that are the identified
// as the canonical views/cluster centers, and membership is a map
@ -85,11 +84,11 @@ void ComputeCanonicalViewsClustering(
const WeightedGraph<int>& graph,
vector<int>* centers,
IntMap* membership) {
time_t start_time = time(NULL);
time_t start_time = time(nullptr);
CanonicalViewsClustering cv;
cv.ComputeClustering(options, graph, centers, membership);
VLOG(2) << "Canonical views clustering time (secs): "
<< time(NULL) - start_time;
<< time(nullptr) - start_time;
}
// Implementation of CanonicalViewsClustering
@ -107,7 +106,7 @@ void CanonicalViewsClustering::ComputeClustering(
IntSet valid_views;
FindValidViews(&valid_views);
while (valid_views.size() > 0) {
while (!valid_views.empty()) {
// Find the next best canonical view.
double best_difference = -std::numeric_limits<double>::max();
int best_view = 0;
@ -174,9 +173,9 @@ double CanonicalViewsClustering::ComputeClusteringQualityDifference(
difference -= options_.size_penalty_weight;
// Orthogonality.
for (int i = 0; i < centers.size(); ++i) {
for (int center : centers) {
difference -= options_.similarity_penalty_weight *
graph_->EdgeWeight(centers[i], candidate);
graph_->EdgeWeight(center, candidate);
}
return difference;

View File

@ -45,7 +45,8 @@
#include <vector>
#include "ceres/graph.h"
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
@ -95,13 +96,13 @@ struct CanonicalViewsClusteringOptions;
// It is possible depending on the configuration of the clustering
// algorithm that some of the vertices may not be assigned to any
// cluster. In this case they are assigned to a cluster with id = -1;
CERES_EXPORT_INTERNAL void ComputeCanonicalViewsClustering(
CERES_NO_EXPORT void ComputeCanonicalViewsClustering(
const CanonicalViewsClusteringOptions& options,
const WeightedGraph<int>& graph,
std::vector<int>* centers,
std::unordered_map<int, int>* membership);
struct CERES_EXPORT_INTERNAL CanonicalViewsClusteringOptions {
struct CERES_NO_EXPORT CanonicalViewsClusteringOptions {
// The minimum number of canonical views to compute.
int min_views = 3;
@ -122,4 +123,6 @@ struct CERES_EXPORT_INTERNAL CanonicalViewsClusteringOptions {
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_

View File

@ -32,14 +32,13 @@
#define CERES_INTERNAL_CASTS_H_
#include <cassert>
#include <cstddef> // For NULL.
namespace ceres {
// Identity metafunction.
template <class T>
struct identity_ {
typedef T type;
using type = T;
};
// Use implicit_cast as a safe version of static_cast or const_cast
@ -86,6 +85,7 @@ inline To implicit_cast(typename identity_<From>::type const& f) {
// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
// You should design the code some other way not to need this.
// TODO(sameeragarwal): Modernize this.
template <typename To, typename From> // use like this: down_cast<T*>(foo);
inline To down_cast(From* f) { // so we only accept pointers
// Ensures that To is a sub-type of From *. This test is here only
@ -95,11 +95,11 @@ inline To down_cast(From* f) { // so we only accept pointers
// TODO(csilvers): This should use COMPILE_ASSERT.
if (false) {
implicit_cast<From*, To>(NULL);
implicit_cast<From*, To>(nullptr);
}
// uses RTTI in dbg and fastbuild. asserts are disabled in opt builds.
assert(f == NULL || dynamic_cast<To>(f) != NULL); // NOLINT
assert(f == nullptr || dynamic_cast<To>(f) != nullptr); // NOLINT
return static_cast<To>(f);
}

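// A usage sketch for the down_cast defined above: a static_cast that is checked
// (via dynamic_cast in an assert) in debug builds, for the case where the
// dynamic type is already known. Base/Derived are illustrative, and the include
// path is assumed per Ceres's internal header layout.
#include "ceres/casts.h"

struct Base {
  virtual ~Base() = default;
};
struct Derived : Base {
  int payload = 42;
};

int ReadPayload(Base* base_known_to_be_derived) {
  auto* derived = ceres::down_cast<Derived*>(base_known_to_be_derived);
  return derived->payload;
}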
View File

@ -34,7 +34,9 @@
#include <algorithm>
#include <memory>
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/export.h"
#include "ceres/linear_operator.h"
namespace ceres {
@ -78,11 +80,10 @@ class SparseMatrix;
// and z = A^T b
//
// Note: This class is not thread safe, since it uses some temporary storage.
class CgnrLinearOperator : public LinearOperator {
class CERES_NO_EXPORT CgnrLinearOperator final : public LinearOperator {
public:
CgnrLinearOperator(const LinearOperator& A, const double* D)
: A_(A), D_(D), z_(new double[A.num_rows()]) {}
virtual ~CgnrLinearOperator() {}
void RightMultiply(const double* x, double* y) const final {
std::fill(z_.get(), z_.get() + A_.num_rows(), 0.0);
@ -94,7 +95,7 @@ class CgnrLinearOperator : public LinearOperator {
A_.LeftMultiply(z_.get(), y);
// y = y + DtDx
if (D_ != NULL) {
if (D_ != nullptr) {
int n = A_.num_cols();
VectorRef(y, n).array() +=
ConstVectorRef(D_, n).array().square() * ConstVectorRef(x, n).array();
@ -117,4 +118,6 @@ class CgnrLinearOperator : public LinearOperator {
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_

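// A dense Eigen sketch of what CgnrLinearOperator::RightMultiply computes
// above: y += A^T (A x) + (D .* D) .* x, i.e. the operator of the regularized
// normal equations for the stacked system [A; diag(D)]. A, D and x are
// illustrative values, not Ceres internals.
#include <Eigen/Dense>

void ApplyCgnrOperator(const Eigen::MatrixXd& A, const Eigen::VectorXd& D,
                       const Eigen::VectorXd& x, Eigen::VectorXd* y) {
  const Eigen::VectorXd z = A * x;               // z = A x
  *y += A.transpose() * z;                       // y += A^T z
  y->array() += D.array().square() * x.array();  // y += D^T D x
}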
View File

@ -30,6 +30,9 @@
#include "ceres/cgnr_solver.h"
#include <memory>
#include <utility>
#include "ceres/block_jacobi_preconditioner.h"
#include "ceres/cgnr_linear_operator.h"
#include "ceres/conjugate_gradients_solver.h"
@ -42,8 +45,8 @@
namespace ceres {
namespace internal {
CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
: options_(options) {
CgnrSolver::CgnrSolver(LinearSolver::Options options)
: options_(std::move(options)) {
if (options_.preconditioner_type != JACOBI &&
options_.preconditioner_type != IDENTITY &&
options_.preconditioner_type != SUBSET) {
@ -54,7 +57,7 @@ CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
}
}
CgnrSolver::~CgnrSolver() {}
CgnrSolver::~CgnrSolver() = default;
LinearSolver::Summary CgnrSolver::SolveImpl(
BlockSparseMatrix* A,
@ -70,7 +73,7 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
if (!preconditioner_) {
if (options_.preconditioner_type == JACOBI) {
preconditioner_.reset(new BlockJacobiPreconditioner(*A));
preconditioner_ = std::make_unique<BlockJacobiPreconditioner>(*A);
} else if (options_.preconditioner_type == SUBSET) {
Preconditioner::Options preconditioner_options;
preconditioner_options.type = SUBSET;
@ -81,8 +84,8 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
preconditioner_options.use_postordering = options_.use_postordering;
preconditioner_options.num_threads = options_.num_threads;
preconditioner_options.context = options_.context;
preconditioner_.reset(
new SubsetPreconditioner(preconditioner_options, *A));
preconditioner_ =
std::make_unique<SubsetPreconditioner>(preconditioner_options, *A);
}
}

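// The constructor signature change above (const reference to by-value plus
// std::move) is the pass-by-value-then-move idiom: callers passing an rvalue
// avoid a copy, callers passing an lvalue pay exactly one copy. A minimal
// standalone sketch (Options here is a stand-in, not LinearSolver::Options):
#include <string>
#include <utility>

struct Options {
  std::string preconditioner;
};

class Solver {
 public:
  explicit Solver(Options options) : options_(std::move(options)) {}

 private:
  Options options_;
};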
View File

@ -33,6 +33,7 @@
#include <memory>
#include "ceres/internal/export.h"
#include "ceres/linear_solver.h"
namespace ceres {
@ -49,12 +50,12 @@ class BlockJacobiPreconditioner;
//
// as required for solving for x in the least squares sense. Currently only
// block diagonal preconditioning is supported.
class CgnrSolver : public BlockSparseMatrixSolver {
class CERES_NO_EXPORT CgnrSolver final : public BlockSparseMatrixSolver {
public:
explicit CgnrSolver(const LinearSolver::Options& options);
explicit CgnrSolver(LinearSolver::Options options);
CgnrSolver(const CgnrSolver&) = delete;
void operator=(const CgnrSolver&) = delete;
virtual ~CgnrSolver();
~CgnrSolver() override;
Summary SolveImpl(BlockSparseMatrix* A,
const double* b,

View File

@ -33,7 +33,7 @@
#include <algorithm>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/export.h"
#include "glog/logging.h"
namespace ceres {

View File

@ -31,9 +31,11 @@
#ifndef CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
#define CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
#include <algorithm>
#include <vector>
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/export.h"
namespace ceres {
namespace internal {
@ -48,7 +50,7 @@ namespace internal {
// and column block j, then it is expected that A contains at least
// one non-zero entry corresponding to the top left entry of c_ij,
// as that entry is used to detect the presence of a non-zero c_ij.
CERES_EXPORT_INTERNAL void CompressedColumnScalarMatrixToBlockMatrix(
CERES_NO_EXPORT void CompressedColumnScalarMatrixToBlockMatrix(
const int* scalar_rows,
const int* scalar_cols,
const std::vector<int>& row_blocks,
@ -59,7 +61,7 @@ CERES_EXPORT_INTERNAL void CompressedColumnScalarMatrixToBlockMatrix(
// Given a set of blocks and a permutation of these blocks, compute
// the corresponding "scalar" ordering, where the scalar ordering of
// size sum(blocks).
CERES_EXPORT_INTERNAL void BlockOrderingToScalarOrdering(
CERES_NO_EXPORT void BlockOrderingToScalarOrdering(
const std::vector<int>& blocks,
const std::vector<int>& block_ordering,
std::vector<int>* scalar_ordering);
@ -142,4 +144,6 @@ void SolveRTRWithSparseRHS(IntegerType num_cols,
} // namespace internal
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
#endif // CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_

View File

@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2015 Google Inc. All rights reserved.
// Copyright 2022 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@ -30,7 +30,10 @@
#include "ceres/compressed_row_jacobian_writer.h"
#include <algorithm>
#include <iterator>
#include <memory>
#include <string>
#include <utility>
#include <vector>
@ -55,7 +58,7 @@ void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {
col_blocks[i] = parameter_blocks[i]->LocalSize();
col_blocks[i] = parameter_blocks[i]->TangentSize();
}
const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
@ -81,10 +84,12 @@ void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
make_pair(parameter_block->index(), j));
}
}
sort(evaluated_jacobian_blocks->begin(), evaluated_jacobian_blocks->end());
std::sort(evaluated_jacobian_blocks->begin(),
evaluated_jacobian_blocks->end());
}
SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
std::unique_ptr<SparseMatrix> CompressedRowJacobianWriter::CreateJacobian()
const {
const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
int total_num_residuals = program_->NumResiduals();
@ -92,14 +97,13 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// Count the number of jacobian nonzeros.
int num_jacobian_nonzeros = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
ResidualBlock* residual_block = residual_blocks[i];
for (auto* residual_block : residual_blocks) {
const int num_residuals = residual_block->NumResiduals();
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
if (!parameter_block->IsConstant()) {
num_jacobian_nonzeros += num_residuals * parameter_block->LocalSize();
num_jacobian_nonzeros += num_residuals * parameter_block->TangentSize();
}
}
}
@ -108,10 +112,11 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// Allocate more space than needed to store the jacobian so that when the LM
// algorithm adds the diagonal, no reallocation is necessary. This reduces
// peak memory usage significantly.
CompressedRowSparseMatrix* jacobian = new CompressedRowSparseMatrix(
total_num_residuals,
total_num_effective_parameters,
num_jacobian_nonzeros + total_num_effective_parameters);
std::unique_ptr<CompressedRowSparseMatrix> jacobian =
std::make_unique<CompressedRowSparseMatrix>(
total_num_residuals,
total_num_effective_parameters,
num_jacobian_nonzeros + total_num_effective_parameters);
// At this stage, the CompressedRowSparseMatrix is an invalid state. But this
// seems to be the only way to construct it without doing a memory copy.
@ -120,8 +125,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
int row_pos = 0;
rows[0] = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
const ResidualBlock* residual_block = residual_blocks[i];
for (auto* residual_block : residual_blocks) {
const int num_parameter_blocks = residual_block->NumParameterBlocks();
// Count the number of derivatives for a row of this residual block and
@ -132,7 +136,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
if (!parameter_block->IsConstant()) {
parameter_indices.push_back(parameter_block->index());
num_derivatives += parameter_block->LocalSize();
num_derivatives += parameter_block->TangentSize();
}
}
@ -163,10 +167,10 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// parameter vector. This code mirrors that in Write(), where jacobian
// values are updated.
int col_pos = 0;
for (int j = 0; j < parameter_indices.size(); ++j) {
for (int parameter_index : parameter_indices) {
ParameterBlock* parameter_block =
program_->parameter_blocks()[parameter_indices[j]];
const int parameter_block_size = parameter_block->LocalSize();
program_->parameter_blocks()[parameter_index];
const int parameter_block_size = parameter_block->TangentSize();
for (int r = 0; r < num_residuals; ++r) {
// This is the position in the values array of the jacobian where this
@ -183,7 +187,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
}
CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
PopulateJacobianRowAndColumnBlockVectors(program_, jacobian);
PopulateJacobianRowAndColumnBlockVectors(program_, jacobian.get());
return jacobian;
}
@ -192,8 +196,7 @@ void CompressedRowJacobianWriter::Write(int residual_id,
int residual_offset,
double** jacobians,
SparseMatrix* base_jacobian) {
CompressedRowSparseMatrix* jacobian =
down_cast<CompressedRowSparseMatrix*>(base_jacobian);
auto* jacobian = down_cast<CompressedRowSparseMatrix*>(base_jacobian);
double* jacobian_values = jacobian->mutable_values();
const int* jacobian_rows = jacobian->rows();
@ -210,11 +213,11 @@ void CompressedRowJacobianWriter::Write(int residual_id,
// Iterate over the jacobian blocks in increasing order of their
// positions in the reduced parameter vector.
for (int i = 0; i < evaluated_jacobian_blocks.size(); ++i) {
for (auto& evaluated_jacobian_block : evaluated_jacobian_blocks) {
const ParameterBlock* parameter_block =
program_->parameter_blocks()[evaluated_jacobian_blocks[i].first];
const int argument = evaluated_jacobian_blocks[i].second;
const int parameter_block_size = parameter_block->LocalSize();
program_->parameter_blocks()[evaluated_jacobian_block.first];
const int argument = evaluated_jacobian_block.second;
const int parameter_block_size = parameter_block->TangentSize();
// Copy one row of the jacobian block at a time.
for (int r = 0; r < num_residuals; ++r) {

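// The LocalSize() -> TangentSize() renames above use the Manifold terminology:
// a parameter block has an ambient size (how it is stored) and a tangent size
// (its number of degrees of freedom), and the Jacobian columns are counted in
// tangent space. A short sketch, assuming Ceres 2.1's QuaternionManifold:
#include "ceres/manifold.h"

int QuaternionTangentSize() {
  ceres::QuaternionManifold manifold;
  // Stored as AmbientSize() == 4 doubles, with TangentSize() == 3 degrees of
  // freedom in the tangent space.
  return manifold.TangentSize();
}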
Some files were not shown because too many files have changed in this diff