git.blender.org/blender.git

Diffstat (limited to 'extern/ceres')
-rw-r--r--extern/ceres/ChangeLog825
-rw-r--r--extern/ceres/README.blender2
-rwxr-xr-xextern/ceres/bundle.sh4
-rw-r--r--extern/ceres/include/ceres/autodiff_cost_function.h31
-rw-r--r--extern/ceres/include/ceres/c_api.h2
-rw-r--r--extern/ceres/include/ceres/cost_function_to_functor.h3
-rw-r--r--extern/ceres/include/ceres/covariance.h10
-rw-r--r--extern/ceres/include/ceres/dynamic_autodiff_cost_function.h25
-rw-r--r--extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h5
-rw-r--r--extern/ceres/include/ceres/gradient_problem_solver.h3
-rw-r--r--extern/ceres/include/ceres/internal/autodiff.h6
-rw-r--r--extern/ceres/include/ceres/internal/disable_warnings.h4
-rw-r--r--extern/ceres/include/ceres/internal/eigen.h23
-rw-r--r--extern/ceres/include/ceres/internal/fixed_array.h3
-rw-r--r--extern/ceres/include/ceres/internal/integer_sequence_algorithm.h11
-rw-r--r--extern/ceres/include/ceres/internal/numeric_diff.h118
-rw-r--r--extern/ceres/include/ceres/internal/port.h92
-rw-r--r--extern/ceres/include/ceres/internal/reenable_warnings.h2
-rw-r--r--extern/ceres/include/ceres/internal/variadic_evaluate.h28
-rw-r--r--extern/ceres/include/ceres/iteration_callback.h2
-rw-r--r--extern/ceres/include/ceres/jet.h17
-rw-r--r--extern/ceres/include/ceres/local_parameterization.h12
-rw-r--r--extern/ceres/include/ceres/numeric_diff_cost_function.h5
-rw-r--r--extern/ceres/include/ceres/problem.h48
-rw-r--r--extern/ceres/include/ceres/rotation.h4
-rw-r--r--extern/ceres/include/ceres/solver.h5
-rw-r--r--extern/ceres/include/ceres/types.h53
-rw-r--r--extern/ceres/include/ceres/version.h7
-rw-r--r--extern/ceres/internal/ceres/accelerate_sparse.cc87
-rw-r--r--extern/ceres/internal/ceres/accelerate_sparse.h31
-rw-r--r--extern/ceres/internal/ceres/array_utils.cc14
-rw-r--r--extern/ceres/internal/ceres/array_utils.h13
-rw-r--r--extern/ceres/internal/ceres/blas.cc1
-rw-r--r--extern/ceres/internal/ceres/block_evaluate_preparer.cc7
-rw-r--r--extern/ceres/internal/ceres/block_jacobi_preconditioner.cc16
-rw-r--r--extern/ceres/internal/ceres/block_jacobi_preconditioner.h5
-rw-r--r--extern/ceres/internal/ceres/block_jacobian_writer.cc4
-rw-r--r--extern/ceres/internal/ceres/block_jacobian_writer.h4
-rw-r--r--extern/ceres/internal/ceres/block_random_access_dense_matrix.cc4
-rw-r--r--extern/ceres/internal/ceres/block_random_access_dense_matrix.h6
-rw-r--r--extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc15
-rw-r--r--extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h6
-rw-r--r--extern/ceres/internal/ceres/block_random_access_matrix.cc3
-rw-r--r--extern/ceres/internal/ceres/block_random_access_matrix.h4
-rw-r--r--extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc34
-rw-r--r--extern/ceres/internal/ceres/block_random_access_sparse_matrix.h9
-rw-r--r--extern/ceres/internal/ceres/block_sparse_matrix.cc38
-rw-r--r--extern/ceres/internal/ceres/block_sparse_matrix.h11
-rw-r--r--extern/ceres/internal/ceres/block_structure.cc2
-rw-r--r--extern/ceres/internal/ceres/block_structure.h1
-rw-r--r--extern/ceres/internal/ceres/c_api.cc31
-rw-r--r--extern/ceres/internal/ceres/callbacks.cc9
-rw-r--r--extern/ceres/internal/ceres/callbacks.h5
-rw-r--r--extern/ceres/internal/ceres/canonical_views_clustering.cc19
-rw-r--r--extern/ceres/internal/ceres/canonical_views_clustering.h5
-rw-r--r--extern/ceres/internal/ceres/casts.h10
-rw-r--r--extern/ceres/internal/ceres/cgnr_linear_operator.h12
-rw-r--r--extern/ceres/internal/ceres/cgnr_solver.h10
-rw-r--r--extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc24
-rw-r--r--extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.h9
-rw-r--r--extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc34
-rw-r--r--extern/ceres/internal/ceres/compressed_row_jacobian_writer.h8
-rw-r--r--extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc1
-rw-r--r--extern/ceres/internal/ceres/compressed_row_sparse_matrix.h3
-rw-r--r--extern/ceres/internal/ceres/concurrent_queue.h1
-rw-r--r--extern/ceres/internal/ceres/conditioned_cost_function.cc15
-rw-r--r--extern/ceres/internal/ceres/conjugate_gradients_solver.cc27
-rw-r--r--extern/ceres/internal/ceres/conjugate_gradients_solver.h3
-rw-r--r--extern/ceres/internal/ceres/context.cc4
-rw-r--r--extern/ceres/internal/ceres/context_impl.cc1
-rw-r--r--extern/ceres/internal/ceres/context_impl.h4
-rw-r--r--extern/ceres/internal/ceres/coordinate_descent_minimizer.cc26
-rw-r--r--extern/ceres/internal/ceres/corrector.cc9
-rw-r--r--extern/ceres/internal/ceres/corrector.h4
-rw-r--r--extern/ceres/internal/ceres/covariance.cc13
-rw-r--r--extern/ceres/internal/ceres/covariance_impl.cc241
-rw-r--r--extern/ceres/internal/ceres/covariance_impl.h22
-rw-r--r--extern/ceres/internal/ceres/cxsparse.cc3
-rw-r--r--extern/ceres/internal/ceres/cxsparse.h2
-rw-r--r--extern/ceres/internal/ceres/dense_jacobian_writer.h30
-rw-r--r--extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc20
-rw-r--r--extern/ceres/internal/ceres/dense_normal_cholesky_solver.h2
-rw-r--r--extern/ceres/internal/ceres/dense_qr_solver.cc7
-rw-r--r--extern/ceres/internal/ceres/dense_qr_solver.h5
-rw-r--r--extern/ceres/internal/ceres/dense_sparse_matrix.cc59
-rw-r--r--extern/ceres/internal/ceres/dense_sparse_matrix.h5
-rw-r--r--extern/ceres/internal/ceres/detect_structure.cc11
-rw-r--r--extern/ceres/internal/ceres/detect_structure.h11
-rw-r--r--extern/ceres/internal/ceres/dogleg_strategy.cc60
-rw-r--r--extern/ceres/internal/ceres/dogleg_strategy.h9
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_finalizer.h2
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.h5
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.cc24
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h4
-rw-r--r--extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc2
-rw-r--r--extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h26
-rw-r--r--extern/ceres/internal/ceres/eigensparse.h8
-rw-r--r--extern/ceres/internal/ceres/evaluator.cc21
-rw-r--r--extern/ceres/internal/ceres/evaluator.h12
-rw-r--r--extern/ceres/internal/ceres/file.cc15
-rw-r--r--extern/ceres/internal/ceres/file.h10
-rw-r--r--extern/ceres/internal/ceres/float_cxsparse.h4
-rw-r--r--extern/ceres/internal/ceres/float_suitesparse.h6
-rw-r--r--extern/ceres/internal/ceres/function_sample.cc12
-rw-r--r--extern/ceres/internal/ceres/function_sample.h7
-rw-r--r--extern/ceres/internal/ceres/generate_template_specializations.py12
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc6
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc1
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc2
-rw-r--r--extern/ceres/internal/ceres/gradient_checker.cc98
-rw-r--r--extern/ceres/internal/ceres/gradient_checking_cost_function.cc36
-rw-r--r--extern/ceres/internal/ceres/gradient_checking_cost_function.h9
-rw-r--r--extern/ceres/internal/ceres/gradient_problem.cc17
-rw-r--r--extern/ceres/internal/ceres/gradient_problem_evaluator.h4
-rw-r--r--extern/ceres/internal/ceres/gradient_problem_solver.cc81
-rw-r--r--extern/ceres/internal/ceres/graph.h23
-rw-r--r--extern/ceres/internal/ceres/graph_algorithms.h20
-rw-r--r--extern/ceres/internal/ceres/implicit_schur_complement.cc30
-rw-r--r--extern/ceres/internal/ceres/implicit_schur_complement.h8
-rw-r--r--extern/ceres/internal/ceres/inner_product_computer.cc6
-rw-r--r--extern/ceres/internal/ceres/inner_product_computer.h3
-rw-r--r--extern/ceres/internal/ceres/invert_psd_matrix.h4
-rw-r--r--extern/ceres/internal/ceres/is_close.cc8
-rw-r--r--extern/ceres/internal/ceres/is_close.h12
-rw-r--r--extern/ceres/internal/ceres/iterative_refiner.cc3
-rw-r--r--extern/ceres/internal/ceres/iterative_refiner.h5
-rw-r--r--extern/ceres/internal/ceres/iterative_schur_complement_solver.cc3
-rw-r--r--extern/ceres/internal/ceres/iterative_schur_complement_solver.h19
-rw-r--r--extern/ceres/internal/ceres/lapack.cc14
-rw-r--r--extern/ceres/internal/ceres/lapack.h1
-rw-r--r--extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc18
-rw-r--r--extern/ceres/internal/ceres/levenberg_marquardt_strategy.h8
-rw-r--r--extern/ceres/internal/ceres/line_search.cc327
-rw-r--r--extern/ceres/internal/ceres/line_search.h1
-rw-r--r--extern/ceres/internal/ceres/line_search_direction.cc56
-rw-r--r--extern/ceres/internal/ceres/line_search_direction.h3
-rw-r--r--extern/ceres/internal/ceres/line_search_minimizer.cc187
-rw-r--r--extern/ceres/internal/ceres/line_search_minimizer.h8
-rw-r--r--extern/ceres/internal/ceres/line_search_preprocessor.cc15
-rw-r--r--extern/ceres/internal/ceres/line_search_preprocessor.h3
-rw-r--r--extern/ceres/internal/ceres/linear_least_squares_problems.cc44
-rw-r--r--extern/ceres/internal/ceres/linear_least_squares_problems.h12
-rw-r--r--extern/ceres/internal/ceres/linear_operator.cc3
-rw-r--r--extern/ceres/internal/ceres/linear_operator.h3
-rw-r--r--extern/ceres/internal/ceres/linear_solver.cc8
-rw-r--r--extern/ceres/internal/ceres/linear_solver.h17
-rw-r--r--extern/ceres/internal/ceres/local_parameterization.cc4
-rw-r--r--extern/ceres/internal/ceres/loss_function.cc21
-rw-r--r--extern/ceres/internal/ceres/low_rank_inverse_hessian.cc15
-rw-r--r--extern/ceres/internal/ceres/map_util.h27
-rw-r--r--extern/ceres/internal/ceres/minimizer.cc12
-rw-r--r--extern/ceres/internal/ceres/minimizer.h17
-rw-r--r--extern/ceres/internal/ceres/normal_prior.cc4
-rw-r--r--extern/ceres/internal/ceres/pair_hash.h6
-rw-r--r--extern/ceres/internal/ceres/parallel_for.h23
-rw-r--r--extern/ceres/internal/ceres/parallel_for_cxx.cc18
-rw-r--r--extern/ceres/internal/ceres/parallel_for_nothreads.cc4
-rw-r--r--extern/ceres/internal/ceres/parallel_for_openmp.cc5
-rw-r--r--extern/ceres/internal/ceres/parallel_utils.h7
-rw-r--r--extern/ceres/internal/ceres/parameter_block_ordering.cc8
-rw-r--r--extern/ceres/internal/ceres/parameter_block_ordering.h23
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view.cc162
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view.h9
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view_impl.h87
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view_template.py25
-rw-r--r--extern/ceres/internal/ceres/polynomial.h33
-rw-r--r--extern/ceres/internal/ceres/preconditioner.cc9
-rw-r--r--extern/ceres/internal/ceres/preconditioner.h19
-rw-r--r--extern/ceres/internal/ceres/preprocessor.cc22
-rw-r--r--extern/ceres/internal/ceres/preprocessor.h6
-rw-r--r--extern/ceres/internal/ceres/problem.cc22
-rw-r--r--extern/ceres/internal/ceres/problem_impl.cc20
-rw-r--r--extern/ceres/internal/ceres/problem_impl.h3
-rw-r--r--extern/ceres/internal/ceres/program.h4
-rw-r--r--extern/ceres/internal/ceres/program_evaluator.h2
-rw-r--r--extern/ceres/internal/ceres/random.h7
-rw-r--r--extern/ceres/internal/ceres/reorder_program.cc136
-rw-r--r--extern/ceres/internal/ceres/reorder_program.h21
-rw-r--r--extern/ceres/internal/ceres/residual_block.cc29
-rw-r--r--extern/ceres/internal/ceres/residual_block.h3
-rw-r--r--extern/ceres/internal/ceres/residual_block_utils.cc7
-rw-r--r--extern/ceres/internal/ceres/residual_block_utils.h1
-rw-r--r--extern/ceres/internal/ceres/schur_complement_solver.cc6
-rw-r--r--extern/ceres/internal/ceres/schur_complement_solver.h8
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator.cc137
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator.h2
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator_impl.h239
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator_template.py16
-rw-r--r--extern/ceres/internal/ceres/schur_templates.cc252
-rw-r--r--extern/ceres/internal/ceres/scratch_evaluate_preparer.cc8
-rw-r--r--extern/ceres/internal/ceres/scratch_evaluate_preparer.h2
-rw-r--r--extern/ceres/internal/ceres/single_linkage_clustering.cc3
-rw-r--r--extern/ceres/internal/ceres/single_linkage_clustering.h10
-rw-r--r--extern/ceres/internal/ceres/small_blas.h124
-rw-r--r--extern/ceres/internal/ceres/small_blas_generic.h114
-rw-r--r--extern/ceres/internal/ceres/solver.cc371
-rw-r--r--extern/ceres/internal/ceres/solver_utils.cc14
-rw-r--r--extern/ceres/internal/ceres/sparse_cholesky.cc4
-rw-r--r--extern/ceres/internal/ceres/sparse_cholesky.h16
-rw-r--r--extern/ceres/internal/ceres/sparse_matrix.cc3
-rw-r--r--extern/ceres/internal/ceres/sparse_matrix.h6
-rw-r--r--extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h12
-rw-r--r--extern/ceres/internal/ceres/split.cc7
-rw-r--r--extern/ceres/internal/ceres/split.h4
-rw-r--r--extern/ceres/internal/ceres/stl_util.h5
-rw-r--r--extern/ceres/internal/ceres/stringprintf.cc5
-rw-r--r--extern/ceres/internal/ceres/stringprintf.h17
-rw-r--r--extern/ceres/internal/ceres/subset_preconditioner.cc4
-rw-r--r--extern/ceres/internal/ceres/subset_preconditioner.h5
-rw-r--r--extern/ceres/internal/ceres/suitesparse.cc6
-rw-r--r--extern/ceres/internal/ceres/suitesparse.h27
-rw-r--r--extern/ceres/internal/ceres/thread_pool.cc21
-rw-r--r--extern/ceres/internal/ceres/thread_pool.h3
-rw-r--r--extern/ceres/internal/ceres/thread_token_provider.cc3
-rw-r--r--extern/ceres/internal/ceres/triplet_sparse_matrix.cc26
-rw-r--r--extern/ceres/internal/ceres/triplet_sparse_matrix.h10
-rw-r--r--extern/ceres/internal/ceres/trust_region_minimizer.cc95
-rw-r--r--extern/ceres/internal/ceres/trust_region_minimizer.h5
-rw-r--r--extern/ceres/internal/ceres/trust_region_preprocessor.cc5
-rw-r--r--extern/ceres/internal/ceres/trust_region_preprocessor.h3
-rw-r--r--extern/ceres/internal/ceres/trust_region_step_evaluator.cc18
-rw-r--r--extern/ceres/internal/ceres/trust_region_strategy.cc1
-rw-r--r--extern/ceres/internal/ceres/trust_region_strategy.h3
-rw-r--r--extern/ceres/internal/ceres/types.cc50
-rw-r--r--extern/ceres/internal/ceres/visibility.cc12
-rw-r--r--extern/ceres/internal/ceres/visibility.h11
-rw-r--r--extern/ceres/internal/ceres/visibility_based_preconditioner.cc10
-rw-r--r--extern/ceres/internal/ceres/visibility_based_preconditioner.h5
-rw-r--r--extern/ceres/internal/ceres/wall_time.h3
-rw-r--r--extern/ceres/patches/series2
-rw-r--r--extern/ceres/patches/unused_parameter.patch13
-rw-r--r--extern/ceres/patches/unused_variable.patch12
273 files changed, 3282 insertions, 3053 deletions
diff --git a/extern/ceres/ChangeLog b/extern/ceres/ChangeLog
index 64c75e572f4..40fe3f16bac 100644
--- a/extern/ceres/ChangeLog
+++ b/extern/ceres/ChangeLog
@@ -1,587 +1,596 @@
-commit e39d9ed1d60dfeb58dd2a0df4622c683f87b28e3
-Author: Carl Dehlin <carl@dehlin.com>
-Date: Tue Jun 16 09:02:05 2020 +0200
+commit 399cda773035d99eaf1f4a129a666b3c4df9d1b1
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Fri Oct 23 19:36:08 2020 +0100
- Add a missing term and remove a superfluous word
+ Update build documentation to reflect detection of Eigen via config mode
- Change-Id: I25f40f0bf241302b975e6fc14690aa863c0728b0
+ Change-Id: I18d5f0fc1eb51ea630164c911d935e9bffea35ce
-commit 27cab77b699a1a2b5354820c57a91c92eaeb21e3
-Author: Carl Dehlin <carl@dehlin.com>
-Date: Mon Jun 15 20:01:18 2020 +0200
+commit bb127272f9b57672bca48424f2d83bc430a46eb8
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Oct 19 09:28:34 2020 -0700
- Reformulate some sentences
+ Fix typos.
- Change-Id: I4841aa8e8522008dd816261d9ad98e5fb8ad1758
+ Contributed by Ishamis@, IanBoyanZhang@, gkrobner@ & mithunjacob@.
+
+ Change-Id: Iab3c19a07a6f3db2486e3557dcb55bfe5de2aee5
-commit 8ac6655ce85a4462f2882fcb9e9118a7057ebe09
-Author: Carl Dehlin <carl@dehlin.com>
-Date: Mon Jun 15 19:10:12 2020 +0200
+commit a0ec5c32af5c5f5a52168dc2748be910dba14810
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Sun Oct 18 15:20:36 2020 -0700
- Fix documentation formatting issues
+ Update version history for 2.0.0RC2
- Change-Id: Iea3a6e75dc3a7376eda866ab24e535a6df84f8ea
+ Change-Id: I75b7515fbf9880bd8eaea6ecd5e72ce1ae4a3a86
-commit 7ef83e07592ead74eeacc227b642df1959d2a246
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sat May 30 11:30:01 2020 +0100
+commit 3f6d2736769044e7c08c873c41a184849eea73ab
+Author: Taylor Braun-Jones <taylor@braun-jones.org>
+Date: Tue Jan 28 12:09:30 2020 -0500
- Update minimum required C++ version for Ceres to C++14
+ Unify symbol visibility configuration for all compilers
- - Removes all workarounds for pre-C++14 versions
- - Removes '11' qualifier from C++ threading option and associated
- defines.
- - Fix missing inclusion of 'Multithreading' in reported Ceres components
- when C++ threading model is enabled.
- - Update Sphinx documentation to specify C++14 as minimum requirement.
+ This makes it possible to build unit tests with shared libraries on MSVC.
- Change-Id: I706c8b367b3221e3c4d1a0aaf669a8f9c911e438
+ Change-Id: I1db66a80b2c78c4f3d354e35235244d17bac9809
-commit 1d75e7568172dc5a4dc97937dcf66e0f5d28272c
+commit 29c2912ee635c77f3ddf2e382a5d6a9cf9805a3d
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon May 25 18:09:50 2020 -0700
+Date: Tue Oct 13 12:07:06 2020 -0700
- Improve documentation for LocalParameterization
+ Unbreak the bazel build some more
- Change-Id: I63fa81206e67bfac56cc42bf2bb4915a3a11332b
+ Change-Id: I6bbf3df977a473b9b5e16a9e59da5f535f8cdc24
-commit 763398ca4ed56952f48c48df6a98e277e3e05381
+commit bf47e1a36829f62697b930241d0a353932f34090
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon May 25 12:12:03 2020 -0700
+Date: Tue Oct 13 10:00:22 2020 -0700
- Update the section on Preconditioners
+ Fix the Bazel build.
+
+ 1. Fix the path to eigen, now that it uses gitlab instead of bitbucket.
+ 2. Remove an unrecognized compiler option.
+ 3. Remove an obsolete benchmark.
- Re-organize the section, add some more references and details for
- existing preconditioners and add documentation for the SUBSET
- preconditioner.
+ This CL only unbreaks the build, it is likely that it is still not
+ at par with the cmake build.
- https://github.com/ceres-solver/ceres-solver/issues/490
+ https://github.com/ceres-solver/ceres-solver/issues/628
- Change-Id: I93d0af819c160f5e4ce48b18202f629ddb92ca7b
+ Change-Id: I470209cbb48b6a4f499564a86b52436e0c8d98ef
-commit a614f788a34ea86dd9f679b779ffbf920db45aa6
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Fri May 22 13:52:53 2020 -0700
+commit 600e8c529ebbb4bb89d5baefa3d5ab6ad923706a
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Mon Oct 12 23:00:39 2020 +0200
- Call EvaluationCallback before evaluating the fixed cost.
+ fix minor typos
- Fixes a subtle bug in Program::RemoveFixedBlocks, where we call
- ResidualBlock::Evaluate on residual blocks with all constant parameter
- blocks without paying attention to the presence of an
- EvaluationCallback.
+ all timing values in the summary are initialized to -1, so the one
+ +1 is likely an oversight.
- In the process also run clang-format on some of the files touched by
- this change.
+ Change-Id: Ie355f3b7da08a56d49d19ca9a5bc48fe5581dee3
+
+commit bdcdcc78af61a0cb85317ebee52dc804bf4ea975
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Mon Sep 7 01:48:50 2020 +0200
+
+ update docs for changed cmake usage
- https://github.com/ceres-solver/ceres-solver/issues/482
+ - update links to cmake docs to version 3.5
+ - highlight difference between dependencies with and without custom
+ find modules
+ - point out removal of CERES_INCLUDE_DIRS
+ - point out that TBB might be linked if SuiteSparseQR is found
+ - added 'Migration' section
+ - fixed typos
- Change-Id: I342b66f6f975fdee2eef139a31f24d4a3e568e84
+ Change-Id: Icbcc0e723d11f12246fb3cf09b9d7c6206195a82
-commit 70308f7bb9cac560db250262079c0f8b030b9d6b
+commit 3f69e5b36a49b44344e96a26b39693a914ba80c6
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue May 26 06:12:13 2020 -0700
+Date: Mon Oct 12 11:46:40 2020 -0700
- Simplify documentation generation.
-
- 1. The MathJax font configuration is moved into conf.py and removed
- from make_docs.py along with better font sizing.
- 2. Remove the bread crumb replacement as it is not working anymore.
- 3. Fix a parsing error in nnls_modeling.rst which the new version of
- sphinx barfed on.
+ Corrections from William Rucklidge
- Change-Id: Ia3c2e732323a8b5cabafe851ac5ca0f0c82da071
+ Change-Id: I0b5d4808be48f68df7829c70ec93ffa67d81315d
-commit e886d7e65368e73e9d35c2ead895d81ced677977
+commit 8bfdb02fb18551bbd5f222c5472e45eddecd42b9
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon May 25 13:09:39 2020 -0700
+Date: Mon Oct 12 10:07:13 2020 -0700
- Reduce the number of minimizer iterations in evaluation_callback_test.cc
-
- This should reduce the probability of the test heuristic failing due
- to floating point issues.
+ Rewrite uses of VLOG_IF and LOG_IF.
- https://github.com/ceres-solver/ceres-solver/issues/562
- https://github.com/ceres-solver/ceres-solver/issues/392
+ VLOG_IF's evaluation order is ambiguous - does it mean
+ `if (cond) VLOG(lvl)` or `if (VLOG_IS_ON(lvl) && cond) LOG(INFO)`?
+ In particular, the way it works now is inconsistent with the way the
+ rest of the LOG macros evaluate their arguments.
+ Fixing this would be hard, and the macro's behavior would still surprise
+ some people. Replacing it with an if statement is simple, clear, and unambiguous.
- Change-Id: I8ccf4164a8d595f5930d378f464313d4a2cae419
+ Change-Id: I97a92d17a932c0a5344a1bf98d676308793ba877
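
A minimal sketch of the before/after pattern that commit describes, using glog (which Ceres logs through); the condition and message here are illustrative only:

    #include <glog/logging.h>

    int main(int /*argc*/, char** argv) {
      google::InitGoogleLogging(argv[0]);
      const bool step_is_successful = true;  // illustrative condition

      // Before: VLOG_IF's evaluation order (condition vs. verbosity check) is
      // ambiguous and inconsistent with how the other LOG macros behave.
      VLOG_IF(2, step_is_successful) << "Step accepted";

      // After: a plain if statement is simple, clear and unambiguous.
      if (step_is_successful) {
        VLOG(2) << "Step accepted";
      }
      return 0;
    }
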
-commit 9483e6f2f57bf51bad7cefd155cd5b48ca672c63
+commit d1b35ffc161fd857c7c433574ca82aa9b2db7f98
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue May 12 05:16:32 2020 -0700
+Date: Mon Oct 12 10:58:05 2020 -0700
- Simplify DynamicCompressedRowJacobianWriter::Write
+ Corrections from William Rucklidge
- Change-Id: I67aa2959bd479909b5cada79359c5cfdb8a37ef7
+ Change-Id: Ifb50e87aa915d00f9861fe1a6da0acee11bc0a94
-commit 323cc55bb92a513924e566f487b54556052a716f
+commit f34e80e91f600014a3030915cf9ea28bcbc576e7
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon May 25 10:38:35 2020 -0700
+Date: Thu Oct 8 12:34:53 2020 -0700
- Update the version in package.xml to 2.0.0.
+ Add dividers between licenses.
- Change-Id: Ibac053916520e8c597c875a8c7f5668bb35b6ba1
+ Change-Id: I4e4aaa15e0621c5648550cfa622fe0a79f1f4f9f
-commit 303b078b50bd3311a9c86fc256be3e9f2f334411
-Author: Bayes Nie <niebayes@gmail.com>
-Date: Sun May 24 16:08:52 2020 +0800
+commit 65c397daeca77da53d16e73720b9a17edd6757ab
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Wed Oct 7 14:34:52 2020 -0700
- Fix few typos and alter a NULL to nullptr.
-
- Fix typos in docs/source/features.rst and examples/helloworld.cc. Alter a NULL to nullptr in include/ceres/autodiff_cost_function.h
+ Fix formatting
- Change-Id: Ibcf00b6ef665ad6be9af14b3add2dd4f3852e7e6
+ Change-Id: Ib4ca8a097059dbb8d2f3a6a888222c0188cb126e
-commit cca93fed63dd4117f3d6dd5339131fc7674e6e0a
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun May 24 18:05:05 2020 +0100
+commit f63b1fea9cfa48ae4530c327b10efa4985e69631
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Wed Oct 7 14:30:54 2020 -0700
- Bypass Ceres' FindGlog.cmake in CeresConfig.cmake if possible
+ Add the MIT license text corresponding to the libmv derived files.
- - If the version of glog detected and used to build Ceres was built with
- CMake (i.e. Config mode) then we now use Config mode directly in
- CeresConfig.cmake and do not install Ceres' FindGlog.cmake module.
- - This has the benefit of removing any hard-coded paths from
- CeresConfig.cmake provided that all dependencies were also built with
- CMake.
-
- Change-Id: I85af8a953fd6d300e8bc0cdeb0b3636fec182f68
+ Change-Id: Ie72fb45ae96a7892c00411eee6873db7f0e365a8
-commit 77fc1d0fc4159ebb3a0a84a16651564eb2ce3c9d
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun May 24 19:07:26 2020 +0100
+commit 542613c13d8b7469822aff5eec076f2cad4507ec
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Tue Oct 6 22:48:59 2020 +0200
- Use build_depend for private dependencies in Catkin package.xml
+ minor formatting fix for trust_region_minimizer.cc
- Change-Id: If0c0569e7ebbf37c0d8e8daaf7765e20a6282531
+ Change-Id: I18ba27825fc23dd0e9e3e15dc13fc0833db01b5b
-commit a09682f00d8e50ada3c7ed16f8c48fa71a423f60
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun May 24 16:49:28 2020 +0100
+commit 6d9e9843d8c61cfb04cc55b9def9518f823a592a
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Sep 28 11:35:37 2020 -0700
- Fix MSVC version check to support use of clang-cl front-end
+ Remove inclusion of ceres/eigen.h
- - Raised as issue: #521
+ The initial reason for this is that a previous reformatting CL
+ triggered a macro redefinition warning in the schur eliminator. But
+ actually it was worse because the reordering had caused the macro
+ definition to be ignored and caused a performance regression.
- Change-Id: Iaea6b43484b90ec8789bda0447c8a90759974ec1
+ This simplifies the generated files, fixes some formatting errors
+ and recovers the performance.
+
+ Change-Id: I9dbeffc38743b3f24b25843feec2e26a73188413
-commit b70687fcc86624c7d5520d25734938fa95d2af73
+commit eafeca5dcb7af8688d40a9c14b0d2fcb856c96fc
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Sep 28 11:12:59 2020 -0700
+
+ Fix a logging bug in TrustRegionMinimizer.
+
+ Upon encountering an unsuccessful step (one where the cost goes up)
+ the trust region minimizer failed to populate the gradient norm
+ in the IterationSummary. This would cause the gradient norm to be
+ logged as zero which is incorrect. Instead it should be the gradient
+ norm at the current point.
+
+ This CL fixes this issue.
+
+ Before:
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 1.115206e+07 0.00e+00 1.90e+07 0.00e+00 0.00e+00 1.00e+04 0 2.72e-01 1.33e+00
+ 1 3.687552e+06 7.46e+06 1.84e+08 2.86e+03 6.91e-01 1.06e+04 1 1.32e+00 2.65e+00
+ 2 3.670266e+10 -3.67e+10 0.00e+00 3.27e+03 -1.07e+04 5.30e+03 1 7.52e-01 3.40e+00
+ 3 4.335397e+07 -3.97e+07 0.00e+00 2.74e+03 -1.16e+01 1.32e+03 1 7.28e-01 4.13e+00
+ 4 1.345488e+06 2.34e+06 4.12e+07 1.55e+03 6.87e-01 1.40e+03 1 9.31e-01 5.06e+00
+ 5 5.376653e+05 8.08e+05 9.99e+06 6.64e+02 7.46e-01 1.59e+03 1 9.64e-01 6.03e+00
+
+ After:
+ iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time
+ 0 1.115206e+07 0.00e+00 1.90e+07 0.00e+00 0.00e+00 1.00e+04 0 2.37e-01 1.13e+00
+ 1 3.687552e+06 7.46e+06 1.84e+08 2.86e+03 6.91e-01 1.06e+04 1 1.08e+00 2.21e+00
+ 2 3.670266e+10 -3.67e+10 1.84e+08 3.27e+03 -1.07e+04 5.30e+03 1 7.50e-01 2.96e+00
+ 3 4.335397e+07 -3.97e+07 1.84e+08 2.74e+03 -1.16e+01 1.32e+03 1 7.13e-01 3.67e+00
+ 4 1.345488e+06 2.34e+06 4.12e+07 1.55e+03 6.87e-01 1.40e+03 1 9.01e-01 4.57e+00
+ 5 5.376653e+05 8.08e+05 9.99e+06 6.64e+02 7.46e-01 1.59e+03 1 9.36e-01 5.51e+00
+
+ Change-Id: Iae538fe089be07c7bb219337a6f1392f7213acfe
+
+commit 1fd0be916dd4ff4241bd52264b9e9170bc7e4339
Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun May 24 20:28:12 2020 +0100
+Date: Mon Sep 28 18:54:33 2020 +0100
- Add namespace qualified Ceres::ceres CMake target
+ Fix default initialisation of IterationCallback::cost
- - This reflects modern CMake style, and also provides a measure of
- protection against missing find_package() imports in downstream
- clients resulting in linker errors when 'ceres' matches the compiled
- library and not the imported target.
- - The original 'ceres' target remains, as a local imported interface
- target created by CeresConfig for backwards compatibility.
+ Change-Id: I9f529093fc09424c90dbff8e9648b90b16990623
+
+commit 137bbe845577929a87f8eef979196df6a8b30ee4
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Mon Sep 28 02:17:32 2020 +0200
+
+ add info about clang-format to contributing docs
- Change-Id: Ie9ed8de9b7059bc0cae1ae5002bb94d8fe617188
+ Change-Id: I2f4dcbda2e4f36096df217d76de370103ffaa43e
-commit 99efa54bdb4e14c3f4382a166baf6772113f74a8
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun May 24 19:18:38 2020 +0100
+commit d3f66d77f45482b90d01af47938289c32dd2cc08
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Mon Sep 28 02:01:43 2020 +0200
- Replace type aliases deprecated/removed in C++17/C++20 from FixedArray
+ fix formatting generated files (best effort)
- - Raised as issue #551
- - Also adds C++20 to the set of ALLOWED_CXX_STANDARDS, although this
- will require a version of CMake >= 3.12.
+ - update file generator scripts / templates so generated files adhere
+ to clang-format
+ - A few exceptions are not fixed, where the file generation results in
+ lines of different width. To properly fix this would make the code
+ more complicated and it's not that important for generated files
+ anyway.
+ - note that generated files are excluded in ./scripts/format_all.sh
- Change-Id: I0f13c72e93a35391fd2d18590b4243a329a2322c
+ Change-Id: I4f42c83d1fec01242eada5e7ce6c1a5192234d37
-commit adb973e4a337c372aa81ca1a4f3bb704068c08b7
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 21 14:45:28 2020 -0700
+commit a9c7361c8dc1d37e78d216754a4c03e8a8f1e74f
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Mon Sep 28 02:14:29 2020 +0200
- NULL -> nullptr
+ minor formatting fix (wrongly updated in earlier commit)
- Change-Id: Iaeea2ef7912d328653a76b65976adc8025a5be35
+ Change-Id: I544635fd936cb5b7f7bd9255876641cd5a9590c6
-commit 27b717951b58c134b3a5a9f664a66c7480364d6c
-Author: Alastair Harrison <aharrison24@gmail.com>
-Date: Fri May 15 10:10:12 2020 +0100
+commit 7b8f675bfdb1d924af6a2dcc1f79bda5ace7e886
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Sun Sep 20 21:45:24 2020 +0200
- Respect FIND_QUIETLY flag in cmake config file
+ fix formatting for (non-generated) internal source files
- Ensure that Ceres does not print any log messages when somebody has
- used 'find_package(Ceres QUIET)' in their CMake project.
+ - Change formatting standard to Cpp11. Main difference is not having
+ the space between two closing >> for nested templates. We don't
+ choose c++14, because older versions of clang-format (version 9
+ and earlier) don't know this value yet, and it doesn't make a
+ difference in the formatting.
+ - Apply clang-format to all (non generated) internal source files.
+ - Manually fix some code sections (clang-format on/off) and c-strings
+ - Exclude some embedded external files with very different formatting
+ (gtest/gmock)
+ - Add script to format all source files
- Change-Id: Id6b68859cc8a5857f3fa78f29736cb82fd5a0943
+ Change-Id: Ic6cea41575ad6e37c9e136dbce176b0d505dc44d
-commit 646959ef118a1f10bf93741d97cf64265d42f8c6
-Author: huangqinjin <huangqinjin@gmail.com>
-Date: Sat Apr 25 02:03:11 2020 +0800
+commit 921368ce31c42ee793cf131860abba291a7e39ad
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Wed Sep 9 09:15:37 2020 -0700
- Do not export class template LineParameterization
+ Fix a number of typos in covariance.h
- For MSVC, instantiating a dllimport class template will cause error C2491:
- definition of dllimport function not allowed.
+ Also some minor cleanups in covariance_impl.h
- Change-Id: Icc7f7ea84598df0a5436f48ffc2bab5cfab93921
-
-commit 1f128d070a24224d12eb901bc74ba393ccdbd0c3
-Author: huangqinjin <huangqinjin@gmail.com>
-Date: Mon Mar 4 13:14:43 2019 +0800
-
- Change the type of parameter index/offset to match their getter/setter
+ Thanks to Lorenzo Lamia for pointing these out.
- Change-Id: If28b795e792f39db9775ada105e9038570195329
+ Change-Id: Icb4012a367fdd1f249bc1e7019e0114c868e45b6
-commit 072c8f070e16cb32f211473c40196c6b5618d5a9
-Author: huangqinjin <huangqinjin@gmail.com>
-Date: Sat Apr 25 00:04:58 2020 +0800
+commit 7b6b2491cc1be0b3abb67338366d8d69bef3a402
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Tue Sep 8 17:51:32 2020 +0200
- Initialize integer variables with integer instead of double
+ fix formatting for examples
+
+ This is mostly just applying the existing clang format config, except:
+ - Use NOLINT on overlong comment lines.
+ - Wrap some sections in 'clang-format off' / 'clang format on'.
+ - Manually split or join some multi-line strings.
- Change-Id: I652aca4ceb3129706a5d5e38afe9f16b61200a5b
+ Change-Id: Ia1a40eeb92112e12c3a169309afe087af55b2f4f
-commit 8c36bcc81fbd4f78a2faa2c914ef40af264f4c31
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Mon Apr 27 18:33:25 2020 +0100
+commit 82275d8a4eac4fc0bd07e17c3a41a6e429e72bfb
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Tue Sep 8 02:00:21 2020 +0200
- Use inline & -inlinehint-threshold in auto-diff benchmarks
+ some fixes for Linux and macOS install docs
- - This results in the same performance as the original solution of
- increasing -inline-threshold, but this approach is more viable to
- incorporate in a large code base as its effects are more targeted.
+ Linux:
+ - Remove workaround for Ubuntu 14.04, which is EOL. libsuitesparse-dev
+ seems to come with a shared library on 16.04 and later, so linking
+ to a shared build of ceres doesn't seem to be an issue any more.
+ - Add missing libgflags-dev.
- Change-Id: Id798dbca7d3050de0ea847a5ecc69484ac78a2cf
-
-commit 57cf20aa5d3c1b2f25d255814f4fff5260db81c6
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue Apr 21 10:10:01 2020 -0700
-
- static const -> static constexpr where we can.
+ macOS:
+ - OS X is now called macOS.
+ - Update homebrew link.
+ - Make homebrew the preferred method of installation.
+ - Fix OpenMP instructions.
+ - Remove reference to homebrew/science. Everything is in core.
+ - Add missing gflags.
- Change-Id: I8a6d26a89c4377dd440fa6dcf23513b7556533fc
+ Change-Id: I633b3c7ea84a87886bfd823f8187fdd0a84737c9
-commit 40b27482a202c8b0a5f9e8f2b4be0192d34195f5
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue Apr 21 09:35:30 2020 -0700
+commit 9d762d74f06b946bbd2f098de7216032d0e7b51d
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Sun Sep 6 21:04:24 2020 +0200
- Add std::numeric_limit specialization for Jets
+ fix formatting for public header files
- This allows users to use std::numeric_limits on templated functors.
+ - ensure all public header files adhere to clang-format
+ - preserve one-per-line for enums by adding trailing comma
+ - preserve include order for en/disable_warning.h
- Change-Id: I403cec5c9826033ce7dfd6047deb64f66c35f806
+ Change-Id: I78dbd0527a294ab2ec5f074fb426e48b20c393e6
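
The std::numeric_limits specialization for Jets noted above lets templated code query limits generically; a minimal sketch under that assumption (the helper name is made up):

    #include <limits>
    #include "ceres/jet.h"

    // Works for T = double and, thanks to the specialization, also for
    // T = ceres::Jet<double, N> inside an autodiff functor.
    template <typename T>
    bool IsEssentiallyInfinite(const T& x) {
      return x >= std::numeric_limits<T>::max();
    }
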
-commit e751d6e4f0daa9f691c5ed25ca8dc564875d8bef
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Wed Apr 8 10:43:53 2020 +0200
+commit c76478c4898f3af11a6a826ac89c261205f4dd96
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Sun Sep 6 23:29:56 2020 +0200
- Remove AutodiffCodegen
-
- - Remove Codegen files
- - Revert Jet and Rotation
+ gitignore *.pyc
- Change-Id: I005c5f98f2b6dfa5c7fd88d998b6aa83e47dab60
+ Change-Id: Ic6238a617a3c7ce92df7dcefcc44bae20c32b30b
-commit e9eb76f8ef9835940659cfb3a312ed6822c48152
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Mon Apr 6 11:11:43 2020 +0200
+commit 4e69a475cd7d7cbed983f5aebf79ae13a46e5415
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Tue Sep 1 10:15:23 2020 +0100
- Remove AutodiffCodegen CMake integration
+ Fix potential for mismatched release/debug TBB libraries
- Change-Id: I403597540df8429378336626b8f748b7821fe6f5
+ - Protect against the case when the user has multiple installs of TBB
+ in their search paths and the first install does not contain debug
+ libraries. In this case it is possible to get mismatched versions
+ of TBB inserted into TBB_LIBRARIES.
+ - Also suppresses warning about use of TBB_ROOT on modern versions of
+ CMake due to CMP0074.
+
+ Change-Id: I2eaafdde4a028cbf6c500c63771973d85bc4723d
-commit 9435e08a7a7c903897e18e1dc24d801caf4f62a4
+commit 8e1d8e32ad0d28c0d4d1d7b2b1ce7fc01d90b7b0
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Fri Apr 3 11:50:09 2020 -0700
+Date: Thu Sep 3 10:49:20 2020 -0700
- More clang-tidy and wjr@ comment fixes
+ A number of small changes.
+
+ 1. Add a move constructor to NumericDiffCostFunction, DynamicAutoDiffCostfunction
+ and DynamicNumericDiffCostFunction.
+ 2. Add optional ownership of the underlying functor.
+ 3. Update docs to reflect this as well as the variadic templates that allow an
+ arbitrary number of parameter blocks.
- Change-Id: I5736ae482f736fc56c00d21c659b1f8d41da68e9
+ Change-Id: I57bbb51fb9e75f36ec2a661b603beda270f30a19
-commit d93fac4b7ab670a936ce821284a0b9d099b4688c
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Fri Apr 3 09:33:17 2020 +0200
+commit 368a738e5281039f19587545806b7bc6f35e78f9
+Author: Julian Kent <jkflying@gmail.com>
+Date: Thu May 7 12:54:35 2020 +0200
- Remove AutodiffCodegen Tests
+ AutoDiffCostFunction: optional ownership
+
+ Add Ownership semantics to the AutoDiffCostFunction
+
+ This allows several benefits, such as pointer ordering always being the
+ same for numerical repeatability (due to blocks being ordered by
+ pointer address), memory adjacency for better cache performance, and
+ reduced allocator pressure / overhead.
- Change-Id: Icd194db7b22add518844f1b507d0fdd3e0fe17fe
+ This is then made use of in libmv by preallocating the errors and
+ cost functions into vectors
+
+ Change-Id: Ia5b97e7249b55a463264b6e26f7a02291927c9f2
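
A minimal sketch of the two ownership modes described above; the functor, block sizes and names are illustrative, and ceres::DO_NOT_TAKE_OWNERSHIP / TAKE_OWNERSHIP are the Ownership values the new constructor defaults to (see the autodiff_cost_function.h hunk at the end of this diff):

    #include "ceres/autodiff_cost_function.h"

    struct ExampleResidual {
      template <typename T>
      bool operator()(const T* const x, T* residual) const {
        residual[0] = T(10.0) - x[0];
        return true;
      }
    };

    void BuildCostFunctions(ExampleResidual* preallocated_functor) {
      // Default behaviour: the cost function owns and deletes the functor.
      ceres::AutoDiffCostFunction<ExampleResidual, 1, 1> owning(
          new ExampleResidual);

      // New option: the caller keeps the functor alive (e.g. in a std::vector,
      // as libmv now does), so the cost function must not delete it.
      ceres::AutoDiffCostFunction<ExampleResidual, 1, 1> non_owning(
          preallocated_functor, ceres::DO_NOT_TAKE_OWNERSHIP);
    }
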
-commit 2281c6ed24d2c12f133fa6039f224b3da18cebe3
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Apr 2 16:43:42 2020 -0700
+commit 8cbd721c199c69f127af6ef7c187ddf7e8f116f9
+Author: Morten Hannemose <morten@hannemose.dk>
+Date: Thu Sep 3 17:54:20 2020 +0200
- Fixes for comments from William Rucklidge
+ Add erf and erfc to jet.h, including tests in jet_test.cc
+
+ erf is necessary for evaluating Gaussian functions.
+ erfc was added because it is so similar to erf.
- Change-Id: I64fcc25532cc66dc4cb7e2ea7ccfb220b0cb7e1f
+ Change-Id: I5e470dbe013cc938fabb87cde3b0ebf26a90fff4
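
Since erf() is now defined for Jets, a Gaussian-style residual can be auto-differentiated directly; a minimal sketch (the model and the (mu, sigma) parameter layout are purely illustrative):

    #include <cmath>
    #include "ceres/jet.h"

    struct GaussianCdfResidual {
      GaussianCdfResidual(double x, double observed) : x_(x), observed_(observed) {}

      template <typename T>
      bool operator()(const T* const mu_sigma, T* residual) const {
        using std::erf;  // std::erf for double, ceres::erf for Jet via ADL
        const T z = (T(x_) - mu_sigma[0]) / (mu_sigma[1] * T(std::sqrt(2.0)));
        residual[0] = T(observed_) - T(0.5) * (T(1.0) + erf(z));
        return true;
      }

      const double x_;
      const double observed_;
    };
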
-commit d797a87a4091af6ae0063e3c8291429c15318bdc
+commit 31366cff299cf2a8d97b43a7533d953ff28fdc29
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Apr 2 13:57:56 2020 -0700
+Date: Tue Sep 1 09:23:34 2020 -0700
- Use Ridders' method in GradientChecker.
+ Benchmarks for dynamic autodiff.
- Using Ridders' method gives orders of magnitude more accuracy compared
- to central differences. This will make things slower, but this is
- primarily a testing/debugging feature and the speed hit is not a
- concern. This should also reduce the false positive rates when users
- enable check_gradients. This is reflected in the increased sensitivity of
- the tests for GradientChecker.
+ This patch is from Clement Courbet. courbet@google.com
- https://github.com/ceres-solver/ceres-solver/issues/554
-
- Change-Id: I6b871c72df55be1c31175ba062cf3c1e94e4b662
+ Change-Id: I886390663644733bfa5b7b52b0c883079e793726
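
The Ridders' method entry above concerns the gradient checking that runs when check_gradients is enabled; a minimal sketch of turning it on (problem construction omitted, options shown are the standard Solver::Options fields):

    #include "ceres/ceres.h"

    void SolveWithGradientChecking(ceres::Problem* problem) {
      ceres::Solver::Options options;
      // Numerically verify every cost function's Jacobian. Slower, but with
      // Ridders' method the comparison is far more accurate than plain central
      // differences, so spurious failures are rarer.
      options.check_gradients = true;
      options.gradient_check_relative_precision = 1e-6;  // tighten or loosen as needed

      ceres::Solver::Summary summary;
      ceres::Solve(options, problem, &summary);
    }
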
-commit 41675682dc9df836bf15845064cfe1087619c79d
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Apr 2 07:28:38 2020 -0700
+commit 29fb08aeae1ce691851724af7209fea6127523a9
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Tue Sep 1 10:23:31 2020 +0100
- Fix a MSVC type deduction bug in ComputeHouseholderVector
+ Use CMAKE_PREFIX_PATH to pass Homebrew install location
- A recent change made this function templated and MSVC 16 has trouble
- doing automatic argument deduction, so the type of the template is
- simplified and all callsites are explicitly annotated with the type
- of the arguments.
+ - Passing HINTS disables the MODULE mode of find_package() which
+ precludes users from creating their own find modules to provide
+ Ceres' dependencies.
- Change-Id: I83cd0269e6e82c4a8f4e391f5fc03b92c942f74d
+ Change-Id: I6f2edf429331d13fe67bf61ac4b79d17579d9a57
-commit 947ec0c1fa0f67c89e21daaf8d1648822ae5293a
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Thu Apr 2 09:52:53 2020 +0200
+commit 242c703b501ffd64d645f4016d63c8b41c381038
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue Aug 4 21:02:11 2020 -0700
- Remove AutodiffCodegen autodiff benchmarks
+ Minor fixes to the documentation
- Change-Id: If1eaad31710cc91d40323ea6cae7cabe6fa64b1f
+ Change-Id: I65e6f648d963b8aa640078684ce02dcde6acb87d
-commit 27183d661ecae246dbce6d03cacf84f39fba1f1e
+commit 79bbf95103672fa4b5485e055ff7692ee4a1f9da
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Jul 11 16:30:59 2019 +0200
+Date: Tue Aug 4 18:26:02 2020 -0700
- Allow LocalParameterizations to have zero local size.
+ Add changelog for 2.0.0
- Local parameterizations with zero tangent/local size will cause the
- corresponding parameter block to be treated as constant.
-
- https://github.com/ceres-solver/ceres-solver/issues/347
-
- Change-Id: I554a2acc420f5dd9d0cc7f97b691877eb057b2c0
+ Change-Id: I8acad62bfe629454ae5032732693e43fe37b97ff
-commit 7ac7d79dca2ac6b482da50fd9ad0227ba8d6c632
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Wed Apr 1 14:51:12 2020 +0200
+commit 41d05f13d0ffb230d7a5a9d67ed31b0cfb35d669
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue Aug 4 14:56:33 2020 -0700
- Remove HelloWorldCodegen example
+ Fix lint errors in evaluation_callback_test.cc
- Change-Id: I2584f41d591a5d648b4832385c2a779bb25fc04d
+ Change-Id: I63eb069544ad0d8f495490fe4caa07b9f04f7ec2
-commit 8c8738bf832f0fc27f0d4a9585fc59b2eaa6a828
-Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
-Date: Sun Mar 29 13:29:02 2020 +0200
+commit 4b67903c1f96037048c83a723028c5d0991c09cf
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue Aug 4 14:40:50 2020 -0700
- Add photometric and relative-pose residuals to autodiff benchmarks
+ Remove unused variables from problem_test.cc
- Change-Id: Id100ff2656ab63bb4fd19a51b95e78281cfd8b4a
+ Change-Id: Ia1a13cfc6e462f6d249dcbf169ad34831dd93ec2
-commit 9f7fb66d62014ed62ba6aa617364e8591211c797
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Wed Mar 25 11:41:39 2020 +0100
+commit 10449fc3664c96d4b5454c092195432df79412f8
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue Aug 4 14:30:25 2020 -0700
- Add a constant cost function to the autodiff benchmarks
-
- The constant cost function is run with a variable number of
- parameters to test at which point, different compilers fail
- to optimize the autodiff code.
+ Add Apache license to the LICENSE file for FixedArray
- Clang achieves expected performance which fails at >50 parameters.
- G++ fails already at 20 parameters
+ FixedArray implementation comes from ABSL which is Apache
+ licensed.
- Change-Id: I75d8c683ef0011d813ec6d966d7ad58f86530f44
+ Change-Id: I566dbe9d236814c95945732c6347d3bf7b508283
-commit ab0d373e465f46ce483db640d0fb2f244f48702d
+commit 8c3ecec6db26d7a66f5de8dc654475ec7aa0df14
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue Mar 24 12:30:46 2020 -0700
+Date: Tue May 26 04:44:11 2020 -0700
- Fix a comment in autodiff.h
+ Fix some minor errors in IterationCallback docs
- Change-Id: I613e537c834e3f29cd92808c65ddb74f112974cc
+ Change-Id: Id3d7f21a523ff8466868cdec542921c566bbbfa9
-commit 27bb997144d00dd4494d440627f1e782bf4adf43
-Author: Johannes Beck <Jodebo_Beck@gmx.de>
-Date: Tue Mar 24 08:05:43 2020 +0100
+commit 7d3ffcb4234632dc51ee84c8a509d9428263070b
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun Jul 26 19:42:16 2020 +0100
- Change SVD algorithm in covariance computation.
+ Remove forced CONFIG from find_package(Eigen3)
- Switch from JacobiSVD to BDCSVD in
- ComputeCovarianceValuesUsingDenseSVD. This should increase
- the performance for larger covariance matrices. See
- https://eigen.tuxfamily.org/dox/classEigen_1_1BDCSVD.html
+ - Ceres will fail to configure if Eigen3::Eigen target is not found, and
+ the minimum required Eigen version specified (3.3) exports Eigen as
+ a CMake package and this is reflected in the default Ubuntu 18.04
+ packages.
+ - This permits users to specify their own Eigen3 detection should they
+ choose to do so, but they must do so via an imported target.
- Change-Id: Icde4dec89f506b638b0f9f1aee3b7cfc9e4d72fc
+ Change-Id: I5edff117c8001770004f49012ac1ae63b66ec9c1
-commit 84fdac38e033c8f9a63c6e6fca7b44219110f7df
-Author: Johannes Beck <Jodebo_Beck@gmx.de>
-Date: Tue Mar 24 08:02:21 2020 +0100
+commit a029fc0f93817f20b387b707bc578dc1f1a269ae
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun Jul 26 18:44:59 2020 +0100
- Add const to GetCovarianceMatrix*
+ Use latest FindTBB.cmake from VTK project
- This CL adds const to the functions GetCovarianceMatrix and
- GetCovarianceMatrixInTangentSpace.
+ - Retrieved from [1], SHA: 0d9bbf9beb97f8f696c43a9edf1e52c082b3639b on
+ 2020-07-26
+ - [1]: https://gitlab.kitware.com/vtk/vtk/blob/master/CMake/FindTBB.cmake
- Change-Id: Ibe2cafebede47977a9aabcac8d245f30af184fd1
+ Change-Id: I953a8c87802a974d30ccc7c80f5229683826efbd
-commit 6bde61d6be9d81a2cd759a6bbb4a8cd3c24a529c
-Author: Johannes Beck <Jodebo_Beck@gmx.de>
-Date: Sat Dec 28 13:29:19 2019 +0100
+commit aa1abbc578797c6b17ee7221db31535dc249ae66
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun Jul 26 19:57:31 2020 +0100
- Add line local parameterization.
+ Replace use of GFLAGS_LIBRARIES with export gflags target
- This CL adds a local parameterization for a n-dimensional
- line, which is represented as an origin point and a direction.
- The line direction is updated in the same way as a
- homogeneous vector and the origin point is updated
- perpendicular to the line direction.
+ - As our minimum required version of gflags (2.2) exports itself as
+ a CMake package and this is the case for the default 18.04 package
+ we can use the gflags target directly.
+ - Replaces forced use of CONFIG in find_package(gflags) with a check
+ that the gflags imported target exists to avoid ambiguity with
+ libgflags if installed in a default location. This permits users to
+ override the gflags detection should they so choose, provided that
+ they do so via an imported target.
+ - Also removes some previously removed legacy GLAGS_ vars from the
+ installation docs.
- Change-Id: I733f395e5cc4250abf9778c26fe0a5ae1de6b624
+ Change-Id: I015f5a751e5b22f956bbf9df692e63a6825c9f0d
-commit 2c1c0932e9d3f91691e5c5fce46b4440e181a8bc
+commit db2af1be8780bbe88944775400baa2dbd3592b7d
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon Mar 23 11:15:32 2020 -0700
+Date: Mon Aug 3 04:57:08 2020 -0700
- Update documentation in autodiff.h
+ Add Problem::EvaluateResidualBlockAssumingParametersUnchanged
- Change-Id: Icc2753b4f5be95022ffd92e479cdd3d9d7959d4c
-
-commit 8904fa4887ed7b3e6d110ad5a98efbc2df48595e
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Mon Mar 23 14:59:26 2020 +0100
-
- Inline Jet initialization in Autodiff
+ Simplify the semantics for Problem::EvaluateResidualBlock to
+ not ignore the presence of EvaluationCallback and add another method
+ EvaluateResidualBlockAssumingParametersUnchanged to handle the case
+ where the user has an EvaluationCallback but knows that the parameter
+ blocks do not change between calls.
- Inlining the Jet initialization is mandatory for good performance
- in autodiff, because all the constants in the dual part can be
- propagated into the cost functor.
+ Updated the documentation for the methods and EvaluationCallback to
+ reflect these semantics.
- This patch unrolls the initialization loop with templates and adds
- EIGEN_ALWAYS_INLINE to the constructors.
+ Also added tests for Evaluation related methods calling
+ EvaluationCallback when it's present.
- Change-Id: Ic89d645984f3e1df6c63948236da823ba60d9620
+ https://github.com/ceres-solver/ceres-solver/issues/483
+
+ Change-Id: If0a0c95c2f1f92e9183a90df240104a69a71c46d
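
A minimal sketch of the two calls the entry above distinguishes; the block size and the exact argument list are assumptions based on the description of Problem::EvaluateResidualBlock here, so check problem.h for the authoritative signature:

    #include "ceres/problem.h"

    void EvaluateOneBlock(ceres::Problem& problem,
                          ceres::Problem::ResidualBlockId id) {
      double cost = 0.0;
      double residuals[1];  // sized for a 1-residual block in this sketch

      // Regular call: if an EvaluationCallback is attached to the problem it is
      // notified, so user-maintained state is brought up to date first.
      problem.EvaluateResidualBlock(
          id, /*apply_loss_function=*/true, &cost, residuals, /*jacobians=*/nullptr);

      // New call: the caller promises the parameter blocks have not changed since
      // the last evaluation, so the EvaluationCallback is not invoked.
      problem.EvaluateResidualBlockAssumingParametersUnchanged(
          id, /*apply_loss_function=*/true, &cost, residuals, /*jacobians=*/nullptr);
    }
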
-commit 18a464d4e566e17930005876af19e32cc8796fa3
+commit ab4ed32cda004befd29a0b4b02f1d907e0c4dab7
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon Mar 23 07:12:12 2020 -0700
+Date: Mon Aug 3 04:17:33 2020 -0700
- Remove an errant CR from local_parameterization.cc
+ Replace NULL with nullptr in the documentation.
- Change-Id: Iff98a96f06de5755062a1c79523604dca78b298e
+ Change-Id: I995f68770e2a4b6027c0a1d3edf5eb5132b081d7
-commit 5c85f21799804d39cbfd20ec451aa219511e4212
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Mon Mar 23 10:12:00 2020 +0100
+commit ee280e27a6140295ef6258d24c92305628f3d508
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Fri Jul 31 16:48:06 2020 -0700
- Use ArraySelector in Autodiff
+ Allow SubsetParameterization to accept an empty vector of constant parameters.
- The class ArraySelector is now used in autodiff to store the
- parameters and residuals. This reduces overhead of FixedArray
- for fixed-sized residuals and allows more optimizations due
- to inlining and unrolling.
+ Thanks to Frédéric Devernay for reporting this and providing an initial fix.
- Change-Id: Ibadc5644e64d672f7a555e250fb1f8da262f9d4f
+ Change-Id: Id86a2051ab7841ecafdcfb00f4634b353a7ef3b4
-commit 80477ff073ab7af03cfb248cab4ef41a87f913d0
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Fri Mar 20 16:26:55 2020 +0100
+commit 4b8c731d8a4f3fda53c642ff14a25fab6c233918
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Fri Jul 31 10:05:52 2020 -0700
- Add class ArraySelector
+ Fix a bug in DynamicAutoDiffCostFunction
+
+ DynamicAutoDiffCostFunction::Evaluate when provided with a jacobians
+ array that was non-empty but all its entries are nullptr, would
+ compute num_active_parameters = 0, and then skip over all the loops
+ that evaluated the CostFunctor.
+
+ The fix is to check if num_active_parameters == 0, and then treat
+ it as the case where jacobians array is null.
- The ArraySelector selects the best array implementation based on
- template arguments.
+ Thanks to Ky Waegel for reporting and providing a reproduction for this.
- Change-Id: I93c6db1a638e924b85292e63bca9525610ec2e2f
+ Change-Id: Ib86930c2c3f722724d249f662bf88238679bbf98
-commit e7a30359ee754057f9bd7b349c98c291138d91f4
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Fri Mar 20 15:50:37 2020 +0100
+commit 5cb5b35a930c1702278083c75769dbb4e5801045
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun Jul 26 20:42:12 2020 +0100
- Pass kNumResiduals to Autodiff
+ Fixed incorrect argument name in RotationMatrixToQuaternion()
- The compile-time constant kNumResiduals is now passed to the
- autodiff functions as a template parameter. This will be used
- by future patches to optimize autodiff performance.
+ - Raised as: https://github.com/ceres-solver/ceres-solver/pull/607 by
+ Frank Dellaert
- Change-Id: Ia2b2cc99b88752e8f12f4ce2542b1963bda552f5
+ Change-Id: Id3e9f190e814cf18206e2f8c3b1b67b995c21dd5
-commit f339d71dd64e4d871cc883f278a153f212f0d1f0
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Mar 19 12:08:28 2020 -0700
-
- Refactor the automatic differentiation benchmarks.
-
- 1. Merge them into a single binary.
- 2. All benchmarks now do the Residual and the Residual + Jacobian
- evaluation.
- 3. Re-organize and simplify the CMake file in this directory.
- 4. Fix a bug in the file where the Clang compiler was not being matched.
-
- autodiff_benchmarks
- ---------------------------------------------------------------------------
- Benchmark Time CPU Iterations
- ---------------------------------------------------------------------------
- BM_Linear1CodeGen/0 3.02 ns 3.01 ns 233870456
- BM_Linear1CodeGen/1 3.02 ns 3.01 ns 233059100
- BM_Linear1AutoDiff/0 3.78 ns 3.77 ns 185791712
- BM_Linear1AutoDiff/1 14.0 ns 13.8 ns 53927875
- BM_Linear10CodeGen/0 5.10 ns 5.10 ns 126745007
- BM_Linear10CodeGen/1 29.1 ns 29.1 ns 23949310
- BM_Linear10AutoDiff/0 6.50 ns 6.49 ns 107516972
- BM_Linear10AutoDiff/1 169 ns 169 ns 4153218
- BM_Rat43AutoDiff/0 52.7 ns 51.2 ns 16444586
- BM_Rat43AutoDiff/1 91.8 ns 91.5 ns 7302316
- BM_SnavelyReprojectionCodeGen/0 38.0 ns 36.2 ns 21131501
- BM_SnavelyReprojectionCodeGen/1 113 ns 112 ns 5627779
- BM_SnavelyReprojectionAutoDiff/0 34.4 ns 34.3 ns 20476937
- BM_SnavelyReprojectionAutoDiff/1 242 ns 240 ns 2930611
- BM_BrdfCodeGen/0 53.9 ns 53.7 ns 11950083
- BM_BrdfCodeGen/1 507 ns 505 ns 1396732
- BM_BrdfAutoDiff/0 58.3 ns 57.8 ns 12220670
- BM_BrdfAutoDiff/1 2034 ns 1999 ns 257003
-
- autodiff_benchmarks_fast_math
- ---------------------------------------------------------------------------
- Benchmark Time CPU Iterations
- ---------------------------------------------------------------------------
- BM_Linear1CodeGen/0 3.19 ns 3.16 ns 215313065
- BM_Linear1CodeGen/1 2.78 ns 2.76 ns 201497994
- BM_Linear1AutoDiff/0 3.27 ns 3.26 ns 206154598
- BM_Linear1AutoDiff/1 13.2 ns 13.1 ns 57257840
- BM_Linear10CodeGen/0 5.70 ns 5.51 ns 121849325
- BM_Linear10CodeGen/1 33.9 ns 33.3 ns 21829295
- BM_Linear10AutoDiff/0 6.85 ns 6.78 ns 106813153
- BM_Linear10AutoDiff/1 173 ns 171 ns 3849877
- BM_Rat43AutoDiff/0 44.8 ns 44.2 ns 15577017
- BM_Rat43AutoDiff/1 96.2 ns 94.6 ns 7374864
- BM_SnavelyReprojectionCodeGen/0 33.9 ns 33.5 ns 20508373
- BM_SnavelyReprojectionCodeGen/1 89.7 ns 88.4 ns 7620624
- BM_SnavelyReprojectionAutoDiff/0 36.5 ns 35.8 ns 20546176
- BM_SnavelyReprojectionAutoDiff/1 257 ns 252 ns 3044325
- BM_BrdfCodeGen/0 61.1 ns 58.5 ns 11334013
- BM_BrdfCodeGen/1 265 ns 265 ns 2625459
- BM_BrdfAutoDiff/0 52.5 ns 52.5 ns 12938763
- BM_BrdfAutoDiff/1 1560 ns 1560 ns 440909
-
- Change-Id: I2d1a4293d3245a50f73af6cf5e5138084321ae6f
-
-commit d37b4cb150c4af65268f9ce5739d1c67e73cb358
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Mar 19 07:36:58 2020 -0700
+commit e39d9ed1d60dfeb58dd2a0df4622c683f87b28e3
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Tue Jun 16 09:02:05 2020 +0200
- Fix some include headers in codegen/test_utils.cc/h
+ Add a missing term and remove a superfluous word
- Change-Id: I769029ce2797eba0de6c7baeb76dc3f2782b6305
+ Change-Id: I25f40f0bf241302b975e6fc14690aa863c0728b0
-commit 550766e6da49dca895a6e2056b0872c557157c5b
-Author: Darius Rueckert <darius.rueckert@fau.de>
-Date: Wed Mar 18 20:09:20 2020 +0100
+commit 27cab77b699a1a2b5354820c57a91c92eaeb21e3
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Mon Jun 15 20:01:18 2020 +0200
- Add Autodiff Brdf Benchmark
+ Reformulate some sentences
- The disney brdf is a good benchmark cost functor, because it has
- - 28 parameters in 7 blocks
- - 3 residuals
- - Lots of low-level arithmetic
+ Change-Id: I4841aa8e8522008dd816261d9ad98e5fb8ad1758
+
+commit 8ac6655ce85a4462f2882fcb9e9118a7057ebe09
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Mon Jun 15 19:10:12 2020 +0200
+
+ Fix documentation formatting issues
- Change-Id: I62c8a717d0aecb64639158f971bdccf6afdfae36
+ Change-Id: Iea3a6e75dc3a7376eda866ab24e535a6df84f8ea
diff --git a/extern/ceres/README.blender b/extern/ceres/README.blender
index 44c389a42d9..0b6063c9703 100644
--- a/extern/ceres/README.blender
+++ b/extern/ceres/README.blender
@@ -1,4 +1,4 @@
Project: Ceres Solver
URL: http://ceres-solver.org/
-Upstream version 1.11 (aef9c9563b08d5f39eee1576af133a84749d1b48)
+Upstream version 2.0.0
Local modifications: None
diff --git a/extern/ceres/bundle.sh b/extern/ceres/bundle.sh
index 573db5a0a2f..e2d2dce8779 100755
--- a/extern/ceres/bundle.sh
+++ b/extern/ceres/bundle.sh
@@ -8,8 +8,8 @@ else
fi
repo="https://ceres-solver.googlesource.com/ceres-solver"
-branch="master"
-tag=""
+#branch="master"
+tag="2.0.0"
tmp=`mktemp -d`
checkout="$tmp/ceres"
diff --git a/extern/ceres/include/ceres/autodiff_cost_function.h b/extern/ceres/include/ceres/autodiff_cost_function.h
index 5e6e9c55db5..207f0a41688 100644
--- a/extern/ceres/include/ceres/autodiff_cost_function.h
+++ b/extern/ceres/include/ceres/autodiff_cost_function.h
@@ -153,28 +153,44 @@ template <typename CostFunctor,
int... Ns> // Number of parameters in each parameter block.
class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
public:
- // Takes ownership of functor. Uses the template-provided value for the
- // number of residuals ("kNumResiduals").
- explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) {
+ // Takes ownership of functor by default. Uses the template-provided
+ // value for the number of residuals ("kNumResiduals").
+ explicit AutoDiffCostFunction(CostFunctor* functor,
+ Ownership ownership = TAKE_OWNERSHIP)
+ : functor_(functor), ownership_(ownership) {
static_assert(kNumResiduals != DYNAMIC,
"Can't run the fixed-size constructor if the number of "
"residuals is set to ceres::DYNAMIC.");
}
- // Takes ownership of functor. Ignores the template-provided
+ // Takes ownership of functor by default. Ignores the template-provided
// kNumResiduals in favor of the "num_residuals" argument provided.
//
// This allows for having autodiff cost functions which return varying
// numbers of residuals at runtime.
- AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
- : functor_(functor) {
+ AutoDiffCostFunction(CostFunctor* functor,
+ int num_residuals,
+ Ownership ownership = TAKE_OWNERSHIP)
+ : functor_(functor), ownership_(ownership) {
static_assert(kNumResiduals == DYNAMIC,
"Can't run the dynamic-size constructor if the number of "
"residuals is not ceres::DYNAMIC.");
SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
- virtual ~AutoDiffCostFunction() {}
+ explicit AutoDiffCostFunction(AutoDiffCostFunction&& other)
+ : functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
+
+ virtual ~AutoDiffCostFunction() {
+ // Manually release pointer if configured to not take ownership rather than
+ // deleting only if ownership is taken.
+ // This is to stay maximally compatible to old user code which may have
+ // forgotten to implement a virtual destructor, from when the
+ // AutoDiffCostFunction always took ownership.
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+ functor_.release();
+ }
+ }
// Implementation details follow; clients of the autodiff cost function should
// not have to examine below here.
@@ -201,6 +217,7 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
private:
std::unique_ptr<CostFunctor> functor_;
+ Ownership ownership_;
};
} // namespace ceres
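The hunk above adds an optional Ownership argument to both AutoDiffCostFunction constructors plus a move constructor. A minimal sketch of how the new DO_NOT_TAKE_OWNERSHIP mode might be used follows; the functor, values, and names are illustrative only and are not part of the diff.

#include "ceres/autodiff_cost_function.h"
#include "ceres/types.h"

struct ExampleResidual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

int main() {
  // The functor lives on the stack, so the cost function must not delete it;
  // the new optional Ownership argument makes that explicit.
  ExampleResidual functor;
  ceres::AutoDiffCostFunction<ExampleResidual, 1, 1> cost_function(
      &functor, ceres::DO_NOT_TAKE_OWNERSHIP);

  double x = 0.5;
  double residual = 0.0;
  const double* parameters[] = {&x};
  cost_function.Evaluate(parameters, &residual, nullptr);
  return 0;
}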
diff --git a/extern/ceres/include/ceres/c_api.h b/extern/ceres/include/ceres/c_api.h
index 0e6e590d0f5..91b82bf995f 100644
--- a/extern/ceres/include/ceres/c_api.h
+++ b/extern/ceres/include/ceres/c_api.h
@@ -38,8 +38,10 @@
#ifndef CERES_PUBLIC_C_API_H_
#define CERES_PUBLIC_C_API_H_
+// clang-format off
#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
+// clang-format on
#ifdef __cplusplus
extern "C" {
diff --git a/extern/ceres/include/ceres/cost_function_to_functor.h b/extern/ceres/include/ceres/cost_function_to_functor.h
index 1beeb906063..9364293afc5 100644
--- a/extern/ceres/include/ceres/cost_function_to_functor.h
+++ b/extern/ceres/include/ceres/cost_function_to_functor.h
@@ -144,8 +144,7 @@ class CostFunctionToFunctor {
// Extract parameter block pointers from params.
using Indices =
- std::make_integer_sequence<int,
- ParameterDims::kNumParameterBlocks>;
+ std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
std::array<const T*, ParameterDims::kNumParameterBlocks> parameter_blocks =
GetParameterPointers<T>(params, Indices());
diff --git a/extern/ceres/include/ceres/covariance.h b/extern/ceres/include/ceres/covariance.h
index 99825c425ad..2fe025df3ce 100644
--- a/extern/ceres/include/ceres/covariance.h
+++ b/extern/ceres/include/ceres/covariance.h
@@ -51,7 +51,7 @@ class CovarianceImpl;
// =======
// It is very easy to use this class incorrectly without understanding
// the underlying mathematics. Please read and understand the
-// documentation completely before attempting to use this class.
+// documentation completely before attempting to use it.
//
//
// This class allows the user to evaluate the covariance for a
@@ -73,7 +73,7 @@ class CovarianceImpl;
// the maximum likelihood estimate of x given observations y is the
// solution to the non-linear least squares problem:
//
-// x* = arg min_x |f(x)|^2
+// x* = arg min_x |f(x) - y|^2
//
// And the covariance of x* is given by
//
@@ -220,11 +220,11 @@ class CERES_EXPORT Covariance {
// 1. DENSE_SVD uses Eigen's JacobiSVD to perform the
// computations. It computes the singular value decomposition
//
- // U * S * V' = J
+ // U * D * V' = J
//
// and then uses it to compute the pseudo inverse of J'J as
//
- // pseudoinverse[J'J]^ = V * pseudoinverse[S] * V'
+ // pseudoinverse[J'J] = V * pseudoinverse[D^2] * V'
//
// It is an accurate but slow method and should only be used
// for small to moderate sized problems. It can handle
@@ -235,7 +235,7 @@ class CERES_EXPORT Covariance {
//
// Q * R = J
//
- // [J'J]^-1 = [R*R']^-1
+ // [J'J]^-1 = [R'*R]^-1
//
// SPARSE_QR is not capable of computing the covariance if the
// Jacobian is rank deficient. Depending on the value of
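For reference, the corrected DENSE_SVD relation above follows directly from the decomposition itself; a short derivation in the same ASCII notation used by the comments (not part of the diff):

   J = U * D * V'                        (thin SVD, U and V orthonormal)
   J'J = V * D * U' * U * D * V' = V * D^2 * V'
   pseudoinverse[J'J] = V * pseudoinverse[D^2] * V'

Here pseudoinverse[D^2] inverts the non-zero squared singular values and leaves the zero ones at zero, which is why DENSE_SVD can still produce a covariance for a rank-deficient Jacobian.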
diff --git a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
index 7b75150b5ce..7ccf6a88c32 100644
--- a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
+++ b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
@@ -40,6 +40,7 @@
#include "ceres/dynamic_cost_function.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/jet.h"
+#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
@@ -78,10 +79,24 @@ namespace ceres {
template <typename CostFunctor, int Stride = 4>
class DynamicAutoDiffCostFunction : public DynamicCostFunction {
public:
- explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
- : functor_(functor) {}
+ // Takes ownership by default.
+ DynamicAutoDiffCostFunction(CostFunctor* functor,
+ Ownership ownership = TAKE_OWNERSHIP)
+ : functor_(functor), ownership_(ownership) {}
- virtual ~DynamicAutoDiffCostFunction() {}
+ explicit DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other)
+ : functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
+
+ virtual ~DynamicAutoDiffCostFunction() {
+ // Manually release pointer if configured to not take ownership
+ // rather than deleting only if ownership is taken. This is to
+ // stay maximally compatible to old user code which may have
+ // forgotten to implement a virtual destructor, from when the
+ // AutoDiffCostFunction always took ownership.
+ if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
+ functor_.release();
+ }
+ }
bool Evaluate(double const* const* parameters,
double* residuals,
@@ -151,6 +166,9 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
}
}
+ if (num_active_parameters == 0) {
+ return (*functor_)(parameters, residuals);
+ }
// When `num_active_parameters % Stride != 0` then it can be the case
// that `active_parameter_count < Stride` while parameter_cursor is less
// than the total number of parameters and with no remaining non-constant
@@ -248,6 +266,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction {
private:
std::unique_ptr<CostFunctor> functor_;
+ Ownership ownership_;
};
} // namespace ceres
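The same Ownership plumbing is added to DynamicAutoDiffCostFunction above, together with an early return when every parameter block is constant. A hedged sketch of the dynamic-size API; the functor, block sizes, and values are illustrative and not taken from the diff.

#include "ceres/dynamic_autodiff_cost_function.h"
#include "ceres/types.h"

struct DynamicResidual {
  template <typename T>
  bool operator()(T const* const* parameters, T* residuals) const {
    // Two parameter blocks of size 1 each, one residual.
    residuals[0] = parameters[0][0] + T(2.0) * parameters[1][0];
    return true;
  }
};

int main() {
  auto* cost_function =
      new ceres::DynamicAutoDiffCostFunction<DynamicResidual>(
          new DynamicResidual, ceres::TAKE_OWNERSHIP);
  cost_function->AddParameterBlock(1);
  cost_function->AddParameterBlock(1);
  cost_function->SetNumResiduals(1);

  double a = 1.0, b = 2.0;
  double residual = 0.0;
  const double* parameters[] = {&a, &b};
  cost_function->Evaluate(parameters, &residual, nullptr);
  delete cost_function;
  return 0;
}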
diff --git a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
index 119b3f85e8e..ccc8f66db43 100644
--- a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
+++ b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
@@ -44,6 +44,7 @@
#include "ceres/internal/numeric_diff.h"
#include "ceres/internal/parameter_dims.h"
#include "ceres/numeric_diff_options.h"
+#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
@@ -84,6 +85,10 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction {
const NumericDiffOptions& options = NumericDiffOptions())
: functor_(functor), ownership_(ownership), options_(options) {}
+ explicit DynamicNumericDiffCostFunction(
+ DynamicNumericDiffCostFunction&& other)
+ : functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
+
virtual ~DynamicNumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
diff --git a/extern/ceres/include/ceres/gradient_problem_solver.h b/extern/ceres/include/ceres/gradient_problem_solver.h
index 181699d8fd4..9fab62e6d94 100644
--- a/extern/ceres/include/ceres/gradient_problem_solver.h
+++ b/extern/ceres/include/ceres/gradient_problem_solver.h
@@ -62,7 +62,8 @@ class CERES_EXPORT GradientProblemSolver {
// Minimizer options ----------------------------------------
LineSearchDirectionType line_search_direction_type = LBFGS;
LineSearchType line_search_type = WOLFE;
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
+ FLETCHER_REEVES;
// The LBFGS hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
diff --git a/extern/ceres/include/ceres/internal/autodiff.h b/extern/ceres/include/ceres/internal/autodiff.h
index cb7b1aca5b9..9d7de758508 100644
--- a/extern/ceres/include/ceres/internal/autodiff.h
+++ b/extern/ceres/include/ceres/internal/autodiff.h
@@ -198,7 +198,7 @@ struct Make1stOrderPerturbation {
template <int N, int Offset, typename T, typename JetT>
struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
public:
- static void Apply(const T* /*src*/, JetT* /*dst*/) {}
+ static void Apply(const T* src, JetT* dst) {}
};
// Calls Make1stOrderPerturbation for every parameter block.
@@ -229,7 +229,9 @@ struct Make1stOrderPerturbations<std::integer_sequence<int, N, Ns...>,
// End of 'recursion'. Nothing more to do.
template <int ParameterIdx, int Total>
-struct Make1stOrderPerturbations<std::integer_sequence<int>, ParameterIdx, Total> {
+struct Make1stOrderPerturbations<std::integer_sequence<int>,
+ ParameterIdx,
+ Total> {
template <typename T, typename JetT>
static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {}
};
diff --git a/extern/ceres/include/ceres/internal/disable_warnings.h b/extern/ceres/include/ceres/internal/disable_warnings.h
index fd848feec0f..d7766a0a08f 100644
--- a/extern/ceres/include/ceres/internal/disable_warnings.h
+++ b/extern/ceres/include/ceres/internal/disable_warnings.h
@@ -34,11 +34,11 @@
#define CERES_WARNINGS_DISABLED
#ifdef _MSC_VER
-#pragma warning( push )
+#pragma warning(push)
// Disable the warning C4251 which is triggered by stl classes in
// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
// "if you are deriving from a type in the Standard C++ Library"
-#pragma warning( disable : 4251 )
+#pragma warning(disable : 4251)
#endif
#endif // CERES_WARNINGS_DISABLED
diff --git a/extern/ceres/include/ceres/internal/eigen.h b/extern/ceres/include/ceres/internal/eigen.h
index 59545dfd9c9..b6d0b7f610c 100644
--- a/extern/ceres/include/ceres/internal/eigen.h
+++ b/extern/ceres/include/ceres/internal/eigen.h
@@ -36,31 +36,26 @@
namespace ceres {
typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vector;
-typedef Eigen::Matrix<double,
- Eigen::Dynamic,
- Eigen::Dynamic,
- Eigen::RowMajor> Matrix;
+typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
+ Matrix;
typedef Eigen::Map<Vector> VectorRef;
typedef Eigen::Map<Matrix> MatrixRef;
typedef Eigen::Map<const Vector> ConstVectorRef;
typedef Eigen::Map<const Matrix> ConstMatrixRef;
// Column major matrices for DenseSparseMatrix/DenseQRSolver
-typedef Eigen::Matrix<double,
- Eigen::Dynamic,
- Eigen::Dynamic,
- Eigen::ColMajor> ColMajorMatrix;
+typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>
+ ColMajorMatrix;
-typedef Eigen::Map<ColMajorMatrix, 0,
- Eigen::Stride<Eigen::Dynamic, 1>> ColMajorMatrixRef;
+typedef Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
+ ColMajorMatrixRef;
-typedef Eigen::Map<const ColMajorMatrix,
- 0,
- Eigen::Stride<Eigen::Dynamic, 1>> ConstColMajorMatrixRef;
+typedef Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>
+ ConstColMajorMatrixRef;
// C++ does not support templated typedefs, thus the need for this
// struct so that we can support statically sized Matrix and Maps.
- template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
+template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
struct EigenTypes {
typedef Eigen::Matrix<double,
num_rows,
diff --git a/extern/ceres/include/ceres/internal/fixed_array.h b/extern/ceres/include/ceres/internal/fixed_array.h
index f8ef02d40e8..dcbddcd3a1d 100644
--- a/extern/ceres/include/ceres/internal/fixed_array.h
+++ b/extern/ceres/include/ceres/internal/fixed_array.h
@@ -30,6 +30,7 @@
#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
+#include <Eigen/Core> // For Eigen::aligned_allocator
#include <algorithm>
#include <array>
#include <cstddef>
@@ -37,8 +38,6 @@
#include <tuple>
#include <type_traits>
-#include <Eigen/Core> // For Eigen::aligned_allocator
-
#include "ceres/internal/memory.h"
#include "glog/logging.h"
diff --git a/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h
index 170acac2832..8c0f3bc8ac4 100644
--- a/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h
+++ b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h
@@ -62,7 +62,8 @@ struct SumImpl;
// Strip of and sum the first number.
template <typename T, T N, T... Ns>
struct SumImpl<std::integer_sequence<T, N, Ns...>> {
- static constexpr T Value = N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
+ static constexpr T Value =
+ N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
};
// Strip of and sum the first two numbers.
@@ -129,10 +130,14 @@ template <typename T, T Sum, typename SeqIn, typename SeqOut>
struct ExclusiveScanImpl;
template <typename T, T Sum, T N, T... Ns, T... Rs>
-struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T, N, Ns...>,
+struct ExclusiveScanImpl<T,
+ Sum,
+ std::integer_sequence<T, N, Ns...>,
std::integer_sequence<T, Rs...>> {
using Type =
- typename ExclusiveScanImpl<T, Sum + N, std::integer_sequence<T, Ns...>,
+ typename ExclusiveScanImpl<T,
+ Sum + N,
+ std::integer_sequence<T, Ns...>,
std::integer_sequence<T, Rs..., Sum>>::Type;
};
diff --git a/extern/ceres/include/ceres/internal/numeric_diff.h b/extern/ceres/include/ceres/internal/numeric_diff.h
index fb2e00baca5..ff7a2c345e4 100644
--- a/extern/ceres/include/ceres/internal/numeric_diff.h
+++ b/extern/ceres/include/ceres/internal/numeric_diff.h
@@ -47,15 +47,17 @@
#include "ceres/types.h"
#include "glog/logging.h"
-
namespace ceres {
namespace internal {
// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
-template <typename CostFunctor, NumericDiffMethodType kMethod,
- int kNumResiduals, typename ParameterDims, int kParameterBlock,
+template <typename CostFunctor,
+ NumericDiffMethodType kMethod,
+ int kNumResiduals,
+ typename ParameterDims,
+ int kParameterBlock,
int kParameterBlockSize>
struct NumericDiff {
// Mutates parameters but must restore them before return.
@@ -66,23 +68,23 @@ struct NumericDiff {
int num_residuals,
int parameter_block_index,
int parameter_block_size,
- double **parameters,
- double *jacobian) {
+ double** parameters,
+ double* jacobian) {
+ using Eigen::ColMajor;
using Eigen::Map;
using Eigen::Matrix;
using Eigen::RowMajor;
- using Eigen::ColMajor;
DCHECK(jacobian);
const int num_residuals_internal =
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
const int parameter_block_index_internal =
- (kParameterBlock != ceres::DYNAMIC ? kParameterBlock :
- parameter_block_index);
+ (kParameterBlock != ceres::DYNAMIC ? kParameterBlock
+ : parameter_block_index);
const int parameter_block_size_internal =
- (kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize :
- parameter_block_size);
+ (kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize
+ : parameter_block_size);
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
@@ -97,17 +99,17 @@ struct NumericDiff {
(kParameterBlockSize == 1) ? ColMajor : RowMajor>
JacobianMatrix;
- Map<JacobianMatrix> parameter_jacobian(jacobian,
- num_residuals_internal,
- parameter_block_size_internal);
+ Map<JacobianMatrix> parameter_jacobian(
+ jacobian, num_residuals_internal, parameter_block_size_internal);
Map<ParameterVector> x_plus_delta(
parameters[parameter_block_index_internal],
parameter_block_size_internal);
ParameterVector x(x_plus_delta);
- ParameterVector step_size = x.array().abs() *
- ((kMethod == RIDDERS) ? options.ridders_relative_initial_step_size :
- options.relative_step_size);
+ ParameterVector step_size =
+ x.array().abs() * ((kMethod == RIDDERS)
+ ? options.ridders_relative_initial_step_size
+ : options.relative_step_size);
// It is not a good idea to make the step size arbitrarily
// small. This will lead to problems with round off and numerical
@@ -118,8 +120,8 @@ struct NumericDiff {
// For Ridders' method, the initial step size is required to be large,
// thus ridders_relative_initial_step_size is used.
if (kMethod == RIDDERS) {
- min_step_size = std::max(min_step_size,
- options.ridders_relative_initial_step_size);
+ min_step_size =
+ std::max(min_step_size, options.ridders_relative_initial_step_size);
}
// For each parameter in the parameter block, use finite differences to
@@ -133,7 +135,9 @@ struct NumericDiff {
const double delta = std::max(min_step_size, step_size(j));
if (kMethod == RIDDERS) {
- if (!EvaluateRiddersJacobianColumn(functor, j, delta,
+ if (!EvaluateRiddersJacobianColumn(functor,
+ j,
+ delta,
options,
num_residuals_internal,
parameter_block_size_internal,
@@ -146,7 +150,9 @@ struct NumericDiff {
return false;
}
} else {
- if (!EvaluateJacobianColumn(functor, j, delta,
+ if (!EvaluateJacobianColumn(functor,
+ j,
+ delta,
num_residuals_internal,
parameter_block_size_internal,
x.data(),
@@ -182,8 +188,7 @@ struct NumericDiff {
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
- Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
- parameter_block_size);
+ Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
@@ -191,9 +196,8 @@ struct NumericDiff {
// Mutate 1 element at a time and then restore.
x_plus_delta(parameter_index) = x(parameter_index) + delta;
- if (!VariadicEvaluate<ParameterDims>(*functor,
- parameters,
- residuals.data())) {
+ if (!VariadicEvaluate<ParameterDims>(
+ *functor, parameters, residuals.data())) {
return false;
}
@@ -206,9 +210,8 @@ struct NumericDiff {
// Compute the function on the other side of x(parameter_index).
x_plus_delta(parameter_index) = x(parameter_index) - delta;
- if (!VariadicEvaluate<ParameterDims>(*functor,
- parameters,
- temp_residuals.data())) {
+ if (!VariadicEvaluate<ParameterDims>(
+ *functor, parameters, temp_residuals.data())) {
return false;
}
@@ -217,8 +220,7 @@ struct NumericDiff {
} else {
// Forward difference only; reuse existing residuals evaluation.
residuals -=
- Map<const ResidualVector>(residuals_at_eval_point,
- num_residuals);
+ Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
}
// Restore x_plus_delta.
@@ -254,17 +256,17 @@ struct NumericDiff {
double* x_plus_delta_ptr,
double* temp_residuals_ptr,
double* residuals_ptr) {
+ using Eigen::aligned_allocator;
using Eigen::Map;
using Eigen::Matrix;
- using Eigen::aligned_allocator;
typedef Matrix<double, kNumResiduals, 1> ResidualVector;
- typedef Matrix<double, kNumResiduals, Eigen::Dynamic> ResidualCandidateMatrix;
+ typedef Matrix<double, kNumResiduals, Eigen::Dynamic>
+ ResidualCandidateMatrix;
typedef Matrix<double, kParameterBlockSize, 1> ParameterVector;
Map<const ParameterVector> x(x_ptr, parameter_block_size);
- Map<ParameterVector> x_plus_delta(x_plus_delta_ptr,
- parameter_block_size);
+ Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size);
Map<ResidualVector> residuals(residuals_ptr, num_residuals);
Map<ResidualVector> temp_residuals(temp_residuals_ptr, num_residuals);
@@ -275,18 +277,16 @@ struct NumericDiff {
// As the derivative is estimated, the step size decreases.
// By default, the step sizes are chosen so that the middle column
// of the Romberg tableau uses the input delta.
- double current_step_size = delta *
- pow(options.ridders_step_shrink_factor,
- options.max_num_ridders_extrapolations / 2);
+ double current_step_size =
+ delta * pow(options.ridders_step_shrink_factor,
+ options.max_num_ridders_extrapolations / 2);
// Double-buffering temporary differential candidate vectors
// from previous step size.
ResidualCandidateMatrix stepsize_candidates_a(
- num_residuals,
- options.max_num_ridders_extrapolations);
+ num_residuals, options.max_num_ridders_extrapolations);
ResidualCandidateMatrix stepsize_candidates_b(
- num_residuals,
- options.max_num_ridders_extrapolations);
+ num_residuals, options.max_num_ridders_extrapolations);
ResidualCandidateMatrix* current_candidates = &stepsize_candidates_a;
ResidualCandidateMatrix* previous_candidates = &stepsize_candidates_b;
@@ -304,7 +304,9 @@ struct NumericDiff {
// 3. Extrapolation becomes numerically unstable.
for (int i = 0; i < options.max_num_ridders_extrapolations; ++i) {
// Compute the numerical derivative at this step size.
- if (!EvaluateJacobianColumn(functor, parameter_index, current_step_size,
+ if (!EvaluateJacobianColumn(functor,
+ parameter_index,
+ current_step_size,
num_residuals,
parameter_block_size,
x.data(),
@@ -327,23 +329,24 @@ struct NumericDiff {
// Extrapolation factor for Richardson acceleration method (see below).
double richardson_factor = options.ridders_step_shrink_factor *
- options.ridders_step_shrink_factor;
+ options.ridders_step_shrink_factor;
for (int k = 1; k <= i; ++k) {
// Extrapolate the various orders of finite differences using
// the Richardson acceleration method.
current_candidates->col(k) =
(richardson_factor * current_candidates->col(k - 1) -
- previous_candidates->col(k - 1)) / (richardson_factor - 1.0);
+ previous_candidates->col(k - 1)) /
+ (richardson_factor - 1.0);
richardson_factor *= options.ridders_step_shrink_factor *
- options.ridders_step_shrink_factor;
+ options.ridders_step_shrink_factor;
// Compute the difference between the previous value and the current.
double candidate_error = std::max(
- (current_candidates->col(k) -
- current_candidates->col(k - 1)).norm(),
- (current_candidates->col(k) -
- previous_candidates->col(k - 1)).norm());
+ (current_candidates->col(k) - current_candidates->col(k - 1))
+ .norm(),
+ (current_candidates->col(k) - previous_candidates->col(k - 1))
+ .norm());
// If the error has decreased, update results.
if (candidate_error <= norm_error) {
@@ -365,8 +368,9 @@ struct NumericDiff {
// Check to see if the current gradient estimate is numerically unstable.
// If so, bail out and return the last stable result.
if (i > 0) {
- double tableau_error = (current_candidates->col(i) -
- previous_candidates->col(i - 1)).norm();
+ double tableau_error =
+ (current_candidates->col(i) - previous_candidates->col(i - 1))
+ .norm();
// Compare current error to the chosen candidate's error.
if (tableau_error >= 2 * norm_error) {
@@ -482,14 +486,18 @@ struct EvaluateJacobianForParameterBlocks<ParameterDims,
// End of 'recursion'. Nothing more to do.
template <typename ParameterDims, int ParameterIdx>
-struct EvaluateJacobianForParameterBlocks<ParameterDims, std::integer_sequence<int>,
+struct EvaluateJacobianForParameterBlocks<ParameterDims,
+ std::integer_sequence<int>,
ParameterIdx> {
- template <NumericDiffMethodType method, int kNumResiduals,
+ template <NumericDiffMethodType method,
+ int kNumResiduals,
typename CostFunctor>
static bool Apply(const CostFunctor* /* NOT USED*/,
const double* /* NOT USED*/,
- const NumericDiffOptions& /* NOT USED*/, int /* NOT USED*/,
- double** /* NOT USED*/, double** /* NOT USED*/) {
+ const NumericDiffOptions& /* NOT USED*/,
+ int /* NOT USED*/,
+ double** /* NOT USED*/,
+ double** /* NOT USED*/) {
return true;
}
};
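The reflowed Ridders' code above builds a tableau of central differences and applies Richardson acceleration to cancel successive error terms. The following self-contained sketch of the same idea for a scalar function is not the Ceres implementation; it only illustrates the extrapolation step, with a simplified error estimate.

#include <cmath>
#include <cstdio>
#include <vector>

double RiddersDerivative(double (*f)(double), double x, double initial_step,
                         double shrink, int max_extrapolations) {
  std::vector<std::vector<double>> tableau(max_extrapolations);
  double best = 0.0;
  double best_error = 1e300;
  double h = initial_step;
  for (int i = 0; i < max_extrapolations; ++i) {
    tableau[i].resize(i + 1);
    // Central difference at the current (progressively smaller) step size.
    tableau[i][0] = (f(x + h) - f(x - h)) / (2.0 * h);
    double factor = shrink * shrink;
    for (int k = 1; k <= i; ++k) {
      // Richardson acceleration: combine estimates from the current and the
      // previous step size to cancel the leading error term, exactly as in
      // the current_candidates/previous_candidates loop above.
      tableau[i][k] =
          (factor * tableau[i][k - 1] - tableau[i - 1][k - 1]) /
          (factor - 1.0);
      factor *= shrink * shrink;
      // Simplified error estimate; Ceres also compares against the previous
      // tableau column and bails out when the extrapolation turns unstable.
      const double error = std::fabs(tableau[i][k] - tableau[i][k - 1]);
      if (error < best_error) {
        best_error = error;
        best = tableau[i][k];
      }
    }
    h /= shrink;
  }
  return best;
}

double Sine(double x) { return std::sin(x); }

int main() {
  // d/dx sin(x) at x = 1 is cos(1) ~= 0.540302.
  std::printf("%.9f\n", RiddersDerivative(&Sine, 1.0, 0.1, 2.0, 6));
  return 0;
}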
diff --git a/extern/ceres/include/ceres/internal/port.h b/extern/ceres/include/ceres/internal/port.h
index 958b0d15cb7..040a1efba02 100644
--- a/extern/ceres/include/ceres/internal/port.h
+++ b/extern/ceres/include/ceres/internal/port.h
@@ -35,17 +35,17 @@
#include "ceres/internal/config.h"
#if defined(CERES_USE_OPENMP)
-# if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
-# error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
-# endif
+#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
+#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
+#endif
#elif defined(CERES_USE_CXX_THREADS)
-# if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
-# error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
-# endif
+#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
+#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
+#endif
#elif defined(CERES_NO_THREADS)
-# if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
-# error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
-# endif
+#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
+#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
+#endif
#else
# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
#endif
@@ -54,37 +54,57 @@
// compiled without any sparse back-end. Verify that it has not subsequently
// been inconsistently redefined.
#if defined(CERES_NO_SPARSE)
-# if !defined(CERES_NO_SUITESPARSE)
-# error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
-# endif
-# if !defined(CERES_NO_CXSPARSE)
-# error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
-# endif
-# if !defined(CERES_NO_ACCELERATE_SPARSE)
-# error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
-# endif
-# if defined(CERES_USE_EIGEN_SPARSE)
-# error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
-# endif
+#if !defined(CERES_NO_SUITESPARSE)
+#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
+#endif
+#if !defined(CERES_NO_CXSPARSE)
+#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
+#endif
+#if !defined(CERES_NO_ACCELERATE_SPARSE)
+#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
+#endif
+#if defined(CERES_USE_EIGEN_SPARSE)
+#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
+#endif
#endif
// A macro to signal which functions and classes are exported when
-// building a DLL with MSVC.
-//
-// Note that the ordering here is important, CERES_BUILDING_SHARED_LIBRARY
-// is only defined locally when Ceres is compiled, it is never exported to
-// users. However, in order that we do not have to configure config.h
-// separately for building vs installing, if we are using MSVC and building
-// a shared library, then both CERES_BUILDING_SHARED_LIBRARY and
-// CERES_USING_SHARED_LIBRARY will be defined when Ceres is compiled.
-// Hence it is important that the check for CERES_BUILDING_SHARED_LIBRARY
-// happens first.
-#if defined(_MSC_VER) && defined(CERES_BUILDING_SHARED_LIBRARY)
-# define CERES_EXPORT __declspec(dllexport)
-#elif defined(_MSC_VER) && defined(CERES_USING_SHARED_LIBRARY)
-# define CERES_EXPORT __declspec(dllimport)
+// building a shared library.
+#if defined(_MSC_VER)
+#define CERES_API_SHARED_IMPORT __declspec(dllimport)
+#define CERES_API_SHARED_EXPORT __declspec(dllexport)
+#elif defined(__GNUC__)
+#define CERES_API_SHARED_IMPORT __attribute__((visibility("default")))
+#define CERES_API_SHARED_EXPORT __attribute__((visibility("default")))
+#else
+#define CERES_API_SHARED_IMPORT
+#define CERES_API_SHARED_EXPORT
+#endif
+
+// CERES_BUILDING_SHARED_LIBRARY is only defined locally when Ceres itself is
+// compiled as a shared library, it is never exported to users. In order that
+// we do not have to configure config.h separately when building Ceres as either
+// a static or dynamic library, we define both CERES_USING_SHARED_LIBRARY and
+// CERES_BUILDING_SHARED_LIBRARY when building as a shared library.
+#if defined(CERES_USING_SHARED_LIBRARY)
+#if defined(CERES_BUILDING_SHARED_LIBRARY)
+// Compiling Ceres itself as a shared library.
+#define CERES_EXPORT CERES_API_SHARED_EXPORT
+#else
+// Using Ceres as a shared library.
+#define CERES_EXPORT CERES_API_SHARED_IMPORT
+#endif
+#else
+// Ceres was compiled as a static library, export everything.
+#define CERES_EXPORT
+#endif
+
+// Unit tests reach in and test internal functionality so we need a way to make
+// those symbols visible
+#ifdef CERES_EXPORT_INTERNAL_SYMBOLS
+#define CERES_EXPORT_INTERNAL CERES_EXPORT
#else
-# define CERES_EXPORT
+#define CERES_EXPORT_INTERNAL
#endif
#endif // CERES_PUBLIC_INTERNAL_PORT_H_
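The rewritten export logic above replaces the MSVC-only handling with generic import/export macros. A hypothetical consumer-side sketch is shown below; the -D flags are normally supplied by the build system, not written by hand, and the class here is purely illustrative.

// Expansion of CERES_EXPORT under the new scheme:
//   (nothing defined)                              -> empty (static library)
//   -DCERES_USING_SHARED_LIBRARY                   -> dllimport on MSVC,
//                                                     default visibility on GCC/Clang
//   -DCERES_USING_SHARED_LIBRARY
//   -DCERES_BUILDING_SHARED_LIBRARY                -> dllexport on MSVC,
//                                                     default visibility on GCC/Clang
#include "ceres/internal/port.h"

class CERES_EXPORT ExportedExample {
 public:
  int Answer() const { return 42; }
};

int main() {
  ExportedExample example;
  return example.Answer() == 42 ? 0 : 1;
}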
diff --git a/extern/ceres/include/ceres/internal/reenable_warnings.h b/extern/ceres/include/ceres/internal/reenable_warnings.h
index 7e410259d64..2c5db061fd7 100644
--- a/extern/ceres/include/ceres/internal/reenable_warnings.h
+++ b/extern/ceres/include/ceres/internal/reenable_warnings.h
@@ -32,7 +32,7 @@
#undef CERES_WARNINGS_DISABLED
#ifdef _MSC_VER
-#pragma warning( pop )
+#pragma warning(pop)
#endif
#endif // CERES_WARNINGS_DISABLED
diff --git a/extern/ceres/include/ceres/internal/variadic_evaluate.h b/extern/ceres/include/ceres/internal/variadic_evaluate.h
index 046832c0bb4..47ff6b18fa0 100644
--- a/extern/ceres/include/ceres/internal/variadic_evaluate.h
+++ b/extern/ceres/include/ceres/internal/variadic_evaluate.h
@@ -46,8 +46,10 @@ namespace internal {
// For fixed size cost functors
template <typename Functor, typename T, int... Indices>
-inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
- T* output, std::false_type /*is_dynamic*/,
+inline bool VariadicEvaluateImpl(const Functor& functor,
+ T const* const* input,
+ T* output,
+ std::false_type /*is_dynamic*/,
std::integer_sequence<int, Indices...>) {
static_assert(sizeof...(Indices),
"Invalid number of parameter blocks. At least one parameter "
@@ -57,26 +59,31 @@ inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
// For dynamic sized cost functors
template <typename Functor, typename T>
-inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
- T* output, std::true_type /*is_dynamic*/,
+inline bool VariadicEvaluateImpl(const Functor& functor,
+ T const* const* input,
+ T* output,
+ std::true_type /*is_dynamic*/,
std::integer_sequence<int>) {
return functor(input, output);
}
// For ceres cost functors (not ceres::CostFunction)
template <typename ParameterDims, typename Functor, typename T>
-inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
- T* output, const void* /* NOT USED */) {
+inline bool VariadicEvaluateImpl(const Functor& functor,
+ T const* const* input,
+ T* output,
+ const void* /* NOT USED */) {
using ParameterBlockIndices =
std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
using IsDynamic = std::integral_constant<bool, ParameterDims::kIsDynamic>;
- return VariadicEvaluateImpl(functor, input, output, IsDynamic(),
- ParameterBlockIndices());
+ return VariadicEvaluateImpl(
+ functor, input, output, IsDynamic(), ParameterBlockIndices());
}
// For ceres::CostFunction
template <typename ParameterDims, typename Functor, typename T>
-inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
+inline bool VariadicEvaluateImpl(const Functor& functor,
+ T const* const* input,
T* output,
const CostFunction* /* NOT USED */) {
return functor.Evaluate(input, output, nullptr);
@@ -95,7 +102,8 @@ inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
// blocks. The signature of the functor must have the following signature
// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'.
template <typename ParameterDims, typename Functor, typename T>
-inline bool VariadicEvaluate(const Functor& functor, T const* const* input,
+inline bool VariadicEvaluate(const Functor& functor,
+ T const* const* input,
T* output) {
return VariadicEvaluateImpl<ParameterDims>(functor, input, output, &functor);
}
diff --git a/extern/ceres/include/ceres/iteration_callback.h b/extern/ceres/include/ceres/iteration_callback.h
index 0a743ecc26f..4507fdf748c 100644
--- a/extern/ceres/include/ceres/iteration_callback.h
+++ b/extern/ceres/include/ceres/iteration_callback.h
@@ -73,7 +73,7 @@ struct CERES_EXPORT IterationSummary {
bool step_is_successful = false;
// Value of the objective function.
- double cost = 0.90;
+ double cost = 0.0;
// Change in the value of the objective function in this
// iteration. This can be positive or negative.
diff --git a/extern/ceres/include/ceres/jet.h b/extern/ceres/include/ceres/jet.h
index 7aafaa01d30..da49f32019f 100644
--- a/extern/ceres/include/ceres/jet.h
+++ b/extern/ceres/include/ceres/jet.h
@@ -388,6 +388,8 @@ using std::cbrt;
using std::ceil;
using std::cos;
using std::cosh;
+using std::erf;
+using std::erfc;
using std::exp;
using std::exp2;
using std::floor;
@@ -573,6 +575,21 @@ inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) {
return y < x ? y : x;
}
+// erf is defined as an integral that cannot be expressed analytically;
+// however, the derivative is trivial to compute:
+// erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi)
+template <typename T, int N>
+inline Jet<T, N> erf(const Jet<T, N>& x) {
+ return Jet<T, N>(erf(x.a), x.v * M_2_SQRTPI * exp(-x.a * x.a));
+}
+
+// erfc(x) = 1-erf(x)
+// erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi))
+template <typename T, int N>
+inline Jet<T, N> erfc(const Jet<T, N>& x) {
+ return Jet<T, N>(erfc(x.a), -x.v * M_2_SQRTPI * exp(-x.a * x.a));
+}
+
// Bessel functions of the first kind with integer order equal to 0, 1, n.
//
// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of
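The new erf/erfc overloads above propagate the derivative 2*exp(-x^2)/sqrt(pi) through the dual part of the Jet. A small illustrative check, not part of the diff; M_2_SQRTPI is assumed to be provided by <cmath>, as the jet.h code itself relies on.

#include <cmath>
#include <cstdio>
#include "ceres/jet.h"

int main() {
  // One-dimensional jet seeded with d/dx = 1 in slot 0.
  ceres::Jet<double, 1> x(0.5, 0);
  ceres::Jet<double, 1> y = erf(x);
  // The dual part should equal d/dx erf(x) = 2/sqrt(pi) * exp(-x^2).
  std::printf("erf(0.5)      = %.12f\n", y.a);
  std::printf("d/dx erf(0.5) = %.12f (expected %.12f)\n",
              y.v[0], M_2_SQRTPI * std::exp(-0.25));
  return 0;
}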
diff --git a/extern/ceres/include/ceres/local_parameterization.h b/extern/ceres/include/ceres/local_parameterization.h
index 1576e829e73..ba7579deca0 100644
--- a/extern/ceres/include/ceres/local_parameterization.h
+++ b/extern/ceres/include/ceres/local_parameterization.h
@@ -90,8 +90,8 @@ namespace ceres {
//
// An example that occurs commonly in Structure from Motion problems
// is when camera rotations are parameterized using Quaternion. There,
-// it is useful only make updates orthogonal to that 4-vector defining
-// the quaternion. One way to do this is to let delta be a 3
+// it is useful to only make updates orthogonal to that 4-vector
+// defining the quaternion. One way to do this is to let delta be a 3
// dimensional vector and define Plus to be
//
// Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x
@@ -99,7 +99,7 @@ namespace ceres {
// The multiplication between the two 4-vectors on the RHS is the
// standard quaternion product.
//
-// Given g and a point x, optimizing f can now be restated as
+// Given f and a point x, optimizing f can now be restated as
//
// min f(Plus(x, delta))
// delta
@@ -306,6 +306,7 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
ProductParameterization(const ProductParameterization&) = delete;
ProductParameterization& operator=(const ProductParameterization&) = delete;
+ virtual ~ProductParameterization() {}
//
// NOTE: The constructor takes ownership of the input local
// parameterizations.
@@ -341,7 +342,8 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
bool Plus(const double* x,
const double* delta,
double* x_plus_delta) const override;
- bool ComputeJacobian(const double* x, double* jacobian) const override;
+ bool ComputeJacobian(const double* x,
+ double* jacobian) const override;
int GlobalSize() const override { return global_size_; }
int LocalSize() const override { return local_size_; }
@@ -354,8 +356,8 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
} // namespace ceres
+// clang-format off
#include "ceres/internal/reenable_warnings.h"
#include "ceres/internal/line_parameterization.h"
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
-
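The corrected comment above describes the quaternion update Plus(x, delta) = [cos(|delta|), sin(|delta|) delta / |delta|] * x. A minimal sketch of that update is given below; it mirrors what ceres::QuaternionParameterization does but is shown only to illustrate the comment, not as the library implementation. Quaternions are stored scalar-first as [w, x, y, z].

#include <cmath>
#include "ceres/rotation.h"

void QuaternionPlusSketch(const double* x, const double* delta,
                          double* x_plus_delta) {
  const double norm_delta = std::sqrt(
      delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]);
  if (norm_delta == 0.0) {
    for (int i = 0; i < 4; ++i) x_plus_delta[i] = x[i];
    return;
  }
  const double sin_by_norm = std::sin(norm_delta) / norm_delta;
  const double q_delta[4] = {std::cos(norm_delta),
                             sin_by_norm * delta[0],
                             sin_by_norm * delta[1],
                             sin_by_norm * delta[2]};
  // Standard quaternion product q_delta * x, as described in the comment.
  ceres::QuaternionProduct(q_delta, x, x_plus_delta);
}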
diff --git a/extern/ceres/include/ceres/numeric_diff_cost_function.h b/extern/ceres/include/ceres/numeric_diff_cost_function.h
index c69f262f572..cf7971cde79 100644
--- a/extern/ceres/include/ceres/numeric_diff_cost_function.h
+++ b/extern/ceres/include/ceres/numeric_diff_cost_function.h
@@ -192,7 +192,10 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
}
}
- ~NumericDiffCostFunction() {
+ explicit NumericDiffCostFunction(NumericDiffCostFunction&& other)
+ : functor_(std::move(other.functor_)), ownership_(other.ownership_) {}
+
+ virtual ~NumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
functor_.release();
}
diff --git a/extern/ceres/include/ceres/problem.h b/extern/ceres/include/ceres/problem.h
index 88f99663f65..add12ea401d 100644
--- a/extern/ceres/include/ceres/problem.h
+++ b/extern/ceres/include/ceres/problem.h
@@ -453,13 +453,15 @@ class CERES_EXPORT Problem {
// problem.AddResidualBlock(new MyCostFunction, nullptr, &x);
//
// double cost = 0.0;
- // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
+ // problem.Evaluate(Problem::EvaluateOptions(), &cost,
+ // nullptr, nullptr, nullptr);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
//
// x = 2;
- // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
+ // problem.Evaluate(Problem::EvaluateOptions(), &cost,
+ // nullptr, nullptr, nullptr);
//
// is the way to do so.
//
@@ -475,7 +477,7 @@ class CERES_EXPORT Problem {
// at the end of an iteration during a solve.
//
// Note 4: If an EvaluationCallback is associated with the problem,
- // then its PrepareForEvaluation method will be called everytime
+ // then its PrepareForEvaluation method will be called every time
// this method is called with new_point = true.
bool Evaluate(const EvaluateOptions& options,
double* cost,
@@ -509,23 +511,41 @@ class CERES_EXPORT Problem {
// apply_loss_function as the name implies allows the user to switch
// the application of the loss function on and off.
//
- // WARNING: If an EvaluationCallback is associated with the problem
- // then it is the user's responsibility to call it before calling
- // this method.
- //
- // This is because, if the user calls this method multiple times, we
- // cannot tell if the underlying parameter blocks have changed
- // between calls or not. So if EvaluateResidualBlock was responsible
- // for calling the EvaluationCallback, it will have to do it
- // everytime it is called. Which makes the common case where the
- // parameter blocks do not change, inefficient. So we leave it to
- // the user to call the EvaluationCallback as needed.
+ // If an EvaluationCallback is associated with the problem, then its
+ // PrepareForEvaluation method will be called every time this method
+ // is called with new_point = true. This conservatively assumes that
+ // the user may have changed the parameter values since the previous
+ // call to evaluate / solve. For improved efficiency, and only if
+ // you know that the parameter values have not changed between
+ // calls, see EvaluateResidualBlockAssumingParametersUnchanged().
bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
bool apply_loss_function,
double* cost,
double* residuals,
double** jacobians) const;
+ // Same as EvaluateResidualBlock except that if an
+ // EvaluationCallback is associated with the problem, then its
+ // PrepareForEvaluation method will be called every time this method
+ // is called with new_point = false.
+ //
+ // This means, if an EvaluationCallback is associated with the
+ // problem then it is the user's responsibility to call
+ // PrepareForEvaluation before calling this method if necessary,
+ // i.e. iff the parameter values have been changed since the last
+  // call to evaluate / solve.
+ //
+ // This is because, as the name implies, we assume that the
+ // parameter blocks did not change since the last time
+ // PrepareForEvaluation was called (via Solve, Evaluate or
+ // EvaluateResidualBlock).
+ bool EvaluateResidualBlockAssumingParametersUnchanged(
+ ResidualBlockId residual_block_id,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const;
+
private:
friend class Solver;
friend class Covariance;
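The hunk above clarifies EvaluateResidualBlock and adds EvaluateResidualBlockAssumingParametersUnchanged. A self-contained sketch of the two entry points follows; the cost functor and values are illustrative, not taken from the diff.

#include "ceres/autodiff_cost_function.h"
#include "ceres/problem.h"

struct LinearResidual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = T(10.0) - x[0];
    return true;
  }
};

int main() {
  double x = 3.0;
  ceres::Problem problem;
  ceres::ResidualBlockId id = problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<LinearResidual, 1, 1>(
          new LinearResidual),
      nullptr, &x);

  double cost = 0.0;
  double residual = 0.0;

  // Conservative entry point: if an EvaluationCallback is attached, its
  // PrepareForEvaluation is called with new_point = true.
  problem.EvaluateResidualBlock(id, /*apply_loss_function=*/true,
                                &cost, &residual, /*jacobians=*/nullptr);

  // Cheaper variant when the parameters are known to be unchanged since the
  // last Solve/Evaluate; PrepareForEvaluation is called with new_point = false.
  problem.EvaluateResidualBlockAssumingParametersUnchanged(
      id, /*apply_loss_function=*/true, &cost, &residual,
      /*jacobians=*/nullptr);
  return 0;
}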
diff --git a/extern/ceres/include/ceres/rotation.h b/extern/ceres/include/ceres/rotation.h
index 7d5c8ef1fb2..0c82a417a2c 100644
--- a/extern/ceres/include/ceres/rotation.h
+++ b/extern/ceres/include/ceres/rotation.h
@@ -320,8 +320,8 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
}
template <typename T>
-void RotationMatrixToQuaternion(const T* R, T* angle_axis) {
- RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), angle_axis);
+void RotationMatrixToQuaternion(const T* R, T* quaternion) {
+ RotationMatrixToQuaternion(ColumnMajorAdapter3x3(R), quaternion);
}
// This algorithm comes from "Quaternion Calculus and Fast Animation",
diff --git a/extern/ceres/include/ceres/solver.h b/extern/ceres/include/ceres/solver.h
index 62631744fe2..61b8dd53eb3 100644
--- a/extern/ceres/include/ceres/solver.h
+++ b/extern/ceres/include/ceres/solver.h
@@ -360,7 +360,8 @@ class CERES_EXPORT Solver {
//
// If Solver::Options::preconditioner_type == SUBSET, then
// residual_blocks_for_subset_preconditioner must be non-empty.
- std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner;
+ std::unordered_set<ResidualBlockId>
+ residual_blocks_for_subset_preconditioner;
// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
@@ -838,7 +839,7 @@ class CERES_EXPORT Solver {
int num_linear_solves = -1;
// Time (in seconds) spent evaluating the residual vector.
- double residual_evaluation_time_in_seconds = 1.0;
+ double residual_evaluation_time_in_seconds = -1.0;
// Number of residual only evaluations.
int num_residual_evaluations = -1;
diff --git a/extern/ceres/include/ceres/types.h b/extern/ceres/include/ceres/types.h
index 3a19b7333b2..5ee6fdca576 100644
--- a/extern/ceres/include/ceres/types.h
+++ b/extern/ceres/include/ceres/types.h
@@ -50,7 +50,7 @@ namespace ceres {
// delete on it upon completion.
enum Ownership {
DO_NOT_TAKE_OWNERSHIP,
- TAKE_OWNERSHIP
+ TAKE_OWNERSHIP,
};
// TODO(keir): Considerably expand the explanations of each solver type.
@@ -185,19 +185,19 @@ enum SparseLinearAlgebraLibraryType {
enum DenseLinearAlgebraLibraryType {
EIGEN,
- LAPACK
+ LAPACK,
};
// Logging options
// The options get progressively noisier.
enum LoggingType {
SILENT,
- PER_MINIMIZER_ITERATION
+ PER_MINIMIZER_ITERATION,
};
enum MinimizerType {
LINE_SEARCH,
- TRUST_REGION
+ TRUST_REGION,
};
enum LineSearchDirectionType {
@@ -412,7 +412,7 @@ enum DumpFormatType {
// specified for the number of residuals. If specified, then the
// number of residuals for that cost function can vary at runtime.
enum DimensionType {
- DYNAMIC = -1
+ DYNAMIC = -1,
};
// The differentiation method used to compute numerical derivatives in
@@ -433,7 +433,7 @@ enum NumericDiffMethodType {
enum LineSearchInterpolationType {
BISECTION,
QUADRATIC,
- CUBIC
+ CUBIC,
};
enum CovarianceAlgorithmType {
@@ -448,8 +448,7 @@ enum CovarianceAlgorithmType {
// did not write to that memory location.
const double kImpossibleValue = 1e302;
-CERES_EXPORT const char* LinearSolverTypeToString(
- LinearSolverType type);
+CERES_EXPORT const char* LinearSolverTypeToString(LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(std::string value,
LinearSolverType* type);
@@ -459,25 +458,23 @@ CERES_EXPORT bool StringToPreconditionerType(std::string value,
CERES_EXPORT const char* VisibilityClusteringTypeToString(
VisibilityClusteringType type);
-CERES_EXPORT bool StringToVisibilityClusteringType(std::string value,
- VisibilityClusteringType* type);
+CERES_EXPORT bool StringToVisibilityClusteringType(
+ std::string value, VisibilityClusteringType* type);
CERES_EXPORT const char* SparseLinearAlgebraLibraryTypeToString(
SparseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToSparseLinearAlgebraLibraryType(
- std::string value,
- SparseLinearAlgebraLibraryType* type);
+ std::string value, SparseLinearAlgebraLibraryType* type);
CERES_EXPORT const char* DenseLinearAlgebraLibraryTypeToString(
DenseLinearAlgebraLibraryType type);
CERES_EXPORT bool StringToDenseLinearAlgebraLibraryType(
- std::string value,
- DenseLinearAlgebraLibraryType* type);
+ std::string value, DenseLinearAlgebraLibraryType* type);
CERES_EXPORT const char* TrustRegionStrategyTypeToString(
TrustRegionStrategyType type);
-CERES_EXPORT bool StringToTrustRegionStrategyType(std::string value,
- TrustRegionStrategyType* type);
+CERES_EXPORT bool StringToTrustRegionStrategyType(
+ std::string value, TrustRegionStrategyType* type);
CERES_EXPORT const char* DoglegTypeToString(DoglegType type);
CERES_EXPORT bool StringToDoglegType(std::string value, DoglegType* type);
@@ -487,41 +484,39 @@ CERES_EXPORT bool StringToMinimizerType(std::string value, MinimizerType* type);
CERES_EXPORT const char* LineSearchDirectionTypeToString(
LineSearchDirectionType type);
-CERES_EXPORT bool StringToLineSearchDirectionType(std::string value,
- LineSearchDirectionType* type);
+CERES_EXPORT bool StringToLineSearchDirectionType(
+ std::string value, LineSearchDirectionType* type);
CERES_EXPORT const char* LineSearchTypeToString(LineSearchType type);
-CERES_EXPORT bool StringToLineSearchType(std::string value, LineSearchType* type);
+CERES_EXPORT bool StringToLineSearchType(std::string value,
+ LineSearchType* type);
CERES_EXPORT const char* NonlinearConjugateGradientTypeToString(
NonlinearConjugateGradientType type);
CERES_EXPORT bool StringToNonlinearConjugateGradientType(
- std::string value,
- NonlinearConjugateGradientType* type);
+ std::string value, NonlinearConjugateGradientType* type);
CERES_EXPORT const char* LineSearchInterpolationTypeToString(
LineSearchInterpolationType type);
CERES_EXPORT bool StringToLineSearchInterpolationType(
- std::string value,
- LineSearchInterpolationType* type);
+ std::string value, LineSearchInterpolationType* type);
CERES_EXPORT const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type);
CERES_EXPORT bool StringToCovarianceAlgorithmType(
- std::string value,
- CovarianceAlgorithmType* type);
+ std::string value, CovarianceAlgorithmType* type);
CERES_EXPORT const char* NumericDiffMethodTypeToString(
NumericDiffMethodType type);
-CERES_EXPORT bool StringToNumericDiffMethodType(
- std::string value,
- NumericDiffMethodType* type);
+CERES_EXPORT bool StringToNumericDiffMethodType(std::string value,
+ NumericDiffMethodType* type);
CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);
CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
-CERES_EXPORT bool StringtoDumpFormatType(std::string value, DumpFormatType* type);
+CERES_EXPORT bool StringtoDumpFormatType(std::string value,
+ DumpFormatType* type);
CERES_EXPORT bool StringtoDumpFormatType(std::string value, LoggingType* type);
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
diff --git a/extern/ceres/include/ceres/version.h b/extern/ceres/include/ceres/version.h
index 50aa2124e75..a76cc1099c5 100644
--- a/extern/ceres/include/ceres/version.h
+++ b/extern/ceres/include/ceres/version.h
@@ -41,8 +41,9 @@
#define CERES_TO_STRING(x) CERES_TO_STRING_HELPER(x)
// The Ceres version as a string; for example "1.9.0".
-#define CERES_VERSION_STRING CERES_TO_STRING(CERES_VERSION_MAJOR) "." \
- CERES_TO_STRING(CERES_VERSION_MINOR) "." \
- CERES_TO_STRING(CERES_VERSION_REVISION)
+#define CERES_VERSION_STRING \
+ CERES_TO_STRING(CERES_VERSION_MAJOR) \
+ "." CERES_TO_STRING(CERES_VERSION_MINOR) "." CERES_TO_STRING( \
+ CERES_VERSION_REVISION)
#endif // CERES_PUBLIC_VERSION_H_
diff --git a/extern/ceres/internal/ceres/accelerate_sparse.cc b/extern/ceres/internal/ceres/accelerate_sparse.cc
index eb04e7113d7..d2b642bf5dc 100644
--- a/extern/ceres/internal/ceres/accelerate_sparse.cc
+++ b/extern/ceres/internal/ceres/accelerate_sparse.cc
@@ -33,18 +33,19 @@
#ifndef CERES_NO_ACCELERATE_SPARSE
-#include "ceres/accelerate_sparse.h"
-
#include <algorithm>
#include <string>
#include <vector>
+#include "ceres/accelerate_sparse.h"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
-#define CASESTR(x) case x: return #x
+#define CASESTR(x) \
+ case x: \
+ return #x
namespace ceres {
namespace internal {
@@ -68,7 +69,7 @@ const char* SparseStatusToString(SparseStatus_t status) {
// aligned to kAccelerateRequiredAlignment and returns a pointer to the
// aligned start.
void* ResizeForAccelerateAlignment(const size_t required_size,
- std::vector<uint8_t> *workspace) {
+ std::vector<uint8_t>* workspace) {
// As per the Accelerate documentation, all workspace memory passed to the
// sparse solver functions must be 16-byte aligned.
constexpr int kAccelerateRequiredAlignment = 16;
@@ -80,29 +81,28 @@ void* ResizeForAccelerateAlignment(const size_t required_size,
size_t size_from_aligned_start = workspace->size();
void* aligned_solve_workspace_start =
reinterpret_cast<void*>(workspace->data());
- aligned_solve_workspace_start =
- std::align(kAccelerateRequiredAlignment,
- required_size,
- aligned_solve_workspace_start,
- size_from_aligned_start);
+ aligned_solve_workspace_start = std::align(kAccelerateRequiredAlignment,
+ required_size,
+ aligned_solve_workspace_start,
+ size_from_aligned_start);
CHECK(aligned_solve_workspace_start != nullptr)
<< "required_size: " << required_size
<< ", workspace size: " << workspace->size();
return aligned_solve_workspace_start;
}
-template<typename Scalar>
+template <typename Scalar>
void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
DenseVector* rhs_and_solution) {
// From SparseSolve() documentation in Solve.h
- const int required_size =
- numeric_factor->solveWorkspaceRequiredStatic +
- numeric_factor->solveWorkspaceRequiredPerRHS;
- SparseSolve(*numeric_factor, *rhs_and_solution,
+ const int required_size = numeric_factor->solveWorkspaceRequiredStatic +
+ numeric_factor->solveWorkspaceRequiredPerRHS;
+ SparseSolve(*numeric_factor,
+ *rhs_and_solution,
ResizeForAccelerateAlignment(required_size, &solve_workspace_));
}
-template<typename Scalar>
+template <typename Scalar>
typename AccelerateSparse<Scalar>::ASSparseMatrix
AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
CompressedRowSparseMatrix* A) {
@@ -112,7 +112,7 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
//
// Accelerate's columnStarts is a long*, not an int*. These types might be
// different (e.g. ARM on iOS) so always make a copy.
- column_starts_.resize(A->num_rows() +1); // +1 for final column length.
+ column_starts_.resize(A->num_rows() + 1); // +1 for final column length.
std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
ASSparseMatrix At;
@@ -136,29 +136,31 @@ AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
return At;
}
-template<typename Scalar>
+template <typename Scalar>
typename AccelerateSparse<Scalar>::SymbolicFactorization
AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
return SparseFactor(SparseFactorizationCholesky, A->structure);
}
-template<typename Scalar>
+template <typename Scalar>
typename AccelerateSparse<Scalar>::NumericFactorization
AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
SymbolicFactorization* symbolic_factor) {
return SparseFactor(*symbolic_factor, *A);
}
-template<typename Scalar>
+template <typename Scalar>
void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
NumericFactorization* numeric_factor) {
// From SparseRefactor() documentation in Solve.h
- const int required_size = std::is_same<Scalar, double>::value
- ? numeric_factor->symbolicFactorization.workspaceSize_Double
- : numeric_factor->symbolicFactorization.workspaceSize_Float;
- return SparseRefactor(*A, numeric_factor,
- ResizeForAccelerateAlignment(required_size,
- &factorization_workspace_));
+ const int required_size =
+ std::is_same<Scalar, double>::value
+ ? numeric_factor->symbolicFactorization.workspaceSize_Double
+ : numeric_factor->symbolicFactorization.workspaceSize_Float;
+ return SparseRefactor(
+ *A,
+ numeric_factor,
+ ResizeForAccelerateAlignment(required_size, &factorization_workspace_));
}
// Instantiate only for the specific template types required/supported s/t the
@@ -166,34 +168,33 @@ void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
template class AccelerateSparse<double>;
template class AccelerateSparse<float>;
-template<typename Scalar>
-std::unique_ptr<SparseCholesky>
-AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
+template <typename Scalar>
+std::unique_ptr<SparseCholesky> AppleAccelerateCholesky<Scalar>::Create(
+ OrderingType ordering_type) {
return std::unique_ptr<SparseCholesky>(
new AppleAccelerateCholesky<Scalar>(ordering_type));
}
-template<typename Scalar>
+template <typename Scalar>
AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
const OrderingType ordering_type)
: ordering_type_(ordering_type) {}
-template<typename Scalar>
+template <typename Scalar>
AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
FreeSymbolicFactorization();
FreeNumericFactorization();
}
-template<typename Scalar>
+template <typename Scalar>
CompressedRowSparseMatrix::StorageType
AppleAccelerateCholesky<Scalar>::StorageType() const {
return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
}
-template<typename Scalar>
-LinearSolverTerminationType
-AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
- std::string* message) {
+template <typename Scalar>
+LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) {
CHECK_EQ(lhs->storage_type(), StorageType());
if (lhs == NULL) {
*message = "Failure: Input lhs is NULL.";
@@ -234,11 +235,9 @@ AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
return LINEAR_SOLVER_SUCCESS;
}
-template<typename Scalar>
-LinearSolverTerminationType
-AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
- double* solution,
- std::string* message) {
+template <typename Scalar>
+LinearSolverTerminationType AppleAccelerateCholesky<Scalar>::Solve(
+ const double* rhs, double* solution, std::string* message) {
CHECK_EQ(numeric_factor_->status, SparseStatusOK)
<< "Solve called without a call to Factorize first ("
<< SparseStatusToString(numeric_factor_->status) << ").";
@@ -262,7 +261,7 @@ AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
return LINEAR_SOLVER_SUCCESS;
}
-template<typename Scalar>
+template <typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
if (symbolic_factor_) {
SparseCleanup(*symbolic_factor_);
@@ -270,7 +269,7 @@ void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
}
}
-template<typename Scalar>
+template <typename Scalar>
void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
if (numeric_factor_) {
SparseCleanup(*numeric_factor_);
@@ -283,7 +282,7 @@ void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
template class AppleAccelerateCholesky<double>;
template class AppleAccelerateCholesky<float>;
-}
-}
+} // namespace internal
+} // namespace ceres
#endif // CERES_NO_ACCELERATE_SPARSE
diff --git a/extern/ceres/internal/ceres/accelerate_sparse.h b/extern/ceres/internal/ceres/accelerate_sparse.h
index 43b4ea5fd70..e53758dfa15 100644
--- a/extern/ceres/internal/ceres/accelerate_sparse.h
+++ b/extern/ceres/internal/ceres/accelerate_sparse.h
@@ -40,9 +40,9 @@
#include <string>
#include <vector>
+#include "Accelerate.h"
#include "ceres/linear_solver.h"
#include "ceres/sparse_cholesky.h"
-#include "Accelerate.h"
namespace ceres {
namespace internal {
@@ -50,11 +50,10 @@ namespace internal {
class CompressedRowSparseMatrix;
class TripletSparseMatrix;
-template<typename Scalar>
-struct SparseTypesTrait {
-};
+template <typename Scalar>
+struct SparseTypesTrait {};
-template<>
+template <>
struct SparseTypesTrait<double> {
typedef DenseVector_Double DenseVector;
typedef SparseMatrix_Double SparseMatrix;
@@ -62,7 +61,7 @@ struct SparseTypesTrait<double> {
typedef SparseOpaqueFactorization_Double NumericFactorization;
};
-template<>
+template <>
struct SparseTypesTrait<float> {
typedef DenseVector_Float DenseVector;
typedef SparseMatrix_Float SparseMatrix;
@@ -70,14 +69,16 @@ struct SparseTypesTrait<float> {
typedef SparseOpaqueFactorization_Float NumericFactorization;
};
-template<typename Scalar>
+template <typename Scalar>
class AccelerateSparse {
public:
using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
// Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
- using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
- using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
+ using SymbolicFactorization =
+ typename SparseTypesTrait<Scalar>::SymbolicFactorization;
+ using NumericFactorization =
+ typename SparseTypesTrait<Scalar>::NumericFactorization;
// Solves a linear system given its symbolic (reference counted within
// NumericFactorization) and numeric factorization.
@@ -109,7 +110,7 @@ class AccelerateSparse {
// An implementation of SparseCholesky interface using Apple's Accelerate
// framework.
-template<typename Scalar>
+template <typename Scalar>
class AppleAccelerateCholesky : public SparseCholesky {
public:
// Factory
@@ -122,7 +123,7 @@ class AppleAccelerateCholesky : public SparseCholesky {
std::string* message) final;
LinearSolverTerminationType Solve(const double* rhs,
double* solution,
- std::string* message) final ;
+ std::string* message) final;
private:
AppleAccelerateCholesky(const OrderingType ordering_type);
@@ -132,15 +133,15 @@ class AppleAccelerateCholesky : public SparseCholesky {
const OrderingType ordering_type_;
AccelerateSparse<Scalar> as_;
std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
- symbolic_factor_;
+ symbolic_factor_;
std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
- numeric_factor_;
+ numeric_factor_;
// Copy of rhs/solution if Scalar != double (necessitating a copy).
Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
};
-}
-}
+} // namespace internal
+} // namespace ceres
#endif // CERES_NO_ACCELERATE_SPARSE
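
For orientation, the Accelerate-backed factorization reformatted above is driven through the generic SparseCholesky interface: Create() selects the ordering, Factorize() consumes a lower-triangular CompressedRowSparseMatrix, and Solve() back-substitutes into a caller-owned buffer. A minimal usage sketch follows; the AMD ordering and the pre-assembled lhs/rhs/solution variables are illustrative assumptions, not something this change introduces.

// Sketch only: assumes `lhs` holds the lower triangle of the system and
// `rhs`/`solution` point to caller-owned arrays of length lhs.num_cols().
std::unique_ptr<SparseCholesky> cholesky =
    AppleAccelerateCholesky<double>::Create(AMD);
std::string message;
if (cholesky->Factorize(&lhs, &message) != LINEAR_SOLVER_SUCCESS ||
    cholesky->Solve(rhs, solution, &message) != LINEAR_SOLVER_SUCCESS) {
  LOG(ERROR) << "Accelerate sparse Cholesky failed: " << message;
}
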
diff --git a/extern/ceres/internal/ceres/array_utils.cc b/extern/ceres/internal/ceres/array_utils.cc
index 32459e6dcd9..6bffd840f4b 100644
--- a/extern/ceres/internal/ceres/array_utils.cc
+++ b/extern/ceres/internal/ceres/array_utils.cc
@@ -35,6 +35,7 @@
#include <cstddef>
#include <string>
#include <vector>
+
#include "ceres/stringprintf.h"
#include "ceres/types.h"
namespace ceres {
@@ -45,7 +46,7 @@ using std::string;
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
- if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
+ if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
}
}
@@ -59,7 +60,7 @@ int FindInvalidValue(const int size, const double* x) {
}
for (int i = 0; i < size; ++i) {
- if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
+ if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return i;
}
}
@@ -92,14 +93,13 @@ void AppendArrayToString(const int size, const double* x, string* result) {
void MapValuesToContiguousRange(const int size, int* array) {
std::vector<int> unique_values(array, array + size);
std::sort(unique_values.begin(), unique_values.end());
- unique_values.erase(std::unique(unique_values.begin(),
- unique_values.end()),
+ unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
unique_values.end());
for (int i = 0; i < size; ++i) {
- array[i] = std::lower_bound(unique_values.begin(),
- unique_values.end(),
- array[i]) - unique_values.begin();
+ array[i] =
+ std::lower_bound(unique_values.begin(), unique_values.end(), array[i]) -
+ unique_values.begin();
}
}
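
The MapValuesToContiguousRange() change above is a pure reflow; the algorithm itself (sort the distinct values, then replace each entry by its rank found with lower_bound) can be shown with a small self-contained sketch, with names chosen here only for illustration.

#include <algorithm>
#include <vector>

// Mirrors MapValuesToContiguousRange(): [13, 2, 7, 2] becomes [2, 0, 1, 0],
// i.e. every entry is replaced by its rank among the distinct values.
void MapToContiguousRange(std::vector<int>* array) {
  std::vector<int> unique_values(*array);
  std::sort(unique_values.begin(), unique_values.end());
  unique_values.erase(std::unique(unique_values.begin(), unique_values.end()),
                      unique_values.end());
  for (int& value : *array) {
    value = std::lower_bound(unique_values.begin(), unique_values.end(),
                             value) -
            unique_values.begin();
  }
}
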
diff --git a/extern/ceres/internal/ceres/array_utils.h b/extern/ceres/internal/ceres/array_utils.h
index 1d55733b7b8..68feca5e792 100644
--- a/extern/ceres/internal/ceres/array_utils.h
+++ b/extern/ceres/internal/ceres/array_utils.h
@@ -44,6 +44,7 @@
#define CERES_INTERNAL_ARRAY_UTILS_H_
#include <string>
+
#include "ceres/internal/port.h"
namespace ceres {
@@ -51,20 +52,22 @@ namespace internal {
// Fill the array x with an impossible value that the user code is
// never expected to compute.
-void InvalidateArray(int size, double* x);
+CERES_EXPORT_INTERNAL void InvalidateArray(int size, double* x);
// Check if all the entries of the array x are valid, i.e. all the
// values in the array should be finite and none of them should be
// equal to the "impossible" value used by InvalidateArray.
-bool IsArrayValid(int size, const double* x);
+CERES_EXPORT_INTERNAL bool IsArrayValid(int size, const double* x);
// If the array contains an invalid value, return the index for it,
// otherwise return size.
-int FindInvalidValue(const int size, const double* x);
+CERES_EXPORT_INTERNAL int FindInvalidValue(const int size, const double* x);
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
-void AppendArrayToString(const int size, const double* x, std::string* result);
+CERES_EXPORT_INTERNAL void AppendArrayToString(const int size,
+ const double* x,
+ std::string* result);
// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
@@ -79,7 +82,7 @@ void AppendArrayToString(const int size, const double* x, std::string* result);
// gets mapped to
//
// [1 0 2 3 0 1 3]
-void MapValuesToContiguousRange(int size, int* array);
+CERES_EXPORT_INTERNAL void MapValuesToContiguousRange(int size, int* array);
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/blas.cc b/extern/ceres/internal/ceres/blas.cc
index 3ba63bbed5a..f8d006e3069 100644
--- a/extern/ceres/internal/ceres/blas.cc
+++ b/extern/ceres/internal/ceres/blas.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/blas.h"
+
#include "ceres/internal/port.h"
#include "glog/logging.h"
diff --git a/extern/ceres/internal/ceres/block_evaluate_preparer.cc b/extern/ceres/internal/ceres/block_evaluate_preparer.cc
index 59c0d3ecc10..7db96d94e0a 100644
--- a/extern/ceres/internal/ceres/block_evaluate_preparer.cc
+++ b/extern/ceres/internal/ceres/block_evaluate_preparer.cc
@@ -31,6 +31,7 @@
#include "ceres/block_evaluate_preparer.h"
#include <vector>
+
#include "ceres/block_sparse_matrix.h"
#include "ceres/casts.h"
#include "ceres/parameter_block.h"
@@ -53,10 +54,8 @@ void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
double** jacobians) {
// If the overall jacobian is not available, use the scratch space.
if (jacobian == NULL) {
- scratch_evaluate_preparer_.Prepare(residual_block,
- residual_block_index,
- jacobian,
- jacobians);
+ scratch_evaluate_preparer_.Prepare(
+ residual_block, residual_block_index, jacobian, jacobians);
return;
}
diff --git a/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc b/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
index 772c7af2ba5..6f37aca553c 100644
--- a/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
+++ b/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
@@ -30,9 +30,9 @@
#include "ceres/block_jacobi_preconditioner.h"
+#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
-#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/casts.h"
#include "ceres/internal/eigen.h"
@@ -65,13 +65,11 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
const int col_block_size = bs->cols[block_id].size;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = m_->GetCell(block_id, block_id,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ m_->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride);
MatrixRef m(cell_info->values, row_stride, col_stride);
- ConstMatrixRef b(values + cells[j].position,
- row_block_size,
- col_block_size);
+ ConstMatrixRef b(
+ values + cells[j].position, row_block_size, col_block_size);
m.block(r, c, col_block_size, col_block_size) += b.transpose() * b;
}
}
@@ -82,9 +80,7 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
for (int i = 0; i < bs->cols.size(); ++i) {
const int block_size = bs->cols[i].size;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = m_->GetCell(i, i,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info = m_->GetCell(i, i, &r, &c, &row_stride, &col_stride);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block_size, block_size).diagonal() +=
ConstVectorRef(D + position, block_size).array().square().matrix();
diff --git a/extern/ceres/internal/ceres/block_jacobi_preconditioner.h b/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
index 856b506e073..18f749533e0 100644
--- a/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
+++ b/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
@@ -32,7 +32,9 @@
#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
#include <memory>
+
#include "ceres/block_random_access_diagonal_matrix.h"
+#include "ceres/internal/port.h"
#include "ceres/preconditioner.h"
namespace ceres {
@@ -51,7 +53,8 @@ struct CompressedRowBlockStructure;
// update the matrix by running Update(A, D). The values of the matrix A are
// inspected to construct the preconditioner. The vector D is applied as the
// D^TD diagonal term.
-class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
+class CERES_EXPORT_INTERNAL BlockJacobiPreconditioner
+ : public BlockSparseMatrixPreconditioner {
public:
// A must remain valid while the BlockJacobiPreconditioner is.
explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
diff --git a/extern/ceres/internal/ceres/block_jacobian_writer.cc b/extern/ceres/internal/ceres/block_jacobian_writer.cc
index 6998bd66e61..17c157b47f9 100644
--- a/extern/ceres/internal/ceres/block_jacobian_writer.cc
+++ b/extern/ceres/internal/ceres/block_jacobian_writer.cc
@@ -32,11 +32,11 @@
#include "ceres/block_evaluate_preparer.h"
#include "ceres/block_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/block_jacobian_writer.h b/extern/ceres/internal/ceres/block_jacobian_writer.h
index c94a0d3f909..8054d7b33aa 100644
--- a/extern/ceres/internal/ceres/block_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/block_jacobian_writer.h
@@ -39,6 +39,7 @@
#define CERES_INTERNAL_BLOCK_JACOBIAN_WRITER_H_
#include <vector>
+
#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
@@ -52,8 +53,7 @@ class SparseMatrix;
// TODO(sameeragarwal): This class needs documentation.
class BlockJacobianWriter {
public:
- BlockJacobianWriter(const Evaluator::Options& options,
- Program* program);
+ BlockJacobianWriter(const Evaluator::Options& options, Program* program);
// JacobianWriter interface.
diff --git a/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc b/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
index f567aa5816b..386f81eae77 100644
--- a/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
@@ -31,6 +31,7 @@
#include "ceres/block_random_access_dense_matrix.h"
#include <vector>
+
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
@@ -59,8 +60,7 @@ BlockRandomAccessDenseMatrix::BlockRandomAccessDenseMatrix(
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
-BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {
-}
+BlockRandomAccessDenseMatrix::~BlockRandomAccessDenseMatrix() {}
CellInfo* BlockRandomAccessDenseMatrix::GetCell(const int row_block_id,
const int col_block_id,
diff --git a/extern/ceres/internal/ceres/block_random_access_dense_matrix.h b/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
index 8c5e2527ec1..9e555242994 100644
--- a/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
@@ -31,11 +31,10 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DENSE_MATRIX_H_
-#include "ceres/block_random_access_matrix.h"
-
#include <memory>
#include <vector>
+#include "ceres/block_random_access_matrix.h"
#include "ceres/internal/port.h"
namespace ceres {
@@ -51,7 +50,8 @@ namespace internal {
// pair.
//
// ReturnCell is a nop.
-class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
+class CERES_EXPORT_INTERNAL BlockRandomAccessDenseMatrix
+ : public BlockRandomAccessMatrix {
public:
// blocks is a vector of block sizes. The resulting matrix has
// blocks.size() * blocks.size() cells.
diff --git a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
index 526d173e4b0..08f6d7f1750 100644
--- a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -63,9 +63,8 @@ BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix(
num_nonzeros += blocks_[i] * blocks_[i];
}
- VLOG(1) << "Matrix Size [" << num_cols
- << "," << num_cols
- << "] " << num_nonzeros;
+ VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
+ << num_nonzeros;
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
tsm_->set_num_nonzeros(num_nonzeros);
@@ -116,8 +115,7 @@ CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id,
// when they are calling SetZero.
void BlockRandomAccessDiagonalMatrix::SetZero() {
if (tsm_->num_nonzeros()) {
- VectorRef(tsm_->mutable_values(),
- tsm_->num_nonzeros()).setZero();
+ VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
}
}
@@ -126,11 +124,8 @@ void BlockRandomAccessDiagonalMatrix::Invert() {
for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];
MatrixRef block(values, block_size, block_size);
- block =
- block
- .selfadjointView<Eigen::Upper>()
- .llt()
- .solve(Matrix::Identity(block_size, block_size));
+ block = block.selfadjointView<Eigen::Upper>().llt().solve(
+ Matrix::Identity(block_size, block_size));
values += block_size * block_size;
}
}
diff --git a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
index 3bda7d19074..3fe7c1e5b22 100644
--- a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
@@ -46,11 +46,13 @@ namespace internal {
// A thread safe block diagonal matrix implementation of
// BlockRandomAccessMatrix.
-class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
+class CERES_EXPORT_INTERNAL BlockRandomAccessDiagonalMatrix
+ : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes.
explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
- BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
+ BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) =
+ delete;
void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
diff --git a/extern/ceres/internal/ceres/block_random_access_matrix.cc b/extern/ceres/internal/ceres/block_random_access_matrix.cc
index 347d765bbca..ea88855b59a 100644
--- a/extern/ceres/internal/ceres/block_random_access_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_matrix.cc
@@ -33,8 +33,7 @@
namespace ceres {
namespace internal {
-BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {
-}
+BlockRandomAccessMatrix::~BlockRandomAccessMatrix() {}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/block_random_access_matrix.h b/extern/ceres/internal/ceres/block_random_access_matrix.h
index 6fcf0dc8a7c..f190622eafe 100644
--- a/extern/ceres/internal/ceres/block_random_access_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_matrix.h
@@ -35,6 +35,8 @@
#include <mutex>
+#include "ceres/internal/port.h"
+
namespace ceres {
namespace internal {
@@ -91,7 +93,7 @@ struct CellInfo {
std::mutex m;
};
-class BlockRandomAccessMatrix {
+class CERES_EXPORT_INTERNAL BlockRandomAccessMatrix {
public:
virtual ~BlockRandomAccessMatrix();
diff --git a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
index 9c164546635..c28b7cef3f4 100644
--- a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
@@ -50,10 +50,8 @@ using std::set;
using std::vector;
BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
- const vector<int>& blocks,
- const set<pair<int, int>>& block_pairs)
- : kMaxRowBlocks(10 * 1000 * 1000),
- blocks_(blocks) {
+ const vector<int>& blocks, const set<pair<int, int>>& block_pairs)
+ : kMaxRowBlocks(10 * 1000 * 1000), blocks_(blocks) {
CHECK_LT(blocks.size(), kMaxRowBlocks);
// Build the row/column layout vector and count the number of scalar
@@ -75,9 +73,8 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
num_nonzeros += row_block_size * col_block_size;
}
- VLOG(1) << "Matrix Size [" << num_cols
- << "," << num_cols
- << "] " << num_nonzeros;
+ VLOG(1) << "Matrix Size [" << num_cols << "," << num_cols << "] "
+ << num_nonzeros;
tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros));
tsm_->set_num_nonzeros(num_nonzeros);
@@ -105,11 +102,11 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
layout_[IntPairToLong(row_block_id, col_block_id)]->values - values;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c, ++pos) {
- rows[pos] = block_positions_[row_block_id] + r;
- cols[pos] = block_positions_[col_block_id] + c;
- values[pos] = 1.0;
- DCHECK_LT(rows[pos], tsm_->num_rows());
- DCHECK_LT(cols[pos], tsm_->num_rows());
+ rows[pos] = block_positions_[row_block_id] + r;
+ cols[pos] = block_positions_[col_block_id] + c;
+ values[pos] = 1.0;
+ DCHECK_LT(rows[pos], tsm_->num_rows());
+ DCHECK_LT(cols[pos], tsm_->num_rows());
}
}
}
@@ -129,7 +126,7 @@ CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
int* col,
int* row_stride,
int* col_stride) {
- const LayoutType::iterator it =
+ const LayoutType::iterator it =
layout_.find(IntPairToLong(row_block_id, col_block_id));
if (it == layout_.end()) {
return NULL;
@@ -147,8 +144,7 @@ CellInfo* BlockRandomAccessSparseMatrix::GetCell(int row_block_id,
// when they are calling SetZero.
void BlockRandomAccessSparseMatrix::SetZero() {
if (tsm_->num_nonzeros()) {
- VectorRef(tsm_->mutable_values(),
- tsm_->num_nonzeros()).setZero();
+ VectorRef(tsm_->mutable_values(), tsm_->num_nonzeros()).setZero();
}
}
@@ -164,7 +160,9 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
const int col_block_pos = block_positions_[col];
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- cell_position_and_data.second, row_block_size, col_block_size,
+ cell_position_and_data.second,
+ row_block_size,
+ col_block_size,
x + col_block_pos,
y + row_block_pos);
@@ -174,7 +172,9 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
// triangular multiply also.
if (row != col) {
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- cell_position_and_data.second, row_block_size, col_block_size,
+ cell_position_and_data.second,
+ row_block_size,
+ col_block_size,
x + row_block_pos,
y + col_block_pos);
}
diff --git a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
index d542a3d64e3..0e58bbb6b42 100644
--- a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
@@ -39,10 +39,10 @@
#include <vector>
#include "ceres/block_random_access_matrix.h"
-#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/port.h"
-#include "ceres/types.h"
#include "ceres/small_blas.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
namespace ceres {
namespace internal {
@@ -51,7 +51,8 @@ namespace internal {
// BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used
// for doing the actual storage. This class augments this matrix with
// an unordered_map that allows random read/write access.
-class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
+class CERES_EXPORT_INTERNAL BlockRandomAccessSparseMatrix
+ : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes. block_pairs is a set of
// <row_block_id, col_block_id> pairs to identify the non-zero cells
@@ -110,7 +111,7 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
- typedef std::unordered_map<long int, CellInfo* > LayoutType;
+ typedef std::unordered_map<long int, CellInfo*> LayoutType;
LayoutType layout_;
// In order traversal of contents of the matrix. This allows us to
diff --git a/extern/ceres/internal/ceres/block_sparse_matrix.cc b/extern/ceres/internal/ceres/block_sparse_matrix.cc
index 8f50f3561e2..5efd2e1ecfb 100644
--- a/extern/ceres/internal/ceres/block_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/block_sparse_matrix.cc
@@ -30,9 +30,10 @@
#include "ceres/block_sparse_matrix.h"
-#include <cstddef>
#include <algorithm>
+#include <cstddef>
#include <vector>
+
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/random.h"
@@ -77,8 +78,8 @@ BlockSparseMatrix::BlockSparseMatrix(
CHECK_GE(num_rows_, 0);
CHECK_GE(num_cols_, 0);
CHECK_GE(num_nonzeros_, 0);
- VLOG(2) << "Allocating values array with "
- << num_nonzeros_ * sizeof(double) << " bytes."; // NOLINT
+ VLOG(2) << "Allocating values array with " << num_nonzeros_ * sizeof(double)
+ << " bytes."; // NOLINT
values_.reset(new double[num_nonzeros_]);
max_num_nonzeros_ = num_nonzeros_;
CHECK(values_ != nullptr);
@@ -88,7 +89,7 @@ void BlockSparseMatrix::SetZero() {
std::fill(values_.get(), values_.get() + num_nonzeros_, 0.0);
}
-void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
+void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
CHECK(x != nullptr);
CHECK(y != nullptr);
@@ -101,7 +102,9 @@ void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values_.get() + cells[j].position, row_block_size, col_block_size,
+ values_.get() + cells[j].position,
+ row_block_size,
+ col_block_size,
x + col_block_pos,
y + row_block_pos);
}
@@ -121,7 +124,9 @@ void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- values_.get() + cells[j].position, row_block_size, col_block_size,
+ values_.get() + cells[j].position,
+ row_block_size,
+ col_block_size,
x + row_block_pos,
y + col_block_pos);
}
@@ -138,8 +143,8 @@ void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
int col_block_id = cells[j].block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
- const MatrixRef m(values_.get() + cells[j].position,
- row_block_size, col_block_size);
+ const MatrixRef m(
+ values_.get() + cells[j].position, row_block_size, col_block_size);
VectorRef(x + col_block_pos, col_block_size) += m.colwise().squaredNorm();
}
}
@@ -155,8 +160,8 @@ void BlockSparseMatrix::ScaleColumns(const double* scale) {
int col_block_id = cells[j].block_id;
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
- MatrixRef m(values_.get() + cells[j].position,
- row_block_size, col_block_size);
+ MatrixRef m(
+ values_.get() + cells[j].position, row_block_size, col_block_size);
m *= ConstVectorRef(scale + col_block_pos, col_block_size).asDiagonal();
}
}
@@ -178,8 +183,8 @@ void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
- m.block(row_block_pos, col_block_pos, row_block_size, col_block_size)
- += MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
+ m.block(row_block_pos, col_block_pos, row_block_size, col_block_size) +=
+ MatrixRef(values_.get() + jac_pos, row_block_size, col_block_size);
}
}
}
@@ -201,7 +206,7 @@ void BlockSparseMatrix::ToTripletSparseMatrix(
int col_block_size = block_structure_->cols[col_block_id].size;
int col_block_pos = block_structure_->cols[col_block_id].position;
int jac_pos = cells[j].position;
- for (int r = 0; r < row_block_size; ++r) {
+ for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c, ++jac_pos) {
matrix->mutable_rows()[jac_pos] = row_block_pos + r;
matrix->mutable_cols()[jac_pos] = col_block_pos + c;
@@ -215,8 +220,7 @@ void BlockSparseMatrix::ToTripletSparseMatrix(
// Return a pointer to the block structure. We continue to hold
// ownership of the object though.
-const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
- const {
+const CompressedRowBlockStructure* BlockSparseMatrix::block_structure() const {
return block_structure_.get();
}
@@ -233,7 +237,8 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
int jac_pos = cells[j].position;
for (int r = 0; r < row_block_size; ++r) {
for (int c = 0; c < col_block_size; ++c) {
- fprintf(file, "% 10d % 10d %17f\n",
+ fprintf(file,
+ "% 10d % 10d %17f\n",
row_block_pos + r,
col_block_pos + c,
values_[jac_pos++]);
@@ -369,7 +374,6 @@ BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
int row_block_position = 0;
int value_position = 0;
for (int r = 0; r < options.num_row_blocks; ++r) {
-
const int delta_block_size =
Uniform(options.max_row_block_size - options.min_row_block_size);
const int row_block_size = options.min_row_block_size + delta_block_size;
diff --git a/extern/ceres/internal/ceres/block_sparse_matrix.h b/extern/ceres/internal/ceres/block_sparse_matrix.h
index d0c255de612..e5b3634c3cc 100644
--- a/extern/ceres/internal/ceres/block_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/block_sparse_matrix.h
@@ -35,9 +35,11 @@
#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
#include <memory>
+
#include "ceres/block_structure.h"
-#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
namespace ceres {
namespace internal {
@@ -52,7 +54,7 @@ class TripletSparseMatrix;
//
// internal/ceres/block_structure.h
//
-class BlockSparseMatrix : public SparseMatrix {
+class CERES_EXPORT_INTERNAL BlockSparseMatrix : public SparseMatrix {
public:
// Construct a block sparse matrix with a fully initialized
// CompressedRowBlockStructure object. The matrix takes over
@@ -77,11 +79,13 @@ class BlockSparseMatrix : public SparseMatrix {
void ToDenseMatrix(Matrix* dense_matrix) const final;
void ToTextFile(FILE* file) const final;
+ // clang-format off
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return num_nonzeros_; }
const double* values() const final { return values_.get(); }
double* mutable_values() final { return values_.get(); }
+ // clang-format on
void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
const CompressedRowBlockStructure* block_structure() const;
@@ -94,8 +98,7 @@ class BlockSparseMatrix : public SparseMatrix {
void DeleteRowBlocks(int delta_row_blocks);
static BlockSparseMatrix* CreateDiagonalMatrix(
- const double* diagonal,
- const std::vector<Block>& column_blocks);
+ const double* diagonal, const std::vector<Block>& column_blocks);
struct RandomMatrixOptions {
int num_row_blocks = 0;
diff --git a/extern/ceres/internal/ceres/block_structure.cc b/extern/ceres/internal/ceres/block_structure.cc
index 6479b60f700..39ba0826dab 100644
--- a/extern/ceres/internal/ceres/block_structure.cc
+++ b/extern/ceres/internal/ceres/block_structure.cc
@@ -35,7 +35,7 @@ namespace internal {
bool CellLessThan(const Cell& lhs, const Cell& rhs) {
if (lhs.block_id == rhs.block_id) {
- return (lhs.position < rhs.position);
+ return (lhs.position < rhs.position);
}
return (lhs.block_id < rhs.block_id);
}
diff --git a/extern/ceres/internal/ceres/block_structure.h b/extern/ceres/internal/ceres/block_structure.h
index b5218c0c2cc..d49d7d3f3a4 100644
--- a/extern/ceres/internal/ceres/block_structure.h
+++ b/extern/ceres/internal/ceres/block_structure.h
@@ -40,6 +40,7 @@
#include <cstdint>
#include <vector>
+
#include "ceres/internal/port.h"
namespace ceres {
diff --git a/extern/ceres/internal/ceres/c_api.cc b/extern/ceres/internal/ceres/c_api.cc
index 2244909131a..251cde42101 100644
--- a/extern/ceres/internal/ceres/c_api.cc
+++ b/extern/ceres/internal/ceres/c_api.cc
@@ -34,9 +34,10 @@
#include "ceres/c_api.h"
-#include <vector>
#include <iostream>
#include <string>
+#include <vector>
+
#include "ceres/cost_function.h"
#include "ceres/loss_function.h"
#include "ceres/problem.h"
@@ -70,8 +71,7 @@ class CallbackCostFunction : public ceres::CostFunction {
int num_residuals,
int num_parameter_blocks,
int* parameter_block_sizes)
- : cost_function_(cost_function),
- user_data_(user_data) {
+ : cost_function_(cost_function), user_data_(user_data) {
set_num_residuals(num_residuals);
for (int i = 0; i < num_parameter_blocks; ++i) {
mutable_parameter_block_sizes()->push_back(parameter_block_sizes[i]);
@@ -81,12 +81,10 @@ class CallbackCostFunction : public ceres::CostFunction {
virtual ~CallbackCostFunction() {}
bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const final {
- return (*cost_function_)(user_data_,
- const_cast<double**>(parameters),
- residuals,
- jacobians);
+ double* residuals,
+ double** jacobians) const final {
+ return (*cost_function_)(
+ user_data_, const_cast<double**>(parameters), residuals, jacobians);
}
private:
@@ -100,7 +98,7 @@ class CallbackLossFunction : public ceres::LossFunction {
public:
explicit CallbackLossFunction(ceres_loss_function_t loss_function,
void* user_data)
- : loss_function_(loss_function), user_data_(user_data) {}
+ : loss_function_(loss_function), user_data_(user_data) {}
void Evaluate(double sq_norm, double* rho) const final {
(*loss_function_)(user_data_, sq_norm, rho);
}
@@ -134,8 +132,8 @@ void ceres_free_stock_loss_function_data(void* loss_function_data) {
void ceres_stock_loss_function(void* user_data,
double squared_norm,
double out[3]) {
- reinterpret_cast<ceres::LossFunction*>(user_data)
- ->Evaluate(squared_norm, out);
+ reinterpret_cast<ceres::LossFunction*>(user_data)->Evaluate(squared_norm,
+ out);
}
ceres_residual_block_id_t* ceres_problem_add_residual_block(
@@ -159,16 +157,15 @@ ceres_residual_block_id_t* ceres_problem_add_residual_block(
ceres::LossFunction* callback_loss_function = NULL;
if (loss_function != NULL) {
- callback_loss_function = new CallbackLossFunction(loss_function,
- loss_function_data);
+ callback_loss_function =
+ new CallbackLossFunction(loss_function, loss_function_data);
}
std::vector<double*> parameter_blocks(parameters,
parameters + num_parameter_blocks);
return reinterpret_cast<ceres_residual_block_id_t*>(
- ceres_problem->AddResidualBlock(callback_cost_function,
- callback_loss_function,
- parameter_blocks));
+ ceres_problem->AddResidualBlock(
+ callback_cost_function, callback_loss_function, parameter_blocks));
}
void ceres_solve(ceres_problem_t* c_problem) {
diff --git a/extern/ceres/internal/ceres/callbacks.cc b/extern/ceres/internal/ceres/callbacks.cc
index 84576e40e7d..0e0df9d91b1 100644
--- a/extern/ceres/internal/ceres/callbacks.cc
+++ b/extern/ceres/internal/ceres/callbacks.cc
@@ -28,8 +28,10 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include <iostream> // NO LINT
#include "ceres/callbacks.h"
+
+#include <iostream> // NO LINT
+
#include "ceres/program.h"
#include "ceres/stringprintf.h"
#include "glog/logging.h"
@@ -76,8 +78,7 @@ CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
LoggingCallback::LoggingCallback(const MinimizerType minimizer_type,
const bool log_to_stdout)
- : minimizer_type(minimizer_type),
- log_to_stdout_(log_to_stdout) {}
+ : minimizer_type(minimizer_type), log_to_stdout_(log_to_stdout) {}
LoggingCallback::~LoggingCallback() {}
@@ -99,11 +100,13 @@ CallbackReturnType LoggingCallback::operator()(
summary.iteration_time_in_seconds,
summary.cumulative_time_in_seconds);
} else if (minimizer_type == TRUST_REGION) {
+ // clang-format off
if (summary.iteration == 0) {
output = "iter cost cost_change |gradient| |step| tr_ratio tr_radius ls_iter iter_time total_time\n"; // NOLINT
}
const char* kReportRowFormat =
"% 4d % 8e % 3.2e % 3.2e % 3.2e % 3.2e % 3.2e % 4d % 3.2e % 3.2e"; // NOLINT
+ // clang-format on
output += StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
diff --git a/extern/ceres/internal/ceres/callbacks.h b/extern/ceres/internal/ceres/callbacks.h
index d68bf7f6301..47112b88fd8 100644
--- a/extern/ceres/internal/ceres/callbacks.h
+++ b/extern/ceres/internal/ceres/callbacks.h
@@ -32,8 +32,9 @@
#define CERES_INTERNAL_CALLBACKS_H_
#include <string>
-#include "ceres/iteration_callback.h"
+
#include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
namespace ceres {
namespace internal {
@@ -47,6 +48,7 @@ class StateUpdatingCallback : public IterationCallback {
StateUpdatingCallback(Program* program, double* parameters);
virtual ~StateUpdatingCallback();
CallbackReturnType operator()(const IterationSummary& summary) final;
+
private:
Program* program_;
double* parameters_;
@@ -61,6 +63,7 @@ class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
double* user_parameters);
virtual ~GradientProblemSolverStateUpdatingCallback();
CallbackReturnType operator()(const IterationSummary& summary) final;
+
private:
int num_parameters_;
const double* internal_parameters_;
diff --git a/extern/ceres/internal/ceres/canonical_views_clustering.cc b/extern/ceres/internal/ceres/canonical_views_clustering.cc
index e927e1f91be..c193735f106 100644
--- a/extern/ceres/internal/ceres/canonical_views_clustering.cc
+++ b/extern/ceres/internal/ceres/canonical_views_clustering.cc
@@ -31,8 +31,8 @@
#include "ceres/canonical_views_clustering.h"
-#include <unordered_set>
#include <unordered_map>
+#include <unordered_set>
#include "ceres/graph.h"
#include "ceres/map_util.h"
@@ -126,8 +126,7 @@ void CanonicalViewsClustering::ComputeClustering(
// Add canonical view if quality improves, or if minimum is not
// yet met, otherwise break.
- if ((best_difference <= 0) &&
- (centers->size() >= options_.min_views)) {
+ if ((best_difference <= 0) && (centers->size() >= options_.min_views)) {
break;
}
@@ -141,8 +140,7 @@ void CanonicalViewsClustering::ComputeClustering(
// Return the set of vertices of the graph which have valid vertex
// weights.
-void CanonicalViewsClustering::FindValidViews(
- IntSet* valid_views) const {
+void CanonicalViewsClustering::FindValidViews(IntSet* valid_views) const {
const IntSet& views = graph_->vertices();
for (const auto& view : views) {
if (graph_->VertexWeight(view) != WeightedGraph<int>::InvalidWeight()) {
@@ -154,8 +152,7 @@ void CanonicalViewsClustering::FindValidViews(
// Computes the difference in the quality score if 'candidate' were
// added to the set of canonical views.
double CanonicalViewsClustering::ComputeClusteringQualityDifference(
- const int candidate,
- const vector<int>& centers) const {
+ const int candidate, const vector<int>& centers) const {
// View score.
double difference =
options_.view_score_weight * graph_->VertexWeight(candidate);
@@ -179,7 +176,7 @@ double CanonicalViewsClustering::ComputeClusteringQualityDifference(
// Orthogonality.
for (int i = 0; i < centers.size(); ++i) {
difference -= options_.similarity_penalty_weight *
- graph_->EdgeWeight(centers[i], candidate);
+ graph_->EdgeWeight(centers[i], candidate);
}
return difference;
@@ -192,8 +189,7 @@ void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
for (const auto& neighbor : neighbors) {
const double old_similarity =
FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
- const double new_similarity =
- graph_->EdgeWeight(neighbor, canonical_view);
+ const double new_similarity = graph_->EdgeWeight(neighbor, canonical_view);
if (new_similarity > old_similarity) {
view_to_canonical_view_[neighbor] = canonical_view;
view_to_canonical_view_similarity_[neighbor] = new_similarity;
@@ -203,8 +199,7 @@ void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
// Assign a cluster id to each view.
void CanonicalViewsClustering::ComputeClusterMembership(
- const vector<int>& centers,
- IntMap* membership) const {
+ const vector<int>& centers, IntMap* membership) const {
CHECK(membership != nullptr);
membership->clear();
diff --git a/extern/ceres/internal/ceres/canonical_views_clustering.h b/extern/ceres/internal/ceres/canonical_views_clustering.h
index 630adfed717..465233ddfcd 100644
--- a/extern/ceres/internal/ceres/canonical_views_clustering.h
+++ b/extern/ceres/internal/ceres/canonical_views_clustering.h
@@ -45,6 +45,7 @@
#include <vector>
#include "ceres/graph.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -94,13 +95,13 @@ struct CanonicalViewsClusteringOptions;
// It is possible depending on the configuration of the clustering
// algorithm that some of the vertices may not be assigned to any
// cluster. In this case they are assigned to a cluster with id = -1;
-void ComputeCanonicalViewsClustering(
+CERES_EXPORT_INTERNAL void ComputeCanonicalViewsClustering(
const CanonicalViewsClusteringOptions& options,
const WeightedGraph<int>& graph,
std::vector<int>* centers,
std::unordered_map<int, int>* membership);
-struct CanonicalViewsClusteringOptions {
+struct CERES_EXPORT_INTERNAL CanonicalViewsClusteringOptions {
// The minimum number of canonical views to compute.
int min_views = 3;
diff --git a/extern/ceres/internal/ceres/casts.h b/extern/ceres/internal/ceres/casts.h
index f18fdea2d86..d13707131c2 100644
--- a/extern/ceres/internal/ceres/casts.h
+++ b/extern/ceres/internal/ceres/casts.h
@@ -56,15 +56,15 @@ struct identity_ {
//
// base::identity_ is used to make a non-deduced context, which
// forces all callers to explicitly specify the template argument.
-template<typename To>
+template <typename To>
inline To implicit_cast(typename identity_<To>::type to) {
return to;
}
// This version of implicit_cast is used when two template arguments
// are specified. It's obsolete and should not be used.
-template<typename To, typename From>
-inline To implicit_cast(typename identity_<From>::type const &f) {
+template <typename To, typename From>
+inline To implicit_cast(typename identity_<From>::type const& f) {
return f;
}
@@ -86,8 +86,8 @@ inline To implicit_cast(typename identity_<From>::type const &f) {
// if (dynamic_cast<Subclass2>(foo)) HandleASubclass2Object(foo);
// You should design the code some other way not to need this.
-template<typename To, typename From> // use like this: down_cast<T*>(foo);
-inline To down_cast(From* f) { // so we only accept pointers
+template <typename To, typename From> // use like this: down_cast<T*>(foo);
+inline To down_cast(From* f) { // so we only accept pointers
// Ensures that To is a sub-type of From *. This test is here only
// for compile-time type checking, and has no overhead in an
// optimized build at run-time, as it will be optimized away
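
The identity_ indirection touched above exists so that To is a non-deduced context and every caller of implicit_cast must spell out the destination type. A short usage sketch, assuming the hypothetical types Base and Derived:

struct Base {};
struct Derived : Base {};

Derived d;
Base* b = implicit_cast<Base*>(&d);  // OK: Derived* -> Base* is implicit.
// implicit_cast<Derived*>(b);       // Does not compile: downcasting is not
//                                   // implicit; that is what down_cast is for.
// implicit_cast(&d);                // Does not compile: To cannot be deduced.
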
diff --git a/extern/ceres/internal/ceres/cgnr_linear_operator.h b/extern/ceres/internal/ceres/cgnr_linear_operator.h
index 8e8febcc934..beb8bbc2c2a 100644
--- a/extern/ceres/internal/ceres/cgnr_linear_operator.h
+++ b/extern/ceres/internal/ceres/cgnr_linear_operator.h
@@ -33,8 +33,9 @@
#include <algorithm>
#include <memory>
-#include "ceres/linear_operator.h"
+
#include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
namespace ceres {
namespace internal {
@@ -79,9 +80,8 @@ class SparseMatrix;
// Note: This class is not thread safe, since it uses some temporary storage.
class CgnrLinearOperator : public LinearOperator {
public:
- CgnrLinearOperator(const LinearOperator& A, const double *D)
- : A_(A), D_(D), z_(new double[A.num_rows()]) {
- }
+ CgnrLinearOperator(const LinearOperator& A, const double* D)
+ : A_(A), D_(D), z_(new double[A.num_rows()]) {}
virtual ~CgnrLinearOperator() {}
void RightMultiply(const double* x, double* y) const final {
@@ -96,8 +96,8 @@ class CgnrLinearOperator : public LinearOperator {
// y = y + DtDx
if (D_ != NULL) {
int n = A_.num_cols();
- VectorRef(y, n).array() += ConstVectorRef(D_, n).array().square() *
- ConstVectorRef(x, n).array();
+ VectorRef(y, n).array() +=
+ ConstVectorRef(D_, n).array().square() * ConstVectorRef(x, n).array();
}
}
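
As the RightMultiply body above suggests, CgnrLinearOperator applies the regularized normal-equations operator y += (A^T A + D^T D) x without ever forming A^T A. A dense Eigen sketch of the same operation, purely illustrative:

#include <Eigen/Dense>

// y += (A^T A + D^T D) x, computed as A^T (A x) plus an elementwise term,
// mirroring CgnrLinearOperator::RightMultiply on dense inputs.
void ApplyCgnrOperator(const Eigen::MatrixXd& A,
                       const Eigen::VectorXd& D,
                       const Eigen::VectorXd& x,
                       Eigen::VectorXd* y) {
  *y += A.transpose() * (A * x);
  y->array() += D.array().square() * x.array();
}
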
diff --git a/extern/ceres/internal/ceres/cgnr_solver.h b/extern/ceres/internal/ceres/cgnr_solver.h
index 52927333daa..bc701c0e9ed 100644
--- a/extern/ceres/internal/ceres/cgnr_solver.h
+++ b/extern/ceres/internal/ceres/cgnr_solver.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_CGNR_SOLVER_H_
#include <memory>
+
#include "ceres/linear_solver.h"
namespace ceres {
@@ -55,11 +56,10 @@ class CgnrSolver : public BlockSparseMatrixSolver {
void operator=(const CgnrSolver&) = delete;
virtual ~CgnrSolver();
- Summary SolveImpl(
- BlockSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* x) final;
+ Summary SolveImpl(BlockSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) final;
private:
const LinearSolver::Options options_;
diff --git a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
index 3f6672f858c..e1f6bb8ff9d 100644
--- a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
+++ b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
@@ -30,8 +30,9 @@
#include "ceres/compressed_col_sparse_matrix_utils.h"
-#include <vector>
#include <algorithm>
+#include <vector>
+
#include "ceres/internal/port.h"
#include "glog/logging.h"
@@ -40,13 +41,12 @@ namespace internal {
using std::vector;
-void CompressedColumnScalarMatrixToBlockMatrix(
- const int* scalar_rows,
- const int* scalar_cols,
- const vector<int>& row_blocks,
- const vector<int>& col_blocks,
- vector<int>* block_rows,
- vector<int>* block_cols) {
+void CompressedColumnScalarMatrixToBlockMatrix(const int* scalar_rows,
+ const int* scalar_cols,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* block_rows,
+ vector<int>* block_cols) {
CHECK(block_rows != nullptr);
CHECK(block_cols != nullptr);
block_rows->clear();
@@ -71,10 +71,8 @@ void CompressedColumnScalarMatrixToBlockMatrix(
for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
int column_size = 0;
for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
- vector<int>::const_iterator it =
- std::lower_bound(row_block_starts.begin(),
- row_block_starts.end(),
- scalar_rows[idx]);
+ vector<int>::const_iterator it = std::lower_bound(
+ row_block_starts.begin(), row_block_starts.end(), scalar_rows[idx]);
// Since we are using lower_bound, it will return the row id
// where the row block starts. For everything but the first row
// of the block, where these values will be the same, we can
@@ -104,7 +102,7 @@ void BlockOrderingToScalarOrdering(const vector<int>& blocks,
// block_starts = [0, block1, block1 + block2 ..]
vector<int> block_starts(num_blocks);
- for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
+ for (int i = 0, cursor = 0; i < num_blocks; ++i) {
block_starts[i] = cursor;
cursor += blocks[i];
}
diff --git a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.h b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.h
index da2109fba3e..d442e1a9bb8 100644
--- a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.h
+++ b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_COMPRESSED_COL_SPARSE_MATRIX_UTILS_H_
#include <vector>
+
#include "ceres/internal/port.h"
namespace ceres {
@@ -47,7 +48,7 @@ namespace internal {
// and column block j, then it is expected that A contains at least
// one non-zero entry corresponding to the top left entry of c_ij,
// as that entry is used to detect the presence of a non-zero c_ij.
-void CompressedColumnScalarMatrixToBlockMatrix(
+CERES_EXPORT_INTERNAL void CompressedColumnScalarMatrixToBlockMatrix(
const int* scalar_rows,
const int* scalar_cols,
const std::vector<int>& row_blocks,
@@ -58,7 +59,7 @@ void CompressedColumnScalarMatrixToBlockMatrix(
// Given a set of blocks and a permutation of these blocks, compute
// the corresponding "scalar" ordering, where the scalar ordering is of
// size sum(blocks).
-void BlockOrderingToScalarOrdering(
+CERES_EXPORT_INTERNAL void BlockOrderingToScalarOrdering(
const std::vector<int>& blocks,
const std::vector<int>& block_ordering,
std::vector<int>* scalar_ordering);
@@ -101,7 +102,7 @@ void SolveUpperTriangularTransposeInPlace(IntegerType num_cols,
const double v = values[idx];
rhs_and_solution[c] -= v * rhs_and_solution[r];
}
- rhs_and_solution[c] = rhs_and_solution[c] / values[cols[c + 1] - 1];
+ rhs_and_solution[c] = rhs_and_solution[c] / values[cols[c + 1] - 1];
}
}
@@ -132,7 +133,7 @@ void SolveRTRWithSparseRHS(IntegerType num_cols,
const double v = values[idx];
solution[c] -= v * solution[r];
}
- solution[c] = solution[c] / values[cols[c + 1] - 1];
+ solution[c] = solution[c] / values[cols[c + 1] - 1];
}
SolveUpperTriangularInPlace(num_cols, rows, cols, values, solution);
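
BlockOrderingToScalarOrdering, whose declaration gains CERES_EXPORT_INTERNAL above, expands a permutation of blocks into a permutation of the underlying scalar indices. A worked example with sizes chosen only for illustration: for blocks = {2, 3, 1} and block_ordering = {2, 0, 1}, the block starts are {0, 2, 5}, so the resulting scalar ordering is {5, 0, 1, 2, 3, 4}.
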
diff --git a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
index 1fc0116815c..8e7e3e7e7e6 100644
--- a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
@@ -44,23 +44,21 @@
namespace ceres {
namespace internal {
+using std::adjacent_find;
using std::make_pair;
using std::pair;
using std::vector;
-using std::adjacent_find;
void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
const Program* program, CompressedRowSparseMatrix* jacobian) {
- const vector<ParameterBlock*>& parameter_blocks =
- program->parameter_blocks();
+ const vector<ParameterBlock*>& parameter_blocks = program->parameter_blocks();
vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
col_blocks.resize(parameter_blocks.size());
for (int i = 0; i < parameter_blocks.size(); ++i) {
col_blocks[i] = parameter_blocks[i]->LocalSize();
}
- const vector<ResidualBlock*>& residual_blocks =
- program->residual_blocks();
+ const vector<ResidualBlock*>& residual_blocks = program->residual_blocks();
vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
row_blocks.resize(residual_blocks.size());
for (int i = 0; i < residual_blocks.size(); ++i) {
@@ -69,11 +67,10 @@ void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
}
void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
- const Program* program,
- int residual_id,
- vector<pair<int, int>>* evaluated_jacobian_blocks) {
- const ResidualBlock* residual_block =
- program->residual_blocks()[residual_id];
+ const Program* program,
+ int residual_id,
+ vector<pair<int, int>>* evaluated_jacobian_blocks) {
+ const ResidualBlock* residual_block = program->residual_blocks()[residual_id];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
@@ -88,8 +85,7 @@ void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
}
SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
- const vector<ResidualBlock*>& residual_blocks =
- program_->residual_blocks();
+ const vector<ResidualBlock*>& residual_blocks = program_->residual_blocks();
int total_num_residuals = program_->NumResiduals();
int total_num_effective_parameters = program_->NumEffectiveParameters();
@@ -112,11 +108,10 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// Allocate more space than needed to store the jacobian so that when the LM
// algorithm adds the diagonal, no reallocation is necessary. This reduces
// peak memory usage significantly.
- CompressedRowSparseMatrix* jacobian =
- new CompressedRowSparseMatrix(
- total_num_residuals,
- total_num_effective_parameters,
- num_jacobian_nonzeros + total_num_effective_parameters);
+ CompressedRowSparseMatrix* jacobian = new CompressedRowSparseMatrix(
+ total_num_residuals,
+ total_num_effective_parameters,
+ num_jacobian_nonzeros + total_num_effective_parameters);
// At this stage, the CompressedRowSparseMatrix is an invalid state. But this
// seems to be the only way to construct it without doing a memory copy.
@@ -148,8 +143,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
std::string parameter_block_description;
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
- parameter_block_description +=
- parameter_block->ToString() + "\n";
+ parameter_block_description += parameter_block->ToString() + "\n";
}
LOG(FATAL) << "Ceres internal error: "
<< "Duplicate parameter blocks detected in a cost function. "
@@ -196,7 +190,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
void CompressedRowJacobianWriter::Write(int residual_id,
int residual_offset,
- double **jacobians,
+ double** jacobians,
SparseMatrix* base_jacobian) {
CompressedRowSparseMatrix* jacobian =
down_cast<CompressedRowSparseMatrix*>(base_jacobian);
diff --git a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
index 9fb414e2519..b1251ca5cf5 100644
--- a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
@@ -50,8 +50,7 @@ class CompressedRowJacobianWriter {
public:
CompressedRowJacobianWriter(Evaluator::Options /* ignored */,
Program* program)
- : program_(program) {
- }
+ : program_(program) {}
// PopulateJacobianRowAndColumnBlockVectors sets col_blocks and
// row_blocks for a CompressedRowSparseMatrix, based on the
@@ -64,8 +63,7 @@ class CompressedRowJacobianWriter {
// (Jacobian writers do not fall under any type hierarchy; they only
// have to provide an interface as specified in program_evaluator.h).
static void PopulateJacobianRowAndColumnBlockVectors(
- const Program* program,
- CompressedRowSparseMatrix* jacobian);
+ const Program* program, CompressedRowSparseMatrix* jacobian);
// It is necessary to determine the order of the jacobian blocks
// before copying them into a CompressedRowSparseMatrix (or derived
@@ -99,7 +97,7 @@ class CompressedRowJacobianWriter {
void Write(int residual_id,
int residual_offset,
- double **jacobians,
+ double** jacobians,
SparseMatrix* base_jacobian);
private:
diff --git a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
index e56de16bf92..900586c2c45 100644
--- a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
@@ -33,6 +33,7 @@
#include <algorithm>
#include <numeric>
#include <vector>
+
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/random.h"
diff --git a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
index 758b40bbc8a..0a1b945193d 100644
--- a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
+
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -45,7 +46,7 @@ namespace internal {
class TripletSparseMatrix;
-class CompressedRowSparseMatrix : public SparseMatrix {
+class CERES_EXPORT_INTERNAL CompressedRowSparseMatrix : public SparseMatrix {
public:
enum StorageType {
UNSYMMETRIC,
diff --git a/extern/ceres/internal/ceres/concurrent_queue.h b/extern/ceres/internal/ceres/concurrent_queue.h
index 52e2903022b..a04d147c5c0 100644
--- a/extern/ceres/internal/ceres/concurrent_queue.h
+++ b/extern/ceres/internal/ceres/concurrent_queue.h
@@ -152,7 +152,6 @@ class ConcurrentQueue {
bool wait_;
};
-
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/conditioned_cost_function.cc b/extern/ceres/internal/ceres/conditioned_cost_function.cc
index d933ad7c462..fb4c52af084 100644
--- a/extern/ceres/internal/ceres/conditioned_cost_function.cc
+++ b/extern/ceres/internal/ceres/conditioned_cost_function.cc
@@ -68,7 +68,8 @@ ConditionedCostFunction::ConditionedCostFunction(
ConditionedCostFunction::~ConditionedCostFunction() {
if (ownership_ == TAKE_OWNERSHIP) {
- STLDeleteUniqueContainerPointers(conditioners_.begin(), conditioners_.end());
+ STLDeleteUniqueContainerPointers(conditioners_.begin(),
+ conditioners_.end());
} else {
wrapped_cost_function_.release();
}
@@ -77,8 +78,8 @@ ConditionedCostFunction::~ConditionedCostFunction() {
bool ConditionedCostFunction::Evaluate(double const* const* parameters,
double* residuals,
double** jacobians) const {
- bool success = wrapped_cost_function_->Evaluate(parameters, residuals,
- jacobians);
+ bool success =
+ wrapped_cost_function_->Evaluate(parameters, residuals, jacobians);
if (!success) {
return false;
}
@@ -102,9 +103,8 @@ bool ConditionedCostFunction::Evaluate(double const* const* parameters,
double unconditioned_residual = residuals[r];
double* parameter_pointer = &unconditioned_residual;
- success = conditioners_[r]->Evaluate(&parameter_pointer,
- &residuals[r],
- conditioner_derivative_pointer2);
+ success = conditioners_[r]->Evaluate(
+ &parameter_pointer, &residuals[r], conditioner_derivative_pointer2);
if (!success) {
return false;
}
@@ -117,7 +117,8 @@ bool ConditionedCostFunction::Evaluate(double const* const* parameters,
int parameter_block_size =
wrapped_cost_function_->parameter_block_sizes()[i];
VectorRef jacobian_row(jacobians[i] + r * parameter_block_size,
- parameter_block_size, 1);
+ parameter_block_size,
+ 1);
jacobian_row *= conditioner_derivative;
}
}
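For context, the ConditionedCostFunction hunks above only re-wrap long call sites; the operation itself passes each wrapped residual through its own one-dimensional conditioner and scales the matching Jacobian row by the conditioner's derivative (chain rule). A minimal self-contained sketch of that per-residual transformation, using a hypothetical affine conditioner and plain arrays instead of the ceres::CostFunction interface:

    #include <cstdio>
    #include <vector>

    int main() {
      // One residual with a Jacobian row over three parameters (illustrative values).
      double residual = 2.0;
      std::vector<double> jacobian_row = {0.5, -1.0, 3.0};

      // Hypothetical conditioner c(r) = a * r + b with derivative c'(r) = a.
      const double a = 0.25, b = 0.1;
      const double conditioner_derivative = a;

      // Condition the residual, then scale the Jacobian row by the chain rule,
      // mirroring what ConditionedCostFunction::Evaluate does per residual.
      residual = a * residual + b;
      for (double& j : jacobian_row) j *= conditioner_derivative;

      std::printf("conditioned residual = %f\n", residual);
      for (double j : jacobian_row) std::printf("jacobian entry = %f\n", j);
      return 0;
    }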
diff --git a/extern/ceres/internal/ceres/conjugate_gradients_solver.cc b/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
index c6f85c15ea0..3019628a16c 100644
--- a/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
+++ b/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
@@ -41,6 +41,7 @@
#include <cmath>
#include <cstddef>
+
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
#include "ceres/stringprintf.h"
@@ -51,16 +52,13 @@ namespace ceres {
namespace internal {
namespace {
-bool IsZeroOrInfinity(double x) {
- return ((x == 0.0) || std::isinf(x));
-}
+bool IsZeroOrInfinity(double x) { return ((x == 0.0) || std::isinf(x)); }
} // namespace
ConjugateGradientsSolver::ConjugateGradientsSolver(
const LinearSolver::Options& options)
- : options_(options) {
-}
+ : options_(options) {}
LinearSolver::Summary ConjugateGradientsSolver::Solve(
LinearOperator* A,
@@ -137,7 +135,10 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message = StringPrintf(
"Numerical failure. beta = rho_n / rho_{n-1} = %e, "
- "rho_n = %e, rho_{n-1} = %e", beta, rho, last_rho);
+ "rho_n = %e, rho_{n-1} = %e",
+ beta,
+ rho,
+ last_rho);
break;
}
p = z + beta * p;
@@ -152,16 +153,20 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
summary.message = StringPrintf(
"Matrix is indefinite, no more progress can be made. "
"p'q = %e. |p| = %e, |q| = %e",
- pq, p.norm(), q.norm());
+ pq,
+ p.norm(),
+ q.norm());
break;
}
const double alpha = rho / pq;
if (std::isinf(alpha)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message =
- StringPrintf("Numerical failure. alpha = rho / pq = %e, "
- "rho = %e, pq = %e.", alpha, rho, pq);
+ summary.message = StringPrintf(
+ "Numerical failure. alpha = rho / pq = %e, rho = %e, pq = %e.",
+ alpha,
+ rho,
+ pq);
break;
}
@@ -223,7 +228,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
Q0 = Q1;
// Residual based termination.
- norm_r = r. norm();
+ norm_r = r.norm();
if (norm_r <= tol_r &&
summary.num_iterations >= options_.min_num_iterations) {
summary.termination_type = LINEAR_SOLVER_SUCCESS;
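The reflowed StringPrintf calls above surround the usual conjugate-gradients failure checks: beta = rho_n / rho_{n-1}, alpha = rho / p'q, and the residual-based termination test. A minimal sketch of the same checks in a plain, unpreconditioned CG loop on a small SPD system, assuming Eigen and illustrative values (the real solver works through the LinearOperator and preconditioner interfaces):

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    int main() {
      // Small SPD system A x = b (illustrative values).
      Eigen::Matrix3d A;
      A << 4, 1, 0,
           1, 3, 1,
           0, 1, 2;
      Eigen::Vector3d b(1, 2, 3);

      Eigen::Vector3d x = Eigen::Vector3d::Zero();
      Eigen::Vector3d r = b - A * x;
      Eigen::Vector3d p = r;
      double rho = r.squaredNorm();
      const double tol_r = 1e-10 * b.norm();

      for (int iter = 0; iter < 100; ++iter) {
        const Eigen::Vector3d q = A * p;
        const double pq = p.dot(q);
        if (pq <= 0.0 || std::isinf(rho / pq)) break;  // indefinite / numerical failure
        const double alpha = rho / pq;
        x += alpha * p;
        r -= alpha * q;
        const double last_rho = rho;
        rho = r.squaredNorm();
        if (r.norm() <= tol_r) break;                  // residual-based termination
        const double beta = rho / last_rho;
        if (rho == 0.0 || std::isinf(beta)) break;     // numerical failure
        p = r + beta * p;
      }
      std::cout << "x = " << x.transpose() << "\n";
      return 0;
    }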
diff --git a/extern/ceres/internal/ceres/conjugate_gradients_solver.h b/extern/ceres/internal/ceres/conjugate_gradients_solver.h
index d89383e6359..f79ca496531 100644
--- a/extern/ceres/internal/ceres/conjugate_gradients_solver.h
+++ b/extern/ceres/internal/ceres/conjugate_gradients_solver.h
@@ -34,6 +34,7 @@
#ifndef CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
+#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
namespace ceres {
@@ -54,7 +55,7 @@ class LinearOperator;
// For more details see the documentation for
// LinearSolver::PerSolveOptions::r_tolerance and
// LinearSolver::PerSolveOptions::q_tolerance in linear_solver.h.
-class ConjugateGradientsSolver : public LinearSolver {
+class CERES_EXPORT_INTERNAL ConjugateGradientsSolver : public LinearSolver {
public:
explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
Summary Solve(LinearOperator* A,
diff --git a/extern/ceres/internal/ceres/context.cc b/extern/ceres/internal/ceres/context.cc
index e2232013b4b..55e76351219 100644
--- a/extern/ceres/internal/ceres/context.cc
+++ b/extern/ceres/internal/ceres/context.cc
@@ -34,8 +34,6 @@
namespace ceres {
-Context* Context::Create() {
- return new internal::ContextImpl();
-}
+Context* Context::Create() { return new internal::ContextImpl(); }
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/context_impl.cc b/extern/ceres/internal/ceres/context_impl.cc
index 622f33a9dc0..20fe5cbab2a 100644
--- a/extern/ceres/internal/ceres/context_impl.cc
+++ b/extern/ceres/internal/ceres/context_impl.cc
@@ -37,7 +37,6 @@ void ContextImpl::EnsureMinimumThreads(int num_threads) {
#ifdef CERES_USE_CXX_THREADS
thread_pool.Resize(num_threads);
#endif // CERES_USE_CXX_THREADS
-
}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/context_impl.h b/extern/ceres/internal/ceres/context_impl.h
index 5c03ad71bab..574d1efcc6d 100644
--- a/extern/ceres/internal/ceres/context_impl.h
+++ b/extern/ceres/internal/ceres/context_impl.h
@@ -32,7 +32,9 @@
#define CERES_INTERNAL_CONTEXT_IMPL_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on

#include "ceres/context.h"
@@ -43,7 +45,7 @@
namespace ceres {
namespace internal {
-class ContextImpl : public Context {
+class CERES_EXPORT_INTERNAL ContextImpl : public Context {
public:
ContextImpl() {}
ContextImpl(const ContextImpl&) = delete;
diff --git a/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc b/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
index c5d56f30bc3..93096ac0728 100644
--- a/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
+++ b/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
@@ -64,8 +64,7 @@ CoordinateDescentMinimizer::CoordinateDescentMinimizer(ContextImpl* context)
CHECK(context_ != nullptr);
}
-CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
-}
+CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {}
bool CoordinateDescentMinimizer::Init(
const Program& program,
@@ -82,13 +81,13 @@ bool CoordinateDescentMinimizer::Init(
map<int, set<double*>> group_to_elements = ordering.group_to_elements();
for (const auto& g_t_e : group_to_elements) {
const auto& elements = g_t_e.second;
- for (double* parameter_block: elements) {
+ for (double* parameter_block : elements) {
parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
parameter_block_index[parameter_blocks_.back()] =
parameter_blocks_.size() - 1;
}
- independent_set_offsets_.push_back(
- independent_set_offsets_.back() + elements.size());
+ independent_set_offsets_.push_back(independent_set_offsets_.back() +
+ elements.size());
}
// The ordering does not have to contain all parameter blocks, so
@@ -126,10 +125,9 @@ bool CoordinateDescentMinimizer::Init(
return true;
}
-void CoordinateDescentMinimizer::Minimize(
- const Minimizer::Options& options,
- double* parameters,
- Solver::Summary* summary) {
+void CoordinateDescentMinimizer::Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) {
// Set the state and mark all parameter blocks constant.
for (int i = 0; i < parameter_blocks_.size(); ++i) {
ParameterBlock* parameter_block = parameter_blocks_[i];
@@ -202,7 +200,7 @@ void CoordinateDescentMinimizer::Minimize(
});
}
- for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
parameter_blocks_[i]->SetVarying();
}
@@ -251,10 +249,10 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
// Verify that each group is an independent set
for (const auto& g_t_e : group_to_elements) {
if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
- *message =
- StringPrintf("The user-provided "
- "parameter_blocks_for_inner_iterations does not "
- "form an independent set. Group Id: %d", g_t_e.first);
+ *message = StringPrintf(
+ "The user-provided parameter_blocks_for_inner_iterations does not "
+ "form an independent set. Group Id: %d",
+ g_t_e.first);
return false;
}
}
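The loop reflowed in CoordinateDescentMinimizer::Init above records cumulative offsets so that the parameter blocks of independent set k occupy indices [offsets[k], offsets[k+1]). A small sketch of that bookkeeping with hypothetical groups:

    #include <cstdio>
    #include <map>
    #include <set>
    #include <vector>

    int main() {
      // Hypothetical ordering: group id -> parameter blocks in that group.
      std::map<int, std::set<const double*>> group_to_elements;
      double a, b, c, d, e;
      group_to_elements[0] = {&a, &b};
      group_to_elements[1] = {&c};
      group_to_elements[2] = {&d, &e};

      // Flatten blocks group by group and record cumulative offsets,
      // mirroring independent_set_offsets_ in the hunk above.
      std::vector<const double*> parameter_blocks;
      std::vector<int> independent_set_offsets(1, 0);
      for (const auto& g_t_e : group_to_elements) {
        for (const double* block : g_t_e.second) parameter_blocks.push_back(block);
        independent_set_offsets.push_back(independent_set_offsets.back() +
                                          static_cast<int>(g_t_e.second.size()));
      }

      for (size_t k = 0; k + 1 < independent_set_offsets.size(); ++k) {
        std::printf("group %zu spans [%d, %d)\n",
                    k, independent_set_offsets[k], independent_set_offsets[k + 1]);
      }
      return 0;
    }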
diff --git a/extern/ceres/internal/ceres/corrector.cc b/extern/ceres/internal/ceres/corrector.cc
index 4ac0dc3cd86..6a79a06a544 100644
--- a/extern/ceres/internal/ceres/corrector.cc
+++ b/extern/ceres/internal/ceres/corrector.cc
@@ -30,8 +30,9 @@
#include "ceres/corrector.h"
-#include <cstddef>
#include <cmath>
+#include <cstddef>
+
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
@@ -147,9 +148,9 @@ void Corrector::CorrectJacobian(const int num_rows,
}
for (int r = 0; r < num_rows; ++r) {
- jacobian[r * num_cols + c] = sqrt_rho1_ *
- (jacobian[r * num_cols + c] -
- alpha_sq_norm_ * residuals[r] * r_transpose_j);
+ jacobian[r * num_cols + c] =
+ sqrt_rho1_ * (jacobian[r * num_cols + c] -
+ alpha_sq_norm_ * residuals[r] * r_transpose_j);
}
}
}
diff --git a/extern/ceres/internal/ceres/corrector.h b/extern/ceres/internal/ceres/corrector.h
index a5b03dda803..3e11cdce1ae 100644
--- a/extern/ceres/internal/ceres/corrector.h
+++ b/extern/ceres/internal/ceres/corrector.h
@@ -35,6 +35,8 @@
#ifndef CERES_INTERNAL_CORRECTOR_H_
#define CERES_INTERNAL_CORRECTOR_H_
+#include "ceres/internal/port.h"
+
namespace ceres {
namespace internal {
@@ -46,7 +48,7 @@ namespace internal {
// gauss newton approximation and then take its square root to get the
// corresponding corrections to the residual and jacobian. For the
// full expressions see Eq. 10 and 11 in BANS by Triggs et al.
-class Corrector {
+class CERES_EXPORT_INTERNAL Corrector {
public:
// The constructor takes the squared norm, the value, the first and
// second derivatives of the LossFunction. It precalculates some of
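The corrector.cc hunk above reindents the column-wise Triggs correction: each Jacobian column becomes sqrt_rho1 * (column - alpha_sq_norm * residuals * (residuals' * column)). A minimal sketch of that update on a small row-major Jacobian, with sqrt_rho1_ and alpha_sq_norm_ taken as precomputed constants (hypothetical values; in Ceres they come from the loss function as the header comment describes):

    #include <cstdio>
    #include <vector>

    int main() {
      const int num_rows = 2, num_cols = 3;
      // Row-major Jacobian and residuals (illustrative values).
      std::vector<double> jacobian = {1.0, 0.0, 2.0,
                                      0.5, 1.5, -1.0};
      std::vector<double> residuals = {0.3, -0.4};

      // Hypothetical precomputed loss-function constants.
      const double sqrt_rho1_ = 0.9;
      const double alpha_sq_norm_ = 0.2;

      // Column-wise correction, following the loop shown in the hunk.
      for (int c = 0; c < num_cols; ++c) {
        double r_transpose_j = 0.0;
        for (int r = 0; r < num_rows; ++r) {
          r_transpose_j += jacobian[r * num_cols + c] * residuals[r];
        }
        for (int r = 0; r < num_rows; ++r) {
          jacobian[r * num_cols + c] =
              sqrt_rho1_ * (jacobian[r * num_cols + c] -
                            alpha_sq_norm_ * residuals[r] * r_transpose_j);
        }
      }

      for (int r = 0; r < num_rows; ++r) {
        for (int c = 0; c < num_cols; ++c) std::printf("%8.4f ", jacobian[r * num_cols + c]);
        std::printf("\n");
      }
      return 0;
    }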
diff --git a/extern/ceres/internal/ceres/covariance.cc b/extern/ceres/internal/ceres/covariance.cc
index 8256078409a..8e240ff317c 100644
--- a/extern/ceres/internal/ceres/covariance.cc
+++ b/extern/ceres/internal/ceres/covariance.cc
@@ -32,6 +32,7 @@
#include <utility>
#include <vector>
+
#include "ceres/covariance_impl.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
@@ -46,8 +47,7 @@ Covariance::Covariance(const Covariance::Options& options) {
impl_.reset(new internal::CovarianceImpl(options));
}
-Covariance::~Covariance() {
-}
+Covariance::~Covariance() {}
bool Covariance::Compute(
const vector<pair<const double*, const double*>>& covariance_blocks,
@@ -55,9 +55,8 @@ bool Covariance::Compute(
return impl_->Compute(covariance_blocks, problem->impl_.get());
}
-bool Covariance::Compute(
- const vector<const double*>& parameter_blocks,
- Problem* problem) {
+bool Covariance::Compute(const vector<const double*>& parameter_blocks,
+ Problem* problem) {
return impl_->Compute(parameter_blocks, problem->impl_.get());
}
@@ -89,8 +88,8 @@ bool Covariance::GetCovarianceMatrix(
}
bool Covariance::GetCovarianceMatrixInTangentSpace(
- const std::vector<const double *>& parameter_blocks,
- double *covariance_matrix) const {
+ const std::vector<const double*>& parameter_blocks,
+ double* covariance_matrix) const {
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
false, // tangent
covariance_matrix);
diff --git a/extern/ceres/internal/ceres/covariance_impl.cc b/extern/ceres/internal/ceres/covariance_impl.cc
index 6c26412d854..1f86707f5a7 100644
--- a/extern/ceres/internal/ceres/covariance_impl.cc
+++ b/extern/ceres/internal/ceres/covariance_impl.cc
@@ -39,10 +39,9 @@
#include <utility>
#include <vector>
+#include "Eigen/SVD"
#include "Eigen/SparseCore"
#include "Eigen/SparseQR"
-#include "Eigen/SVD"
-
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/covariance.h"
@@ -61,25 +60,17 @@
namespace ceres {
namespace internal {
-using std::make_pair;
-using std::map;
-using std::pair;
-using std::sort;
using std::swap;
-using std::vector;
-typedef vector<pair<const double*, const double*>> CovarianceBlocks;
+using CovarianceBlocks = std::vector<std::pair<const double*, const double*>>;
CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
- : options_(options),
- is_computed_(false),
- is_valid_(false) {
+ : options_(options), is_computed_(false), is_valid_(false) {
#ifdef CERES_NO_THREADS
if (options_.num_threads > 1) {
- LOG(WARNING)
- << "No threading support is compiled into this binary; "
- << "only options.num_threads = 1 is supported. Switching "
- << "to single threaded mode.";
+ LOG(WARNING) << "No threading support is compiled into this binary; "
+ << "only options.num_threads = 1 is supported. Switching "
+ << "to single threaded mode.";
options_.num_threads = 1;
}
#endif
@@ -88,16 +79,16 @@ CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
evaluate_options_.apply_loss_function = options_.apply_loss_function;
}
-CovarianceImpl::~CovarianceImpl() {
-}
+CovarianceImpl::~CovarianceImpl() {}
-template <typename T> void CheckForDuplicates(vector<T> blocks) {
+template <typename T>
+void CheckForDuplicates(std::vector<T> blocks) {
sort(blocks.begin(), blocks.end());
- typename vector<T>::iterator it =
+ typename std::vector<T>::iterator it =
std::adjacent_find(blocks.begin(), blocks.end());
if (it != blocks.end()) {
// In case there are duplicates, we search for their location.
- map<T, vector<int>> blocks_map;
+ std::map<T, std::vector<int>> blocks_map;
for (int i = 0; i < blocks.size(); ++i) {
blocks_map[blocks[i]].push_back(i);
}
@@ -122,7 +113,8 @@ template <typename T> void CheckForDuplicates(vector<T> blocks) {
bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
ProblemImpl* problem) {
- CheckForDuplicates<pair<const double*, const double*>>(covariance_blocks);
+ CheckForDuplicates<std::pair<const double*, const double*>>(
+ covariance_blocks);
problem_ = problem;
parameter_block_to_row_index_.clear();
covariance_matrix_.reset(NULL);
@@ -132,14 +124,14 @@ bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
return is_valid_;
}
-bool CovarianceImpl::Compute(const vector<const double*>& parameter_blocks,
+bool CovarianceImpl::Compute(const std::vector<const double*>& parameter_blocks,
ProblemImpl* problem) {
CheckForDuplicates<const double*>(parameter_blocks);
CovarianceBlocks covariance_blocks;
for (int i = 0; i < parameter_blocks.size(); ++i) {
for (int j = i; j < parameter_blocks.size(); ++j) {
- covariance_blocks.push_back(make_pair(parameter_blocks[i],
- parameter_blocks[j]));
+ covariance_blocks.push_back(
+ std::make_pair(parameter_blocks[i], parameter_blocks[j]));
}
}
@@ -162,13 +154,11 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
if (constant_parameter_blocks_.count(original_parameter_block1) > 0 ||
constant_parameter_blocks_.count(original_parameter_block2) > 0) {
const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
- ParameterBlock* block1 =
- FindOrDie(parameter_map,
- const_cast<double*>(original_parameter_block1));
+ ParameterBlock* block1 = FindOrDie(
+ parameter_map, const_cast<double*>(original_parameter_block1));
- ParameterBlock* block2 =
- FindOrDie(parameter_map,
- const_cast<double*>(original_parameter_block2));
+ ParameterBlock* block2 = FindOrDie(
+ parameter_map, const_cast<double*>(original_parameter_block2));
const int block1_size = block1->Size();
const int block2_size = block2->Size();
@@ -210,8 +200,7 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
if (offset == row_size) {
LOG(ERROR) << "Unable to find covariance block for "
- << original_parameter_block1 << " "
- << original_parameter_block2;
+ << original_parameter_block1 << " " << original_parameter_block2;
return false;
}
@@ -227,9 +216,8 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
const int block2_size = block2->Size();
const int block2_local_size = block2->LocalSize();
- ConstMatrixRef cov(covariance_matrix_->values() + rows[row_begin],
- block1_size,
- row_size);
+ ConstMatrixRef cov(
+ covariance_matrix_->values() + rows[row_begin], block1_size, row_size);
// Fast path when there are no local parameterizations or if the
// user does not want it lifted to the ambient space.
@@ -237,8 +225,8 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
!lift_covariance_to_ambient_space) {
if (transpose) {
MatrixRef(covariance_block, block2_local_size, block1_local_size) =
- cov.block(0, offset, block1_local_size,
- block2_local_size).transpose();
+ cov.block(0, offset, block1_local_size, block2_local_size)
+ .transpose();
} else {
MatrixRef(covariance_block, block1_local_size, block2_local_size) =
cov.block(0, offset, block1_local_size, block2_local_size);
@@ -298,7 +286,7 @@ bool CovarianceImpl::GetCovarianceBlockInTangentOrAmbientSpace(
}
bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
- const vector<const double*>& parameters,
+ const std::vector<const double*>& parameters,
bool lift_covariance_to_ambient_space,
double* covariance_matrix) const {
CHECK(is_computed_)
@@ -310,8 +298,8 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
const ProblemImpl::ParameterMap& parameter_map = problem_->parameter_map();
// For OpenMP compatibility we need to define these vectors in advance
const int num_parameters = parameters.size();
- vector<int> parameter_sizes;
- vector<int> cum_parameter_size;
+ std::vector<int> parameter_sizes;
+ std::vector<int> cum_parameter_size;
parameter_sizes.reserve(num_parameters);
cum_parameter_size.resize(num_parameters + 1);
cum_parameter_size[0] = 0;
@@ -324,7 +312,8 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
parameter_sizes.push_back(block->LocalSize());
}
}
- std::partial_sum(parameter_sizes.begin(), parameter_sizes.end(),
+ std::partial_sum(parameter_sizes.begin(),
+ parameter_sizes.end(),
cum_parameter_size.begin() + 1);
const int max_covariance_block_size =
*std::max_element(parameter_sizes.begin(), parameter_sizes.end());
@@ -343,65 +332,66 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
// i = 1:n, j = i:n.
int iteration_count = (num_parameters * (num_parameters + 1)) / 2;
problem_->context()->EnsureMinimumThreads(num_threads);
- ParallelFor(
- problem_->context(),
- 0,
- iteration_count,
- num_threads,
- [&](int thread_id, int k) {
- int i, j;
- LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
-
- int covariance_row_idx = cum_parameter_size[i];
- int covariance_col_idx = cum_parameter_size[j];
- int size_i = parameter_sizes[i];
- int size_j = parameter_sizes[j];
- double* covariance_block =
- workspace.get() + thread_id * max_covariance_block_size *
- max_covariance_block_size;
- if (!GetCovarianceBlockInTangentOrAmbientSpace(
- parameters[i], parameters[j],
- lift_covariance_to_ambient_space, covariance_block)) {
- success = false;
- }
-
- covariance.block(covariance_row_idx, covariance_col_idx, size_i,
- size_j) = MatrixRef(covariance_block, size_i, size_j);
-
- if (i != j) {
- covariance.block(covariance_col_idx, covariance_row_idx,
- size_j, size_i) =
- MatrixRef(covariance_block, size_i, size_j).transpose();
- }
- });
+ ParallelFor(problem_->context(),
+ 0,
+ iteration_count,
+ num_threads,
+ [&](int thread_id, int k) {
+ int i, j;
+ LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
+
+ int covariance_row_idx = cum_parameter_size[i];
+ int covariance_col_idx = cum_parameter_size[j];
+ int size_i = parameter_sizes[i];
+ int size_j = parameter_sizes[j];
+ double* covariance_block =
+ workspace.get() + thread_id * max_covariance_block_size *
+ max_covariance_block_size;
+ if (!GetCovarianceBlockInTangentOrAmbientSpace(
+ parameters[i],
+ parameters[j],
+ lift_covariance_to_ambient_space,
+ covariance_block)) {
+ success = false;
+ }
+
+ covariance.block(
+ covariance_row_idx, covariance_col_idx, size_i, size_j) =
+ MatrixRef(covariance_block, size_i, size_j);
+
+ if (i != j) {
+ covariance.block(
+ covariance_col_idx, covariance_row_idx, size_j, size_i) =
+ MatrixRef(covariance_block, size_i, size_j).transpose();
+ }
+ });
return success;
}
// Determine the sparsity pattern of the covariance matrix based on
// the block pairs requested by the user.
bool CovarianceImpl::ComputeCovarianceSparsity(
- const CovarianceBlocks& original_covariance_blocks,
- ProblemImpl* problem) {
+ const CovarianceBlocks& original_covariance_blocks, ProblemImpl* problem) {
EventLogger event_logger("CovarianceImpl::ComputeCovarianceSparsity");
// Determine an ordering for the parameter block, by sorting the
// parameter blocks by their pointers.
- vector<double*> all_parameter_blocks;
+ std::vector<double*> all_parameter_blocks;
problem->GetParameterBlocks(&all_parameter_blocks);
const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
std::unordered_set<ParameterBlock*> parameter_blocks_in_use;
- vector<ResidualBlock*> residual_blocks;
+ std::vector<ResidualBlock*> residual_blocks;
problem->GetResidualBlocks(&residual_blocks);
for (int i = 0; i < residual_blocks.size(); ++i) {
ResidualBlock* residual_block = residual_blocks[i];
parameter_blocks_in_use.insert(residual_block->parameter_blocks(),
residual_block->parameter_blocks() +
- residual_block->NumParameterBlocks());
+ residual_block->NumParameterBlocks());
}
constant_parameter_blocks_.clear();
- vector<double*>& active_parameter_blocks =
+ std::vector<double*>& active_parameter_blocks =
evaluate_options_.parameter_blocks;
active_parameter_blocks.clear();
for (int i = 0; i < all_parameter_blocks.size(); ++i) {
@@ -434,8 +424,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// triangular part of the matrix.
int num_nonzeros = 0;
CovarianceBlocks covariance_blocks;
- for (int i = 0; i < original_covariance_blocks.size(); ++i) {
- const pair<const double*, const double*>& block_pair =
+ for (int i = 0; i < original_covariance_blocks.size(); ++i) {
+ const std::pair<const double*, const double*>& block_pair =
original_covariance_blocks[i];
if (constant_parameter_blocks_.count(block_pair.first) > 0 ||
constant_parameter_blocks_.count(block_pair.second) > 0) {
@@ -450,8 +440,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// Make sure we are constructing a block upper triangular matrix.
if (index1 > index2) {
- covariance_blocks.push_back(make_pair(block_pair.second,
- block_pair.first));
+ covariance_blocks.push_back(
+ std::make_pair(block_pair.second, block_pair.first));
} else {
covariance_blocks.push_back(block_pair);
}
@@ -466,7 +456,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// Sort the block pairs. As a consequence we get the covariance
// blocks as they will occur in the CompressedRowSparseMatrix that
// will store the covariance.
- sort(covariance_blocks.begin(), covariance_blocks.end());
+ std::sort(covariance_blocks.begin(), covariance_blocks.end());
// Fill the sparsity pattern of the covariance matrix.
covariance_matrix_.reset(
@@ -486,10 +476,10 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// values of the parameter blocks. Thus iterating over the keys of
// parameter_block_to_row_index_ corresponds to iterating over the
// rows of the covariance matrix in order.
- int i = 0; // index into covariance_blocks.
+ int i = 0; // index into covariance_blocks.
int cursor = 0; // index into the covariance matrix.
for (const auto& entry : parameter_block_to_row_index_) {
- const double* row_block = entry.first;
+ const double* row_block = entry.first;
const int row_block_size = problem->ParameterBlockLocalSize(row_block);
int row_begin = entry.second;
@@ -498,7 +488,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
int num_col_blocks = 0;
int num_columns = 0;
for (int j = i; j < covariance_blocks.size(); ++j, ++num_col_blocks) {
- const pair<const double*, const double*>& block_pair =
+ const std::pair<const double*, const double*>& block_pair =
covariance_blocks[j];
if (block_pair.first != row_block) {
break;
@@ -519,7 +509,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
}
}
- i+= num_col_blocks;
+ i += num_col_blocks;
}
rows[num_rows] = cursor;
@@ -580,9 +570,9 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
const int num_cols = jacobian.num_cols;
const int num_nonzeros = jacobian.values.size();
- vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
- vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
- vector<double> transpose_values(num_nonzeros, 0);
+ std::vector<SuiteSparse_long> transpose_rows(num_cols + 1, 0);
+ std::vector<SuiteSparse_long> transpose_cols(num_nonzeros, 0);
+ std::vector<double> transpose_values(num_nonzeros, 0);
for (int idx = 0; idx < num_nonzeros; ++idx) {
transpose_rows[jacobian.cols[idx] + 1] += 1;
@@ -602,7 +592,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
}
}
- for (int i = transpose_rows.size() - 1; i > 0 ; --i) {
+ for (int i = transpose_rows.size() - 1; i > 0; --i) {
transpose_rows[i] = transpose_rows[i - 1];
}
transpose_rows[0] = 0;
@@ -642,14 +632,13 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
// more efficient, both in runtime as well as the quality of
// ordering computed. So, it maybe worth doing that analysis
// separately.
- const SuiteSparse_long rank =
- SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD,
- SPQR_DEFAULT_TOL,
- cholmod_jacobian.ncol,
- &cholmod_jacobian,
- &R,
- &permutation,
- &cc);
+ const SuiteSparse_long rank = SuiteSparseQR<double>(SPQR_ORDERING_BESTAMD,
+ SPQR_DEFAULT_TOL,
+ cholmod_jacobian.ncol,
+ &cholmod_jacobian,
+ &R,
+ &permutation,
+ &cc);
event_logger.AddEvent("Numeric Factorization");
if (R == nullptr) {
LOG(ERROR) << "Something is wrong. SuiteSparseQR returned R = nullptr.";
@@ -668,7 +657,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
return false;
}
- vector<int> inverse_permutation(num_cols);
+ std::vector<int> inverse_permutation(num_cols);
if (permutation) {
for (SuiteSparse_long i = 0; i < num_cols; ++i) {
inverse_permutation[permutation[i]] = i;
@@ -697,19 +686,18 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
problem_->context()->EnsureMinimumThreads(num_threads);
ParallelFor(
- problem_->context(),
- 0,
- num_cols,
- num_threads,
- [&](int thread_id, int r) {
+ problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
const int row_begin = rows[r];
const int row_end = rows[r + 1];
if (row_end != row_begin) {
double* solution = workspace.get() + thread_id * num_cols;
SolveRTRWithSparseRHS<SuiteSparse_long>(
- num_cols, static_cast<SuiteSparse_long*>(R->i),
- static_cast<SuiteSparse_long*>(R->p), static_cast<double*>(R->x),
- inverse_permutation[r], solution);
+ num_cols,
+ static_cast<SuiteSparse_long*>(R->i),
+ static_cast<SuiteSparse_long*>(R->p),
+ static_cast<double*>(R->x),
+ inverse_permutation[r],
+ solution);
for (int idx = row_begin; idx < row_end; ++idx) {
const int c = cols[idx];
values[idx] = solution[inverse_permutation[c]];
@@ -801,10 +789,9 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
1.0 / (singular_values[i] * singular_values[i]);
}
- Matrix dense_covariance =
- svd.matrixV() *
- inverse_squared_singular_values.asDiagonal() *
- svd.matrixV().transpose();
+ Matrix dense_covariance = svd.matrixV() *
+ inverse_squared_singular_values.asDiagonal() *
+ svd.matrixV().transpose();
event_logger.AddEvent("PseudoInverse");
const int num_rows = covariance_matrix_->num_rows();
@@ -839,13 +826,16 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
// Convert the matrix to column major order as required by SparseQR.
EigenSparseMatrix sparse_jacobian =
Eigen::MappedSparseMatrix<double, Eigen::RowMajor>(
- jacobian.num_rows, jacobian.num_cols,
+ jacobian.num_rows,
+ jacobian.num_cols,
static_cast<int>(jacobian.values.size()),
- jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
+ jacobian.rows.data(),
+ jacobian.cols.data(),
+ jacobian.values.data());
event_logger.AddEvent("ConvertToSparseMatrix");
- Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>>
- qr_solver(sparse_jacobian);
+ Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>> qr_solver(
+ sparse_jacobian);
event_logger.AddEvent("QRDecomposition");
if (qr_solver.info() != Eigen::Success) {
@@ -883,22 +873,17 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
problem_->context()->EnsureMinimumThreads(num_threads);
ParallelFor(
- problem_->context(),
- 0,
- num_cols,
- num_threads,
- [&](int thread_id, int r) {
+ problem_->context(), 0, num_cols, num_threads, [&](int thread_id, int r) {
const int row_begin = rows[r];
const int row_end = rows[r + 1];
if (row_end != row_begin) {
double* solution = workspace.get() + thread_id * num_cols;
- SolveRTRWithSparseRHS<int>(
- num_cols,
- qr_solver.matrixR().innerIndexPtr(),
- qr_solver.matrixR().outerIndexPtr(),
- &qr_solver.matrixR().data().value(0),
- inverse_permutation.indices().coeff(r),
- solution);
+ SolveRTRWithSparseRHS<int>(num_cols,
+ qr_solver.matrixR().innerIndexPtr(),
+ qr_solver.matrixR().outerIndexPtr(),
+ &qr_solver.matrixR().data().value(0),
+ inverse_permutation.indices().coeff(r),
+ solution);
// Assign the values of the computed covariance using the
// inverse permutation used in the QR factorization.
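Among the hunks above, ComputeCovarianceValuesUsingDenseSVD assembles the covariance as V * diag(1 / s_i^2) * V' from an SVD of the Jacobian. A minimal Eigen sketch of that pseudo-inverse of J'J, with illustrative values and without the rank and tolerance handling the real implementation performs:

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Small dense Jacobian (illustrative values).
      Eigen::MatrixXd J(4, 2);
      J << 1, 0,
           0, 1,
           1, 1,
           2, -1;

      Eigen::JacobiSVD<Eigen::MatrixXd> svd(J, Eigen::ComputeThinU | Eigen::ComputeThinV);
      const Eigen::VectorXd& s = svd.singularValues();

      // covariance ~= (J'J)^+ = V * diag(1 / s_i^2) * V'.
      Eigen::VectorXd inverse_squared_singular_values(s.size());
      for (int i = 0; i < s.size(); ++i) {
        inverse_squared_singular_values[i] = 1.0 / (s[i] * s[i]);
      }
      Eigen::MatrixXd dense_covariance = svd.matrixV() *
                                         inverse_squared_singular_values.asDiagonal() *
                                         svd.matrixV().transpose();

      std::cout << dense_covariance << "\n";
      return 0;
    }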
diff --git a/extern/ceres/internal/ceres/covariance_impl.h b/extern/ceres/internal/ceres/covariance_impl.h
index 065e43c60fc..394a04bbc78 100644
--- a/extern/ceres/internal/ceres/covariance_impl.h
+++ b/extern/ceres/internal/ceres/covariance_impl.h
@@ -36,7 +36,9 @@
#include <set>
#include <utility>
#include <vector>
+
#include "ceres/covariance.h"
+#include "ceres/internal/port.h"
#include "ceres/problem_impl.h"
#include "ceres/suitesparse.h"
@@ -45,19 +47,17 @@ namespace internal {
class CompressedRowSparseMatrix;
-class CovarianceImpl {
+class CERES_EXPORT_INTERNAL CovarianceImpl {
public:
explicit CovarianceImpl(const Covariance::Options& options);
~CovarianceImpl();
- bool Compute(
- const std::vector<std::pair<const double*,
- const double*>>& covariance_blocks,
- ProblemImpl* problem);
+ bool Compute(const std::vector<std::pair<const double*, const double*>>&
+ covariance_blocks,
+ ProblemImpl* problem);
- bool Compute(
- const std::vector<const double*>& parameter_blocks,
- ProblemImpl* problem);
+ bool Compute(const std::vector<const double*>& parameter_blocks,
+ ProblemImpl* problem);
bool GetCovarianceBlockInTangentOrAmbientSpace(
const double* parameter_block1,
@@ -68,11 +68,11 @@ class CovarianceImpl {
bool GetCovarianceMatrixInTangentOrAmbientSpace(
const std::vector<const double*>& parameters,
bool lift_covariance_to_ambient_space,
- double *covariance_matrix) const;
+ double* covariance_matrix) const;
bool ComputeCovarianceSparsity(
- const std::vector<std::pair<const double*,
- const double*>>& covariance_blocks,
+ const std::vector<std::pair<const double*, const double*>>&
+ covariance_blocks,
ProblemImpl* problem);
bool ComputeCovarianceValues();
diff --git a/extern/ceres/internal/ceres/cxsparse.cc b/extern/ceres/internal/ceres/cxsparse.cc
index 5a028773206..0167f988648 100644
--- a/extern/ceres/internal/ceres/cxsparse.cc
+++ b/extern/ceres/internal/ceres/cxsparse.cc
@@ -33,13 +33,12 @@
#ifndef CERES_NO_CXSPARSE
-#include "ceres/cxsparse.h"
-
#include <string>
#include <vector>
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cxsparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
diff --git a/extern/ceres/internal/ceres/cxsparse.h b/extern/ceres/internal/ceres/cxsparse.h
index dc4740ceaee..d3f76e0575e 100644
--- a/extern/ceres/internal/ceres/cxsparse.h
+++ b/extern/ceres/internal/ceres/cxsparse.h
@@ -166,7 +166,7 @@ class CXSparseCholesky : public SparseCholesky {
} // namespace internal
} // namespace ceres
-#else // CERES_NO_CXSPARSE
+#else
typedef void cs_dis;
diff --git a/extern/ceres/internal/ceres/dense_jacobian_writer.h b/extern/ceres/internal/ceres/dense_jacobian_writer.h
index 1b04f383f09..28c60e20a1b 100644
--- a/extern/ceres/internal/ceres/dense_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/dense_jacobian_writer.h
@@ -35,21 +35,19 @@
#include "ceres/casts.h"
#include "ceres/dense_sparse_matrix.h"
+#include "ceres/internal/eigen.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/scratch_evaluate_preparer.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
class DenseJacobianWriter {
public:
- DenseJacobianWriter(Evaluator::Options /* ignored */,
- Program* program)
- : program_(program) {
- }
+ DenseJacobianWriter(Evaluator::Options /* ignored */, Program* program)
+ : program_(program) {}
// JacobianWriter interface.
@@ -61,14 +59,13 @@ class DenseJacobianWriter {
}
SparseMatrix* CreateJacobian() const {
- return new DenseSparseMatrix(program_->NumResiduals(),
- program_->NumEffectiveParameters(),
- true);
+ return new DenseSparseMatrix(
+ program_->NumResiduals(), program_->NumEffectiveParameters(), true);
}
void Write(int residual_id,
int residual_offset,
- double **jacobians,
+ double** jacobians,
SparseMatrix* jacobian) {
DenseSparseMatrix* dense_jacobian = down_cast<DenseSparseMatrix*>(jacobian);
const ResidualBlock* residual_block =
@@ -86,15 +83,14 @@ class DenseJacobianWriter {
}
const int parameter_block_size = parameter_block->LocalSize();
- ConstMatrixRef parameter_jacobian(jacobians[j],
- num_residuals,
- parameter_block_size);
+ ConstMatrixRef parameter_jacobian(
+ jacobians[j], num_residuals, parameter_block_size);
- dense_jacobian->mutable_matrix().block(
- residual_offset,
- parameter_block->delta_offset(),
- num_residuals,
- parameter_block_size) = parameter_jacobian;
+ dense_jacobian->mutable_matrix().block(residual_offset,
+ parameter_block->delta_offset(),
+ num_residuals,
+ parameter_block_size) =
+ parameter_jacobian;
}
}
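The DenseJacobianWriter hunk above reflows the copy of one parameter block's Jacobian into its block of the global dense Jacobian. A minimal Eigen sketch of that map-and-block assignment, with hypothetical offsets (Ceres hands out per-block Jacobians as row-major arrays):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      const int num_residuals = 2;
      const int parameter_block_size = 3;

      // Jacobian of one residual block w.r.t. one parameter block,
      // stored as a row-major num_residuals x parameter_block_size array.
      double block_values[] = {1, 2, 3,
                               4, 5, 6};
      Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic,
                                     Eigen::RowMajor>>
          parameter_jacobian(block_values, num_residuals, parameter_block_size);

      // Hypothetical global dense Jacobian and placement offsets.
      Eigen::MatrixXd dense_jacobian = Eigen::MatrixXd::Zero(5, 7);
      const int residual_offset = 1;
      const int delta_offset = 2;

      dense_jacobian.block(residual_offset, delta_offset,
                           num_residuals, parameter_block_size) = parameter_jacobian;

      std::cout << dense_jacobian << "\n";
      return 0;
    }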
diff --git a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
index fe7d931a3fd..51c639097b6 100644
--- a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
@@ -132,13 +132,8 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(
//
// Note: This is a bit delicate, it assumes that the stride on this
// matrix is the same as the number of rows.
- BLAS::SymmetricRankKUpdate(A->num_rows(),
- num_cols,
- A->values(),
- true,
- 1.0,
- 0.0,
- lhs.data());
+ BLAS::SymmetricRankKUpdate(
+ A->num_rows(), num_cols, A->values(), true, 1.0, 0.0, lhs.data());
if (per_solve_options.D != NULL) {
// Undo the modifications to the matrix A.
@@ -153,13 +148,10 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK(
LinearSolver::Summary summary;
summary.num_iterations = 1;
- summary.termination_type =
- LAPACK::SolveInPlaceUsingCholesky(num_cols,
- lhs.data(),
- x,
- &summary.message);
+ summary.termination_type = LAPACK::SolveInPlaceUsingCholesky(
+ num_cols, lhs.data(), x, &summary.message);
event_logger.AddEvent("Solve");
return summary;
}
-} // namespace internal
-} // namespace ceres
+} // namespace internal
+} // namespace ceres
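The SymmetricRankKUpdate and Cholesky calls compressed above together solve the normal equations (A'A) x = A'b. A minimal Eigen sketch of the same normal-equations solve with illustrative values (the real path goes through BLAS/LAPACK and also handles the optional diagonal D):

    #include <Eigen/Dense>
    #include <iostream>

    int main() {
      // Overdetermined system A x ~= b (illustrative values).
      Eigen::MatrixXd A(4, 2);
      A << 1, 1,
           1, 2,
           1, 3,
           1, 4;
      Eigen::VectorXd b(4);
      b << 6, 5, 7, 10;

      // Form the normal equations and solve by Cholesky, mirroring
      // the rank-k update followed by an in-place Cholesky solve.
      Eigen::MatrixXd lhs = A.transpose() * A;
      Eigen::VectorXd rhs = A.transpose() * b;
      Eigen::VectorXd x = lhs.llt().solve(rhs);

      std::cout << "x = " << x.transpose() << "\n";
      return 0;
    }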
diff --git a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
index 976718e8615..68ea611299f 100644
--- a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
+++ b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
@@ -73,7 +73,7 @@ class DenseSparseMatrix;
// library. This solver always returns a solution, it is the user's
// responsibility to judge if the solution is good enough for their
// purposes.
-class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
+class DenseNormalCholeskySolver : public DenseSparseMatrixSolver {
public:
explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
diff --git a/extern/ceres/internal/ceres/dense_qr_solver.cc b/extern/ceres/internal/ceres/dense_qr_solver.cc
index 161e9c67a00..44388f30aee 100644
--- a/extern/ceres/internal/ceres/dense_qr_solver.cc
+++ b/extern/ceres/internal/ceres/dense_qr_solver.cc
@@ -31,6 +31,7 @@
#include "ceres/dense_qr_solver.h"
#include <cstddef>
+
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
@@ -77,7 +78,7 @@ LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK(
// TODO(sameeragarwal): Since we are copying anyways, the diagonal
// can be appended to the matrix instead of doing it on A.
- lhs_ = A->matrix();
+ lhs_ = A->matrix();
if (per_solve_options.D != NULL) {
// Undo the modifications to the matrix A.
@@ -164,5 +165,5 @@ LinearSolver::Summary DenseQRSolver::SolveUsingEigen(
return summary;
}
-} // namespace internal
-} // namespace ceres
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/dense_qr_solver.h b/extern/ceres/internal/ceres/dense_qr_solver.h
index 9ea959db68d..980243bd6c6 100644
--- a/extern/ceres/internal/ceres/dense_qr_solver.h
+++ b/extern/ceres/internal/ceres/dense_qr_solver.h
@@ -32,8 +32,9 @@
#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
-#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
namespace ceres {
namespace internal {
@@ -78,7 +79,7 @@ class DenseSparseMatrix;
// library. This solver always returns a solution, it is the user's
// responsibility to judge if the solution is good enough for their
// purposes.
-class DenseQRSolver: public DenseSparseMatrixSolver {
+class CERES_EXPORT_INTERNAL DenseQRSolver : public DenseSparseMatrixSolver {
public:
explicit DenseQRSolver(const LinearSolver::Options& options);
diff --git a/extern/ceres/internal/ceres/dense_sparse_matrix.cc b/extern/ceres/internal/ceres/dense_sparse_matrix.cc
index 72e08360dd0..53207fe300e 100644
--- a/extern/ceres/internal/ceres/dense_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/dense_sparse_matrix.cc
@@ -31,17 +31,17 @@
#include "ceres/dense_sparse_matrix.h"
#include <algorithm>
-#include "ceres/triplet_sparse_matrix.h"
+
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
+#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
- : has_diagonal_appended_(false),
- has_diagonal_reserved_(false) {
+ : has_diagonal_appended_(false), has_diagonal_reserved_(false) {
m_.resize(num_rows, num_cols);
m_.setZero();
}
@@ -49,11 +49,10 @@ DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols)
DenseSparseMatrix::DenseSparseMatrix(int num_rows,
int num_cols,
bool reserve_diagonal)
- : has_diagonal_appended_(false),
- has_diagonal_reserved_(reserve_diagonal) {
+ : has_diagonal_appended_(false), has_diagonal_reserved_(reserve_diagonal) {
if (reserve_diagonal) {
// Allocate enough space for the diagonal.
- m_.resize(num_rows + num_cols, num_cols);
+ m_.resize(num_rows + num_cols, num_cols);
} else {
m_.resize(num_rows, num_cols);
}
@@ -64,9 +63,9 @@ DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
: m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())),
has_diagonal_appended_(false),
has_diagonal_reserved_(false) {
- const double *values = m.values();
- const int *rows = m.rows();
- const int *cols = m.cols();
+ const double* values = m.values();
+ const int* rows = m.rows();
+ const int* cols = m.cols();
int num_nonzeros = m.num_nonzeros();
for (int i = 0; i < num_nonzeros; ++i) {
@@ -75,14 +74,9 @@ DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m)
}
DenseSparseMatrix::DenseSparseMatrix(const ColMajorMatrix& m)
- : m_(m),
- has_diagonal_appended_(false),
- has_diagonal_reserved_(false) {
-}
+ : m_(m), has_diagonal_appended_(false), has_diagonal_reserved_(false) {}
-void DenseSparseMatrix::SetZero() {
- m_.setZero();
-}
+void DenseSparseMatrix::SetZero() { m_.setZero(); }
void DenseSparseMatrix::RightMultiply(const double* x, double* y) const {
VectorRef(y, num_rows()) += matrix() * ConstVectorRef(x, num_cols());
@@ -105,7 +99,7 @@ void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
*dense_matrix = m_.block(0, 0, num_rows(), num_cols());
}
-void DenseSparseMatrix::AppendDiagonal(double *d) {
+void DenseSparseMatrix::AppendDiagonal(double* d) {
CHECK(!has_diagonal_appended_);
if (!has_diagonal_reserved_) {
ColMajorMatrix tmp = m_;
@@ -133,9 +127,7 @@ int DenseSparseMatrix::num_rows() const {
return m_.rows();
}
-int DenseSparseMatrix::num_cols() const {
- return m_.cols();
-}
+int DenseSparseMatrix::num_cols() const { return m_.cols(); }
int DenseSparseMatrix::num_nonzeros() const {
if (has_diagonal_reserved_ && !has_diagonal_appended_) {
@@ -148,33 +140,30 @@ ConstColMajorMatrixRef DenseSparseMatrix::matrix() const {
return ConstColMajorMatrixRef(
m_.data(),
((has_diagonal_reserved_ && !has_diagonal_appended_)
- ? m_.rows() - m_.cols()
- : m_.rows()),
+ ? m_.rows() - m_.cols()
+ : m_.rows()),
m_.cols(),
Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
}
ColMajorMatrixRef DenseSparseMatrix::mutable_matrix() {
- return ColMajorMatrixRef(
- m_.data(),
- ((has_diagonal_reserved_ && !has_diagonal_appended_)
- ? m_.rows() - m_.cols()
- : m_.rows()),
- m_.cols(),
- Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
+ return ColMajorMatrixRef(m_.data(),
+ ((has_diagonal_reserved_ && !has_diagonal_appended_)
+ ? m_.rows() - m_.cols()
+ : m_.rows()),
+ m_.cols(),
+ Eigen::Stride<Eigen::Dynamic, 1>(m_.rows(), 1));
}
-
void DenseSparseMatrix::ToTextFile(FILE* file) const {
CHECK(file != nullptr);
- const int active_rows =
- (has_diagonal_reserved_ && !has_diagonal_appended_)
- ? (m_.rows() - m_.cols())
- : m_.rows();
+ const int active_rows = (has_diagonal_reserved_ && !has_diagonal_appended_)
+ ? (m_.rows() - m_.cols())
+ : m_.rows();
for (int r = 0; r < active_rows; ++r) {
for (int c = 0; c < m_.cols(); ++c) {
- fprintf(file, "% 10d % 10d %17f\n", r, c, m_(r, c));
+ fprintf(file, "% 10d % 10d %17f\n", r, c, m_(r, c));
}
}
}
diff --git a/extern/ceres/internal/ceres/dense_sparse_matrix.h b/extern/ceres/internal/ceres/dense_sparse_matrix.h
index 6d3d504ea36..94064b3eddc 100644
--- a/extern/ceres/internal/ceres/dense_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/dense_sparse_matrix.h
@@ -34,6 +34,7 @@
#define CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -42,7 +43,7 @@ namespace internal {
class TripletSparseMatrix;
-class DenseSparseMatrix : public SparseMatrix {
+class CERES_EXPORT_INTERNAL DenseSparseMatrix : public SparseMatrix {
public:
// Build a matrix with the same content as the TripletSparseMatrix
// m. This assumes that m does not have any repeated entries.
@@ -92,7 +93,7 @@ class DenseSparseMatrix : public SparseMatrix {
// Calling RemoveDiagonal removes the block. It is a fatal error to append a
// diagonal to a matrix that already has an appended diagonal, and it is also
// a fatal error to remove a diagonal from a matrix that has none.
- void AppendDiagonal(double *d);
+ void AppendDiagonal(double* d);
void RemoveDiagonal();
private:
diff --git a/extern/ceres/internal/ceres/detect_structure.cc b/extern/ceres/internal/ceres/detect_structure.cc
index 959a0ee3c84..4aac4452153 100644
--- a/extern/ceres/internal/ceres/detect_structure.cc
+++ b/extern/ceres/internal/ceres/detect_structure.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/detect_structure.h"
+
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
@@ -61,8 +62,7 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*row_block_size != Eigen::Dynamic &&
*row_block_size != row.block.size) {
VLOG(2) << "Dynamic row block size because the block size changed from "
- << *row_block_size << " to "
- << row.block.size;
+ << *row_block_size << " to " << row.block.size;
*row_block_size = Eigen::Dynamic;
}
@@ -73,8 +73,7 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*e_block_size != Eigen::Dynamic &&
*e_block_size != bs.cols[e_block_id].size) {
VLOG(2) << "Dynamic e block size because the block size changed from "
- << *e_block_size << " to "
- << bs.cols[e_block_id].size;
+ << *e_block_size << " to " << bs.cols[e_block_id].size;
*e_block_size = Eigen::Dynamic;
}
@@ -100,9 +99,11 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
}
}
+ // clang-format off
const bool is_everything_dynamic = (*row_block_size == Eigen::Dynamic &&
*e_block_size == Eigen::Dynamic &&
*f_block_size == Eigen::Dynamic);
+ // clang-format on
if (is_everything_dynamic) {
break;
}
@@ -110,10 +111,12 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
CHECK_NE(*row_block_size, 0) << "No rows found";
CHECK_NE(*e_block_size, 0) << "No e type blocks found";
+ // clang-format off
VLOG(1) << "Schur complement static structure <"
<< *row_block_size << ","
<< *e_block_size << ","
<< *f_block_size << ">.";
+ // clang-format on
}
} // namespace internal
diff --git a/extern/ceres/internal/ceres/detect_structure.h b/extern/ceres/internal/ceres/detect_structure.h
index 602581c846e..06242307ca8 100644
--- a/extern/ceres/internal/ceres/detect_structure.h
+++ b/extern/ceres/internal/ceres/detect_structure.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_DETECT_STRUCTURE_H_
#include "ceres/block_structure.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -55,11 +56,11 @@ namespace internal {
// Note: The structure of rows without any e-blocks has no effect on
// the values returned by this function. It is entirely possible that
// the f_block_size and row_blocks_size is not constant in such rows.
-void DetectStructure(const CompressedRowBlockStructure& bs,
- const int num_eliminate_blocks,
- int* row_block_size,
- int* e_block_size,
- int* f_block_size);
+void CERES_EXPORT DetectStructure(const CompressedRowBlockStructure& bs,
+ const int num_eliminate_blocks,
+ int* row_block_size,
+ int* e_block_size,
+ int* f_block_size);
} // namespace internal
} // namespace ceres
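DetectStructure, exported above, reports a fixed block size only when every block it sees has the same size, and falls back to Eigen::Dynamic otherwise. A small sketch of that scan over hypothetical block sizes:

    #include <cstdio>
    #include <vector>

    // Returns the common size of all blocks, or -1 (standing in for
    // Eigen::Dynamic) when the sizes differ, which is the fallback rule
    // DetectStructure applies to the row, e and f block sizes.
    int CommonBlockSizeOrDynamic(const std::vector<int>& block_sizes) {
      const int kDynamic = -1;
      int size = kDynamic;
      for (int s : block_sizes) {
        if (size == kDynamic) {
          size = s;
        } else if (size != s) {
          return kDynamic;
        }
      }
      return size;
    }

    int main() {
      std::printf("%d\n", CommonBlockSizeOrDynamic({3, 3, 3}));  // 3
      std::printf("%d\n", CommonBlockSizeOrDynamic({3, 2, 3}));  // -1 (dynamic)
      return 0;
    }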
diff --git a/extern/ceres/internal/ceres/dogleg_strategy.cc b/extern/ceres/internal/ceres/dogleg_strategy.cc
index ecc6b882338..03ae22f7e57 100644
--- a/extern/ceres/internal/ceres/dogleg_strategy.cc
+++ b/extern/ceres/internal/ceres/dogleg_strategy.cc
@@ -49,7 +49,7 @@ namespace internal {
namespace {
const double kMaxMu = 1.0;
const double kMinMu = 1e-8;
-}
+} // namespace
DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
: linear_solver_(options.linear_solver),
@@ -122,8 +122,8 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
//
jacobian->SquaredColumnNorm(diagonal_.data());
for (int i = 0; i < n; ++i) {
- diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
- max_diagonal_);
+ diagonal_[i] =
+ std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
}
diagonal_ = diagonal_.array().sqrt();
@@ -171,9 +171,8 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
// The gradient, the Gauss-Newton step, the Cauchy point,
// and all calculations involving the Jacobian have to
// be adjusted accordingly.
-void DoglegStrategy::ComputeGradient(
- SparseMatrix* jacobian,
- const double* residuals) {
+void DoglegStrategy::ComputeGradient(SparseMatrix* jacobian,
+ const double* residuals) {
gradient_.setZero();
jacobian->LeftMultiply(residuals, gradient_.data());
gradient_.array() /= diagonal_.array();
@@ -187,8 +186,7 @@ void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
Jg.setZero();
// The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
// instead of (J * D^-1) * (D^-1 * g).
- Vector scaled_gradient =
- (gradient_.array() / diagonal_.array()).matrix();
+ Vector scaled_gradient = (gradient_.array() / diagonal_.array()).matrix();
jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
}
@@ -217,7 +215,7 @@ void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
// Case 2. The Cauchy point and the Gauss-Newton steps lie outside
// the trust region. Rescale the Cauchy point to the trust region
// and return.
- if (gradient_norm * alpha_ >= radius_) {
+ if (gradient_norm * alpha_ >= radius_) {
dogleg_step = -(radius_ / gradient_norm) * gradient_;
dogleg_step_norm_ = radius_;
dogleg_step.array() /= diagonal_.array();
@@ -242,14 +240,12 @@ void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
// = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
const double c = b_dot_a - a_squared_norm;
const double d = sqrt(c * c + b_minus_a_squared_norm *
- (pow(radius_, 2.0) - a_squared_norm));
-
- double beta =
- (c <= 0)
- ? (d - c) / b_minus_a_squared_norm
- : (radius_ * radius_ - a_squared_norm) / (d + c);
- dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
- + beta * gauss_newton_step_;
+ (pow(radius_, 2.0) - a_squared_norm));
+
+ double beta = (c <= 0) ? (d - c) / b_minus_a_squared_norm
+ : (radius_ * radius_ - a_squared_norm) / (d + c);
+ dogleg_step =
+ (-alpha_ * (1.0 - beta)) * gradient_ + beta * gauss_newton_step_;
dogleg_step_norm_ = dogleg_step.norm();
dogleg_step.array() /= diagonal_.array();
VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
@@ -345,13 +341,13 @@ void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
// correctly determined.
const double kCosineThreshold = 0.99;
const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
- const double cosine_angle = -minimum.dot(grad_minimum) /
- (minimum.norm() * grad_minimum.norm());
+ const double cosine_angle =
+ -minimum.dot(grad_minimum) / (minimum.norm() * grad_minimum.norm());
if (cosine_angle < kCosineThreshold) {
LOG(WARNING) << "First order optimality seems to be violated "
<< "in the subspace method!\n"
- << "Cosine of angle between x and B x + g is "
- << cosine_angle << ".\n"
+ << "Cosine of angle between x and B x + g is " << cosine_angle
+ << ".\n"
<< "Taking a regular dogleg step instead.\n"
<< "Please consider filing a bug report if this "
<< "happens frequently or consistently.\n";
@@ -423,15 +419,17 @@ Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
const double trB = subspace_B_.trace();
const double r2 = radius_ * radius_;
Matrix2d B_adj;
+ // clang-format off
B_adj << subspace_B_(1, 1) , -subspace_B_(0, 1),
- -subspace_B_(1, 0) , subspace_B_(0, 0);
+ -subspace_B_(1, 0) , subspace_B_(0, 0);
+ // clang-format on
Vector polynomial(5);
polynomial(0) = r2;
polynomial(1) = 2.0 * r2 * trB;
polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm();
- polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_
- - r2 * detB * trB);
+ polynomial(3) =
+ -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_ - r2 * detB * trB);
polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
return polynomial;
@@ -565,10 +563,8 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
// of Jx = -r and later set x = -y to avoid having to modify
// either jacobian or residuals.
InvalidateArray(n, gauss_newton_step_.data());
- linear_solver_summary = linear_solver_->Solve(jacobian,
- residuals,
- solve_options,
- gauss_newton_step_.data());
+ linear_solver_summary = linear_solver_->Solve(
+ jacobian, residuals, solve_options, gauss_newton_step_.data());
if (per_solve_options.dump_format_type == CONSOLE ||
(per_solve_options.dump_format_type != CONSOLE &&
@@ -641,9 +637,7 @@ void DoglegStrategy::StepIsInvalid() {
reuse_ = false;
}
-double DoglegStrategy::Radius() const {
- return radius_;
-}
+double DoglegStrategy::Radius() const { return radius_; }
bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
// Compute an orthogonal basis for the subspace using QR decomposition.
@@ -701,8 +695,8 @@ bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
subspace_g_ = subspace_basis_.transpose() * gradient_;
- Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
- Jb(2, jacobian->num_rows());
+ Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor> Jb(
+ 2, jacobian->num_rows());
Jb.setZero();
Vector tmp;
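The ComputeTraditionalDoglegStep hunks above reflow the three classic cases: take the Gauss-Newton step when it lies inside the trust region, the rescaled steepest-descent (Cauchy) step when even that leaves the region, and otherwise the point on the segment between them whose coefficient beta solves ||cauchy + beta * (gauss_newton - cauchy)|| = radius. A minimal Eigen sketch of that interpolation, solving the quadratic for beta directly rather than through the c/d rearrangement the patch reindents, and without the diagonal scaling the real strategy applies:

    #include <Eigen/Dense>
    #include <cmath>
    #include <iostream>

    // Classic dogleg step between the Cauchy point and the Gauss-Newton step.
    Eigen::VectorXd DoglegStep(const Eigen::VectorXd& cauchy,
                               const Eigen::VectorXd& gauss_newton,
                               double radius) {
      if (gauss_newton.norm() <= radius) return gauss_newton;
      if (cauchy.norm() >= radius) return (radius / cauchy.norm()) * cauchy;

      // Solve ||cauchy + beta * (gauss_newton - cauchy)||^2 = radius^2 for beta in (0, 1].
      const Eigen::VectorXd diff = gauss_newton - cauchy;
      const double a = diff.squaredNorm();
      const double b = 2.0 * cauchy.dot(diff);
      const double c = cauchy.squaredNorm() - radius * radius;
      const double beta = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
      return cauchy + beta * diff;
    }

    int main() {
      Eigen::VectorXd cauchy(2), gn(2);
      cauchy << 0.5, 0.0;
      gn << 2.0, 1.0;
      std::cout << DoglegStep(cauchy, gn, 1.0).transpose() << "\n";
      return 0;
    }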
diff --git a/extern/ceres/internal/ceres/dogleg_strategy.h b/extern/ceres/internal/ceres/dogleg_strategy.h
index 1150940efd3..cc3778ea2a0 100644
--- a/extern/ceres/internal/ceres/dogleg_strategy.h
+++ b/extern/ceres/internal/ceres/dogleg_strategy.h
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
+#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
#include "ceres/trust_region_strategy.h"
@@ -52,16 +53,16 @@ namespace internal {
// DoglegStrategy follows the approach by Shultz, Schnabel, Byrd.
// This finds the exact optimum over the two-dimensional subspace
// spanned by the two Dogleg vectors.
-class DoglegStrategy : public TrustRegionStrategy {
+class CERES_EXPORT_INTERNAL DoglegStrategy : public TrustRegionStrategy {
public:
explicit DoglegStrategy(const TrustRegionStrategy::Options& options);
virtual ~DoglegStrategy() {}
// TrustRegionStrategy interface
Summary ComputeStep(const PerSolveOptions& per_solve_options,
- SparseMatrix* jacobian,
- const double* residuals,
- double* step) final;
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) final;
void StepAccepted(double step_quality) final;
void StepRejected(double step_quality) final;
void StepIsInvalid();
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_finalizer.h b/extern/ceres/internal/ceres/dynamic_compressed_row_finalizer.h
index a25a3083120..30c98d86b6f 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_finalizer.h
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_finalizer.h
@@ -40,7 +40,7 @@ namespace internal {
struct DynamicCompressedRowJacobianFinalizer {
void operator()(SparseMatrix* base_jacobian, int num_parameters) {
DynamicCompressedRowSparseMatrix* jacobian =
- down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+ down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
jacobian->Finalize(num_parameters);
}
};
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.h b/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.h
index 6e5ac38f07e..ef8fa25d7d4 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.h
@@ -47,8 +47,7 @@ class DynamicCompressedRowJacobianWriter {
public:
DynamicCompressedRowJacobianWriter(Evaluator::Options /* ignored */,
Program* program)
- : program_(program) {
- }
+ : program_(program) {}
// JacobianWriter interface.
@@ -70,7 +69,7 @@ class DynamicCompressedRowJacobianWriter {
// This method is thread-safe over residual blocks (each `residual_id`).
void Write(int residual_id,
int residual_offset,
- double **jacobians,
+ double** jacobians,
SparseMatrix* base_jacobian);
private:
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.cc b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
index f020768ce10..936e682b763 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.cc
@@ -28,22 +28,19 @@
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
-#include <cstring>
#include "ceres/dynamic_compressed_row_sparse_matrix.h"
+#include <cstring>
+
namespace ceres {
namespace internal {
DynamicCompressedRowSparseMatrix::DynamicCompressedRowSparseMatrix(
- int num_rows,
- int num_cols,
- int initial_max_num_nonzeros)
- : CompressedRowSparseMatrix(num_rows,
- num_cols,
- initial_max_num_nonzeros) {
- dynamic_cols_.resize(num_rows);
- dynamic_values_.resize(num_rows);
- }
+ int num_rows, int num_cols, int initial_max_num_nonzeros)
+ : CompressedRowSparseMatrix(num_rows, num_cols, initial_max_num_nonzeros) {
+ dynamic_cols_.resize(num_rows);
+ dynamic_values_.resize(num_rows);
+}
void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
int col,
@@ -56,8 +53,7 @@ void DynamicCompressedRowSparseMatrix::InsertEntry(int row,
dynamic_values_[row].push_back(value);
}
-void DynamicCompressedRowSparseMatrix::ClearRows(int row_start,
- int num_rows) {
+void DynamicCompressedRowSparseMatrix::ClearRows(int row_start, int num_rows) {
for (int r = 0; r < num_rows; ++r) {
const int i = row_start + r;
CHECK_GE(i, 0);
@@ -99,8 +95,8 @@ void DynamicCompressedRowSparseMatrix::Finalize(int num_additional_elements) {
mutable_rows()[num_rows()] = index_into_values_and_cols;
CHECK_EQ(index_into_values_and_cols, num_jacobian_nonzeros)
- << "Ceres bug: final index into values_ and cols_ should be equal to "
- << "the number of jacobian nonzeros. Please contact the developers!";
+ << "Ceres bug: final index into values_ and cols_ should be equal to "
+ << "the number of jacobian nonzeros. Please contact the developers!";
}
} // namespace internal
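The hunk above only reflows existing code, but the pattern it touches is worth spelling out: entries are buffered in per-row vectors via InsertEntry and only flattened into the usual compressed-row arrays when Finalize is called. A minimal, self-contained sketch of that insert-then-finalize idea, independent of the real Ceres class (all names below are hypothetical):

    #include <cassert>
    #include <vector>

    // Hypothetical illustration of the insert-then-finalize idea; not Ceres code.
    struct DynamicCrs {
      int num_cols = 0;
      std::vector<std::vector<int>> row_cols;     // per-row column indices
      std::vector<std::vector<double>> row_vals;  // per-row values
      // Flattened CRS arrays produced by Finalize().
      std::vector<int> rows, cols;
      std::vector<double> values;

      DynamicCrs(int num_rows, int num_cols_in)
          : num_cols(num_cols_in), row_cols(num_rows), row_vals(num_rows) {}

      void InsertEntry(int row, int col, double value) {
        assert(col >= 0 && col < num_cols);
        row_cols[row].push_back(col);
        row_vals[row].push_back(value);
      }

      void Finalize() {
        rows.assign(row_cols.size() + 1, 0);
        cols.clear();
        values.clear();
        for (size_t r = 0; r < row_cols.size(); ++r) {
          cols.insert(cols.end(), row_cols[r].begin(), row_cols[r].end());
          values.insert(values.end(), row_vals[r].begin(), row_vals[r].end());
          rows[r + 1] = static_cast<int>(cols.size());
        }
      }
    };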
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
index ad41da7b15a..d06c36ebb94 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
@@ -44,11 +44,13 @@
#include <vector>
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
-class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
+class CERES_EXPORT_INTERNAL DynamicCompressedRowSparseMatrix
+ : public CompressedRowSparseMatrix {
public:
  // Set the number of rows and columns for the underlying
// `CompressedRowSparseMatrix` and set the initial number of maximum non-zero
diff --git a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
index 25d5417bca8..d31c4228f05 100644
--- a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+++ b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -95,7 +95,7 @@ LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImpl(
LOG(FATAL) << "Unsupported sparse linear algebra library for "
<< "dynamic sparsity: "
<< SparseLinearAlgebraLibraryTypeToString(
- options_.sparse_linear_algebra_library_type);
+ options_.sparse_linear_algebra_library_type);
}
if (per_solve_options.D != nullptr) {
diff --git a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
index 4e31c7a8492..36118bab1a5 100644
--- a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
+++ b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
@@ -35,7 +35,9 @@
#define CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include "ceres/linear_solver.h"
@@ -59,23 +61,19 @@ class DynamicSparseNormalCholeskySolver
virtual ~DynamicSparseNormalCholeskySolver() {}
private:
- LinearSolver::Summary SolveImpl(
- CompressedRowSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& options,
- double* x) final;
+ LinearSolver::Summary SolveImpl(CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x) final;
- LinearSolver::Summary SolveImplUsingSuiteSparse(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
+ LinearSolver::Summary SolveImplUsingSuiteSparse(CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
- LinearSolver::Summary SolveImplUsingCXSparse(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
+ LinearSolver::Summary SolveImplUsingCXSparse(CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
- LinearSolver::Summary SolveImplUsingEigen(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
+ LinearSolver::Summary SolveImplUsingEigen(CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
const LinearSolver::Options options_;
};
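Several headers in this patch wrap the port.h include in clang-format guards so the formatter cannot reorder an include that must stay first. The guard is an ordinary comment pair; a generic sketch (header names here are hypothetical):

    // clang-format off
    #include "must_come_first.h"   // hypothetical header defining compile options
    // clang-format on
    #include "everything_else.h"   // hypothetical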
diff --git a/extern/ceres/internal/ceres/eigensparse.h b/extern/ceres/internal/ceres/eigensparse.h
index 2e6c6f01abb..bb89c2c11ac 100644
--- a/extern/ceres/internal/ceres/eigensparse.h
+++ b/extern/ceres/internal/ceres/eigensparse.h
@@ -56,8 +56,8 @@ class EigenSparseCholesky : public SparseCholesky {
// SparseCholesky interface.
virtual ~EigenSparseCholesky();
- virtual LinearSolverTerminationType Factorize(
- CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+ virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) = 0;
virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
virtual LinearSolverTerminationType Solve(const double* rhs,
double* solution,
@@ -74,8 +74,8 @@ class FloatEigenSparseCholesky : public SparseCholesky {
// SparseCholesky interface.
virtual ~FloatEigenSparseCholesky();
- virtual LinearSolverTerminationType Factorize(
- CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+ virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) = 0;
virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
virtual LinearSolverTerminationType Solve(const double* rhs,
double* solution,
diff --git a/extern/ceres/internal/ceres/evaluator.cc b/extern/ceres/internal/ceres/evaluator.cc
index 8387983553d..516874184d9 100644
--- a/extern/ceres/internal/ceres/evaluator.cc
+++ b/extern/ceres/internal/ceres/evaluator.cc
@@ -28,7 +28,10 @@
//
// Author: keir@google.com (Keir Mierle)
+#include "ceres/evaluator.h"
+
#include <vector>
+
#include "ceres/block_evaluate_preparer.h"
#include "ceres/block_jacobian_writer.h"
#include "ceres/compressed_row_jacobian_writer.h"
@@ -37,7 +40,6 @@
#include "ceres/dense_jacobian_writer.h"
#include "ceres/dynamic_compressed_row_finalizer.h"
#include "ceres/dynamic_compressed_row_jacobian_writer.h"
-#include "ceres/evaluator.h"
#include "ceres/internal/port.h"
#include "ceres/program_evaluator.h"
#include "ceres/scratch_evaluate_preparer.h"
@@ -56,26 +58,23 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
switch (options.linear_solver_type) {
case DENSE_QR:
case DENSE_NORMAL_CHOLESKY:
- return new ProgramEvaluator<ScratchEvaluatePreparer,
- DenseJacobianWriter>(options,
- program);
+ return new ProgramEvaluator<ScratchEvaluatePreparer, DenseJacobianWriter>(
+ options, program);
case DENSE_SCHUR:
case SPARSE_SCHUR:
case ITERATIVE_SCHUR:
case CGNR:
- return new ProgramEvaluator<BlockEvaluatePreparer,
- BlockJacobianWriter>(options,
- program);
+ return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+ options, program);
case SPARSE_NORMAL_CHOLESKY:
if (options.dynamic_sparsity) {
return new ProgramEvaluator<ScratchEvaluatePreparer,
DynamicCompressedRowJacobianWriter,
DynamicCompressedRowJacobianFinalizer>(
- options, program);
+ options, program);
} else {
- return new ProgramEvaluator<BlockEvaluatePreparer,
- BlockJacobianWriter>(options,
- program);
+ return new ProgramEvaluator<BlockEvaluatePreparer, BlockJacobianWriter>(
+ options, program);
}
default:
diff --git a/extern/ceres/internal/ceres/evaluator.h b/extern/ceres/internal/ceres/evaluator.h
index b820958ed77..9cf42593e95 100644
--- a/extern/ceres/internal/ceres/evaluator.h
+++ b/extern/ceres/internal/ceres/evaluator.h
@@ -55,7 +55,7 @@ class SparseMatrix;
// function that is useful for an optimizer that wants to minimize the least
// squares objective. This insulates the optimizer from issues like Jacobian
// storage, parameterization, etc.
-class Evaluator {
+class CERES_EXPORT_INTERNAL Evaluator {
public:
virtual ~Evaluator();
@@ -124,12 +124,8 @@ class Evaluator {
double* residuals,
double* gradient,
SparseMatrix* jacobian) {
- return Evaluate(EvaluateOptions(),
- state,
- cost,
- residuals,
- gradient,
- jacobian);
+ return Evaluate(
+ EvaluateOptions(), state, cost, residuals, gradient, jacobian);
}
// Make a change delta (of size NumEffectiveParameters()) to state (of size
@@ -152,7 +148,7 @@ class Evaluator {
// This is the effective number of parameters that the optimizer may adjust.
// This applies when there are parameterizations on some of the parameters.
- virtual int NumEffectiveParameters() const = 0;
+ virtual int NumEffectiveParameters() const = 0;
// The number of residuals in the optimization problem.
virtual int NumResiduals() const = 0;
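The only substantive change in this header is the CERES_EXPORT_INTERNAL annotation on the class. Macros of this kind typically expand to a compiler-specific visibility attribute when building a shared library and to nothing otherwise. A hedged sketch of the general pattern, not the actual definition in ceres/internal/port.h (MYLIB_EXPORT and MYLIB_BUILDING_SHARED_LIBRARY are hypothetical names):

    // Illustrative only; the real macro lives in ceres/internal/port.h.
    #if defined(_MSC_VER) && defined(MYLIB_BUILDING_SHARED_LIBRARY)
    #  define MYLIB_EXPORT __declspec(dllexport)
    #elif defined(__GNUC__)
    #  define MYLIB_EXPORT __attribute__((visibility("default")))
    #else
    #  define MYLIB_EXPORT
    #endif

    class MYLIB_EXPORT Evaluator { /* ... */ };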
diff --git a/extern/ceres/internal/ceres/file.cc b/extern/ceres/internal/ceres/file.cc
index c95a44d2c38..94f21355a2b 100644
--- a/extern/ceres/internal/ceres/file.cc
+++ b/extern/ceres/internal/ceres/file.cc
@@ -33,6 +33,7 @@
#include "ceres/file.h"
#include <cstdio>
+
#include "glog/logging.h"
namespace ceres {
@@ -40,7 +41,7 @@ namespace internal {
using std::string;
-void WriteStringToFileOrDie(const string &data, const string &filename) {
+void WriteStringToFileOrDie(const string& data, const string& filename) {
FILE* file_descriptor = fopen(filename.c_str(), "wb");
if (!file_descriptor) {
LOG(FATAL) << "Couldn't write to file: " << filename;
@@ -49,7 +50,7 @@ void WriteStringToFileOrDie(const string &data, const string &filename) {
fclose(file_descriptor);
}
-void ReadFileToStringOrDie(const string &filename, string *data) {
+void ReadFileToStringOrDie(const string& filename, string* data) {
FILE* file_descriptor = fopen(filename.c_str(), "r");
if (!file_descriptor) {
@@ -63,10 +64,8 @@ void ReadFileToStringOrDie(const string &filename, string *data) {
// Read the data.
fseek(file_descriptor, 0L, SEEK_SET);
- int num_read = fread(&((*data)[0]),
- sizeof((*data)[0]),
- num_bytes,
- file_descriptor);
+ int num_read =
+ fread(&((*data)[0]), sizeof((*data)[0]), num_bytes, file_descriptor);
if (num_read != num_bytes) {
LOG(FATAL) << "Couldn't read all of " << filename
<< "expected bytes: " << num_bytes * sizeof((*data)[0])
@@ -77,9 +76,9 @@ void ReadFileToStringOrDie(const string &filename, string *data) {
string JoinPath(const string& dirname, const string& basename) {
#ifdef _WIN32
- static const char separator = '\\';
+ static const char separator = '\\';
#else
- static const char separator = '/';
+ static const char separator = '/';
#endif // _WIN32
if ((!basename.empty() && basename[0] == separator) || dirname.empty()) {
diff --git a/extern/ceres/internal/ceres/file.h b/extern/ceres/internal/ceres/file.h
index 219b459b919..c0015df60f0 100644
--- a/extern/ceres/internal/ceres/file.h
+++ b/extern/ceres/internal/ceres/file.h
@@ -34,18 +34,20 @@
#define CERES_INTERNAL_FILE_H_
#include <string>
+
#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
-void WriteStringToFileOrDie(const std::string &data,
- const std::string &filename);
-void ReadFileToStringOrDie(const std::string &filename, std::string *data);
+void WriteStringToFileOrDie(const std::string& data,
+ const std::string& filename);
+void ReadFileToStringOrDie(const std::string& filename, std::string* data);
// Join two path components, adding a slash if necessary. If basename is an
// absolute path then JoinPath ignores dirname and simply returns basename.
-std::string JoinPath(const std::string& dirname, const std::string& basename);
+CERES_EXPORT_INTERNAL std::string JoinPath(const std::string& dirname,
+ const std::string& basename);
} // namespace internal
} // namespace ceres
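For orientation, the helpers declared above are fatal-on-failure convenience wrappers: they LOG(FATAL) and abort rather than return an error code. A small round-trip sketch using only the signatures visible in this hunk (the header is internal to Ceres, so this is illustrative rather than public API usage):

    #include <string>
    #include "ceres/file.h"  // internal Ceres header

    void RoundTrip() {
      const std::string path =
          ceres::internal::JoinPath("/tmp", "ceres_demo.txt");
      ceres::internal::WriteStringToFileOrDie("hello", path);
      std::string contents;
      ceres::internal::ReadFileToStringOrDie(path, &contents);
      // contents == "hello"
    }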
diff --git a/extern/ceres/internal/ceres/float_cxsparse.h b/extern/ceres/internal/ceres/float_cxsparse.h
index 57fc5e4c010..9a274c23636 100644
--- a/extern/ceres/internal/ceres/float_cxsparse.h
+++ b/extern/ceres/internal/ceres/float_cxsparse.h
@@ -37,6 +37,7 @@
#if !defined(CERES_NO_CXSPARSE)
#include <memory>
+
#include "ceres/sparse_cholesky.h"
namespace ceres {
@@ -46,8 +47,7 @@ namespace internal {
// CXSparse.
class FloatCXSparseCholesky : public SparseCholesky {
public:
- static std::unique_ptr<SparseCholesky> Create(
- OrderingType ordering_type);
+ static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/float_suitesparse.h b/extern/ceres/internal/ceres/float_suitesparse.h
index ac4d4090922..c436da43f86 100644
--- a/extern/ceres/internal/ceres/float_suitesparse.h
+++ b/extern/ceres/internal/ceres/float_suitesparse.h
@@ -32,9 +32,12 @@
#define CERES_INTERNAL_FLOAT_SUITESPARSE_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include <memory>
+
#include "ceres/sparse_cholesky.h"
#if !defined(CERES_NO_SUITESPARSE)
@@ -46,8 +49,7 @@ namespace internal {
// SuiteSparse.
class FloatSuiteSparseCholesky : public SparseCholesky {
public:
- static std::unique_ptr<SparseCholesky> Create(
- OrderingType ordering_type);
+ static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/function_sample.cc b/extern/ceres/internal/ceres/function_sample.cc
index 2fd3dbdf7c5..3e0ae60ca5d 100644
--- a/extern/ceres/internal/ceres/function_sample.cc
+++ b/extern/ceres/internal/ceres/function_sample.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/function_sample.h"
+
#include "ceres/stringprintf.h"
namespace ceres {
@@ -64,9 +65,14 @@ FunctionSample::FunctionSample(const double x,
gradient_is_valid(true) {}
std::string FunctionSample::ToDebugString() const {
- return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
- "value_is_valid: %d, gradient_is_valid: %d]",
- x, value, gradient, value_is_valid, gradient_is_valid);
+ return StringPrintf(
+ "[x: %.8e, value: %.8e, gradient: %.8e, "
+ "value_is_valid: %d, gradient_is_valid: %d]",
+ x,
+ value,
+ gradient,
+ value_is_valid,
+ gradient_is_valid);
}
} // namespace internal
diff --git a/extern/ceres/internal/ceres/function_sample.h b/extern/ceres/internal/ceres/function_sample.h
index df79aef944f..3bcea1bc5ff 100644
--- a/extern/ceres/internal/ceres/function_sample.h
+++ b/extern/ceres/internal/ceres/function_sample.h
@@ -32,7 +32,9 @@
#define CERES_INTERNAL_FUNCTION_SAMPLE_H_
#include <string>
+
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -45,7 +47,7 @@ namespace internal {
// line/direction. FunctionSample contains the information in two
// ways. Information in the ambient space and information along the
// direction of search.
-struct FunctionSample {
+struct CERES_EXPORT_INTERNAL FunctionSample {
FunctionSample();
FunctionSample(double x, double value);
FunctionSample(double x, double value, double gradient);
@@ -85,9 +87,6 @@ struct FunctionSample {
bool gradient_is_valid;
};
-
-
-
} // namespace internal
} // namespace ceres
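FunctionSample, as the hunks show, is a small value type holding a point along the line search, its function value and (optionally) gradient, plus validity flags, with ToDebugString for logging. A minimal usage sketch based only on the constructor and method visible above:

    #include <string>
    #include "ceres/function_sample.h"  // internal Ceres header

    std::string Demo() {
      // Constructor visible in the hunk: FunctionSample(x, value, gradient).
      ceres::internal::FunctionSample s(0.5, 1.25, -0.3);
      // Returns something like
      // "[x: 5.00000000e-01, value: 1.25000000e+00, gradient: -3.00000000e-01, ...]".
      return s.ToDebugString();
    }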
diff --git a/extern/ceres/internal/ceres/generate_template_specializations.py b/extern/ceres/internal/ceres/generate_template_specializations.py
index 5e91f8d2b6a..74e46c28b78 100644
--- a/extern/ceres/internal/ceres/generate_template_specializations.py
+++ b/extern/ceres/internal/ceres/generate_template_specializations.py
@@ -101,9 +101,9 @@ def GenerateFactoryConditional(row_block_size, e_block_size, f_block_size):
return "%s"
if (len(conditionals) == 1):
- return " if " + conditionals[0] + "{\n %s\n }\n"
+ return " if " + conditionals[0] + " {\n %s\n }\n"
- return " if (" + " &&\n ".join(conditionals) + ") {\n %s\n }\n"
+ return " if (" + " &&\n ".join(conditionals) + ") {\n %s\n }\n"
def Specialize(name, data):
"""
@@ -214,10 +214,10 @@ QUERY_FOOTER = """
} // namespace ceres
"""
-QUERY_ACTION = """ *row_block_size = %s;
- *e_block_size = %s;
- *f_block_size = %s;
- return;"""
+QUERY_ACTION = """ *row_block_size = %s;
+ *e_block_size = %s;
+ *f_block_size = %s;
+ return;"""
def GenerateQueryFile():
"""
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
index 86ad17b4f71..f5753bef544 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
index 33018d573a4..a7a9b5231cf 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
index a429a546e3d..faf6c4a754a 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
index f6f03ea6dcc..92fd4cddf43 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
index 0b73e1a2aa8..2df314f137a 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
index bc4a86194eb..ff1ca3e7f1f 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
index fe8f7dd37af..5041df9152d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
index ac493fcd0c0..c0b72fec8b8 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
index e29efaf4832..8a3c162ab7e 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
index e61e0a31314..0e69ca6404d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
index 2e1170da01f..ba9bb615291 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
index 4a5590d9751..1acdb9b21d5 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
index 83015f1ecc5..888ff99557d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
index 25671f913dd..bd4dde3d207 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
index d259802bd5a..6d3516fc6d5 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
index c9567595acd..77d22ed6bc2 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
index d3b20be70e7..aeb456c6e2a 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
index f08049c9653..bb240b9e3f6 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
index 9342612022f..5d47543644d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
index 8b273fa0da0..e14f980933d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
index e8b45e49eca..9ec50563ac8 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
index 3545b869d5f..1e124797598 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -39,14 +39,14 @@
//
// This file is generated using generate_template_specializations.py.
-
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-template class PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>;
+template class PartitionedMatrixView<Eigen::Dynamic,
+ Eigen::Dynamic,
+ Eigen::Dynamic>;
} // namespace internal
} // namespace ceres
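The generated _d_d_d file above is the fully dynamic specialization; the change only drops a redundant include and re-wraps the explicit instantiation. For readers unfamiliar with the pattern, an explicit instantiation definition compiles a template's code in one translation unit so other files can link against it instead of re-instantiating it. A generic sketch (names hypothetical):

    // widget.h
    template <int kRows>
    struct Widget { int rows() const { return kRows; } };

    // widget_3.cc -- compiles the kRows == 3 code exactly once.
    template struct Widget<3>;

    // Other .cc files can declare "extern template struct Widget<3>;" and link
    // against the instantiation above instead of generating their own copy.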
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
index 79fcf437981..289a809acb7 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
index edd7fb649b4..20311ba843d 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
index 692267dba46..1f6a8ae4a0e 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
index 33d9c6de270..08b18d357bd 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
index 4a5e2fe30c0..115b4c8cce1 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
index 7ee63d069aa..c7035370424 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
index 108760ef1f8..edb9afea969 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
index 4fea2fa4417..faa5c19f5c0 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
index 0d13c99e7ca..81b6f975e7f 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
index 3827c653a63..2cb2d15ac93 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
index 47bdfab1f22..a78eff3aa02 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc
index 3777be22707..e2534f235b6 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
index 862c76a2a9c..296a46273bc 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
index 5b5b7ccd415..0d0b04e686c 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
index ce2d450b073..797992660d7 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
index 9b02bd9db5a..189be043af8 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc
index 1cbeadf518f..35c14a8f4bd 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
index 10f709d7577..878500a2100 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
index bcbcc745519..c4b0959db6a 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
index 44ecc87deba..20df5343335 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
index 69c856304f0..17368dca4f6 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -45,7 +45,6 @@
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
index 348708bb335..ca598fe5eca 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -39,9 +39,7 @@
//
// This file is generated using generate_template_specializations.py.
-
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/gradient_checker.cc b/extern/ceres/internal/ceres/gradient_checker.cc
index ef56666970d..dadaaa08734 100644
--- a/extern/ceres/internal/ceres/gradient_checker.cc
+++ b/extern/ceres/internal/ceres/gradient_checker.cc
@@ -56,7 +56,7 @@ namespace {
// the local space of the respective local parameterizations.
bool EvaluateCostFunction(
const ceres::CostFunction* function,
- double const* const * parameters,
+ double const* const* parameters,
const std::vector<const ceres::LocalParameterization*>&
local_parameterizations,
Vector* residuals,
@@ -95,8 +95,8 @@ bool EvaluateCostFunction(
CHECK_NE(0, function->num_residuals());
residuals->resize(function->num_residuals());
residuals->setZero();
- if (!function->Evaluate(parameters, residuals->data(),
- jacobian_data.data())) {
+ if (!function->Evaluate(
+ parameters, residuals->data(), jacobian_data.data())) {
return false;
}
@@ -109,20 +109,20 @@ bool EvaluateCostFunction(
int local_size = local_parameterizations.at(i)->LocalSize();
CHECK_EQ(jacobians->at(i).cols(), global_size);
Matrix global_J_local(global_size, local_size);
- local_parameterizations.at(i)->ComputeJacobian(
- parameters[i], global_J_local.data());
+ local_parameterizations.at(i)->ComputeJacobian(parameters[i],
+ global_J_local.data());
local_jacobians->at(i).noalias() = jacobians->at(i) * global_J_local;
}
}
return true;
}
-} // namespace
+} // namespace
GradientChecker::GradientChecker(
- const CostFunction* function,
- const vector<const LocalParameterization*>* local_parameterizations,
- const NumericDiffOptions& options) :
- function_(function) {
+ const CostFunction* function,
+ const vector<const LocalParameterization*>* local_parameterizations,
+ const NumericDiffOptions& options)
+ : function_(function) {
CHECK(function != nullptr);
if (local_parameterizations != NULL) {
local_parameterizations_ = *local_parameterizations;
@@ -132,8 +132,8 @@ GradientChecker::GradientChecker(
}
DynamicNumericDiffCostFunction<CostFunction, RIDDERS>*
finite_diff_cost_function =
- new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
- function, DO_NOT_TAKE_OWNERSHIP, options);
+ new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
+ function, DO_NOT_TAKE_OWNERSHIP, options);
finite_diff_cost_function_.reset(finite_diff_cost_function);
const vector<int32_t>& parameter_block_sizes =
@@ -145,7 +145,7 @@ GradientChecker::GradientChecker(
finite_diff_cost_function->SetNumResiduals(function->num_residuals());
}
-bool GradientChecker::Probe(double const* const * parameters,
+bool GradientChecker::Probe(double const* const* parameters,
double relative_precision,
ProbeResults* results_param) const {
int num_residuals = function_->num_residuals();
@@ -171,8 +171,12 @@ bool GradientChecker::Probe(double const* const * parameters,
// Evaluate the derivative using the user supplied code.
vector<Matrix>& jacobians = results->jacobians;
vector<Matrix>& local_jacobians = results->local_jacobians;
- if (!EvaluateCostFunction(function_, parameters, local_parameterizations_,
- &results->residuals, &jacobians, &local_jacobians)) {
+ if (!EvaluateCostFunction(function_,
+ parameters,
+ local_parameterizations_,
+ &results->residuals,
+ &jacobians,
+ &local_jacobians)) {
results->error_log = "Function evaluation with Jacobians failed.";
results->return_value = false;
}
@@ -181,10 +185,14 @@ bool GradientChecker::Probe(double const* const * parameters,
vector<Matrix>& numeric_jacobians = results->numeric_jacobians;
vector<Matrix>& local_numeric_jacobians = results->local_numeric_jacobians;
Vector finite_diff_residuals;
- if (!EvaluateCostFunction(finite_diff_cost_function_.get(), parameters,
- local_parameterizations_, &finite_diff_residuals,
- &numeric_jacobians, &local_numeric_jacobians)) {
- results->error_log += "\nFunction evaluation with numerical "
+ if (!EvaluateCostFunction(finite_diff_cost_function_.get(),
+ parameters,
+ local_parameterizations_,
+ &finite_diff_residuals,
+ &numeric_jacobians,
+ &local_numeric_jacobians)) {
+ results->error_log +=
+ "\nFunction evaluation with numerical "
"differentiation failed.";
results->return_value = false;
}
@@ -194,13 +202,13 @@ bool GradientChecker::Probe(double const* const * parameters,
}
for (int i = 0; i < num_residuals; ++i) {
- if (!IsClose(
- results->residuals[i],
- finite_diff_residuals[i],
- relative_precision,
- NULL,
- NULL)) {
- results->error_log = "Function evaluation with and without Jacobians "
+ if (!IsClose(results->residuals[i],
+ finite_diff_residuals[i],
+ relative_precision,
+ NULL,
+ NULL)) {
+ results->error_log =
+ "Function evaluation with and without Jacobians "
"resulted in different residuals.";
LOG(INFO) << results->residuals.transpose();
LOG(INFO) << finite_diff_residuals.transpose();
@@ -219,7 +227,7 @@ bool GradientChecker::Probe(double const* const * parameters,
for (int k = 0; k < function_->parameter_block_sizes().size(); k++) {
StringAppendF(&error_log,
"========== "
- "Jacobian for " "block %d: (%ld by %ld)) "
+ "Jacobian for block %d: (%ld by %ld)) "
"==========\n",
k,
static_cast<long>(local_jacobians[k].rows()),
@@ -234,28 +242,33 @@ bool GradientChecker::Probe(double const* const * parameters,
double term_jacobian = local_jacobians[k](i, j);
double finite_jacobian = local_numeric_jacobians[k](i, j);
double relative_error, absolute_error;
- bool bad_jacobian_entry =
- !IsClose(term_jacobian,
- finite_jacobian,
- relative_precision,
- &relative_error,
- &absolute_error);
+ bool bad_jacobian_entry = !IsClose(term_jacobian,
+ finite_jacobian,
+ relative_precision,
+ &relative_error,
+ &absolute_error);
worst_relative_error = std::max(worst_relative_error, relative_error);
StringAppendF(&error_log,
"%6d %4d %4d %17g %17g %17g %17g %17g %17g",
- k, i, j,
- term_jacobian, finite_jacobian,
- absolute_error, relative_error,
+ k,
+ i,
+ j,
+ term_jacobian,
+ finite_jacobian,
+ absolute_error,
+ relative_error,
parameters[k][j],
results->residuals[i]);
if (bad_jacobian_entry) {
num_bad_jacobian_components++;
- StringAppendF(
- &error_log,
- " ------ (%d,%d,%d) Relative error worse than %g",
- k, i, j, relative_precision);
+ StringAppendF(&error_log,
+ " ------ (%d,%d,%d) Relative error worse than %g",
+ k,
+ i,
+ j,
+ relative_precision);
}
error_log += "\n";
}
@@ -264,11 +277,12 @@ bool GradientChecker::Probe(double const* const * parameters,
// Since there were some bad errors, dump comprehensive debug info.
if (num_bad_jacobian_components) {
- string header = StringPrintf("\nDetected %d bad Jacobian component(s). "
+ string header = StringPrintf(
+ "\nDetected %d bad Jacobian component(s). "
"Worst relative error was %g.\n",
num_bad_jacobian_components,
worst_relative_error);
- results->error_log = header + "\n" + error_log;
+ results->error_log = header + "\n" + error_log;
return false;
}
return true;
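The reflowed code above is the core of gradient checking: evaluate the user-supplied Jacobian, evaluate a finite-difference Jacobian, and flag entries whose relative error exceeds the requested precision. A stripped-down sketch of that per-entry comparison, using a hypothetical helper rather than the Ceres IsClose:

    #include <algorithm>
    #include <cmath>

    // Hypothetical stand-in for the relative-error test used above.
    bool EntryIsClose(double analytic, double numeric, double relative_precision) {
      const double absolute_error = std::fabs(analytic - numeric);
      const double denominator =
          std::max(std::fabs(analytic), std::fabs(numeric));
      if (denominator == 0.0) return true;  // both entries are exactly zero
      return absolute_error / denominator <= relative_precision;
    }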
diff --git a/extern/ceres/internal/ceres/gradient_checking_cost_function.cc b/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
index 13d6c58a6b7..2eb6d627167 100644
--- a/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
+++ b/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
@@ -38,6 +38,7 @@
#include <string>
#include <vector>
+#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/gradient_checker.h"
#include "ceres/internal/eigen.h"
#include "ceres/parameter_block.h"
@@ -45,7 +46,6 @@
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
-#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -81,7 +81,7 @@ class GradientCheckingCostFunction : public CostFunction {
set_num_residuals(function->num_residuals());
}
- virtual ~GradientCheckingCostFunction() { }
+ virtual ~GradientCheckingCostFunction() {}
bool Evaluate(double const* const* parameters,
double* residuals,
@@ -92,9 +92,8 @@ class GradientCheckingCostFunction : public CostFunction {
}
GradientChecker::ProbeResults results;
- bool okay = gradient_checker_.Probe(parameters,
- relative_precision_,
- &results);
+ bool okay =
+ gradient_checker_.Probe(parameters, relative_precision_, &results);
// If the cost function returned false, there's nothing we can say about
// the gradients.
@@ -117,8 +116,9 @@ class GradientCheckingCostFunction : public CostFunction {
}
if (!okay) {
- std::string error_log = "Gradient Error detected!\nExtra info for "
- "this residual: " + extra_info_ + "\n" + results.error_log;
+ std::string error_log =
+ "Gradient Error detected!\nExtra info for this residual: " +
+ extra_info_ + "\n" + results.error_log;
callback_->SetGradientErrorDetected(error_log);
}
return true;
@@ -135,13 +135,12 @@ class GradientCheckingCostFunction : public CostFunction {
} // namespace
GradientCheckingIterationCallback::GradientCheckingIterationCallback()
- : gradient_error_detected_(false) {
-}
+ : gradient_error_detected_(false) {}
CallbackReturnType GradientCheckingIterationCallback::operator()(
const IterationSummary& summary) {
if (gradient_error_detected_) {
- LOG(ERROR)<< "Gradient error detected. Terminating solver.";
+ LOG(ERROR) << "Gradient error detected. Terminating solver.";
return SOLVER_ABORT;
}
return SOLVER_CONTINUE;
@@ -166,7 +165,8 @@ CostFunction* CreateGradientCheckingCostFunction(
return new GradientCheckingCostFunction(cost_function,
local_parameterizations,
numeric_diff_options,
- relative_precision, extra_info,
+ relative_precision,
+ extra_info,
callback);
}
@@ -193,8 +193,8 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
NumericDiffOptions numeric_diff_options;
numeric_diff_options.relative_step_size = relative_step_size;
- ProblemImpl* gradient_checking_problem_impl = new ProblemImpl(
- gradient_checking_problem_options);
+ ProblemImpl* gradient_checking_problem_impl =
+ new ProblemImpl(gradient_checking_problem_options);
Program* program = problem_impl->mutable_program();
@@ -213,7 +213,7 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
parameter_block->mutable_user_state());
}
- for (int i = 0; i < parameter_block->Size(); ++i) {
+ for (int i = 0; i < parameter_block->Size(); ++i) {
gradient_checking_problem_impl->SetParameterUpperBound(
parameter_block->mutable_user_state(),
i,
@@ -235,8 +235,8 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
// Build a human readable string which identifies the
// ResidualBlock. This is used by the GradientCheckingCostFunction
// when logging debugging information.
- string extra_info = StringPrintf(
- "Residual block id %d; depends on parameters [", i);
+ string extra_info =
+ StringPrintf("Residual block id %d; depends on parameters [", i);
vector<double*> parameter_blocks;
vector<const LocalParameterization*> local_parameterizations;
parameter_blocks.reserve(residual_block->NumParameterBlocks());
@@ -277,13 +277,11 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
// depend on this being the case, so we explicitly call
// SetParameterBlockStatePtrsToUserStatePtrs to ensure that this is
// the case.
- gradient_checking_problem_impl
- ->mutable_program()
+ gradient_checking_problem_impl->mutable_program()
->SetParameterBlockStatePtrsToUserStatePtrs();
return gradient_checking_problem_impl;
}
-
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/gradient_checking_cost_function.h b/extern/ceres/internal/ceres/gradient_checking_cost_function.h
index e9a34f7eb9f..ea6e9b31c8c 100644
--- a/extern/ceres/internal/ceres/gradient_checking_cost_function.h
+++ b/extern/ceres/internal/ceres/gradient_checking_cost_function.h
@@ -36,6 +36,7 @@
#include <string>
#include "ceres/cost_function.h"
+#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/local_parameterization.h"
@@ -46,7 +47,8 @@ class ProblemImpl;
// Callback that collects information about gradient checking errors, and
// will abort the solve as soon as an error occurs.
-class GradientCheckingIterationCallback : public IterationCallback {
+class CERES_EXPORT_INTERNAL GradientCheckingIterationCallback
+ : public IterationCallback {
public:
GradientCheckingIterationCallback();
@@ -60,6 +62,7 @@ class GradientCheckingIterationCallback : public IterationCallback {
// Retrieve error status (not thread safe).
bool gradient_error_detected() const { return gradient_error_detected_; }
const std::string& error_log() const { return error_log_; }
+
private:
bool gradient_error_detected_;
std::string error_log_;
@@ -70,7 +73,7 @@ class GradientCheckingIterationCallback : public IterationCallback {
// with finite differences. This API is only intended for unit tests that intend
// to check the functionality of the GradientCheckingCostFunction
// implementation directly.
-CostFunction* CreateGradientCheckingCostFunction(
+CERES_EXPORT_INTERNAL CostFunction* CreateGradientCheckingCostFunction(
const CostFunction* cost_function,
const std::vector<const LocalParameterization*>* local_parameterizations,
double relative_step_size,
@@ -99,7 +102,7 @@ CostFunction* CreateGradientCheckingCostFunction(
// jacobians obtained by numerically differentiating them. See the
// documentation of 'numeric_derivative_relative_step_size' in solver.h for a
// better explanation.
-ProblemImpl* CreateGradientCheckingProblemImpl(
+CERES_EXPORT_INTERNAL ProblemImpl* CreateGradientCheckingProblemImpl(
ProblemImpl* problem_impl,
double relative_step_size,
double relative_precision,
diff --git a/extern/ceres/internal/ceres/gradient_problem.cc b/extern/ceres/internal/ceres/gradient_problem.cc
index 4ebd3e60610..ba33fbc90f8 100644
--- a/extern/ceres/internal/ceres/gradient_problem.cc
+++ b/extern/ceres/internal/ceres/gradient_problem.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/gradient_problem.h"
+
#include "ceres/local_parameterization.h"
#include "glog/logging.h"
@@ -38,14 +39,13 @@ GradientProblem::GradientProblem(FirstOrderFunction* function)
: function_(function),
parameterization_(
new IdentityParameterization(function_->NumParameters())),
- scratch_(new double[function_->NumParameters()]) {
-}
+ scratch_(new double[function_->NumParameters()]) {}
GradientProblem::GradientProblem(FirstOrderFunction* function,
LocalParameterization* parameterization)
- : function_(function),
- parameterization_(parameterization),
- scratch_(new double[function_->NumParameters()]) {
+ : function_(function),
+ parameterization_(parameterization),
+ scratch_(new double[function_->NumParameters()]) {
CHECK_EQ(function_->NumParameters(), parameterization_->GlobalSize());
}
@@ -57,7 +57,6 @@ int GradientProblem::NumLocalParameters() const {
return parameterization_->LocalSize();
}
-
bool GradientProblem::Evaluate(const double* parameters,
double* cost,
double* gradient) const {
@@ -66,10 +65,8 @@ bool GradientProblem::Evaluate(const double* parameters,
}
return (function_->Evaluate(parameters, cost, scratch_.get()) &&
- parameterization_->MultiplyByJacobian(parameters,
- 1,
- scratch_.get(),
- gradient));
+ parameterization_->MultiplyByJacobian(
+ parameters, 1, scratch_.get(), gradient));
}
bool GradientProblem::Plus(const double* x,
diff --git a/extern/ceres/internal/ceres/gradient_problem_evaluator.h b/extern/ceres/internal/ceres/gradient_problem_evaluator.h
index c5ad1d71607..d224dbed0ae 100644
--- a/extern/ceres/internal/ceres/gradient_problem_evaluator.h
+++ b/extern/ceres/internal/ceres/gradient_problem_evaluator.h
@@ -76,9 +76,7 @@ class GradientProblemEvaluator : public Evaluator {
return problem_.Plus(state, delta, state_plus_delta);
}
- int NumParameters() const final {
- return problem_.NumParameters();
- }
+ int NumParameters() const final { return problem_.NumParameters(); }
int NumEffectiveParameters() const final {
return problem_.NumLocalParameters();
diff --git a/extern/ceres/internal/ceres/gradient_problem_solver.cc b/extern/ceres/internal/ceres/gradient_problem_solver.cc
index 1639e30666c..b72fad91542 100644
--- a/extern/ceres/internal/ceres/gradient_problem_solver.cc
+++ b/extern/ceres/internal/ceres/gradient_problem_solver.cc
@@ -31,6 +31,7 @@
#include "ceres/gradient_problem_solver.h"
#include <memory>
+
#include "ceres/callbacks.h"
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_evaluator.h"
@@ -45,8 +46,8 @@
#include "ceres/wall_time.h"
namespace ceres {
-using internal::StringPrintf;
using internal::StringAppendF;
+using internal::StringPrintf;
using std::string;
namespace {
@@ -83,7 +84,6 @@ Solver::Options GradientProblemSolverOptionsToSolverOptions(
#undef COPY_OPTION
}
-
} // namespace
bool GradientProblemSolver::Options::IsValid(std::string* error) const {
@@ -92,8 +92,7 @@ bool GradientProblemSolver::Options::IsValid(std::string* error) const {
return solver_options.IsValid(error);
}
-GradientProblemSolver::~GradientProblemSolver() {
-}
+GradientProblemSolver::~GradientProblemSolver() {}
void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
const GradientProblem& problem,
@@ -111,6 +110,7 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
CHECK(summary != nullptr);
*summary = Summary();
+ // clang-format off
summary->num_parameters = problem.NumParameters();
summary->num_local_parameters = problem.NumLocalParameters();
summary->line_search_direction_type = options.line_search_direction_type; // NOLINT
@@ -118,6 +118,7 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
summary->line_search_type = options.line_search_type;
summary->max_lbfgs_rank = options.max_lbfgs_rank;
summary->nonlinear_conjugate_gradient_type = options.nonlinear_conjugate_gradient_type; // NOLINT
+ // clang-format on
// Check validity
if (!options.IsValid(&summary->message)) {
@@ -163,11 +164,13 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
minimizer->Minimize(minimizer_options, solution.data(), &solver_summary);
+ // clang-format off
summary->termination_type = solver_summary.termination_type;
summary->message = solver_summary.message;
summary->initial_cost = solver_summary.initial_cost;
summary->final_cost = solver_summary.final_cost;
summary->iterations = solver_summary.iterations;
+ // clang-format on
summary->line_search_polynomial_minimization_time_in_seconds =
solver_summary.line_search_polynomial_minimization_time_in_seconds;
@@ -200,15 +203,16 @@ bool GradientProblemSolver::Summary::IsSolutionUsable() const {
}
string GradientProblemSolver::Summary::BriefReport() const {
- return StringPrintf("Ceres GradientProblemSolver Report: "
- "Iterations: %d, "
- "Initial cost: %e, "
- "Final cost: %e, "
- "Termination: %s",
- static_cast<int>(iterations.size()),
- initial_cost,
- final_cost,
- TerminationTypeToString(termination_type));
+ return StringPrintf(
+ "Ceres GradientProblemSolver Report: "
+ "Iterations: %d, "
+ "Initial cost: %e, "
+ "Final cost: %e, "
+ "Termination: %s",
+ static_cast<int>(iterations.size()),
+ initial_cost,
+ final_cost,
+ TerminationTypeToString(termination_type));
}
string GradientProblemSolver::Summary::FullReport() const {
@@ -218,60 +222,63 @@ string GradientProblemSolver::Summary::FullReport() const {
StringAppendF(&report, "Parameters % 25d\n", num_parameters);
if (num_local_parameters != num_parameters) {
- StringAppendF(&report, "Local parameters % 25d\n",
- num_local_parameters);
+ StringAppendF(&report, "Local parameters % 25d\n", num_local_parameters);
}
string line_search_direction_string;
if (line_search_direction_type == LBFGS) {
line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
} else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
- line_search_direction_string =
- NonlinearConjugateGradientTypeToString(
- nonlinear_conjugate_gradient_type);
+ line_search_direction_string = NonlinearConjugateGradientTypeToString(
+ nonlinear_conjugate_gradient_type);
} else {
line_search_direction_string =
LineSearchDirectionTypeToString(line_search_direction_type);
}
- StringAppendF(&report, "Line search direction %19s\n",
+ StringAppendF(&report,
+ "Line search direction %19s\n",
line_search_direction_string.c_str());
- const string line_search_type_string =
- StringPrintf("%s %s",
- LineSearchInterpolationTypeToString(
- line_search_interpolation_type),
- LineSearchTypeToString(line_search_type));
- StringAppendF(&report, "Line search type %19s\n",
+ const string line_search_type_string = StringPrintf(
+ "%s %s",
+ LineSearchInterpolationTypeToString(line_search_interpolation_type),
+ LineSearchTypeToString(line_search_type));
+ StringAppendF(&report,
+ "Line search type %19s\n",
line_search_type_string.c_str());
StringAppendF(&report, "\n");
StringAppendF(&report, "\nCost:\n");
StringAppendF(&report, "Initial % 30e\n", initial_cost);
- if (termination_type != FAILURE &&
- termination_type != USER_FAILURE) {
+ if (termination_type != FAILURE && termination_type != USER_FAILURE) {
StringAppendF(&report, "Final % 30e\n", final_cost);
- StringAppendF(&report, "Change % 30e\n",
- initial_cost - final_cost);
+ StringAppendF(&report, "Change % 30e\n", initial_cost - final_cost);
}
- StringAppendF(&report, "\nMinimizer iterations % 16d\n",
+ StringAppendF(&report,
+ "\nMinimizer iterations % 16d\n",
static_cast<int>(iterations.size()));
StringAppendF(&report, "\nTime (in seconds):\n");
- StringAppendF(&report, "\n Cost evaluation %23.6f (%d)\n",
+ StringAppendF(&report,
+ "\n Cost evaluation %23.6f (%d)\n",
cost_evaluation_time_in_seconds,
num_cost_evaluations);
- StringAppendF(&report, " Gradient & cost evaluation %16.6f (%d)\n",
+ StringAppendF(&report,
+ " Gradient & cost evaluation %16.6f (%d)\n",
gradient_evaluation_time_in_seconds,
num_gradient_evaluations);
- StringAppendF(&report, " Polynomial minimization %17.6f\n",
+ StringAppendF(&report,
+ " Polynomial minimization %17.6f\n",
line_search_polynomial_minimization_time_in_seconds);
- StringAppendF(&report, "Total %25.6f\n\n",
- total_time_in_seconds);
+ StringAppendF(
+ &report, "Total %25.6f\n\n", total_time_in_seconds);
- StringAppendF(&report, "Termination: %25s (%s)\n",
- TerminationTypeToString(termination_type), message.c_str());
+ StringAppendF(&report,
+ "Termination: %25s (%s)\n",
+ TerminationTypeToString(termination_type),
+ message.c_str());
return report;
}
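For context on the API these formatting changes touch, here is a minimal usage sketch of GradientProblemSolver ending in the BriefReport() string formatted above. The Rosenbrock functor and starting point are illustrative, not part of this patch; ceres::Solve, GradientProblem and FirstOrderFunction are the public Ceres entry points.

#include <iostream>
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_solver.h"

// Illustrative first-order function: 2-D Rosenbrock cost and gradient.
class Rosenbrock : public ceres::FirstOrderFunction {
 public:
  bool Evaluate(const double* x, double* cost, double* gradient) const override {
    const double a = 1.0 - x[0];
    const double b = x[1] - x[0] * x[0];
    *cost = a * a + 100.0 * b * b;
    if (gradient != nullptr) {
      gradient[0] = -2.0 * a - 400.0 * x[0] * b;
      gradient[1] = 200.0 * b;
    }
    return true;
  }
  int NumParameters() const override { return 2; }
};

int main() {
  double parameters[2] = {-1.2, 1.0};
  ceres::GradientProblem problem(new Rosenbrock);  // takes ownership
  ceres::GradientProblemSolver::Options options;
  ceres::GradientProblemSolver::Summary summary;
  ceres::Solve(options, problem, parameters, &summary);
  std::cout << summary.BriefReport() << "\n";
  return 0;
}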
diff --git a/extern/ceres/internal/ceres/graph.h b/extern/ceres/internal/ceres/graph.h
index 4e1fd81c1ea..9b26158753f 100644
--- a/extern/ceres/internal/ceres/graph.h
+++ b/extern/ceres/internal/ceres/graph.h
@@ -32,9 +32,10 @@
#define CERES_INTERNAL_GRAPH_H_
#include <limits>
-#include <unordered_set>
#include <unordered_map>
+#include <unordered_set>
#include <utility>
+
#include "ceres/map_util.h"
#include "ceres/pair_hash.h"
#include "ceres/types.h"
@@ -93,9 +94,7 @@ class Graph {
return FindOrDie(edges_, vertex);
}
- const std::unordered_set<Vertex>& vertices() const {
- return vertices_;
- }
+ const std::unordered_set<Vertex>& vertices() const { return vertices_; }
private:
std::unordered_set<Vertex> vertices_;
@@ -121,9 +120,7 @@ class WeightedGraph {
// Uses weight = 1.0. If vertex already exists, its weight is set to
// 1.0.
- void AddVertex(const Vertex& vertex) {
- AddVertex(vertex, 1.0);
- }
+ void AddVertex(const Vertex& vertex) { AddVertex(vertex, 1.0); }
bool RemoveVertex(const Vertex& vertex) {
if (vertices_.find(vertex) == vertices_.end()) {
@@ -184,11 +181,11 @@ class WeightedGraph {
// the edge weight is zero.
double EdgeWeight(const Vertex& vertex1, const Vertex& vertex2) const {
if (vertex1 < vertex2) {
- return FindWithDefault(edge_weights_,
- std::make_pair(vertex1, vertex2), 0.0);
+ return FindWithDefault(
+ edge_weights_, std::make_pair(vertex1, vertex2), 0.0);
} else {
- return FindWithDefault(edge_weights_,
- std::make_pair(vertex2, vertex1), 0.0);
+ return FindWithDefault(
+ edge_weights_, std::make_pair(vertex2, vertex1), 0.0);
}
}
@@ -198,9 +195,7 @@ class WeightedGraph {
return FindOrDie(edges_, vertex);
}
- const std::unordered_set<Vertex>& vertices() const {
- return vertices_;
- }
+ const std::unordered_set<Vertex>& vertices() const { return vertices_; }
static double InvalidWeight() {
return std::numeric_limits<double>::quiet_NaN();
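The EdgeWeight() reflow above keeps the convention that an undirected edge is stored once, keyed by the ordered vertex pair. A standalone sketch of that convention, with std::map standing in for the internal FindWithDefault helper (names here are illustrative only):

#include <map>
#include <utility>

std::map<std::pair<int, int>, double> edge_weights;

// Store and look up each undirected edge under (smaller vertex, larger vertex).
double EdgeWeight(int vertex1, int vertex2) {
  const std::pair<int, int> key = vertex1 < vertex2
                                      ? std::make_pair(vertex1, vertex2)
                                      : std::make_pair(vertex2, vertex1);
  const auto it = edge_weights.find(key);
  return it == edge_weights.end() ? 0.0 : it->second;  // 0.0 if edge absent
}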
diff --git a/extern/ceres/internal/ceres/graph_algorithms.h b/extern/ceres/internal/ceres/graph_algorithms.h
index b0629313159..7d63b337f68 100644
--- a/extern/ceres/internal/ceres/graph_algorithms.h
+++ b/extern/ceres/internal/ceres/graph_algorithms.h
@@ -36,8 +36,9 @@
#include <algorithm>
#include <unordered_map>
#include <unordered_set>
-#include <vector>
#include <utility>
+#include <vector>
+
#include "ceres/graph.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
@@ -50,8 +51,7 @@ namespace internal {
template <typename Vertex>
class VertexTotalOrdering {
public:
- explicit VertexTotalOrdering(const Graph<Vertex>& graph)
- : graph_(graph) {}
+ explicit VertexTotalOrdering(const Graph<Vertex>& graph) : graph_(graph) {}
bool operator()(const Vertex& lhs, const Vertex& rhs) const {
if (graph_.Neighbors(lhs).size() == graph_.Neighbors(rhs).size()) {
@@ -67,8 +67,7 @@ class VertexTotalOrdering {
template <typename Vertex>
class VertexDegreeLessThan {
public:
- explicit VertexDegreeLessThan(const Graph<Vertex>& graph)
- : graph_(graph) {}
+ explicit VertexDegreeLessThan(const Graph<Vertex>& graph) : graph_(graph) {}
bool operator()(const Vertex& lhs, const Vertex& rhs) const {
return graph_.Neighbors(lhs).size() < graph_.Neighbors(rhs).size();
@@ -177,8 +176,9 @@ int StableIndependentSetOrdering(const Graph<Vertex>& graph,
std::vector<Vertex> vertex_queue(*ordering);
- std::stable_sort(vertex_queue.begin(), vertex_queue.end(),
- VertexDegreeLessThan<Vertex>(graph));
+ std::stable_sort(vertex_queue.begin(),
+ vertex_queue.end(),
+ VertexDegreeLessThan<Vertex>(graph));
// Mark all vertices white.
std::unordered_map<Vertex, char> vertex_color;
@@ -257,8 +257,8 @@ Vertex FindConnectedComponent(const Vertex& vertex,
// spanning forest, or a collection of linear paths that span the
// graph G.
template <typename Vertex>
-WeightedGraph<Vertex>*
-Degree2MaximumSpanningForest(const WeightedGraph<Vertex>& graph) {
+WeightedGraph<Vertex>* Degree2MaximumSpanningForest(
+ const WeightedGraph<Vertex>& graph) {
// Array of edges sorted in decreasing order of their weights.
std::vector<std::pair<double, std::pair<Vertex, Vertex>>> weighted_edges;
WeightedGraph<Vertex>* forest = new WeightedGraph<Vertex>();
@@ -294,7 +294,7 @@ Degree2MaximumSpanningForest(const WeightedGraph<Vertex>& graph) {
// Greedily add edges to the spanning tree/forest as long as they do
// not violate the degree/cycle constraint.
- for (int i =0; i < weighted_edges.size(); ++i) {
+ for (int i = 0; i < weighted_edges.size(); ++i) {
const std::pair<Vertex, Vertex>& edge = weighted_edges[i].second;
const Vertex vertex1 = edge.first;
const Vertex vertex2 = edge.second;
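The stable_sort reflow above orders vertices by degree via VertexDegreeLessThan. The same comparator pattern as a self-contained sketch, with a plain adjacency map standing in for the templated Graph type:

#include <algorithm>
#include <unordered_map>
#include <unordered_set>
#include <vector>

using Adjacency = std::unordered_map<int, std::unordered_set<int>>;

// Sort vertices by increasing neighbor count, preserving ties (stable).
void OrderByDegree(const Adjacency& graph, std::vector<int>* vertices) {
  std::stable_sort(vertices->begin(), vertices->end(),
                   [&graph](int lhs, int rhs) {
                     return graph.at(lhs).size() < graph.at(rhs).size();
                   });
}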
diff --git a/extern/ceres/internal/ceres/implicit_schur_complement.cc b/extern/ceres/internal/ceres/implicit_schur_complement.cc
index bf680d1d952..f2196d4ef9c 100644
--- a/extern/ceres/internal/ceres/implicit_schur_complement.cc
+++ b/extern/ceres/internal/ceres/implicit_schur_complement.cc
@@ -43,13 +43,9 @@ namespace internal {
ImplicitSchurComplement::ImplicitSchurComplement(
const LinearSolver::Options& options)
- : options_(options),
- D_(NULL),
- b_(NULL) {
-}
+ : options_(options), D_(NULL), b_(NULL) {}
-ImplicitSchurComplement::~ImplicitSchurComplement() {
-}
+ImplicitSchurComplement::~ImplicitSchurComplement() {}
void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
const double* D,
@@ -88,7 +84,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A,
// the block diagonals and invert them.
AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
if (options_.preconditioner_type == JACOBI) {
- AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
+ AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
block_diagonal_FtF_inverse_.get());
}
@@ -125,8 +121,8 @@ void ImplicitSchurComplement::RightMultiply(const double* x, double* y) const {
if (D_ != NULL) {
ConstVectorRef Dref(D_ + A_->num_cols_e(), num_cols());
VectorRef(y, num_cols()) =
- (Dref.array().square() *
- ConstVectorRef(x, num_cols()).array()).matrix();
+ (Dref.array().square() * ConstVectorRef(x, num_cols()).array())
+ .matrix();
} else {
VectorRef(y, num_cols()).setZero();
}
@@ -139,8 +135,7 @@ void ImplicitSchurComplement::RightMultiply(const double* x, double* y) const {
// entries D, add them to the diagonal of the matrix and compute the
// inverse of each diagonal block.
void ImplicitSchurComplement::AddDiagonalAndInvert(
- const double* D,
- BlockSparseMatrix* block_diagonal) {
+ const double* D, BlockSparseMatrix* block_diagonal) {
const CompressedRowBlockStructure* block_diagonal_structure =
block_diagonal->block_structure();
for (int r = 0; r < block_diagonal_structure->rows.size(); ++r) {
@@ -148,17 +143,16 @@ void ImplicitSchurComplement::AddDiagonalAndInvert(
const int row_block_size = block_diagonal_structure->rows[r].block.size;
const Cell& cell = block_diagonal_structure->rows[r].cells[0];
MatrixRef m(block_diagonal->mutable_values() + cell.position,
- row_block_size, row_block_size);
+ row_block_size,
+ row_block_size);
if (D != NULL) {
ConstVectorRef d(D + row_block_pos, row_block_size);
m += d.array().square().matrix().asDiagonal();
}
- m = m
- .selfadjointView<Eigen::Upper>()
- .llt()
- .solve(Matrix::Identity(row_block_size, row_block_size));
+ m = m.selfadjointView<Eigen::Upper>().llt().solve(
+ Matrix::Identity(row_block_size, row_block_size));
}
}
@@ -167,7 +161,7 @@ void ImplicitSchurComplement::AddDiagonalAndInvert(
void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
const int num_cols_e = A_->num_cols_e();
const int num_cols_f = A_->num_cols_f();
- const int num_cols = A_->num_cols();
+ const int num_cols = A_->num_cols();
const int num_rows = A_->num_rows();
// y1 = F x
@@ -190,7 +184,7 @@ void ImplicitSchurComplement::BackSubstitute(const double* x, double* y) {
// computed via back substitution. The second block of variables
// corresponds to the Schur complement system, so we just copy those
// values from the solution to the Schur complement.
- VectorRef(y + num_cols_e, num_cols_f) = ConstVectorRef(x, num_cols_f);
+ VectorRef(y + num_cols_e, num_cols_f) = ConstVectorRef(x, num_cols_f);
}
// Compute the RHS of the Schur complement system.
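AddDiagonalAndInvert, reformatted above, adds the squared regularization entries to each diagonal block and then replaces the block by its inverse through a Cholesky solve. A dense Eigen sketch of the per-block operation (illustrative; the real code walks a block-sparse structure and D may be NULL):

#include "Eigen/Dense"

// m is one diagonal block, d the matching slice of the regularization vector.
void AddDiagonalAndInvertBlock(Eigen::MatrixXd& m, const Eigen::VectorXd& d) {
  m += d.array().square().matrix().asDiagonal();       // m += diag(d^2)
  m = m.selfadjointView<Eigen::Upper>().llt().solve(   // m  = m^{-1}
      Eigen::MatrixXd::Identity(m.rows(), m.cols()));
}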
diff --git a/extern/ceres/internal/ceres/implicit_schur_complement.h b/extern/ceres/internal/ceres/implicit_schur_complement.h
index f4ddf724910..e83892af017 100644
--- a/extern/ceres/internal/ceres/implicit_schur_complement.h
+++ b/extern/ceres/internal/ceres/implicit_schur_complement.h
@@ -35,10 +35,12 @@
#define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
#include <memory>
+
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/linear_operator.h"
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
-#include "ceres/internal/eigen.h"
#include "ceres/types.h"
namespace ceres {
@@ -86,7 +88,7 @@ class BlockSparseMatrix;
// RightMultiply (and the LeftMultiply) methods are not thread safe as
// they depend on mutable arrays used for the temporaries needed to
// compute the product y += Sx;
-class ImplicitSchurComplement : public LinearOperator {
+class CERES_EXPORT_INTERNAL ImplicitSchurComplement : public LinearOperator {
public:
// num_eliminate_blocks is the number of E blocks in the matrix
// A.
@@ -129,7 +131,7 @@ class ImplicitSchurComplement : public LinearOperator {
int num_rows() const final { return A_->num_cols_f(); }
int num_cols() const final { return A_->num_cols_f(); }
- const Vector& rhs() const { return rhs_; }
+ const Vector& rhs() const { return rhs_; }
const BlockSparseMatrix* block_diagonal_EtE_inverse() const {
return block_diagonal_EtE_inverse_.get();
diff --git a/extern/ceres/internal/ceres/inner_product_computer.cc b/extern/ceres/internal/ceres/inner_product_computer.cc
index 2bf88365d99..ef38b7b3ad4 100644
--- a/extern/ceres/internal/ceres/inner_product_computer.cc
+++ b/extern/ceres/internal/ceres/inner_product_computer.cc
@@ -31,12 +31,12 @@
#include "ceres/inner_product_computer.h"
#include <algorithm>
+
#include "ceres/small_blas.h"
namespace ceres {
namespace internal {
-
// Create the CompressedRowSparseMatrix matrix that will contain the
// inner product.
//
@@ -297,7 +297,7 @@ void InnerProductComputer::Compute() {
const Cell& cell1 = m_row.cells[c1];
const int c1_size = bs->cols[cell1.block_id].size;
const int row_nnz = rows[bs->cols[cell1.block_id].position + 1] -
- rows[bs->cols[cell1.block_id].position];
+ rows[bs->cols[cell1.block_id].position];
int c2_begin, c2_end;
if (storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
@@ -311,6 +311,7 @@ void InnerProductComputer::Compute() {
for (int c2 = c2_begin; c2 < c2_end; ++c2, ++cursor) {
const Cell& cell2 = m_row.cells[c2];
const int c2_size = bs->cols[cell2.block_id].size;
+ // clang-format off
MatrixTransposeMatrixMultiply<Eigen::Dynamic, Eigen::Dynamic,
Eigen::Dynamic, Eigen::Dynamic, 1>(
m_values + cell1.position,
@@ -319,6 +320,7 @@ void InnerProductComputer::Compute() {
m_row.block.size, c2_size,
values + result_offsets_[cursor],
0, 0, c1_size, row_nnz);
+ // clang-format on
}
}
}
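The MatrixTransposeMatrixMultiply call wrapped in clang-format markers above accumulates one block of the inner product matrix. In dense Eigen terms the per-block update reduces to the following sketch (the real code writes into a compressed-row values array with a row stride):

#include "Eigen/Dense"

// result_block += cell1_block^T * cell2_block for two cells of one block-row.
void AccumulateInnerProductBlock(const Eigen::MatrixXd& cell1_block,
                                 const Eigen::MatrixXd& cell2_block,
                                 Eigen::MatrixXd& result_block) {
  result_block.noalias() += cell1_block.transpose() * cell2_block;
}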
diff --git a/extern/ceres/internal/ceres/inner_product_computer.h b/extern/ceres/internal/ceres/inner_product_computer.h
index 73073f8ad06..04ec1d18316 100644
--- a/extern/ceres/internal/ceres/inner_product_computer.h
+++ b/extern/ceres/internal/ceres/inner_product_computer.h
@@ -36,6 +36,7 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -60,7 +61,7 @@ namespace internal {
// This is not a problem as sparse linear algebra libraries can ignore
// these entries with ease and the space used is minimal/linear in the
// size of the matrices.
-class InnerProductComputer {
+class CERES_EXPORT_INTERNAL InnerProductComputer {
public:
// Factory
//
diff --git a/extern/ceres/internal/ceres/invert_psd_matrix.h b/extern/ceres/internal/ceres/invert_psd_matrix.h
index 21d301a77ee..ac8808b5a04 100644
--- a/extern/ceres/internal/ceres/invert_psd_matrix.h
+++ b/extern/ceres/internal/ceres/invert_psd_matrix.h
@@ -31,9 +31,9 @@
#ifndef CERES_INTERNAL_INVERT_PSD_MATRIX_H_
#define CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+#include "Eigen/Dense"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
-#include "Eigen/Dense"
namespace ceres {
namespace internal {
@@ -76,4 +76,4 @@ typename EigenTypes<kSize, kSize>::Matrix InvertPSDMatrix(
} // namespace internal
} // namespace ceres
-#endif // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+#endif // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
diff --git a/extern/ceres/internal/ceres/is_close.cc b/extern/ceres/internal/ceres/is_close.cc
index a91a17454d9..0becf5546a0 100644
--- a/extern/ceres/internal/ceres/is_close.cc
+++ b/extern/ceres/internal/ceres/is_close.cc
@@ -35,9 +35,11 @@
namespace ceres {
namespace internal {
-bool IsClose(double x, double y, double relative_precision,
- double *relative_error,
- double *absolute_error) {
+bool IsClose(double x,
+ double y,
+ double relative_precision,
+ double* relative_error,
+ double* absolute_error) {
double local_absolute_error;
double local_relative_error;
if (!absolute_error) {
diff --git a/extern/ceres/internal/ceres/is_close.h b/extern/ceres/internal/ceres/is_close.h
index 7789448c8e8..b781a4493ff 100644
--- a/extern/ceres/internal/ceres/is_close.h
+++ b/extern/ceres/internal/ceres/is_close.h
@@ -33,6 +33,8 @@
#ifndef CERES_INTERNAL_IS_CLOSE_H_
#define CERES_INTERNAL_IS_CLOSE_H_
+#include "ceres/internal/port.h"
+
namespace ceres {
namespace internal {
// Returns true if x and y have a relative (unsigned) difference less than
@@ -40,11 +42,11 @@ namespace internal {
// difference in relative/absolute_error if non-NULL. If one of the two values
// is exactly zero, the absolute difference will be compared, and relative_error
// will be set to the absolute difference.
-bool IsClose(double x,
- double y,
- double relative_precision,
- double *relative_error,
- double *absolute_error);
+CERES_EXPORT_INTERNAL bool IsClose(double x,
+ double y,
+ double relative_precision,
+ double* relative_error,
+ double* absolute_error);
} // namespace internal
} // namespace ceres
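As a quick reference for the contract documented above, a standalone sketch of the comparison (matching the description rather than the internal implementation line for line):

#include <algorithm>
#include <cmath>

// True if x and y differ by less than relative_precision in relative terms;
// when either value is exactly zero the absolute difference is used instead.
bool IsCloseSketch(double x, double y, double relative_precision) {
  const double absolute_error = std::fabs(x - y);
  const double relative_error =
      (x == 0.0 || y == 0.0)
          ? absolute_error
          : absolute_error / std::max(std::fabs(x), std::fabs(y));
  return relative_error < relative_precision;
}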
diff --git a/extern/ceres/internal/ceres/iterative_refiner.cc b/extern/ceres/internal/ceres/iterative_refiner.cc
index fb0e45bdcdd..5f0bfdd250d 100644
--- a/extern/ceres/internal/ceres/iterative_refiner.cc
+++ b/extern/ceres/internal/ceres/iterative_refiner.cc
@@ -28,9 +28,10 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include <string>
#include "ceres/iterative_refiner.h"
+#include <string>
+
#include "Eigen/Core"
#include "ceres/sparse_cholesky.h"
#include "ceres/sparse_matrix.h"
diff --git a/extern/ceres/internal/ceres/iterative_refiner.h b/extern/ceres/internal/ceres/iterative_refiner.h
index f969935ae46..08f8d6762cf 100644
--- a/extern/ceres/internal/ceres/iterative_refiner.h
+++ b/extern/ceres/internal/ceres/iterative_refiner.h
@@ -32,7 +32,10 @@
#define CERES_INTERNAL_ITERATIVE_REFINER_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
+
#include "ceres/internal/eigen.h"
namespace ceres {
@@ -54,7 +57,7 @@ class SparseMatrix;
// Definite linear systems.
//
// The above iterative loop is run until max_num_iterations is reached.
-class IterativeRefiner {
+class CERES_EXPORT_INTERNAL IterativeRefiner {
public:
// max_num_iterations is the number of refinement iterations to
// perform.
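The comment above refers to the classic iterative refinement loop for symmetric positive definite systems. A dense Eigen sketch of that loop, assuming an already computed Cholesky factorization (the real class operates on the internal SparseMatrix and SparseCholesky types instead):

#include "Eigen/Dense"

// Improve an approximate solution x of A x = b by repeatedly solving for the
// correction of the current residual and applying it.
void RefineSolution(const Eigen::MatrixXd& A,
                    const Eigen::LLT<Eigen::MatrixXd>& llt_of_A,
                    const Eigen::VectorXd& b,
                    int max_num_iterations,
                    Eigen::VectorXd* x) {
  for (int i = 0; i < max_num_iterations; ++i) {
    const Eigen::VectorXd residual = b - A * (*x);  // r = b - A x
    *x += llt_of_A.solve(residual);                 // x += A^{-1} r
  }
}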
diff --git a/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc b/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
index 6076c38c71d..143df5e5814 100644
--- a/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
+++ b/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
@@ -55,8 +55,7 @@ namespace internal {
IterativeSchurComplementSolver::IterativeSchurComplementSolver(
const LinearSolver::Options& options)
- : options_(options) {
-}
+ : options_(options) {}
IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {}
diff --git a/extern/ceres/internal/ceres/iterative_schur_complement_solver.h b/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
index 9aed94fcb1a..37606b32d3a 100644
--- a/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
+++ b/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
@@ -32,8 +32,10 @@
#define CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
#include <memory>
-#include "ceres/linear_solver.h"
+
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
#include "ceres/types.h"
namespace ceres {
@@ -67,20 +69,21 @@ class Preconditioner;
// a proof of this fact and others related to this solver please see
// the section on Domain Decomposition Methods in Saad's book
// "Iterative Methods for Sparse Linear Systems".
-class IterativeSchurComplementSolver : public BlockSparseMatrixSolver {
+class CERES_EXPORT_INTERNAL IterativeSchurComplementSolver
+ : public BlockSparseMatrixSolver {
public:
explicit IterativeSchurComplementSolver(const LinearSolver::Options& options);
- IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) = delete;
+ IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) =
+ delete;
void operator=(const IterativeSchurComplementSolver&) = delete;
virtual ~IterativeSchurComplementSolver();
private:
- LinearSolver::Summary SolveImpl(
- BlockSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& options,
- double* x) final;
+ LinearSolver::Summary SolveImpl(BlockSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x) final;
void CreatePreconditioner(BlockSparseMatrix* A);
diff --git a/extern/ceres/internal/ceres/lapack.cc b/extern/ceres/internal/ceres/lapack.cc
index 37efbcd27d1..a159ec70696 100644
--- a/extern/ceres/internal/ceres/lapack.cc
+++ b/extern/ceres/internal/ceres/lapack.cc
@@ -36,11 +36,7 @@
#ifndef CERES_NO_LAPACK
// C interface to the LAPACK Cholesky factorization and triangular solve.
-extern "C" void dpotrf_(char* uplo,
- int* n,
- double* a,
- int* lda,
- int* info);
+extern "C" void dpotrf_(char* uplo, int* n, double* a, int* lda, int* info);
extern "C" void dpotrs_(char* uplo,
int* n,
@@ -92,10 +88,10 @@ LinearSolverTerminationType LAPACK::SolveInPlaceUsingCholesky(
}
if (info > 0) {
- *message =
- StringPrintf(
- "LAPACK::dpotrf numerical failure. "
- "The leading minor of order %d is not positive definite.", info);
+ *message = StringPrintf(
+ "LAPACK::dpotrf numerical failure. "
+ "The leading minor of order %d is not positive definite.",
+ info);
return LINEAR_SOLVER_FAILURE;
}
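For context, the dpotrf_/dpotrs_ declarations reformatted above are the usual LAPACK factor-then-solve pair. A hedged sketch of the call pattern (assumes a Fortran LAPACK is linked; the in-place solve below is illustrative, not this file's exact code):

extern "C" void dpotrf_(char* uplo, int* n, double* a, int* lda, int* info);
extern "C" void dpotrs_(char* uplo, int* n, int* nrhs, double* a, int* lda,
                        double* b, int* ldb, int* info);

// Solve A x = b in place for a symmetric positive definite n x n matrix A.
// Returns false when a leading minor is not positive definite.
bool CholeskySolveInPlace(int n, double* A, double* b) {
  char uplo = 'L';
  int nrhs = 1;
  int info = 0;
  dpotrf_(&uplo, &n, A, &n, &info);                // factor A = L L^T
  if (info != 0) return false;
  dpotrs_(&uplo, &n, &nrhs, A, &n, b, &n, &info);  // back substitution into b
  return info == 0;
}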
diff --git a/extern/ceres/internal/ceres/lapack.h b/extern/ceres/internal/ceres/lapack.h
index 5bb1a220c26..5c5bf8bf8b8 100644
--- a/extern/ceres/internal/ceres/lapack.h
+++ b/extern/ceres/internal/ceres/lapack.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_LAPACK_H_
#include <string>
+
#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
diff --git a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
index 9eec631e6dd..cb0e9371b75 100644
--- a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
@@ -61,8 +61,7 @@ LevenbergMarquardtStrategy::LevenbergMarquardtStrategy(
CHECK_GT(max_radius_, 0.0);
}
-LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {
-}
+LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {}
TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
const TrustRegionStrategy::PerSolveOptions& per_solve_options,
@@ -81,8 +80,8 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
jacobian->SquaredColumnNorm(diagonal_.data());
for (int i = 0; i < num_parameters; ++i) {
- diagonal_[i] = std::min(std::max(diagonal_[i], min_diagonal_),
- max_diagonal_);
+ diagonal_[i] =
+ std::min(std::max(diagonal_[i], min_diagonal_), max_diagonal_);
}
}
@@ -112,7 +111,7 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) {
LOG(WARNING) << "Linear solver fatal error: "
<< linear_solver_summary.message;
- } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE) {
+ } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE) {
LOG(WARNING) << "Linear solver failure. Failed to compute a step: "
<< linear_solver_summary.message;
} else if (!IsArrayValid(num_parameters, step)) {
@@ -138,7 +137,6 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
}
}
-
TrustRegionStrategy::Summary summary;
summary.residual_norm = linear_solver_summary.residual_norm;
summary.num_iterations = linear_solver_summary.num_iterations;
@@ -148,8 +146,8 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
void LevenbergMarquardtStrategy::StepAccepted(double step_quality) {
CHECK_GT(step_quality, 0.0);
- radius_ = radius_ / std::max(1.0 / 3.0,
- 1.0 - pow(2.0 * step_quality - 1.0, 3));
+ radius_ =
+ radius_ / std::max(1.0 / 3.0, 1.0 - pow(2.0 * step_quality - 1.0, 3));
radius_ = std::min(max_radius_, radius_);
decrease_factor_ = 2.0;
reuse_diagonal_ = false;
@@ -161,9 +159,7 @@ void LevenbergMarquardtStrategy::StepRejected(double step_quality) {
reuse_diagonal_ = true;
}
-double LevenbergMarquardtStrategy::Radius() const {
- return radius_;
-}
+double LevenbergMarquardtStrategy::Radius() const { return radius_; }
} // namespace internal
} // namespace ceres
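The reflowed expressions above are the standard Levenberg-Marquardt trust-region radius adaptation. Both branches as a compact sketch (member names mirror the code above; the starting values are illustrative):

#include <algorithm>
#include <cmath>

struct TrustRegionRadius {
  double radius = 1e4;
  double max_radius = 1e32;
  double decrease_factor = 2.0;

  // Accepted step: grow the radius, more aggressively for high step quality.
  void StepAccepted(double step_quality) {
    radius = radius / std::max(1.0 / 3.0,
                               1.0 - std::pow(2.0 * step_quality - 1.0, 3));
    radius = std::min(max_radius, radius);
    decrease_factor = 2.0;
  }

  // Rejected step: shrink the radius and back off harder on the next failure.
  void StepRejected() {
    radius = radius / decrease_factor;
    decrease_factor *= 2.0;
  }
};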
diff --git a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
index 8fb37f32959..12cd463c152 100644
--- a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
+++ b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/trust_region_strategy.h"
namespace ceres {
@@ -42,7 +43,8 @@ namespace internal {
// K. Madsen, H.B. Nielsen and O. Tingleff. Available to download from
//
// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
-class LevenbergMarquardtStrategy : public TrustRegionStrategy {
+class CERES_EXPORT_INTERNAL LevenbergMarquardtStrategy
+ : public TrustRegionStrategy {
public:
explicit LevenbergMarquardtStrategy(
const TrustRegionStrategy::Options& options);
@@ -74,11 +76,11 @@ class LevenbergMarquardtStrategy : public TrustRegionStrategy {
const double max_diagonal_;
double decrease_factor_;
bool reuse_diagonal_;
- Vector diagonal_; // diagonal_ = diag(J'J)
+ Vector diagonal_; // diagonal_ = diag(J'J)
// Scaled copy of diagonal_. Stored here as optimization to prevent
// allocations in every iteration and reuse when a step fails and
// ComputeStep is called again.
- Vector lm_diagonal_; // lm_diagonal_ = diagonal_ / radius_;
+ Vector lm_diagonal_; // lm_diagonal_ = sqrt(diagonal_ / radius_);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/line_search.cc b/extern/ceres/internal/ceres/line_search.cc
index 352c64f5057..7e871a20a53 100644
--- a/extern/ceres/internal/ceres/line_search.cc
+++ b/extern/ceres/internal/ceres/line_search.cc
@@ -57,10 +57,10 @@ namespace {
const int kErrorMessageNumericPrecision = 8;
} // namespace
-ostream& operator<<(ostream &os, const FunctionSample& sample);
+ostream& operator<<(ostream& os, const FunctionSample& sample);
// Convenience stream operator for pushing FunctionSamples into log messages.
-ostream& operator<<(ostream &os, const FunctionSample& sample) {
+ostream& operator<<(ostream& os, const FunctionSample& sample) {
os << sample.ToDebugString();
return os;
}
@@ -73,17 +73,17 @@ LineSearch* LineSearch::Create(const LineSearchType line_search_type,
string* error) {
LineSearch* line_search = NULL;
switch (line_search_type) {
- case ceres::ARMIJO:
- line_search = new ArmijoLineSearch(options);
- break;
- case ceres::WOLFE:
- line_search = new WolfeLineSearch(options);
- break;
- default:
- *error = string("Invalid line search algorithm type: ") +
- LineSearchTypeToString(line_search_type) +
- string(", unable to create line search.");
- return NULL;
+ case ceres::ARMIJO:
+ line_search = new ArmijoLineSearch(options);
+ break;
+ case ceres::WOLFE:
+ line_search = new WolfeLineSearch(options);
+ break;
+ default:
+ *error = string("Invalid line search algorithm type: ") +
+ LineSearchTypeToString(line_search_type) +
+ string(", unable to create line search.");
+ return NULL;
}
return line_search;
}
@@ -96,8 +96,7 @@ LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
initial_evaluator_residual_time_in_seconds(0.0),
initial_evaluator_jacobian_time_in_seconds(0.0) {}
-void LineSearchFunction::Init(const Vector& position,
- const Vector& direction) {
+void LineSearchFunction::Init(const Vector& position, const Vector& direction) {
position_ = position;
direction_ = direction;
}
@@ -200,9 +199,9 @@ void LineSearch::Search(double step_size_estimate,
summary->polynomial_minimization_time_in_seconds = 0.0;
options().function->ResetTimeStatistics();
this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
- options().function->
- TimeStatistics(&summary->cost_evaluation_time_in_seconds,
- &summary->gradient_evaluation_time_in_seconds);
+ options().function->TimeStatistics(
+ &summary->cost_evaluation_time_in_seconds,
+ &summary->gradient_evaluation_time_in_seconds);
summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
}
@@ -218,8 +217,7 @@ double LineSearch::InterpolatingPolynomialMinimizingStepSize(
const double min_step_size,
const double max_step_size) const {
if (!current.value_is_valid ||
- (interpolation_type == BISECTION &&
- max_step_size <= current.x)) {
+ (interpolation_type == BISECTION && max_step_size <= current.x)) {
// Either: sample is invalid; or we are using BISECTION and contracting
// the step size.
return std::min(std::max(current.x * 0.5, min_step_size), max_step_size);
@@ -274,8 +272,8 @@ double LineSearch::InterpolatingPolynomialMinimizingStepSize(
}
double step_size = 0.0, unused_min_value = 0.0;
- MinimizeInterpolatingPolynomial(samples, min_step_size, max_step_size,
- &step_size, &unused_min_value);
+ MinimizeInterpolatingPolynomial(
+ samples, min_step_size, max_step_size, &step_size, &unused_min_value);
return step_size;
}
@@ -315,41 +313,43 @@ void ArmijoLineSearch::DoSearch(const double step_size_estimate,
function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
while (!current.value_is_valid ||
- current.value > (initial_cost
- + options().sufficient_decrease
- * initial_gradient
- * current.x)) {
+ current.value > (initial_cost + options().sufficient_decrease *
+ initial_gradient * current.x)) {
// If current.value_is_valid is false, we treat it as if the cost at that
// point is not large enough to satisfy the sufficient decrease condition.
++summary->num_iterations;
if (summary->num_iterations >= options().max_num_iterations) {
- summary->error =
- StringPrintf("Line search failed: Armijo failed to find a point "
- "satisfying the sufficient decrease condition within "
- "specified max_num_iterations: %d.",
- options().max_num_iterations);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Armijo failed to find a point "
+ "satisfying the sufficient decrease condition within "
+ "specified max_num_iterations: %d.",
+ options().max_num_iterations);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return;
}
const double polynomial_minimization_start_time = WallTimeInSeconds();
- const double step_size =
- this->InterpolatingPolynomialMinimizingStepSize(
- options().interpolation_type,
- initial_position,
- previous,
- current,
- (options().max_step_contraction * current.x),
- (options().min_step_contraction * current.x));
+ const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+ options().interpolation_type,
+ initial_position,
+ previous,
+ current,
+ (options().max_step_contraction * current.x),
+ (options().min_step_contraction * current.x));
summary->polynomial_minimization_time_in_seconds +=
(WallTimeInSeconds() - polynomial_minimization_start_time);
if (step_size * descent_direction_max_norm < options().min_step_size) {
- summary->error =
- StringPrintf("Line search failed: step_size too small: %.5e "
- "with descent_direction_max_norm: %.5e.", step_size,
- descent_direction_max_norm);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: step_size too small: %.5e "
+ "with descent_direction_max_norm: %.5e.",
+ step_size,
+ descent_direction_max_norm);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return;
}
@@ -435,8 +435,8 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
}
VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision)
- << "Starting line search zoom phase with bracket_low: "
- << bracket_low << ", bracket_high: " << bracket_high
+ << "Starting line search zoom phase with bracket_low: " << bracket_low
+ << ", bracket_high: " << bracket_high
<< ", bracket width: " << fabs(bracket_low.x - bracket_high.x)
<< ", bracket abs delta cost: "
<< fabs(bracket_low.value - bracket_high.value);
@@ -461,11 +461,9 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
// but still has bracket_high.value < initial_position.value.
// 3. bracket_high is chosen after bracket_low, s.t.
// bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0.
- if (!this->ZoomPhase(initial_position,
- bracket_low,
- bracket_high,
- &solution,
- summary) && !solution.value_is_valid) {
+ if (!this->ZoomPhase(
+ initial_position, bracket_low, bracket_high, &solution, summary) &&
+ !solution.value_is_valid) {
// Failed to find a valid point (given the specified decrease parameters)
// within the specified bracket.
return;
@@ -501,20 +499,18 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
//
// Returns false if no step size > minimum step size was found which
// satisfies at least the Armijo condition.
-bool WolfeLineSearch::BracketingPhase(
- const FunctionSample& initial_position,
- const double step_size_estimate,
- FunctionSample* bracket_low,
- FunctionSample* bracket_high,
- bool* do_zoom_search,
- Summary* summary) const {
+bool WolfeLineSearch::BracketingPhase(const FunctionSample& initial_position,
+ const double step_size_estimate,
+ FunctionSample* bracket_low,
+ FunctionSample* bracket_high,
+ bool* do_zoom_search,
+ Summary* summary) const {
LineSearchFunction* function = options().function;
FunctionSample previous = initial_position;
FunctionSample current;
- const double descent_direction_max_norm =
- function->DirectionInfinityNorm();
+ const double descent_direction_max_norm = function->DirectionInfinityNorm();
*do_zoom_search = false;
*bracket_low = initial_position;
@@ -536,10 +532,9 @@ bool WolfeLineSearch::BracketingPhase(
++summary->num_iterations;
if (current.value_is_valid &&
- (current.value > (initial_position.value
- + options().sufficient_decrease
- * initial_position.gradient
- * current.x) ||
+ (current.value > (initial_position.value +
+ options().sufficient_decrease *
+ initial_position.gradient * current.x) ||
(previous.value_is_valid && current.value > previous.value))) {
// Bracket found: current step size violates Armijo sufficient decrease
// condition, or has stepped past an inflection point of f() relative to
@@ -556,8 +551,8 @@ bool WolfeLineSearch::BracketingPhase(
}
if (current.value_is_valid &&
- fabs(current.gradient) <=
- -options().sufficient_curvature_decrease * initial_position.gradient) {
+ fabs(current.gradient) <= -options().sufficient_curvature_decrease *
+ initial_position.gradient) {
// Current step size satisfies the strong Wolfe conditions, and is thus a
// valid termination point, therefore a Zoom not required.
*bracket_low = current;
@@ -585,23 +580,25 @@ bool WolfeLineSearch::BracketingPhase(
break;
} else if (current.value_is_valid &&
- fabs(current.x - previous.x) * descent_direction_max_norm
- < options().min_step_size) {
+ fabs(current.x - previous.x) * descent_direction_max_norm <
+ options().min_step_size) {
// We have shrunk the search bracket to a width less than our tolerance,
// and still not found either a point satisfying the strong Wolfe
// conditions, or a valid bracket containing such a point. Stop searching
// and set bracket_low to the step size amongst all those tested which
// minimizes f() and satisfies the Armijo condition.
- LOG_IF(WARNING, !options().is_silent)
- << "Line search failed: Wolfe bracketing phase shrank "
- << "bracket width: " << fabs(current.x - previous.x)
- << ", to < tolerance: " << options().min_step_size
- << ", with descent_direction_max_norm: "
- << descent_direction_max_norm << ", and failed to find "
- << "a point satisfying the strong Wolfe conditions or a "
- << "bracketing containing such a point. Accepting "
- << "point found satisfying Armijo condition only, to "
- << "allow continuation.";
+
+ if (!options().is_silent) {
+ LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank "
+ << "bracket width: " << fabs(current.x - previous.x)
+ << ", to < tolerance: " << options().min_step_size
+ << ", with descent_direction_max_norm: "
+ << descent_direction_max_norm << ", and failed to find "
+ << "a point satisfying the strong Wolfe conditions or a "
+ << "bracketing containing such a point. Accepting "
+ << "point found satisfying Armijo condition only, to "
+ << "allow continuation.";
+ }
*bracket_low = current;
break;
@@ -609,18 +606,22 @@ bool WolfeLineSearch::BracketingPhase(
// Check num iterations bound here so that we always evaluate the
// max_num_iterations-th iteration against all conditions, and
// then perform no additional (unused) evaluations.
- summary->error =
- StringPrintf("Line search failed: Wolfe bracketing phase failed to "
- "find a point satisfying strong Wolfe conditions, or a "
- "bracket containing such a point within specified "
- "max_num_iterations: %d", options().max_num_iterations);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Wolfe bracketing phase failed to "
+ "find a point satisfying strong Wolfe conditions, or a "
+ "bracket containing such a point within specified "
+ "max_num_iterations: %d",
+ options().max_num_iterations);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
// Ensure that bracket_low is always set to the step size amongst all
// those tested which minimizes f() and satisfies the Armijo condition
// when we terminate due to the 'artificial' max_num_iterations condition.
*bracket_low =
current.value_is_valid && current.value < bracket_low->value
- ? current : *bracket_low;
+ ? current
+ : *bracket_low;
break;
}
// Either: f(current) is invalid; or, f(current) is valid, but does not
@@ -632,17 +633,16 @@ bool WolfeLineSearch::BracketingPhase(
// size.
//
// In Nocedal & Wright [1] (p60), the step-size can only increase in the
- // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k * factor].
- // However this does not account for the function returning invalid values
- // which we support, in which case we need to contract the step size whilst
- // ensuring that we do not invert the bracket, i.e, we require that:
+ // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k *
+ // factor]. However this does not account for the function returning invalid
+ // values which we support, in which case we need to contract the step size
+ // whilst ensuring that we do not invert the bracket, i.e, we require that:
// step_size_{k-1} <= step_size_{k+1} < step_size_k.
const double min_step_size =
- current.value_is_valid
- ? current.x : previous.x;
+ current.value_is_valid ? current.x : previous.x;
const double max_step_size =
- current.value_is_valid
- ? (current.x * options().max_step_expansion) : current.x;
+ current.value_is_valid ? (current.x * options().max_step_expansion)
+ : current.x;
// We are performing 2-point interpolation only here, but the API of
// InterpolatingPolynomialMinimizingStepSize() allows for up to
@@ -652,22 +652,24 @@ bool WolfeLineSearch::BracketingPhase(
DCHECK(!unused_previous.value_is_valid);
// Contracts step size if f(current) is not valid.
const double polynomial_minimization_start_time = WallTimeInSeconds();
- const double step_size =
- this->InterpolatingPolynomialMinimizingStepSize(
- options().interpolation_type,
- previous,
- unused_previous,
- current,
- min_step_size,
- max_step_size);
+ const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+ options().interpolation_type,
+ previous,
+ unused_previous,
+ current,
+ min_step_size,
+ max_step_size);
summary->polynomial_minimization_time_in_seconds +=
(WallTimeInSeconds() - polynomial_minimization_start_time);
if (step_size * descent_direction_max_norm < options().min_step_size) {
- summary->error =
- StringPrintf("Line search failed: step_size too small: %.5e "
- "with descent_direction_max_norm: %.5e", step_size,
- descent_direction_max_norm);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: step_size too small: %.5e "
+ "with descent_direction_max_norm: %.5e",
+ step_size,
+ descent_direction_max_norm);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return false;
}
@@ -684,8 +686,8 @@ bool WolfeLineSearch::BracketingPhase(
// Ensure that even if a valid bracket was found, we will only mark a zoom
// as required if the bracket's width is greater than our minimum tolerance.
if (*do_zoom_search &&
- fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm
- < options().min_step_size) {
+ fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm <
+ options().min_step_size) {
*do_zoom_search = false;
}
@@ -707,8 +709,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
<< std::scientific << std::setprecision(kErrorMessageNumericPrecision)
<< "Ceres bug: f_low input to Wolfe Zoom invalid, please contact "
<< "the developers!, initial_position: " << initial_position
- << ", bracket_low: " << bracket_low
- << ", bracket_high: "<< bracket_high;
+ << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;
// We do not require bracket_high.gradient_is_valid as the gradient condition
// for a valid bracket is only dependent upon bracket_low.gradient, and
// in order to minimize jacobian evaluations, bracket_high.gradient may
@@ -725,8 +726,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
<< std::scientific << std::setprecision(kErrorMessageNumericPrecision)
<< "Ceres bug: f_high input to Wolfe Zoom invalid, please "
<< "contact the developers!, initial_position: " << initial_position
- << ", bracket_low: " << bracket_low
- << ", bracket_high: "<< bracket_high;
+ << ", bracket_low: " << bracket_low << ", bracket_high: " << bracket_high;
if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) {
// The third condition for a valid initial bracket:
@@ -738,18 +738,20 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
// returns inconsistent gradient values relative to the function values,
// we do not CHECK_LT(), but we do stop processing and return an invalid
// value.
- summary->error =
- StringPrintf("Line search failed: Wolfe zoom phase passed a bracket "
- "which does not satisfy: bracket_low.gradient * "
- "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
- "with initial_position: %s, bracket_low: %s, bracket_high:"
- " %s, the most likely cause of which is the cost function "
- "returning inconsistent gradient & function values.",
- bracket_low.gradient * (bracket_high.x - bracket_low.x),
- initial_position.ToDebugString().c_str(),
- bracket_low.ToDebugString().c_str(),
- bracket_high.ToDebugString().c_str());
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Wolfe zoom phase passed a bracket "
+ "which does not satisfy: bracket_low.gradient * "
+ "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] "
+ "with initial_position: %s, bracket_low: %s, bracket_high:"
+ " %s, the most likely cause of which is the cost function "
+ "returning inconsistent gradient & function values.",
+ bracket_low.gradient * (bracket_high.x - bracket_low.x),
+ initial_position.ToDebugString().c_str(),
+ bracket_low.ToDebugString().c_str(),
+ bracket_high.ToDebugString().c_str());
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
solution->value_is_valid = false;
return false;
}
@@ -763,25 +765,30 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
// not satisfy the Wolfe condition.
*solution = bracket_low;
if (summary->num_iterations >= options().max_num_iterations) {
- summary->error =
- StringPrintf("Line search failed: Wolfe zoom phase failed to "
- "find a point satisfying strong Wolfe conditions "
- "within specified max_num_iterations: %d, "
- "(num iterations taken for bracketing: %d).",
- options().max_num_iterations, num_bracketing_iterations);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Wolfe zoom phase failed to "
+ "find a point satisfying strong Wolfe conditions "
+ "within specified max_num_iterations: %d, "
+ "(num iterations taken for bracketing: %d).",
+ options().max_num_iterations,
+ num_bracketing_iterations);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return false;
}
- if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm
- < options().min_step_size) {
+ if (fabs(bracket_high.x - bracket_low.x) * descent_direction_max_norm <
+ options().min_step_size) {
// Bracket width has been reduced below tolerance, and no point satisfying
// the strong Wolfe conditions has been found.
- summary->error =
- StringPrintf("Line search failed: Wolfe zoom bracket width: %.5e "
- "too small with descent_direction_max_norm: %.5e.",
- fabs(bracket_high.x - bracket_low.x),
- descent_direction_max_norm);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Wolfe zoom bracket width: %.5e "
+ "too small with descent_direction_max_norm: %.5e.",
+ fabs(bracket_high.x - bracket_low.x),
+ descent_direction_max_norm);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return false;
}
@@ -799,14 +806,13 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
const FunctionSample unused_previous;
DCHECK(!unused_previous.value_is_valid);
const double polynomial_minimization_start_time = WallTimeInSeconds();
- const double step_size =
- this->InterpolatingPolynomialMinimizingStepSize(
- options().interpolation_type,
- lower_bound_step,
- unused_previous,
- upper_bound_step,
- lower_bound_step.x,
- upper_bound_step.x);
+ const double step_size = this->InterpolatingPolynomialMinimizingStepSize(
+ options().interpolation_type,
+ lower_bound_step,
+ unused_previous,
+ upper_bound_step,
+ lower_bound_step.x,
+ upper_bound_step.x);
summary->polynomial_minimization_time_in_seconds +=
(WallTimeInSeconds() - polynomial_minimization_start_time);
// No check on magnitude of step size being too small here as it is
@@ -826,13 +832,17 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
const bool kEvaluateGradient = true;
function->Evaluate(step_size, kEvaluateGradient, solution);
if (!solution->value_is_valid || !solution->gradient_is_valid) {
- summary->error =
- StringPrintf("Line search failed: Wolfe Zoom phase found "
- "step_size: %.5e, for which function is invalid, "
- "between low_step: %.5e and high_step: %.5e "
- "at which function is valid.",
- solution->x, bracket_low.x, bracket_high.x);
- LOG_IF(WARNING, !options().is_silent) << summary->error;
+ summary->error = StringPrintf(
+ "Line search failed: Wolfe Zoom phase found "
+ "step_size: %.5e, for which function is invalid, "
+ "between low_step: %.5e and high_step: %.5e "
+ "at which function is valid.",
+ solution->x,
+ bracket_low.x,
+ bracket_high.x);
+ if (!options().is_silent) {
+ LOG(WARNING) << summary->error;
+ }
return false;
}
@@ -842,10 +852,9 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
<< ", bracket_high: " << bracket_high
<< ", minimizing solution: " << *solution;
- if ((solution->value > (initial_position.value
- + options().sufficient_decrease
- * initial_position.gradient
- * solution->x)) ||
+ if ((solution->value > (initial_position.value +
+ options().sufficient_decrease *
+ initial_position.gradient * solution->x)) ||
(solution->value >= bracket_low.value)) {
// Armijo sufficient decrease not satisfied, or not better
// than current lowest sample, use as new upper bound.
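The conditions checked in the reflowed expressions above are the Armijo sufficient decrease condition and the strong Wolfe curvature condition. Written out as standalone predicates (a sketch; the parameter names follow options().sufficient_decrease and options().sufficient_curvature_decrease):

#include <cmath>

// phi(a) is the cost at step size a along the search direction d, and
// dphi(a) is the directional derivative of the cost along d at that point.

// Armijo: the cost must drop at least in proportion to the step size.
bool ArmijoSatisfied(double phi_a, double phi_0, double dphi_0,
                     double step_size, double sufficient_decrease) {
  return phi_a <= phi_0 + sufficient_decrease * dphi_0 * step_size;
}

// Strong Wolfe curvature: the directional derivative must have shrunk enough
// in magnitude (dphi_0 is negative for a descent direction).
bool StrongWolfeCurvatureSatisfied(double dphi_a, double dphi_0,
                                   double sufficient_curvature_decrease) {
  return std::fabs(dphi_a) <= -sufficient_curvature_decrease * dphi_0;
}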
diff --git a/extern/ceres/internal/ceres/line_search.h b/extern/ceres/internal/ceres/line_search.h
index d59fd777367..634c9717532 100644
--- a/extern/ceres/internal/ceres/line_search.h
+++ b/extern/ceres/internal/ceres/line_search.h
@@ -35,6 +35,7 @@
#include <string>
#include <vector>
+
#include "ceres/function_sample.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/line_search_direction.cc b/extern/ceres/internal/ceres/line_search_direction.cc
index 1f9d205bff5..48e6c9812e9 100644
--- a/extern/ceres/internal/ceres/line_search_direction.cc
+++ b/extern/ceres/internal/ceres/line_search_direction.cc
@@ -29,9 +29,10 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/line_search_direction.h"
+
+#include "ceres/internal/eigen.h"
#include "ceres/line_search_minimizer.h"
#include "ceres/low_rank_inverse_hessian.h"
-#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
@@ -52,9 +53,7 @@ class NonlinearConjugateGradient : public LineSearchDirection {
public:
NonlinearConjugateGradient(const NonlinearConjugateGradientType type,
const double function_tolerance)
- : type_(type),
- function_tolerance_(function_tolerance) {
- }
+ : type_(type), function_tolerance_(function_tolerance) {}
bool NextDirection(const LineSearchMinimizer::State& previous,
const LineSearchMinimizer::State& current,
@@ -72,14 +71,14 @@ class NonlinearConjugateGradient : public LineSearchDirection {
break;
case HESTENES_STIEFEL:
gradient_change = current.gradient - previous.gradient;
- beta = (current.gradient.dot(gradient_change) /
- previous.search_direction.dot(gradient_change));
+ beta = (current.gradient.dot(gradient_change) /
+ previous.search_direction.dot(gradient_change));
break;
default:
LOG(FATAL) << "Unknown nonlinear conjugate gradient type: " << type_;
}
- *search_direction = -current.gradient + beta * previous.search_direction;
+ *search_direction = -current.gradient + beta * previous.search_direction;
const double directional_derivative =
current.gradient.dot(*search_direction);
if (directional_derivative > -function_tolerance_) {
@@ -144,18 +143,19 @@ class LBFGS : public LineSearchDirection {
class BFGS : public LineSearchDirection {
public:
- BFGS(const int num_parameters,
- const bool use_approximate_eigenvalue_scaling)
+ BFGS(const int num_parameters, const bool use_approximate_eigenvalue_scaling)
: num_parameters_(num_parameters),
use_approximate_eigenvalue_scaling_(use_approximate_eigenvalue_scaling),
initialized_(false),
is_positive_definite_(true) {
- LOG_IF(WARNING, num_parameters_ >= 1e3)
- << "BFGS line search being created with: " << num_parameters_
- << " parameters, this will allocate a dense approximate inverse Hessian"
- << " of size: " << num_parameters_ << " x " << num_parameters_
- << ", consider using the L-BFGS memory-efficient line search direction "
- << "instead.";
+ if (num_parameters_ >= 1000) {
+ LOG(WARNING) << "BFGS line search being created with: " << num_parameters_
+ << " parameters, this will allocate a dense approximate "
+ << "inverse Hessian of size: " << num_parameters_ << " x "
+ << num_parameters_
+ << ", consider using the L-BFGS memory-efficient line "
+ << "search direction instead.";
+ }
// Construct inverse_hessian_ after logging warning about size s.t. if the
// allocation crashes us, the log will highlight what the issue likely was.
inverse_hessian_ = Matrix::Identity(num_parameters, num_parameters);
@@ -212,8 +212,8 @@ class BFGS : public LineSearchDirection {
if (delta_x_dot_delta_gradient <=
kBFGSSecantConditionHessianUpdateTolerance) {
VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too "
- << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
- << kBFGSSecantConditionHessianUpdateTolerance
+ << "small: " << delta_x_dot_delta_gradient
+ << ", tolerance: " << kBFGSSecantConditionHessianUpdateTolerance
<< " (Secant condition).";
} else {
// Update dense inverse Hessian approximation.
@@ -300,13 +300,13 @@ class BFGS : public LineSearchDirection {
// Calculate scalar: (1 + \rho_k * y_k' * H_k * y_k)
const double delta_x_times_delta_x_transpose_scale_factor =
- (1.0 + (rho_k * delta_gradient.transpose() *
- inverse_hessian_.selfadjointView<Eigen::Lower>() *
- delta_gradient));
+ (1.0 +
+ (rho_k * delta_gradient.transpose() *
+ inverse_hessian_.selfadjointView<Eigen::Lower>() * delta_gradient));
// Calculate: B = (1 + \rho_k * y_k' * H_k * y_k) * s_k * s_k'
Matrix B = Matrix::Zero(num_parameters_, num_parameters_);
- B.selfadjointView<Eigen::Lower>().
- rankUpdate(delta_x, delta_x_times_delta_x_transpose_scale_factor);
+ B.selfadjointView<Eigen::Lower>().rankUpdate(
+ delta_x, delta_x_times_delta_x_transpose_scale_factor);
// Finally, update inverse Hessian approximation according to:
// H_k = H_{k-1} + \rho_k * (B - (A + A')). Note that (A + A') is
@@ -315,9 +315,8 @@ class BFGS : public LineSearchDirection {
rho_k * (B - A - A.transpose());
}
- *search_direction =
- inverse_hessian_.selfadjointView<Eigen::Lower>() *
- (-1.0 * current.gradient);
+ *search_direction = inverse_hessian_.selfadjointView<Eigen::Lower>() *
+ (-1.0 * current.gradient);
if (search_direction->dot(current.gradient) >= 0.0) {
LOG(WARNING) << "Numerical failure in BFGS update: inverse Hessian "
@@ -339,16 +338,15 @@ class BFGS : public LineSearchDirection {
bool is_positive_definite_;
};
-LineSearchDirection*
-LineSearchDirection::Create(const LineSearchDirection::Options& options) {
+LineSearchDirection* LineSearchDirection::Create(
+ const LineSearchDirection::Options& options) {
if (options.type == STEEPEST_DESCENT) {
return new SteepestDescent;
}
if (options.type == NONLINEAR_CONJUGATE_GRADIENT) {
return new NonlinearConjugateGradient(
- options.nonlinear_conjugate_gradient_type,
- options.function_tolerance);
+ options.nonlinear_conjugate_gradient_type, options.function_tolerance);
}
if (options.type == ceres::LBFGS) {
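Among the formatting-only changes above, the Hestenes-Stiefel branch computes the conjugate gradient coefficient beta and the next search direction. As a stand-alone Eigen sketch of that step:

#include "Eigen/Core"

// One nonlinear conjugate gradient update (Hestenes-Stiefel):
//   y    = g_k - g_{k-1}
//   beta = g_k . y / (d_{k-1} . y)
//   d_k  = -g_k + beta * d_{k-1}
Eigen::VectorXd HestenesStiefelDirection(const Eigen::VectorXd& prev_gradient,
                                         const Eigen::VectorXd& prev_direction,
                                         const Eigen::VectorXd& gradient) {
  const Eigen::VectorXd gradient_change = gradient - prev_gradient;
  const double beta = gradient.dot(gradient_change) /
                      prev_direction.dot(gradient_change);
  return -gradient + beta * prev_direction;
}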
diff --git a/extern/ceres/internal/ceres/line_search_direction.h b/extern/ceres/internal/ceres/line_search_direction.h
index 467578d5f7c..2fcf4729ca5 100644
--- a/extern/ceres/internal/ceres/line_search_direction.h
+++ b/extern/ceres/internal/ceres/line_search_direction.h
@@ -47,8 +47,7 @@ class LineSearchDirection {
nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
function_tolerance(1e-12),
max_lbfgs_rank(20),
- use_approximate_eigenvalue_bfgs_scaling(true) {
- }
+ use_approximate_eigenvalue_bfgs_scaling(true) {}
int num_parameters;
LineSearchDirectionType type;
diff --git a/extern/ceres/internal/ceres/line_search_minimizer.cc b/extern/ceres/internal/ceres/line_search_minimizer.cc
index 931f56c960c..ea1c5072a14 100644
--- a/extern/ceres/internal/ceres/line_search_minimizer.cc
+++ b/extern/ceres/internal/ceres/line_search_minimizer.cc
@@ -41,8 +41,8 @@
#include "ceres/line_search_minimizer.h"
#include <algorithm>
-#include <cstdlib>
#include <cmath>
+#include <cstdlib>
#include <memory>
#include <string>
#include <vector>
@@ -88,7 +88,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
Solver::Summary* summary) {
const bool is_not_silent = !options.is_silent;
double start_time = WallTimeInSeconds();
- double iteration_start_time = start_time;
+ double iteration_start_time = start_time;
CHECK(options.evaluator != nullptr);
Evaluator* evaluator = options.evaluator.get();
@@ -123,15 +123,20 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
nullptr)) {
summary->termination_type = FAILURE;
summary->message = "Initial cost and jacobian evaluation failed.";
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
return;
}
if (!EvaluateGradientNorms(evaluator, x, &current_state, &summary->message)) {
summary->termination_type = FAILURE;
- summary->message = "Initial cost and jacobian evaluation failed. "
- "More details: " + summary->message;
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ summary->message =
+ "Initial cost and jacobian evaluation failed. More details: " +
+ summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
return;
}
@@ -141,20 +146,21 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
- summary->message = StringPrintf("Gradient tolerance reached. "
- "Gradient max norm: %e <= %e",
- iteration_summary.gradient_max_norm,
- options.gradient_tolerance);
+ summary->message =
+ StringPrintf("Gradient tolerance reached. Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options.gradient_tolerance);
summary->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
return;
}
iteration_summary.iteration_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
iteration_summary.cumulative_time_in_seconds =
- WallTimeInSeconds() - start_time
- + summary->preprocessor_time_in_seconds;
+ WallTimeInSeconds() - start_time + summary->preprocessor_time_in_seconds;
summary->iterations.push_back(iteration_summary);
LineSearchDirection::Options line_search_direction_options;
@@ -189,13 +195,13 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
line_search_options.is_silent = options.is_silent;
line_search_options.function = &line_search_function;
- std::unique_ptr<LineSearch>
- line_search(LineSearch::Create(options.line_search_type,
- line_search_options,
- &summary->message));
+ std::unique_ptr<LineSearch> line_search(LineSearch::Create(
+ options.line_search_type, line_search_options, &summary->message));
if (line_search.get() == nullptr) {
summary->termination_type = FAILURE;
- LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(ERROR) << "Terminating: " << summary->message;
+ }
return;
}
@@ -211,16 +217,20 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
if (iteration_summary.iteration >= options.max_num_iterations) {
summary->message = "Maximum number of iterations reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
break;
}
const double total_solver_time = iteration_start_time - start_time +
- summary->preprocessor_time_in_seconds;
+ summary->preprocessor_time_in_seconds;
if (total_solver_time >= options.max_solver_time_in_seconds) {
summary->message = "Maximum solver time reached.";
summary->termination_type = NO_CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
break;
}
@@ -234,23 +244,23 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
current_state.search_direction = -current_state.gradient;
} else {
line_search_status = line_search_direction->NextDirection(
- previous_state,
- current_state,
- &current_state.search_direction);
+ previous_state, current_state, &current_state.search_direction);
}
if (!line_search_status &&
num_line_search_direction_restarts >=
- options.max_num_line_search_direction_restarts) {
+ options.max_num_line_search_direction_restarts) {
// Line search direction failed to generate a new direction, and we
// have already reached our specified maximum number of restarts,
// terminate optimization.
- summary->message =
- StringPrintf("Line search direction failure: specified "
- "max_num_line_search_direction_restarts: %d reached.",
- options.max_num_line_search_direction_restarts);
+ summary->message = StringPrintf(
+ "Line search direction failure: specified "
+ "max_num_line_search_direction_restarts: %d reached.",
+ options.max_num_line_search_direction_restarts);
summary->termination_type = FAILURE;
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
break;
} else if (!line_search_status) {
// Restart line search direction with gradient descent on first iteration
@@ -259,16 +269,17 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
options.max_num_line_search_direction_restarts);
++num_line_search_direction_restarts;
- LOG_IF(WARNING, is_not_silent)
- << "Line search direction algorithm: "
- << LineSearchDirectionTypeToString(
- options.line_search_direction_type)
- << ", failed to produce a valid new direction at "
- << "iteration: " << iteration_summary.iteration
- << ". Restarting, number of restarts: "
- << num_line_search_direction_restarts << " / "
- << options.max_num_line_search_direction_restarts
- << " [max].";
+ if (is_not_silent) {
+ LOG(WARNING) << "Line search direction algorithm: "
+ << LineSearchDirectionTypeToString(
+ options.line_search_direction_type)
+ << ", failed to produce a valid new direction at "
+ << "iteration: " << iteration_summary.iteration
+ << ". Restarting, number of restarts: "
+ << num_line_search_direction_restarts << " / "
+ << options.max_num_line_search_direction_restarts
+ << " [max].";
+ }
line_search_direction.reset(
LineSearchDirection::Create(line_search_direction_options));
current_state.search_direction = -current_state.gradient;
@@ -286,21 +297,25 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
// iteration.
const double initial_step_size =
(iteration_summary.iteration == 1 || !line_search_status)
- ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
- : std::min(1.0, 2.0 * (current_state.cost - previous_state.cost) /
- current_state.directional_derivative);
+ ? std::min(1.0, 1.0 / current_state.gradient_max_norm)
+ : std::min(1.0,
+ 2.0 * (current_state.cost - previous_state.cost) /
+ current_state.directional_derivative);
// By definition, we should only ever go forwards along the specified search
// direction in a line search, most likely cause for this being violated
// would be a numerical failure in the line search direction calculation.
if (initial_step_size < 0.0) {
- summary->message =
- StringPrintf("Numerical failure in line search, initial_step_size is "
- "negative: %.5e, directional_derivative: %.5e, "
- "(current_cost - previous_cost): %.5e",
- initial_step_size, current_state.directional_derivative,
- (current_state.cost - previous_state.cost));
+ summary->message = StringPrintf(
+ "Numerical failure in line search, initial_step_size is "
+ "negative: %.5e, directional_derivative: %.5e, "
+ "(current_cost - previous_cost): %.5e",
+ initial_step_size,
+ current_state.directional_derivative,
+ (current_state.cost - previous_state.cost));
summary->termination_type = FAILURE;
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
break;
}
@@ -309,14 +324,17 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
current_state.directional_derivative,
&line_search_summary);
if (!line_search_summary.success) {
- summary->message =
- StringPrintf("Numerical failure in line search, failed to find "
- "a valid step size, (did not run out of iterations) "
- "using initial_step_size: %.5e, initial_cost: %.5e, "
- "initial_gradient: %.5e.",
- initial_step_size, current_state.cost,
- current_state.directional_derivative);
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ summary->message = StringPrintf(
+ "Numerical failure in line search, failed to find "
+ "a valid step size, (did not run out of iterations) "
+ "using initial_step_size: %.5e, initial_cost: %.5e, "
+ "initial_gradient: %.5e.",
+ initial_step_size,
+ current_state.cost,
+ current_state.directional_derivative);
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
summary->termination_type = FAILURE;
break;
}
@@ -343,7 +361,9 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
nullptr)) {
summary->termination_type = FAILURE;
summary->message = "Cost and jacobian evaluation failed.";
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
return;
}
}
@@ -357,7 +377,9 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
"Step failed to evaluate. This should not happen as the step was "
"valid when it was selected by the line search. More details: " +
summary->message;
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ LOG(WARNING) << "Terminating: " << summary->message;
+ }
break;
}
@@ -373,7 +395,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.step_is_valid = true;
iteration_summary.step_is_successful = true;
- iteration_summary.step_size = current_state.step_size;
+ iteration_summary.step_size = current_state.step_size;
iteration_summary.line_search_function_evaluations =
line_search_summary.num_function_evaluations;
iteration_summary.line_search_gradient_evaluations =
@@ -383,8 +405,8 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.iteration_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
iteration_summary.cumulative_time_in_seconds =
- WallTimeInSeconds() - start_time
- + summary->preprocessor_time_in_seconds;
+ WallTimeInSeconds() - start_time +
+ summary->preprocessor_time_in_seconds;
summary->iterations.push_back(iteration_summary);
// Iterations inside the line search algorithm are considered
@@ -393,7 +415,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
// minimizer. The number of line search steps is the total number
// of inner line search iterations (or steps) across the entire
// minimization.
- summary->num_line_search_steps += line_search_summary.num_iterations;
+ summary->num_line_search_steps += line_search_summary.num_iterations;
summary->line_search_cost_evaluation_time_in_seconds +=
line_search_summary.cost_evaluation_time_in_seconds;
summary->line_search_gradient_evaluation_time_in_seconds +=
@@ -404,27 +426,32 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
line_search_summary.total_time_in_seconds;
++summary->num_successful_steps;
- const double step_size_tolerance = options.parameter_tolerance *
- (x_norm + options.parameter_tolerance);
+ const double step_size_tolerance =
+ options.parameter_tolerance * (x_norm + options.parameter_tolerance);
if (iteration_summary.step_norm <= step_size_tolerance) {
- summary->message =
- StringPrintf("Parameter tolerance reached. "
- "Relative step_norm: %e <= %e.",
- (iteration_summary.step_norm /
- (x_norm + options.parameter_tolerance)),
- options.parameter_tolerance);
+ summary->message = StringPrintf(
+ "Parameter tolerance reached. "
+ "Relative step_norm: %e <= %e.",
+ (iteration_summary.step_norm /
+ (x_norm + options.parameter_tolerance)),
+ options.parameter_tolerance);
summary->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
return;
}
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
- summary->message = StringPrintf("Gradient tolerance reached. "
- "Gradient max norm: %e <= %e",
- iteration_summary.gradient_max_norm,
- options.gradient_tolerance);
+ summary->message = StringPrintf(
+ "Gradient tolerance reached. "
+ "Gradient max norm: %e <= %e",
+ iteration_summary.gradient_max_norm,
+ options.gradient_tolerance);
summary->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
break;
}
@@ -438,7 +465,9 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
std::abs(iteration_summary.cost_change) / previous_state.cost,
options.function_tolerance);
summary->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
break;
}
}
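The reformatted StringPrintf messages in this file all report one of three termination tests: gradient max norm against gradient_tolerance, relative step norm against parameter_tolerance, and relative cost change against function_tolerance. A compact sketch of those tests, assuming the caller has already computed the quantities involved (the function and its names are illustrative, not the Ceres implementation):

// Sketch of the three termination tests reported in the messages above.
// Inputs are assumed to be precomputed by the caller.
#include <cmath>

enum class Termination { kNone, kGradientTol, kParameterTol, kFunctionTol };

Termination CheckConvergence(double gradient_max_norm,
                             double step_norm,
                             double x_norm,
                             double cost,
                             double previous_cost,
                             double gradient_tolerance,
                             double parameter_tolerance,
                             double function_tolerance) {
  if (gradient_max_norm <= gradient_tolerance) {
    return Termination::kGradientTol;
  }
  // Relative step size test, as in the hunk:
  // step_norm <= parameter_tolerance * (x_norm + parameter_tolerance).
  if (step_norm <= parameter_tolerance * (x_norm + parameter_tolerance)) {
    return Termination::kParameterTol;
  }
  if (std::abs(cost - previous_cost) / previous_cost < function_tolerance) {
    return Termination::kFunctionTol;
  }
  return Termination::kNone;
}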
diff --git a/extern/ceres/internal/ceres/line_search_minimizer.h b/extern/ceres/internal/ceres/line_search_minimizer.h
index 191128a933b..79e8dc9e49a 100644
--- a/extern/ceres/internal/ceres/line_search_minimizer.h
+++ b/extern/ceres/internal/ceres/line_search_minimizer.h
@@ -31,10 +31,10 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
#define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_
+#include "ceres/internal/eigen.h"
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include "ceres/types.h"
-#include "ceres/internal/eigen.h"
#include "glog/logging.h"
namespace ceres {
@@ -46,15 +46,13 @@ namespace internal {
class LineSearchMinimizer : public Minimizer {
public:
struct State {
- State(int num_parameters,
- int num_effective_parameters)
+ State(int num_parameters, int num_effective_parameters)
: cost(0.0),
gradient(num_effective_parameters),
gradient_squared_norm(0.0),
search_direction(num_effective_parameters),
directional_derivative(0.0),
- step_size(0.0) {
- }
+ step_size(0.0) {}
double cost;
Vector gradient;
diff --git a/extern/ceres/internal/ceres/line_search_preprocessor.cc b/extern/ceres/internal/ceres/line_search_preprocessor.cc
index 5a21809e509..6a69425e764 100644
--- a/extern/ceres/internal/ceres/line_search_preprocessor.cc
+++ b/extern/ceres/internal/ceres/line_search_preprocessor.cc
@@ -32,6 +32,7 @@
#include <numeric>
#include <string>
+
#include "ceres/casts.h"
#include "ceres/context_impl.h"
#include "ceres/evaluator.h"
@@ -62,16 +63,14 @@ bool SetupEvaluator(PreprocessedProblem* pp) {
pp->evaluator_options.context = pp->problem->context();
pp->evaluator_options.evaluation_callback =
pp->reduced_program->mutable_evaluation_callback();
- pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
- pp->reduced_program.get(),
- &pp->error));
+ pp->evaluator.reset(Evaluator::Create(
+ pp->evaluator_options, pp->reduced_program.get(), &pp->error));
return (pp->evaluator.get() != NULL);
}
} // namespace
-LineSearchPreprocessor::~LineSearchPreprocessor() {
-}
+LineSearchPreprocessor::~LineSearchPreprocessor() {}
bool LineSearchPreprocessor::Preprocess(const Solver::Options& options,
ProblemImpl* problem,
@@ -86,10 +85,8 @@ bool LineSearchPreprocessor::Preprocess(const Solver::Options& options,
return false;
}
- pp->reduced_program.reset(
- program->CreateReducedProgram(&pp->removed_parameter_blocks,
- &pp->fixed_cost,
- &pp->error));
+ pp->reduced_program.reset(program->CreateReducedProgram(
+ &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error));
if (pp->reduced_program.get() == NULL) {
return false;
diff --git a/extern/ceres/internal/ceres/line_search_preprocessor.h b/extern/ceres/internal/ceres/line_search_preprocessor.h
index 12ccb53e011..bd426c7f2f6 100644
--- a/extern/ceres/internal/ceres/line_search_preprocessor.h
+++ b/extern/ceres/internal/ceres/line_search_preprocessor.h
@@ -31,12 +31,13 @@
#ifndef CERES_INTERNAL_LINE_SEARCH_PREPROCESSOR_H_
#define CERES_INTERNAL_LINE_SEARCH_PREPROCESSOR_H_
+#include "ceres/internal/port.h"
#include "ceres/preprocessor.h"
namespace ceres {
namespace internal {
-class LineSearchPreprocessor : public Preprocessor {
+class CERES_EXPORT_INTERNAL LineSearchPreprocessor : public Preprocessor {
public:
virtual ~LineSearchPreprocessor();
bool Preprocess(const Solver::Options& options,
diff --git a/extern/ceres/internal/ceres/linear_least_squares_problems.cc b/extern/ceres/internal/ceres/linear_least_squares_problems.cc
index 7c523d399e1..299051c5bcf 100644
--- a/extern/ceres/internal/ceres/linear_least_squares_problems.cc
+++ b/extern/ceres/internal/ceres/linear_least_squares_problems.cc
@@ -101,7 +101,7 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem0() {
int counter = 0;
for (int i = 0; i < 3; ++i) {
- for (int j = 0; j< 2; ++j) {
+ for (int j = 0; j < 2; ++j) {
Ai[counter] = i;
Aj[counter] = j;
++counter;
@@ -132,7 +132,6 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem0() {
return problem;
}
-
/*
A = [1 0 | 2 0 0
3 0 | 0 4 0
@@ -187,9 +186,8 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem1() {
int num_cols = 5;
LinearLeastSquaresProblem* problem = new LinearLeastSquaresProblem;
- TripletSparseMatrix* A = new TripletSparseMatrix(num_rows,
- num_cols,
- num_rows * num_cols);
+ TripletSparseMatrix* A =
+ new TripletSparseMatrix(num_rows, num_cols, num_rows * num_cols);
problem->b.reset(new double[num_rows]);
problem->D.reset(new double[num_cols]);
problem->num_eliminate_blocks = 2;
@@ -404,7 +402,6 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem2() {
return problem;
}
-
/*
A = [1 0
3 0
@@ -620,8 +617,7 @@ bool DumpLinearLeastSquaresProblemToConsole(const SparseMatrix* A,
LOG(INFO) << "A^T: \n" << AA.transpose();
if (D != NULL) {
- LOG(INFO) << "A's appended diagonal:\n"
- << ConstVectorRef(D, A->num_cols());
+ LOG(INFO) << "A's appended diagonal:\n" << ConstVectorRef(D, A->num_cols());
}
if (b != NULL) {
@@ -659,10 +655,8 @@ bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
string matlab_script;
StringAppendF(&matlab_script,
"function lsqp = load_trust_region_problem()\n");
- StringAppendF(&matlab_script,
- "lsqp.num_rows = %d;\n", A->num_rows());
- StringAppendF(&matlab_script,
- "lsqp.num_cols = %d;\n", A->num_cols());
+ StringAppendF(&matlab_script, "lsqp.num_rows = %d;\n", A->num_rows());
+ StringAppendF(&matlab_script, "lsqp.num_cols = %d;\n", A->num_cols());
{
string filename = filename_base + "_A.txt";
@@ -670,8 +664,8 @@ bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
CHECK(fptr != nullptr);
A->ToTextFile(fptr);
fclose(fptr);
- StringAppendF(&matlab_script,
- "tmp = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script, "tmp = load('%s', '-ascii');\n", filename.c_str());
StringAppendF(
&matlab_script,
"lsqp.A = sparse(tmp(:, 1) + 1, tmp(:, 2) + 1, tmp(:, 3), %d, %d);\n",
@@ -679,26 +673,25 @@ bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
A->num_cols());
}
-
if (D != NULL) {
string filename = filename_base + "_D.txt";
WriteArrayToFileOrDie(filename, D, A->num_cols());
- StringAppendF(&matlab_script,
- "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script, "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
}
if (b != NULL) {
string filename = filename_base + "_b.txt";
WriteArrayToFileOrDie(filename, b, A->num_rows());
- StringAppendF(&matlab_script,
- "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script, "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
}
if (x != NULL) {
string filename = filename_base + "_x.txt";
WriteArrayToFileOrDie(filename, x, A->num_cols());
- StringAppendF(&matlab_script,
- "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script, "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
}
string matlab_filename = filename_base + ".m";
@@ -716,12 +709,11 @@ bool DumpLinearLeastSquaresProblem(const string& filename_base,
int num_eliminate_blocks) {
switch (dump_format_type) {
case CONSOLE:
- return DumpLinearLeastSquaresProblemToConsole(A, D, b, x,
- num_eliminate_blocks);
+ return DumpLinearLeastSquaresProblemToConsole(
+ A, D, b, x, num_eliminate_blocks);
case TEXTFILE:
- return DumpLinearLeastSquaresProblemToTextFile(filename_base,
- A, D, b, x,
- num_eliminate_blocks);
+ return DumpLinearLeastSquaresProblemToTextFile(
+ filename_base, A, D, b, x, num_eliminate_blocks);
default:
LOG(FATAL) << "Unknown DumpFormatType " << dump_format_type;
}
diff --git a/extern/ceres/internal/ceres/linear_least_squares_problems.h b/extern/ceres/internal/ceres/linear_least_squares_problems.h
index 5dfcd34e109..cddaa9fd4d7 100644
--- a/extern/ceres/internal/ceres/linear_least_squares_problems.h
+++ b/extern/ceres/internal/ceres/linear_least_squares_problems.h
@@ -34,18 +34,17 @@
#include <memory>
#include <string>
#include <vector>
-#include "ceres/sparse_matrix.h"
+
#include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
namespace ceres {
namespace internal {
// Structure defining a linear least squares problem and if possible
// ground truth solutions. To be used by various LinearSolver tests.
-struct LinearLeastSquaresProblem {
- LinearLeastSquaresProblem()
- : num_eliminate_blocks(0) {
- }
+struct CERES_EXPORT_INTERNAL LinearLeastSquaresProblem {
+ LinearLeastSquaresProblem() : num_eliminate_blocks(0) {}
std::unique_ptr<SparseMatrix> A;
std::unique_ptr<double[]> b;
@@ -61,7 +60,8 @@ struct LinearLeastSquaresProblem {
};
// Factories for linear least squares problem.
-LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id);
+CERES_EXPORT_INTERNAL LinearLeastSquaresProblem*
+CreateLinearLeastSquaresProblemFromId(int id);
LinearLeastSquaresProblem* LinearLeastSquaresProblem0();
LinearLeastSquaresProblem* LinearLeastSquaresProblem1();
diff --git a/extern/ceres/internal/ceres/linear_operator.cc b/extern/ceres/internal/ceres/linear_operator.cc
index 9d291bd3465..548c724267e 100644
--- a/extern/ceres/internal/ceres/linear_operator.cc
+++ b/extern/ceres/internal/ceres/linear_operator.cc
@@ -33,8 +33,7 @@
namespace ceres {
namespace internal {
-LinearOperator::~LinearOperator() {
-}
+LinearOperator::~LinearOperator() {}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/linear_operator.h b/extern/ceres/internal/ceres/linear_operator.h
index 6463fb5089a..9c59fc39c13 100644
--- a/extern/ceres/internal/ceres/linear_operator.h
+++ b/extern/ceres/internal/ceres/linear_operator.h
@@ -33,6 +33,7 @@
#ifndef CERES_INTERNAL_LINEAR_OPERATOR_H_
#define CERES_INTERNAL_LINEAR_OPERATOR_H_
+#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
@@ -40,7 +41,7 @@ namespace internal {
// This is an abstract base class for linear operators. It supports
// access to size information and left and right multiply operators.
-class LinearOperator {
+class CERES_EXPORT_INTERNAL LinearOperator {
public:
virtual ~LinearOperator();
diff --git a/extern/ceres/internal/ceres/linear_solver.cc b/extern/ceres/internal/ceres/linear_solver.cc
index 107af6afcc8..6cae2488f07 100644
--- a/extern/ceres/internal/ceres/linear_solver.cc
+++ b/extern/ceres/internal/ceres/linear_solver.cc
@@ -33,9 +33,9 @@
#include "ceres/cgnr_solver.h"
#include "ceres/dense_normal_cholesky_solver.h"
#include "ceres/dense_qr_solver.h"
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
#include "ceres/iterative_schur_complement_solver.h"
#include "ceres/schur_complement_solver.h"
-#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
#include "ceres/sparse_normal_cholesky_solver.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -43,8 +43,7 @@
namespace ceres {
namespace internal {
-LinearSolver::~LinearSolver() {
-}
+LinearSolver::~LinearSolver() {}
LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
LinearSolverType linear_solver_type) {
@@ -112,8 +111,7 @@ LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
return new DenseNormalCholeskySolver(options);
default:
- LOG(FATAL) << "Unknown linear solver type :"
- << options.type;
+ LOG(FATAL) << "Unknown linear solver type :" << options.type;
return NULL; // MSVC doesn't understand that LOG(FATAL) never returns.
}
}
diff --git a/extern/ceres/internal/ceres/linear_solver.h b/extern/ceres/internal/ceres/linear_solver.h
index cb624b372dd..49c6527acc9 100644
--- a/extern/ceres/internal/ceres/linear_solver.h
+++ b/extern/ceres/internal/ceres/linear_solver.h
@@ -38,12 +38,14 @@
#include <map>
#include <string>
#include <vector>
+
#include "ceres/block_sparse_matrix.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/context_impl.h"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/execution_summary.h"
+#include "ceres/internal/port.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -74,11 +76,11 @@ enum LinearSolverTerminationType {
// algebra library should use before computing a sparse factorization
// (usually Cholesky).
enum OrderingType {
- NATURAL, // Do not re-order the matrix. This is useful when the
- // matrix has been ordered using a fill-reducing ordering
- // already.
- AMD // Use the Approximate Minimum Degree algorithm to re-order
- // the matrix.
+ NATURAL, // Do not re-order the matrix. This is useful when the
+ // matrix has been ordered using a fill-reducing ordering
+ // already.
+ AMD // Use the Approximate Minimum Degree algorithm to re-order
+ // the matrix.
};
class LinearOperator;
@@ -99,7 +101,7 @@ class LinearOperator;
// The Options struct configures the LinearSolver object for its
// lifetime. The PerSolveOptions struct is used to specify options for
// a particular Solve call.
-class LinearSolver {
+class CERES_EXPORT_INTERNAL LinearSolver {
public:
struct Options {
LinearSolverType type = SPARSE_NORMAL_CHOLESKY;
@@ -215,7 +217,6 @@ class LinearSolver {
// used a preconditioner.
LinearOperator* preconditioner = nullptr;
-
// The following tolerance related options only makes sense for
// iterative solvers. Direct solvers ignore them.
@@ -329,10 +330,12 @@ class TypedLinearSolver : public LinearSolver {
// Linear solvers that depend on acccess to the low level structure of
// a SparseMatrix.
+// clang-format off
typedef TypedLinearSolver<BlockSparseMatrix> BlockSparseMatrixSolver; // NOLINT
typedef TypedLinearSolver<CompressedRowSparseMatrix> CompressedRowSparseMatrixSolver; // NOLINT
typedef TypedLinearSolver<DenseSparseMatrix> DenseSparseMatrixSolver; // NOLINT
typedef TypedLinearSolver<TripletSparseMatrix> TripletSparseMatrixSolver; // NOLINT
+// clang-format on
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/local_parameterization.cc b/extern/ceres/internal/ceres/local_parameterization.cc
index 97fdbed3eda..62947f06fcc 100644
--- a/extern/ceres/internal/ceres/local_parameterization.cc
+++ b/extern/ceres/internal/ceres/local_parameterization.cc
@@ -94,6 +94,10 @@ bool IdentityParameterization::MultiplyByJacobian(const double* x,
SubsetParameterization::SubsetParameterization(
int size, const vector<int>& constant_parameters)
: local_size_(size - constant_parameters.size()), constancy_mask_(size, 0) {
+ if (constant_parameters.empty()) {
+ return;
+ }
+
vector<int> constant = constant_parameters;
std::sort(constant.begin(), constant.end());
CHECK_GE(constant.front(), 0) << "Indices indicating constant parameter must "
diff --git a/extern/ceres/internal/ceres/loss_function.cc b/extern/ceres/internal/ceres/loss_function.cc
index 2c21a7377ca..353f29a5bf5 100644
--- a/extern/ceres/internal/ceres/loss_function.cc
+++ b/extern/ceres/internal/ceres/loss_function.cc
@@ -52,7 +52,7 @@ void HuberLoss::Evaluate(double s, double rho[3]) const {
const double r = sqrt(s);
rho[0] = 2.0 * a_ * r - b_;
rho[1] = std::max(std::numeric_limits<double>::min(), a_ / r);
- rho[2] = - rho[1] / (2.0 * s);
+ rho[2] = -rho[1] / (2.0 * s);
} else {
// Inlier region.
rho[0] = s;
@@ -67,7 +67,7 @@ void SoftLOneLoss::Evaluate(double s, double rho[3]) const {
// 'sum' and 'tmp' are always positive, assuming that 's' is.
rho[0] = 2.0 * b_ * (tmp - 1.0);
rho[1] = std::max(std::numeric_limits<double>::min(), 1.0 / tmp);
- rho[2] = - (c_ * rho[1]) / (2.0 * sum);
+ rho[2] = -(c_ * rho[1]) / (2.0 * sum);
}
void CauchyLoss::Evaluate(double s, double rho[3]) const {
@@ -76,7 +76,7 @@ void CauchyLoss::Evaluate(double s, double rho[3]) const {
// 'sum' and 'inv' are always positive, assuming that 's' is.
rho[0] = b_ * log(sum);
rho[1] = std::max(std::numeric_limits<double>::min(), inv);
- rho[2] = - c_ * (inv * inv);
+ rho[2] = -c_ * (inv * inv);
}
void ArctanLoss::Evaluate(double s, double rho[3]) const {
@@ -89,9 +89,7 @@ void ArctanLoss::Evaluate(double s, double rho[3]) const {
}
TolerantLoss::TolerantLoss(double a, double b)
- : a_(a),
- b_(b),
- c_(b * log(1.0 + exp(-a / b))) {
+ : a_(a), b_(b), c_(b * log(1.0 + exp(-a / b))) {
CHECK_GE(a, 0.0);
CHECK_GT(b, 0.0);
}
@@ -133,12 +131,11 @@ void TukeyLoss::Evaluate(double s, double* rho) const {
}
}
-ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
- const LossFunction* g, Ownership ownership_g)
- : f_(f),
- g_(g),
- ownership_f_(ownership_f),
- ownership_g_(ownership_g) {
+ComposedLoss::ComposedLoss(const LossFunction* f,
+ Ownership ownership_f,
+ const LossFunction* g,
+ Ownership ownership_g)
+ : f_(f), g_(g), ownership_f_(ownership_f), ownership_g_(ownership_g) {
CHECK(f_ != nullptr);
CHECK(g_ != nullptr);
}
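The loss-function hunks only normalize sign spacing, but they show the rho[] convention: rho[0] is the robustified cost in s = r^2, rho[1] its first derivative and rho[2] its second. A small self-check of that convention for the Cauchy loss, assuming (as in upstream Ceres, not shown in the hunk) b = a*a, c = 1/b and sum = 1 + s*c:

// Self-check (not Ceres code): rho[1] should match a finite difference of
// rho[0] with respect to s.
#include <cmath>
#include <cstdio>

void CauchyEvaluate(double a, double s, double rho[3]) {
  const double b = a * a;
  const double c = 1.0 / b;
  const double sum = 1.0 + s * c;
  const double inv = 1.0 / sum;
  rho[0] = b * std::log(sum);
  rho[1] = inv;
  rho[2] = -c * (inv * inv);
}

int main() {
  const double a = 0.5, s = 2.0, h = 1e-6;
  double rho[3], rho_plus[3], rho_minus[3];
  CauchyEvaluate(a, s, rho);
  CauchyEvaluate(a, s + h, rho_plus);
  CauchyEvaluate(a, s - h, rho_minus);
  const double fd = (rho_plus[0] - rho_minus[0]) / (2.0 * h);
  std::printf("rho[1] = %.9f, finite difference = %.9f\n", rho[1], fd);
  return 0;
}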
diff --git a/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc b/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
index f3953c46006..c73e5dbf88d 100644
--- a/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
+++ b/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
@@ -28,10 +28,11 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "ceres/low_rank_inverse_hessian.h"
+
#include <list>
#include "ceres/internal/eigen.h"
-#include "ceres/low_rank_inverse_hessian.h"
#include "glog/logging.h"
namespace ceres {
@@ -84,8 +85,7 @@ LowRankInverseHessian::LowRankInverseHessian(
approximate_eigenvalue_scale_(1.0),
delta_x_history_(num_parameters, max_num_corrections),
delta_gradient_history_(num_parameters, max_num_corrections),
- delta_x_dot_delta_gradient_(max_num_corrections) {
-}
+ delta_x_dot_delta_gradient_(max_num_corrections) {}
bool LowRankInverseHessian::Update(const Vector& delta_x,
const Vector& delta_gradient) {
@@ -93,13 +93,12 @@ bool LowRankInverseHessian::Update(const Vector& delta_x,
if (delta_x_dot_delta_gradient <=
kLBFGSSecantConditionHessianUpdateTolerance) {
VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too "
- << "small: " << delta_x_dot_delta_gradient << ", tolerance: "
- << kLBFGSSecantConditionHessianUpdateTolerance
+ << "small: " << delta_x_dot_delta_gradient
+ << ", tolerance: " << kLBFGSSecantConditionHessianUpdateTolerance
<< " (Secant condition).";
return false;
}
-
int next = indices_.size();
// Once the size of the list reaches max_num_corrections_, simulate
// a circular buffer by removing the first element of the list and
@@ -132,7 +131,7 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
it != indices_.rend();
++it) {
const double alpha_i = delta_x_history_.col(*it).dot(search_direction) /
- delta_x_dot_delta_gradient_(*it);
+ delta_x_dot_delta_gradient_(*it);
search_direction -= alpha_i * delta_gradient_history_.col(*it);
alpha(*it) = alpha_i;
}
@@ -177,7 +176,7 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
for (const int i : indices_) {
const double beta = delta_gradient_history_.col(i).dot(search_direction) /
- delta_x_dot_delta_gradient_(i);
+ delta_x_dot_delta_gradient_(i);
search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
}
}
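The two reindented divisions above are the alpha and beta terms of the standard L-BFGS two-loop recursion that RightMultiply implements over its correction history. A self-contained version of that recursion with an identity initial inverse Hessian (generic names, not the member variables used in the file):

// Generic L-BFGS two-loop recursion (a sketch, not the Ceres member code):
// given correction pairs (s_i, y_i) with rho_i = 1 / (y_i . s_i), compute an
// approximation to H * g starting from H0 = I.
#include <vector>
#include <Eigen/Dense>

Eigen::VectorXd TwoLoopRecursion(const std::vector<Eigen::VectorXd>& s,
                                 const std::vector<Eigen::VectorXd>& y,
                                 const Eigen::VectorXd& g) {
  const int m = static_cast<int>(s.size());
  std::vector<double> alpha(m, 0.0);
  Eigen::VectorXd q = g;
  // First loop: newest correction to oldest.
  for (int i = m - 1; i >= 0; --i) {
    const double rho = 1.0 / y[i].dot(s[i]);
    alpha[i] = rho * s[i].dot(q);
    q -= alpha[i] * y[i];
  }
  Eigen::VectorXd r = q;  // identity initial inverse Hessian
  // Second loop: oldest correction to newest.
  for (int i = 0; i < m; ++i) {
    const double rho = 1.0 / y[i].dot(s[i]);
    const double beta = rho * y[i].dot(r);
    r += s[i] * (alpha[i] - beta);
  }
  return r;  // approximately H * g
}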
diff --git a/extern/ceres/internal/ceres/map_util.h b/extern/ceres/internal/ceres/map_util.h
index f55aee37689..6e310f8db2d 100644
--- a/extern/ceres/internal/ceres/map_util.h
+++ b/extern/ceres/internal/ceres/map_util.h
@@ -34,6 +34,7 @@
#define CERES_INTERNAL_MAP_UTIL_H_
#include <utility>
+
#include "ceres/internal/port.h"
#include "glog/logging.h"
@@ -55,9 +56,9 @@ namespace ceres {
// This version assumes the key is printable, and includes it in the fatal log
// message.
template <class Collection>
-const typename Collection::value_type::second_type&
-FindOrDie(const Collection& collection,
- const typename Collection::value_type::first_type& key) {
+const typename Collection::value_type::second_type& FindOrDie(
+ const Collection& collection,
+ const typename Collection::value_type::first_type& key) {
typename Collection::const_iterator it = collection.find(key);
CHECK(it != collection.end()) << "Map key not found: " << key;
return it->second;
@@ -67,10 +68,10 @@ FindOrDie(const Collection& collection,
// If the key is present in the map then the value associated with that
// key is returned, otherwise the value passed as a default is returned.
template <class Collection>
-const typename Collection::value_type::second_type
-FindWithDefault(const Collection& collection,
- const typename Collection::value_type::first_type& key,
- const typename Collection::value_type::second_type& value) {
+const typename Collection::value_type::second_type FindWithDefault(
+ const Collection& collection,
+ const typename Collection::value_type::first_type& key,
+ const typename Collection::value_type::second_type& value) {
typename Collection::const_iterator it = collection.find(key);
if (it == collection.end()) {
return value;
@@ -84,7 +85,7 @@ FindWithDefault(const Collection& collection,
// took place, false indicates the key was already present.
template <class Collection>
bool InsertIfNotPresent(
- Collection * const collection,
+ Collection* const collection,
const typename Collection::value_type::first_type& key,
const typename Collection::value_type::second_type& value) {
std::pair<typename Collection::iterator, bool> ret =
@@ -96,9 +97,9 @@ bool InsertIfNotPresent(
// Same as above but the returned pointer is not const and can be used to change
// the stored value.
template <class Collection>
-typename Collection::value_type::second_type*
-FindOrNull(Collection& collection, // NOLINT
- const typename Collection::value_type::first_type& key) {
+typename Collection::value_type::second_type* FindOrNull(
+ Collection& collection, // NOLINT
+ const typename Collection::value_type::first_type& key) {
typename Collection::iterator it = collection.find(key);
if (it == collection.end()) {
return 0;
@@ -116,13 +117,13 @@ bool ContainsKey(const Collection& collection, const Key& key) {
// Inserts a new key/value into a map or hash_map.
// Dies if the key is already present.
-template<class Collection>
+template <class Collection>
void InsertOrDie(Collection* const collection,
const typename Collection::value_type::first_type& key,
const typename Collection::value_type::second_type& data) {
typedef typename Collection::value_type value_type;
CHECK(collection->insert(value_type(key, data)).second)
- << "duplicate key: " << key;
+ << "duplicate key: " << key;
}
} // namespace ceres
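For reference, a short usage sketch of the map helpers whose signatures are reflowed above, assuming the internal header is on the include path; keys and values are made up:

// Usage sketch for FindOrDie, FindWithDefault and InsertIfNotPresent.
#include <map>
#include <string>
#include "ceres/map_util.h"

void MapUtilExample() {
  std::map<std::string, int> sizes;
  ceres::InsertIfNotPresent(&sizes, "camera", 9);  // true: inserted
  ceres::InsertIfNotPresent(&sizes, "camera", 6);  // false: 9 is kept
  const int camera = ceres::FindOrDie(sizes, std::string("camera"));        // 9
  const int point = ceres::FindWithDefault(sizes, std::string("point"), 3); // 3
  (void)camera;
  (void)point;
}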
diff --git a/extern/ceres/internal/ceres/minimizer.cc b/extern/ceres/internal/ceres/minimizer.cc
index f5960336f12..b96e0c9de44 100644
--- a/extern/ceres/internal/ceres/minimizer.cc
+++ b/extern/ceres/internal/ceres/minimizer.cc
@@ -28,8 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include "ceres/line_search_minimizer.h"
#include "ceres/minimizer.h"
+
+#include "ceres/line_search_minimizer.h"
#include "ceres/trust_region_minimizer.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -50,7 +51,6 @@ Minimizer* Minimizer::Create(MinimizerType minimizer_type) {
return NULL;
}
-
Minimizer::~Minimizer() {}
bool Minimizer::RunCallbacks(const Minimizer::Options& options,
@@ -70,12 +70,16 @@ bool Minimizer::RunCallbacks(const Minimizer::Options& options,
summary->termination_type = USER_SUCCESS;
summary->message =
"User callback returned SOLVER_TERMINATE_SUCCESSFULLY.";
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
return false;
case SOLVER_ABORT:
summary->termination_type = USER_FAILURE;
summary->message = "User callback returned SOLVER_ABORT.";
- VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
+ if (is_not_silent) {
+ VLOG(1) << "Terminating: " << summary->message;
+ }
return false;
default:
LOG(FATAL) << "Unknown type of user callback status";
diff --git a/extern/ceres/internal/ceres/minimizer.h b/extern/ceres/internal/ceres/minimizer.h
index afdd60d2944..246550de7cd 100644
--- a/extern/ceres/internal/ceres/minimizer.h
+++ b/extern/ceres/internal/ceres/minimizer.h
@@ -34,6 +34,7 @@
#include <memory>
#include <string>
#include <vector>
+
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/solver.h"
@@ -48,19 +49,15 @@ class CoordinateDescentMinimizer;
class LinearSolver;
// Interface for non-linear least squares solvers.
-class Minimizer {
+class CERES_EXPORT_INTERNAL Minimizer {
public:
// Options struct to control the behaviour of the Minimizer. Please
// see solver.h for detailed information about the meaning and
// default values of each of these parameters.
struct Options {
- Options() {
- Init(Solver::Options());
- }
+ Options() { Init(Solver::Options()); }
- explicit Options(const Solver::Options& options) {
- Init(options);
- }
+ explicit Options(const Solver::Options& options) { Init(options); }
void Init(const Solver::Options& options) {
num_threads = options.num_threads;
@@ -92,8 +89,7 @@ class Minimizer {
max_lbfgs_rank = options.max_lbfgs_rank;
use_approximate_eigenvalue_bfgs_scaling =
options.use_approximate_eigenvalue_bfgs_scaling;
- line_search_interpolation_type =
- options.line_search_interpolation_type;
+ line_search_interpolation_type = options.line_search_interpolation_type;
min_line_search_step_size = options.min_line_search_step_size;
line_search_sufficient_function_decrease =
options.line_search_sufficient_function_decrease;
@@ -107,8 +103,7 @@ class Minimizer {
options.max_num_line_search_direction_restarts;
line_search_sufficient_curvature_decrease =
options.line_search_sufficient_curvature_decrease;
- max_line_search_step_expansion =
- options.max_line_search_step_expansion;
+ max_line_search_step_expansion = options.max_line_search_step_expansion;
inner_iteration_tolerance = options.inner_iteration_tolerance;
is_silent = (options.logging_type == SILENT);
is_constrained = false;
diff --git a/extern/ceres/internal/ceres/normal_prior.cc b/extern/ceres/internal/ceres/normal_prior.cc
index a3d5d8ed772..4a62132dbda 100644
--- a/extern/ceres/internal/ceres/normal_prior.cc
+++ b/extern/ceres/internal/ceres/normal_prior.cc
@@ -32,14 +32,14 @@
#include <cstddef>
#include <vector>
+
#include "ceres/internal/eigen.h"
#include "ceres/types.h"
#include "glog/logging.h"
namespace ceres {
-NormalPrior::NormalPrior(const Matrix& A, const Vector& b)
- : A_(A), b_(b) {
+NormalPrior::NormalPrior(const Matrix& A, const Vector& b) : A_(A), b_(b) {
CHECK_GT(b_.rows(), 0);
CHECK_GT(A_.rows(), 0);
CHECK_EQ(b_.rows(), A.cols());
diff --git a/extern/ceres/internal/ceres/pair_hash.h b/extern/ceres/internal/ceres/pair_hash.h
index 80453bae7db..abbedccf961 100644
--- a/extern/ceres/internal/ceres/pair_hash.h
+++ b/extern/ceres/internal/ceres/pair_hash.h
@@ -33,10 +33,11 @@
#ifndef CERES_INTERNAL_PAIR_HASH_H_
#define CERES_INTERNAL_PAIR_HASH_H_
-#include "ceres/internal/port.h"
#include <cstdint>
#include <utility>
+#include "ceres/internal/port.h"
+
namespace ceres {
namespace internal {
@@ -53,6 +54,8 @@ namespace internal {
// in 18 cycles if you're lucky. On x86 architectures, this requires 45
// instructions in 27 cycles, if you're lucky.
//
+// clang-format off
+//
// 32bit version
inline void hash_mix(uint32_t& a, uint32_t& b, uint32_t& c) {
a -= b; a -= c; a ^= (c>>13);
@@ -78,6 +81,7 @@ inline void hash_mix(uint64_t& a, uint64_t& b, uint64_t& c) {
b -= c; b -= a; b ^= (a<<49);
c -= a; c -= b; c ^= (b>>11);
}
+// clang-format on
inline uint32_t Hash32NumWithSeed(uint32_t num, uint32_t c) {
// The golden ratio; an arbitrary value.
diff --git a/extern/ceres/internal/ceres/parallel_for.h b/extern/ceres/internal/ceres/parallel_for.h
index 2da2320c137..b64bd310650 100644
--- a/extern/ceres/internal/ceres/parallel_for.h
+++ b/extern/ceres/internal/ceres/parallel_for.h
@@ -34,6 +34,7 @@
#include <functional>
#include "ceres/context_impl.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -45,22 +46,24 @@ int MaxNumThreadsAvailable();
// Execute the function for every element in the range [start, end) with at most
// num_threads. It will execute all the work on the calling thread if
// num_threads is 1.
-void ParallelFor(ContextImpl* context,
- int start,
- int end,
- int num_threads,
- const std::function<void(int)>& function);
+CERES_EXPORT_INTERNAL void ParallelFor(
+ ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int)>& function);
// Execute the function for every element in the range [start, end) with at most
// num_threads. It will execute all the work on the calling thread if
// num_threads is 1. Each invocation of function() will be passed a thread_id
// in [0, num_threads) that is guaranteed to be distinct from the value passed
// to any concurrent execution of function().
-void ParallelFor(ContextImpl* context,
- int start,
- int end,
- int num_threads,
- const std::function<void(int thread_id, int i)>& function);
+CERES_EXPORT_INTERNAL void ParallelFor(
+ ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int thread_id, int i)>& function);
} // namespace internal
} // namespace ceres
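The exported declarations above are the two ParallelFor overloads. A usage sketch for the simpler overload, assuming a ContextImpl is available (both symbols are Ceres-internal, so this is illustrative only):

// Squares every element of a vector using the ParallelFor overload that takes
// a std::function<void(int)>; work runs on the calling thread if num_threads
// is 1, as documented above.
#include <vector>
#include "ceres/context_impl.h"
#include "ceres/parallel_for.h"

void SquareInPlace(ceres::internal::ContextImpl* context,
                   std::vector<double>* values,
                   int num_threads) {
  ceres::internal::ParallelFor(
      context, 0, static_cast<int>(values->size()), num_threads,
      [values](int i) { (*values)[i] *= (*values)[i]; });
}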
diff --git a/extern/ceres/internal/ceres/parallel_for_cxx.cc b/extern/ceres/internal/ceres/parallel_for_cxx.cc
index 8e358f5900b..4da40c01eb6 100644
--- a/extern/ceres/internal/ceres/parallel_for_cxx.cc
+++ b/extern/ceres/internal/ceres/parallel_for_cxx.cc
@@ -33,14 +33,13 @@
#ifdef CERES_USE_CXX_THREADS
-#include "ceres/parallel_for.h"
-
#include <cmath>
#include <condition_variable>
#include <memory>
#include <mutex>
#include "ceres/concurrent_queue.h"
+#include "ceres/parallel_for.h"
#include "ceres/scoped_thread_token.h"
#include "ceres/thread_token_provider.h"
#include "glog/logging.h"
@@ -117,9 +116,7 @@ struct SharedState {
} // namespace
-int MaxNumThreadsAvailable() {
- return ThreadPool::MaxNumThreadsAvailable();
-}
+int MaxNumThreadsAvailable() { return ThreadPool::MaxNumThreadsAvailable(); }
// See ParallelFor (below) for more details.
void ParallelFor(ContextImpl* context,
@@ -141,8 +138,10 @@ void ParallelFor(ContextImpl* context,
return;
}
- ParallelFor(context, start, end, num_threads,
- [&function](int /*thread_id*/, int i) { function(i); });
+ ParallelFor(
+ context, start, end, num_threads, [&function](int /*thread_id*/, int i) {
+ function(i);
+ });
}
// This implementation uses a fixed size max worker pool with a shared task
@@ -213,8 +212,7 @@ void ParallelFor(ContextImpl* context,
const int thread_id = scoped_thread_token.token();
// Perform each task.
- for (int j = shared_state->start + i;
- j < shared_state->end;
+ for (int j = shared_state->start + i; j < shared_state->end;
j += shared_state->num_work_items) {
function(thread_id, j);
}
@@ -244,4 +242,4 @@ void ParallelFor(ContextImpl* context,
} // namespace internal
} // namespace ceres
-#endif // CERES_USE_CXX_THREADS
+#endif // CERES_USE_CXX_THREADS
diff --git a/extern/ceres/internal/ceres/parallel_for_nothreads.cc b/extern/ceres/internal/ceres/parallel_for_nothreads.cc
index e8f450a714d..d036569fcd7 100644
--- a/extern/ceres/internal/ceres/parallel_for_nothreads.cc
+++ b/extern/ceres/internal/ceres/parallel_for_nothreads.cc
@@ -72,7 +72,7 @@ void ParallelFor(ContextImpl* context,
}
}
-}
-}
+} // namespace internal
+} // namespace ceres
#endif // CERES_NO_THREADS
diff --git a/extern/ceres/internal/ceres/parallel_for_openmp.cc b/extern/ceres/internal/ceres/parallel_for_openmp.cc
index 8afe3b11f8d..eb9d90581ae 100644
--- a/extern/ceres/internal/ceres/parallel_for_openmp.cc
+++ b/extern/ceres/internal/ceres/parallel_for_openmp.cc
@@ -34,7 +34,6 @@
#if defined(CERES_USE_OPENMP)
#include "ceres/parallel_for.h"
-
#include "ceres/scoped_thread_token.h"
#include "ceres/thread_token_provider.h"
#include "glog/logging.h"
@@ -43,9 +42,7 @@
namespace ceres {
namespace internal {
-int MaxNumThreadsAvailable() {
- return omp_get_max_threads();
-}
+int MaxNumThreadsAvailable() { return omp_get_max_threads(); }
void ParallelFor(ContextImpl* context,
int start,
diff --git a/extern/ceres/internal/ceres/parallel_utils.h b/extern/ceres/internal/ceres/parallel_utils.h
index 1291428228a..89d21106d74 100644
--- a/extern/ceres/internal/ceres/parallel_utils.h
+++ b/extern/ceres/internal/ceres/parallel_utils.h
@@ -31,6 +31,8 @@
#ifndef CERES_INTERNAL_PARALLEL_UTILS_H_
#define CERES_INTERNAL_PARALLEL_UTILS_H_
+#include "ceres/internal/port.h"
+
namespace ceres {
namespace internal {
@@ -59,7 +61,10 @@ namespace internal {
// });
// which in each iteration will produce i and j satisfying
// 0 <= i <= j < n
-void LinearIndexToUpperTriangularIndex(int k, int n, int* i, int* j);
+CERES_EXPORT_INTERNAL void LinearIndexToUpperTriangularIndex(int k,
+ int n,
+ int* i,
+ int* j);
} // namespace internal
} // namespace ceres
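LinearIndexToUpperTriangularIndex, now exported above, maps a flat index k to a pair (i, j) with 0 <= i <= j < n. A deliberately naive reference version of that mapping, handy for checking a closed-form implementation; it is not the Ceres routine:

// Naive reference mapping: walk row by row over the upper triangle (diagonal
// included) of an n x n matrix until the linear index k falls inside the
// current row. Row r of the upper triangle has n - r entries.
void NaiveLinearIndexToUpperTriangular(int k, int n, int* i, int* j) {
  int row = 0;
  int row_length = n;
  while (k >= row_length) {
    k -= row_length;
    ++row;
    --row_length;
  }
  *i = row;
  *j = row + k;
}
// For n = 3 the flat indices 0..5 map to (0,0) (0,1) (0,2) (1,1) (1,2) (2,2).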
diff --git a/extern/ceres/internal/ceres/parameter_block_ordering.cc b/extern/ceres/internal/ceres/parameter_block_ordering.cc
index ef521c0e11b..9899c243899 100644
--- a/extern/ceres/internal/ceres/parameter_block_ordering.cc
+++ b/extern/ceres/internal/ceres/parameter_block_ordering.cc
@@ -50,11 +50,11 @@ using std::set;
using std::vector;
int ComputeStableSchurOrdering(const Program& program,
- vector<ParameterBlock*>* ordering) {
+ vector<ParameterBlock*>* ordering) {
CHECK(ordering != nullptr);
ordering->clear();
EventLogger event_logger("ComputeStableSchurOrdering");
- std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
event_logger.AddEvent("CreateHessianGraph");
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
@@ -86,7 +86,7 @@ int ComputeSchurOrdering(const Program& program,
CHECK(ordering != nullptr);
ordering->clear();
- std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
int independent_set_size = IndependentSetOrdering(*graph, ordering);
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
@@ -106,7 +106,7 @@ void ComputeRecursiveIndependentSetOrdering(const Program& program,
CHECK(ordering != nullptr);
ordering->Clear();
const vector<ParameterBlock*> parameter_blocks = program.parameter_blocks();
- std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph<ParameterBlock*>> graph(CreateHessianGraph(program));
int num_covered = 0;
int round = 0;
diff --git a/extern/ceres/internal/ceres/parameter_block_ordering.h b/extern/ceres/internal/ceres/parameter_block_ordering.h
index f996929f6b3..82ab75dc6dc 100644
--- a/extern/ceres/internal/ceres/parameter_block_ordering.h
+++ b/extern/ceres/internal/ceres/parameter_block_ordering.h
@@ -32,8 +32,10 @@
#define CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_
#include <vector>
-#include "ceres/ordered_groups.h"
+
#include "ceres/graph.h"
+#include "ceres/internal/port.h"
+#include "ceres/ordered_groups.h"
#include "ceres/types.h"
namespace ceres {
@@ -55,33 +57,34 @@ class ParameterBlock;
// ordering = [independent set,
// complement of the independent set,
// fixed blocks]
-int ComputeSchurOrdering(const Program& program,
- std::vector<ParameterBlock* >* ordering);
+CERES_EXPORT_INTERNAL int ComputeSchurOrdering(
+ const Program& program, std::vector<ParameterBlock*>* ordering);
// Same as above, except that ties while computing the independent set
// ordering are resolved in favour of the order in which the parameter
// blocks occur in the program.
-int ComputeStableSchurOrdering(const Program& program,
- std::vector<ParameterBlock* >* ordering);
+CERES_EXPORT_INTERNAL int ComputeStableSchurOrdering(
+ const Program& program, std::vector<ParameterBlock*>* ordering);
// Use an approximate independent set ordering to decompose the
// parameter blocks of a problem in a sequence of independent
// sets. The ordering covers all the non-constant parameter blocks in
// the program.
-void ComputeRecursiveIndependentSetOrdering(const Program& program,
- ParameterBlockOrdering* ordering);
+CERES_EXPORT_INTERNAL void ComputeRecursiveIndependentSetOrdering(
+ const Program& program, ParameterBlockOrdering* ordering);
// Builds a graph on the parameter blocks of a Problem, whose
// structure reflects the sparsity structure of the Hessian. Each
// vertex corresponds to a parameter block in the Problem except for
// parameter blocks that are marked constant. An edge connects two
// parameter blocks, if they co-occur in a residual block.
-Graph<ParameterBlock*>* CreateHessianGraph(const Program& program);
+CERES_EXPORT_INTERNAL Graph<ParameterBlock*>* CreateHessianGraph(
+ const Program& program);
// Iterate over each of the groups in order of their priority and fill
// summary with their sizes.
-void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
- std::vector<int>* group_sizes);
+CERES_EXPORT_INTERNAL void OrderingToGroupSizes(
+ const ParameterBlockOrdering* ordering, std::vector<int>* group_sizes);
} // namespace internal
} // namespace ceres
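The Schur ordering functions declared above put an independent set of the Hessian graph first, as the comment in the hunk describes. As a conceptual sketch only (Ceres' IndependentSetOrdering has its own vertex ordering and tie-breaking), a greedy maximal independent set over symmetric adjacency lists:

// Conceptual sketch, not Ceres code: greedily pick vertices whose neighbours
// have not been picked yet. Assumes an undirected graph, i.e. symmetric
// adjacency lists.
#include <vector>

std::vector<int> GreedyIndependentSet(
    const std::vector<std::vector<int>>& adjacency) {
  const int n = static_cast<int>(adjacency.size());
  std::vector<bool> blocked(n, false);
  std::vector<int> independent;
  for (int v = 0; v < n; ++v) {
    if (blocked[v]) continue;
    independent.push_back(v);                      // v joins the set
    for (int u : adjacency[v]) blocked[u] = true;  // neighbours are excluded
  }
  return independent;
}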
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view.cc b/extern/ceres/internal/ceres/partitioned_matrix_view.cc
index d7a998d68a3..b67bc905d15 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view.cc
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view.cc
@@ -41,122 +41,142 @@
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-PartitionedMatrixViewBase*
-PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
- const BlockSparseMatrix& matrix) {
+PartitionedMatrixViewBase* PartitionedMatrixViewBase::Create(
+ const LinearSolver::Options& options, const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
- if ((options.row_block_size == 2) &&
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 2)) {
- return new PartitionedMatrixView<2, 2, 2>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 2, 2>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 2, 3>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 2, 3>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 2, 4>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 2, 4>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2)) {
- return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 3, 3>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 3, 3>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 3, 4>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 3, 4>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 6)) {
- return new PartitionedMatrixView<2, 3, 6>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 3, 6>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 9)) {
- return new PartitionedMatrixView<2, 3, 9>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 3, 9>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3)) {
- return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 4, 3>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 4, 3>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 4, 4>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 4, 4>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 6)) {
- return new PartitionedMatrixView<2, 4, 6>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 4, 6>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 8)) {
- return new PartitionedMatrixView<2, 4, 8>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 4, 8>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 9)) {
- return new PartitionedMatrixView<2, 4, 9>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
+ return new PartitionedMatrixView<2, 4, 9>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4)) {
- return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
- }
- if (options.row_block_size == 2){
- return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 3) &&
+ return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(matrix,
+ options.elimination_groups[0]);
+ }
+ if (options.row_block_size == 2) {
+ return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 3) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- return new PartitionedMatrixView<3, 3, 3>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
+ return new PartitionedMatrixView<3, 3, 3>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 2)) {
- return new PartitionedMatrixView<4, 4, 2>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
+ return new PartitionedMatrixView<4, 4, 2>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- return new PartitionedMatrixView<4, 4, 3>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
+ return new PartitionedMatrixView<4, 4, 3>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- return new PartitionedMatrixView<4, 4, 4>(matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
+ return new PartitionedMatrixView<4, 4, 4>(matrix,
+ options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4)) {
- return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
- }
+ return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(matrix,
+ options.elimination_groups[0]);
+ }
#endif
VLOG(1) << "Template specializations not found for <"
- << options.row_block_size << ","
- << options.e_block_size << ","
+ << options.row_block_size << "," << options.e_block_size << ","
<< options.f_block_size << ">";
- return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
+ return new PartitionedMatrixView<Eigen::Dynamic,
+ Eigen::Dynamic,
+ Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
};
} // namespace internal
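The factory above dispatches on the (row_block_size, e_block_size, f_block_size) triple so that the block shapes that commonly arise (for example in bundle adjustment) get template instantiations with compile-time sizes, while anything else falls back to the fully dynamic instantiation. A minimal sketch of that dispatch pattern follows; BlockOpBase, BlockOp and MakeBlockOp are hypothetical names used only for illustration, not Ceres types.

#include <Eigen/Core>

struct BlockOpBase {
  virtual ~BlockOpBase() {}
};

// Fixed template sizes let small fixed-size kernels be unrolled at compile
// time; Eigen::Dynamic keeps the loop bounds as runtime values.
template <int kRow = Eigen::Dynamic,
          int kE = Eigen::Dynamic,
          int kF = Eigen::Dynamic>
struct BlockOp : BlockOpBase {};

BlockOpBase* MakeBlockOp(int row, int e, int f) {
  if (row == 2 && e == 4 && f == 4) return new BlockOp<2, 4, 4>();
  if (row == 2 && e == 4) return new BlockOp<2, 4, Eigen::Dynamic>();
  // No matching specialization: fall back to the fully dynamic type,
  // which is always correct but misses the unrolled kernels.
  return new BlockOp<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>();
}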
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view.h b/extern/ceres/internal/ceres/partitioned_matrix_view.h
index 3853ea10dd6..9f204ee1fdd 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view.h
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view.h
@@ -42,6 +42,7 @@
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
#include "ceres/small_blas.h"
#include "glog/logging.h"
@@ -59,7 +60,7 @@ namespace internal {
// block structure of the matrix does not satisfy the requirements of
// the Schur complement solver it will result in unpredictable and
// wrong output.
-class PartitionedMatrixViewBase {
+class CERES_EXPORT_INTERNAL PartitionedMatrixViewBase {
public:
virtual ~PartitionedMatrixViewBase() {}
@@ -98,12 +99,14 @@ class PartitionedMatrixViewBase {
virtual void UpdateBlockDiagonalFtF(
BlockSparseMatrix* block_diagonal) const = 0;
+ // clang-format off
virtual int num_col_blocks_e() const = 0;
virtual int num_col_blocks_f() const = 0;
virtual int num_cols_e() const = 0;
virtual int num_cols_f() const = 0;
virtual int num_rows() const = 0;
virtual int num_cols() const = 0;
+ // clang-format on
static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options,
const BlockSparseMatrix& matrix);
@@ -111,7 +114,7 @@ class PartitionedMatrixViewBase {
template <int kRowBlockSize = Eigen::Dynamic,
int kEBlockSize = Eigen::Dynamic,
- int kFBlockSize = Eigen::Dynamic >
+ int kFBlockSize = Eigen::Dynamic>
class PartitionedMatrixView : public PartitionedMatrixViewBase {
public:
// matrix = [E F], where the matrix E contains the first
@@ -127,12 +130,14 @@ class PartitionedMatrixView : public PartitionedMatrixViewBase {
BlockSparseMatrix* CreateBlockDiagonalFtF() const final;
void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const final;
void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const final;
+ // clang-format off
int num_col_blocks_e() const final { return num_col_blocks_e_; }
int num_col_blocks_f() const final { return num_col_blocks_f_; }
int num_cols_e() const final { return num_cols_e_; }
int num_cols_f() const final { return num_cols_f_; }
int num_rows() const final { return matrix_.num_rows(); }
int num_cols() const final { return matrix_.num_cols(); }
+ // clang-format on
private:
BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
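The header side of this change adds CERES_EXPORT_INTERNAL to PartitionedMatrixViewBase and pulls in ceres/internal/port.h, where that macro is defined, so the class remains usable from other Ceres translation units when the library is built with hidden default symbol visibility. As a rough illustration only, such an internal-export macro commonly reduces to a visibility attribute; the actual definition in port.h may differ.

// Illustration of the general pattern, with a hypothetical macro name.
#if defined(__GNUC__) || defined(__clang__)
#define MY_EXPORT_INTERNAL __attribute__((visibility("default")))
#else
#define MY_EXPORT_INTERNAL
#endif

class MY_EXPORT_INTERNAL Widget {};  // hypothetical class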
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h b/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
index f3f548c7a80..0b6a57fb9f2 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
@@ -28,14 +28,14 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include "ceres/partitioned_matrix_view.h"
-
#include <algorithm>
#include <cstring>
#include <vector>
+
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
+#include "ceres/partitioned_matrix_view.h"
#include "ceres/small_blas.h"
#include "glog/logging.h"
@@ -44,11 +44,8 @@ namespace internal {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-PartitionedMatrixView(
- const BlockSparseMatrix& matrix,
- int num_col_blocks_e)
- : matrix_(matrix),
- num_col_blocks_e_(num_col_blocks_e) {
+ PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e)
+ : matrix_(matrix), num_col_blocks_e_(num_col_blocks_e) {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
CHECK(bs != nullptr);
@@ -85,8 +82,7 @@ PartitionedMatrixView(
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-~PartitionedMatrixView() {
-}
+ ~PartitionedMatrixView() {}
// The next four methods don't seem to be particularly cache
// friendly. This is an artifact of how the BlockStructure of the
@@ -94,9 +90,8 @@ PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
// multithreading as well as improved data layout.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-RightMultiplyE(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ RightMultiplyE(const double* x, double* y) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
// Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -109,17 +104,18 @@ RightMultiplyE(const double* x, double* y) const {
const int col_block_id = cell.block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + cell.position, row_block_size, col_block_size,
x + col_block_pos,
y + row_block_pos);
+ // clang-format on
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-RightMultiplyF(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ RightMultiplyF(const double* x, double* y) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
// Iterate over row blocks, and if the row block is in E, then
@@ -136,10 +132,12 @@ RightMultiplyF(const double* x, double* y) const {
const int col_block_id = cells[c].block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + cells[c].position, row_block_size, col_block_size,
x + col_block_pos - num_cols_e_,
y + row_block_pos);
+ // clang-format on
}
}
@@ -151,18 +149,19 @@ RightMultiplyF(const double* x, double* y) const {
const int col_block_id = cells[c].block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + cells[c].position, row_block_size, col_block_size,
x + col_block_pos - num_cols_e_,
y + row_block_pos);
+ // clang-format on
}
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-LeftMultiplyE(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ LeftMultiplyE(const double* x, double* y) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
// Iterate over the first num_row_blocks_e_ row blocks, and multiply
@@ -175,17 +174,18 @@ LeftMultiplyE(const double* x, double* y) const {
const int col_block_id = cell.block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + cell.position, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos);
+ // clang-format on
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-LeftMultiplyF(const double* x, double* y) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ LeftMultiplyF(const double* x, double* y) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
// Iterate over row blocks, and if the row block is in E, then
@@ -202,10 +202,12 @@ LeftMultiplyF(const double* x, double* y) const {
const int col_block_id = cells[c].block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + cells[c].position, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos - num_cols_e_);
+ // clang-format on
}
}
@@ -217,10 +219,12 @@ LeftMultiplyF(const double* x, double* y) const {
const int col_block_id = cells[c].block_id;
const int col_block_pos = bs->cols[col_block_id].position;
const int col_block_size = bs->cols[col_block_id].size;
+ // clang-format off
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + cells[c].position, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos - num_cols_e_);
+ // clang-format on
}
}
}
@@ -233,7 +237,8 @@ LeftMultiplyF(const double* x, double* y) const {
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
BlockSparseMatrix*
PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
+ CreateBlockDiagonalMatrixLayout(int start_col_block,
+ int end_col_block) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
CompressedRowBlockStructure* block_diagonal_structure =
new CompressedRowBlockStructure;
@@ -269,9 +274,10 @@ CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const {
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix*
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalEtE() const {
+BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
+ kEBlockSize,
+ kFBlockSize>::CreateBlockDiagonalEtE()
+ const {
BlockSparseMatrix* block_diagonal =
CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_);
UpdateBlockDiagonalEtE(block_diagonal);
@@ -279,12 +285,12 @@ CreateBlockDiagonalEtE() const {
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-BlockSparseMatrix*
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-CreateBlockDiagonalFtF() const {
- BlockSparseMatrix* block_diagonal =
- CreateBlockDiagonalMatrixLayout(
- num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
+BlockSparseMatrix* PartitionedMatrixView<kRowBlockSize,
+ kEBlockSize,
+ kFBlockSize>::CreateBlockDiagonalFtF()
+ const {
+ BlockSparseMatrix* block_diagonal = CreateBlockDiagonalMatrixLayout(
+ num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_);
UpdateBlockDiagonalFtF(block_diagonal);
return block_diagonal;
}
@@ -295,17 +301,15 @@ CreateBlockDiagonalFtF() const {
// block_diagonal = block_diagonal(E'E)
//
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateBlockDiagonalEtE(
- BlockSparseMatrix* block_diagonal) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
const CompressedRowBlockStructure* block_diagonal_structure =
block_diagonal->block_structure();
block_diagonal->SetZero();
const double* values = matrix_.values();
- for (int r = 0; r < num_row_blocks_e_ ; ++r) {
+ for (int r = 0; r < num_row_blocks_e_; ++r) {
const Cell& cell = bs->rows[r].cells[0];
const int row_block_size = bs->rows[r].block.size;
const int block_id = cell.block_id;
@@ -313,12 +317,14 @@ UpdateBlockDiagonalEtE(
const int cell_position =
block_diagonal_structure->rows[block_id].cells[0].position;
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + cell.position, row_block_size, col_block_size,
values + cell.position, row_block_size, col_block_size,
block_diagonal->mutable_values() + cell_position,
0, 0, col_block_size, col_block_size);
+ // clang-format on
}
}
@@ -328,9 +334,8 @@ UpdateBlockDiagonalEtE(
// block_diagonal = block_diagonal(F'F)
//
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
+void PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
const CompressedRowBlockStructure* block_diagonal_structure =
block_diagonal->block_structure();
@@ -347,12 +352,14 @@ UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
const int cell_position =
block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + cells[c].position, row_block_size, col_block_size,
values + cells[c].position, row_block_size, col_block_size,
block_diagonal->mutable_values() + cell_position,
0, 0, col_block_size, col_block_size);
+ // clang-format on
}
}
@@ -366,12 +373,14 @@ UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const {
const int cell_position =
block_diagonal_structure->rows[diagonal_block_id].cells[0].position;
+ // clang-format off
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + cells[c].position, row_block_size, col_block_size,
values + cells[c].position, row_block_size, col_block_size,
block_diagonal->mutable_values() + cell_position,
0, 0, col_block_size, col_block_size);
+ // clang-format on
}
}
}
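Behaviour is unchanged in this file: the multiply routines still walk the row blocks and accumulate y += E x or y += F x one cell at a time, and the new clang-format off/on guards only protect the manually aligned argument lists of the small_blas kernels. Below is a minimal sketch of the same blockwise accumulation written with Eigen maps; CellRef and BlockwiseMultiply are hypothetical names, and Ceres uses its fixed-size kernels from small_blas.h rather than Eigen::Map here.

#include <Eigen/Core>
#include <vector>

struct CellRef {
  const double* values;  // row-major block of size rows x cols
  int rows, cols;
  int row_pos, col_pos;  // offsets into y and x respectively
};

void BlockwiseMultiply(const std::vector<CellRef>& cells,
                       const double* x, double* y) {
  for (const CellRef& c : cells) {
    Eigen::Map<const Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic,
                                   Eigen::RowMajor>>
        A(c.values, c.rows, c.cols);
    Eigen::Map<const Eigen::VectorXd> xs(x + c.col_pos, c.cols);
    Eigen::Map<Eigen::VectorXd> ys(y + c.row_pos, c.rows);
    ys.noalias() += A * xs;  // accumulate this cell's contribution
  }
}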
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view_template.py b/extern/ceres/internal/ceres/partitioned_matrix_view_template.py
index 7894523cdea..05a25bf8335 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view_template.py
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view_template.py
@@ -89,14 +89,14 @@ HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
"""
DYNAMIC_FILE = """
-
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-template class PartitionedMatrixView<%s, %s, %s>;
+template class PartitionedMatrixView<%s,
+ %s,
+ %s>;
} // namespace internal
} // namespace ceres
@@ -109,7 +109,6 @@ SPECIALIZATION_FILE = """
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/partitioned_matrix_view_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
@@ -125,26 +124,26 @@ template class PartitionedMatrixView<%s, %s, %s>;
FACTORY_FILE_HEADER = """
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-PartitionedMatrixViewBase*
-PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
- const BlockSparseMatrix& matrix) {
+PartitionedMatrixViewBase* PartitionedMatrixViewBase::Create(
+ const LinearSolver::Options& options, const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
-FACTORY = """ return new PartitionedMatrixView<%s, %s, %s>(matrix, options.elimination_groups[0]);"""
+FACTORY = """ return new PartitionedMatrixView<%s, %s, %s>(matrix,
+ options.elimination_groups[0]);"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
- << options.row_block_size << ","
- << options.e_block_size << ","
+ << options.row_block_size << "," << options.e_block_size << ","
<< options.f_block_size << ">";
- return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
+ return new PartitionedMatrixView<Eigen::Dynamic,
+ Eigen::Dynamic,
+ Eigen::Dynamic>(
+ matrix, options.elimination_groups[0]);
};
} // namespace internal
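The Python template only changes the formatting it emits: FACTORY is a %-format string, so for the (2, 4, 4) specialization the generator now produces roughly the two-line return statement shown below (matching the factory hunk earlier in this patch) instead of a single long line.

  return new PartitionedMatrixView<2, 4, 4>(matrix,
                                            options.elimination_groups[0]);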
diff --git a/extern/ceres/internal/ceres/polynomial.h b/extern/ceres/internal/ceres/polynomial.h
index 3e09bae3d0f..20071f2c693 100644
--- a/extern/ceres/internal/ceres/polynomial.h
+++ b/extern/ceres/internal/ceres/polynomial.h
@@ -33,6 +33,7 @@
#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
#include <vector>
+
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
@@ -65,13 +66,13 @@ inline double EvaluatePolynomial(const Vector& polynomial, double x) {
// On failure, a more detailed message will be written to LOG(ERROR).
// If real is not NULL, the real parts of the roots will be returned in it.
// Likewise, if imaginary is not NULL, imaginary parts will be returned in it.
-bool FindPolynomialRoots(const Vector& polynomial,
- Vector* real,
- Vector* imaginary);
+CERES_EXPORT_INTERNAL bool FindPolynomialRoots(const Vector& polynomial,
+ Vector* real,
+ Vector* imaginary);
// Return the derivative of the given polynomial. It is assumed that
// the input polynomial is at least of degree zero.
-Vector DifferentiatePolynomial(const Vector& polynomial);
+CERES_EXPORT_INTERNAL Vector DifferentiatePolynomial(const Vector& polynomial);
// Find the minimum value of the polynomial in the interval [x_min,
// x_max]. The minimum is obtained by computing all the roots of the
@@ -79,11 +80,11 @@ Vector DifferentiatePolynomial(const Vector& polynomial);
// interval [x_min, x_max] are considered as well as the end points
// x_min and x_max. Since polynomials are differentiable functions,
// this ensures that the true minimum is found.
-void MinimizePolynomial(const Vector& polynomial,
- double x_min,
- double x_max,
- double* optimal_x,
- double* optimal_value);
+CERES_EXPORT_INTERNAL void MinimizePolynomial(const Vector& polynomial,
+ double x_min,
+ double x_max,
+ double* optimal_x,
+ double* optimal_value);
// Given a set of function value and/or gradient samples, find a
// polynomial whose value and gradients are exactly equal to the ones
@@ -96,7 +97,8 @@ void MinimizePolynomial(const Vector& polynomial,
// Of course its possible to sample a polynomial any number of times,
// in which case, generally speaking the spurious higher order
// coefficients will be zero.
-Vector FindInterpolatingPolynomial(const std::vector<FunctionSample>& samples);
+CERES_EXPORT_INTERNAL Vector
+FindInterpolatingPolynomial(const std::vector<FunctionSample>& samples);
// Interpolate the function described by samples with a polynomial,
// and minimize it on the interval [x_min, x_max]. Depending on the
@@ -104,11 +106,12 @@ Vector FindInterpolatingPolynomial(const std::vector<FunctionSample>& samples);
// finding algorithms may fail due to numerical difficulties. But the
// function is guaranteed to return its best guess of an answer, by
// considering the samples and the end points as possible solutions.
-void MinimizeInterpolatingPolynomial(const std::vector<FunctionSample>& samples,
- double x_min,
- double x_max,
- double* optimal_x,
- double* optimal_value);
+CERES_EXPORT_INTERNAL void MinimizeInterpolatingPolynomial(
+ const std::vector<FunctionSample>& samples,
+ double x_min,
+ double x_max,
+ double* optimal_x,
+ double* optimal_value);
} // namespace internal
} // namespace ceres
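Only linkage changes here: the free functions gain CERES_EXPORT_INTERNAL and keep their signatures. For context, the EvaluatePolynomial helper referenced in the hunk header is a plain Horner evaluation; a minimal sketch follows, assuming the convention that polynomial(0) holds the leading (highest-degree) coefficient.

#include <Eigen/Core>

// Horner's rule: p(x) = ((c0 * x + c1) * x + c2) * x + ... with c0 the
// leading coefficient. HornerEvaluate is a hypothetical stand-in name.
inline double HornerEvaluate(const Eigen::VectorXd& polynomial, double x) {
  double v = 0.0;
  for (int i = 0; i < polynomial.size(); ++i) {
    v = v * x + polynomial(i);
  }
  return v;
}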
diff --git a/extern/ceres/internal/ceres/preconditioner.cc b/extern/ceres/internal/ceres/preconditioner.cc
index f98374e0cf8..69ba04db8f5 100644
--- a/extern/ceres/internal/ceres/preconditioner.cc
+++ b/extern/ceres/internal/ceres/preconditioner.cc
@@ -29,13 +29,13 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include "ceres/preconditioner.h"
+
#include "glog/logging.h"
namespace ceres {
namespace internal {
-Preconditioner::~Preconditioner() {
-}
+Preconditioner::~Preconditioner() {}
PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
PreconditionerType preconditioner_type) {
@@ -53,8 +53,7 @@ SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper(
CHECK(matrix != nullptr);
}
-SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {
-}
+SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {}
bool SparseMatrixPreconditionerWrapper::UpdateImpl(const SparseMatrix& A,
const double* D) {
@@ -66,7 +65,7 @@ void SparseMatrixPreconditionerWrapper::RightMultiply(const double* x,
matrix_->RightMultiply(x, y);
}
-int SparseMatrixPreconditionerWrapper::num_rows() const {
+int SparseMatrixPreconditionerWrapper::num_rows() const {
return matrix_->num_rows();
}
diff --git a/extern/ceres/internal/ceres/preconditioner.h b/extern/ceres/internal/ceres/preconditioner.h
index 3e46ed83db2..dd843b01ce3 100644
--- a/extern/ceres/internal/ceres/preconditioner.h
+++ b/extern/ceres/internal/ceres/preconditioner.h
@@ -32,9 +32,11 @@
#define CERES_INTERNAL_PRECONDITIONER_H_
#include <vector>
+
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/context_impl.h"
+#include "ceres/internal/port.h"
#include "ceres/linear_operator.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -45,12 +47,13 @@ namespace internal {
class BlockSparseMatrix;
class SparseMatrix;
-class Preconditioner : public LinearOperator {
+class CERES_EXPORT_INTERNAL Preconditioner : public LinearOperator {
public:
struct Options {
PreconditionerType type = JACOBI;
VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = SUITE_SPARSE;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+ SUITE_SPARSE;
// When using the subset preconditioner, all row blocks starting
// from this row block are used to construct the preconditioner.
@@ -137,9 +140,7 @@ class Preconditioner : public LinearOperator {
}
int num_rows() const override = 0;
- int num_cols() const override {
- return num_rows();
- }
+ int num_cols() const override { return num_rows(); }
};
// This templated subclass of Preconditioner serves as a base class for
@@ -159,9 +160,11 @@ class TypedPreconditioner : public Preconditioner {
// Preconditioners that depend on access to the low level structure
// of a SparseMatrix.
-typedef TypedPreconditioner<SparseMatrix> SparseMatrixPreconditioner; // NOLINT
-typedef TypedPreconditioner<BlockSparseMatrix> BlockSparseMatrixPreconditioner; // NOLINT
-typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner; // NOLINT
+// clang-format off
+typedef TypedPreconditioner<SparseMatrix> SparseMatrixPreconditioner;
+typedef TypedPreconditioner<BlockSparseMatrix> BlockSparseMatrixPreconditioner;
+typedef TypedPreconditioner<CompressedRowSparseMatrix> CompressedRowSparseMatrixPreconditioner;
+// clang-format on
// Wrap a SparseMatrix object as a preconditioner.
class SparseMatrixPreconditionerWrapper : public SparseMatrixPreconditioner {
diff --git a/extern/ceres/internal/ceres/preprocessor.cc b/extern/ceres/internal/ceres/preprocessor.cc
index 02219147d75..6a67d385645 100644
--- a/extern/ceres/internal/ceres/preprocessor.cc
+++ b/extern/ceres/internal/ceres/preprocessor.cc
@@ -28,11 +28,12 @@
//
// Author: sameragarwal@google.com (Sameer Agarwal)
+#include "ceres/preprocessor.h"
+
#include "ceres/callbacks.h"
#include "ceres/gradient_checking_cost_function.h"
#include "ceres/line_search_preprocessor.h"
#include "ceres/parallel_for.h"
-#include "ceres/preprocessor.h"
#include "ceres/problem_impl.h"
#include "ceres/solver.h"
#include "ceres/trust_region_preprocessor.h"
@@ -53,17 +54,15 @@ Preprocessor* Preprocessor::Create(MinimizerType minimizer_type) {
return NULL;
}
-Preprocessor::~Preprocessor() {
-}
+Preprocessor::~Preprocessor() {}
void ChangeNumThreadsIfNeeded(Solver::Options* options) {
const int num_threads_available = MaxNumThreadsAvailable();
if (options->num_threads > num_threads_available) {
- LOG(WARNING)
- << "Specified options.num_threads: " << options->num_threads
- << " exceeds maximum available from the threading model Ceres "
- << "was compiled with: " << num_threads_available
- << ". Bounding to maximum number available.";
+ LOG(WARNING) << "Specified options.num_threads: " << options->num_threads
+ << " exceeds maximum available from the threading model Ceres "
+ << "was compiled with: " << num_threads_available
+ << ". Bounding to maximum number available.";
options->num_threads = num_threads_available;
}
}
@@ -83,16 +82,15 @@ void SetupCommonMinimizerOptions(PreprocessedProblem* pp) {
minimizer_options.evaluator = pp->evaluator;
if (options.logging_type != SILENT) {
- pp->logging_callback.reset(
- new LoggingCallback(options.minimizer_type,
- options.minimizer_progress_to_stdout));
+ pp->logging_callback.reset(new LoggingCallback(
+ options.minimizer_type, options.minimizer_progress_to_stdout));
minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
pp->logging_callback.get());
}
if (options.update_state_every_iteration) {
pp->state_updating_callback.reset(
- new StateUpdatingCallback(program, reduced_parameters));
+ new StateUpdatingCallback(program, reduced_parameters));
// This must get pushed to the front of the callbacks so that it
// is run before any of the user callbacks.
minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
diff --git a/extern/ceres/internal/ceres/preprocessor.h b/extern/ceres/internal/ceres/preprocessor.h
index 99bd6c0c5dd..ec56c6e430a 100644
--- a/extern/ceres/internal/ceres/preprocessor.h
+++ b/extern/ceres/internal/ceres/preprocessor.h
@@ -67,7 +67,7 @@ struct PreprocessedProblem;
//
// The output of the Preprocessor is stored in a PreprocessedProblem
// object.
-class Preprocessor {
+class CERES_EXPORT_INTERNAL Preprocessor {
public:
// Factory.
static Preprocessor* Create(MinimizerType minimizer_type);
@@ -80,9 +80,7 @@ class Preprocessor {
// A PreprocessedProblem is the result of running the Preprocessor on
// a Problem and Solver::Options object.
struct PreprocessedProblem {
- PreprocessedProblem()
- : fixed_cost(0.0) {
- }
+ PreprocessedProblem() : fixed_cost(0.0) {}
std::string error;
Solver::Options options;
diff --git a/extern/ceres/internal/ceres/problem.cc b/extern/ceres/internal/ceres/problem.cc
index 767fe977296..f3ffd546ef7 100644
--- a/extern/ceres/internal/ceres/problem.cc
+++ b/extern/ceres/internal/ceres/problem.cc
@@ -139,8 +139,26 @@ bool Problem::EvaluateResidualBlock(ResidualBlockId residual_block_id,
double* cost,
double* residuals,
double** jacobians) const {
- return impl_->EvaluateResidualBlock(
- residual_block_id, apply_loss_function, cost, residuals, jacobians);
+ return impl_->EvaluateResidualBlock(residual_block_id,
+ apply_loss_function,
+ /* new_point = */ true,
+ cost,
+ residuals,
+ jacobians);
+}
+
+bool Problem::EvaluateResidualBlockAssumingParametersUnchanged(
+ ResidualBlockId residual_block_id,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const {
+ return impl_->EvaluateResidualBlock(residual_block_id,
+ apply_loss_function,
+ /* new_point = */ false,
+ cost,
+ residuals,
+ jacobians);
}
int Problem::NumParameterBlocks() const { return impl_->NumParameterBlocks(); }
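The public API change in this file: Problem::EvaluateResidualBlock now forwards new_point = true to the implementation, and the new EvaluateResidualBlockAssumingParametersUnchanged forwards new_point = false, which lets a Problem built with an EvaluationCallback avoid redoing shared per-point work when the parameter values have not moved. A minimal usage sketch, assuming a Problem, a ResidualBlockId and a suitably sized residual buffer created elsewhere.

#include <ceres/problem.h>

// Hypothetical helper: evaluate once at a new point, then again at the
// same point without signalling a fresh evaluation point to any callback.
bool EvaluateTwice(ceres::Problem& problem, ceres::ResidualBlockId id,
                   double* cost, double* residuals) {
  if (!problem.EvaluateResidualBlock(id, /*apply_loss_function=*/true,
                                     cost, residuals, /*jacobians=*/nullptr)) {
    return false;
  }
  // Same parameter values: new_point = false is forwarded underneath.
  return problem.EvaluateResidualBlockAssumingParametersUnchanged(
      id, /*apply_loss_function=*/false, cost, residuals, nullptr);
}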
diff --git a/extern/ceres/internal/ceres/problem_impl.cc b/extern/ceres/internal/ceres/problem_impl.cc
index 6cc4d336c6a..3155bc3569e 100644
--- a/extern/ceres/internal/ceres/problem_impl.cc
+++ b/extern/ceres/internal/ceres/problem_impl.cc
@@ -601,7 +601,6 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
CRSMatrix* jacobian) {
if (cost == nullptr && residuals == nullptr && gradient == nullptr &&
jacobian == nullptr) {
- LOG(INFO) << "Nothing to do.";
return true;
}
@@ -686,10 +685,12 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
// type of linear solver being used.
evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#ifdef CERES_NO_THREADS
- LOG_IF(WARNING, evaluate_options.num_threads > 1)
- << "No threading support is compiled into this binary; "
- << "only evaluate_options.num_threads = 1 is supported. Switching "
- << "to single threaded mode.";
+ if (evaluate_options.num_threads > 1) {
+ LOG(WARNING)
+ << "No threading support is compiled into this binary; "
+ << "only evaluate_options.num_threads = 1 is supported. Switching "
+ << "to single threaded mode.";
+ }
evaluator_options.num_threads = 1;
#else
evaluator_options.num_threads = evaluate_options.num_threads;
@@ -768,9 +769,15 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
bool ProblemImpl::EvaluateResidualBlock(ResidualBlock* residual_block,
bool apply_loss_function,
+ bool new_point,
double* cost,
double* residuals,
double** jacobians) const {
+ auto evaluation_callback = program_->mutable_evaluation_callback();
+ if (evaluation_callback) {
+ evaluation_callback->PrepareForEvaluation(jacobians != nullptr, new_point);
+ }
+
ParameterBlock* const* parameter_blocks = residual_block->parameter_blocks();
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int i = 0; i < num_parameter_blocks; ++i) {
@@ -789,7 +796,8 @@ bool ProblemImpl::EvaluateResidualBlock(ResidualBlock* residual_block,
}
double dummy_cost = 0.0;
- FixedArray<double> scratch(residual_block->NumScratchDoublesForEvaluate());
+ FixedArray<double, 32> scratch(
+ residual_block->NumScratchDoublesForEvaluate());
return residual_block->Evaluate(apply_loss_function,
cost ? cost : &dummy_cost,
residuals,
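Two implementation details change above: per-block evaluation now notifies the Program's EvaluationCallback (when one is installed) with the new_point flag before reading the parameter blocks, and the scratch FixedArray gains a 32-double inline buffer so small residual blocks avoid a heap allocation. Below is a minimal sketch of such a callback, assuming the two-argument PrepareForEvaluation signature used in this hunk.

#include <ceres/evaluation_callback.h>

// Hypothetical callback: recompute shared state only when the parameters
// actually changed since the previous evaluation.
class SharedStateCallback : public ceres::EvaluationCallback {
 public:
  void PrepareForEvaluation(bool evaluate_jacobians,
                            bool new_evaluation_point) override {
    if (new_evaluation_point) {
      RecomputeSharedState();  // hypothetical expensive shared step
    }
    (void)evaluate_jacobians;  // jacobian-only work could be gated here too
  }

 private:
  void RecomputeSharedState() { /* ... */ }
};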
diff --git a/extern/ceres/internal/ceres/problem_impl.h b/extern/ceres/internal/ceres/problem_impl.h
index 8bbe7238d27..9abff3f19ae 100644
--- a/extern/ceres/internal/ceres/problem_impl.h
+++ b/extern/ceres/internal/ceres/problem_impl.h
@@ -63,7 +63,7 @@ namespace internal {
class Program;
class ResidualBlock;
-class ProblemImpl {
+class CERES_EXPORT_INTERNAL ProblemImpl {
public:
typedef std::map<double*, ParameterBlock*> ParameterMap;
typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
@@ -124,6 +124,7 @@ class ProblemImpl {
bool EvaluateResidualBlock(ResidualBlock* residual_block,
bool apply_loss_function,
+ bool new_point,
double* cost,
double* residuals,
double** jacobians) const;
diff --git a/extern/ceres/internal/ceres/program.h b/extern/ceres/internal/ceres/program.h
index 797129980e3..ca29d316284 100644
--- a/extern/ceres/internal/ceres/program.h
+++ b/extern/ceres/internal/ceres/program.h
@@ -36,8 +36,8 @@
#include <string>
#include <vector>
-#include "ceres/internal/port.h"
#include "ceres/evaluation_callback.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -57,7 +57,7 @@ class TripletSparseMatrix;
// another; for example, the first stage of solving involves stripping all
// constant parameters and residuals. This is in contrast with Problem, which is
// not built for transformation.
-class Program {
+class CERES_EXPORT_INTERNAL Program {
public:
Program();
explicit Program(const Program& program);
diff --git a/extern/ceres/internal/ceres/program_evaluator.h b/extern/ceres/internal/ceres/program_evaluator.h
index 97ee590fbab..36c9c64baf6 100644
--- a/extern/ceres/internal/ceres/program_evaluator.h
+++ b/extern/ceres/internal/ceres/program_evaluator.h
@@ -80,7 +80,9 @@
#define CERES_INTERNAL_PROGRAM_EVALUATOR_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include <atomic>
#include <map>
diff --git a/extern/ceres/internal/ceres/random.h b/extern/ceres/internal/ceres/random.h
index 87d9d77d90d..6b280f9ee64 100644
--- a/extern/ceres/internal/ceres/random.h
+++ b/extern/ceres/internal/ceres/random.h
@@ -34,13 +34,12 @@
#include <cmath>
#include <cstdlib>
+
#include "ceres/internal/port.h"
namespace ceres {
-inline void SetRandomState(int state) {
- srand(state);
-}
+inline void SetRandomState(int state) { srand(state); }
inline int Uniform(int n) {
if (n) {
@@ -63,7 +62,7 @@ inline double RandNormal() {
x1 = 2.0 * RandDouble() - 1.0;
x2 = 2.0 * RandDouble() - 1.0;
w = x1 * x1 + x2 * x2;
- } while ( w >= 1.0 || w == 0.0 );
+ } while (w >= 1.0 || w == 0.0);
w = sqrt((-2.0 * log(w)) / w);
return x1 * w;
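The loop above, only rewhitespaced here, is the Marsaglia polar method: draw x1 and x2 uniformly in [-1, 1], reject until w = x1^2 + x2^2 falls in (0, 1), then x1 * sqrt(-2 ln(w) / w) is a standard normal sample; x2 times the same factor would be an independent second sample that this helper discards. A minimal sketch that keeps both samples and uses <random> instead of rand(); TwoNormals is a hypothetical helper, not part of Ceres.

#include <cmath>
#include <random>
#include <utility>

std::pair<double, double> TwoNormals(std::mt19937& rng) {
  std::uniform_real_distribution<double> uniform(-1.0, 1.0);
  double x1, x2, w;
  do {
    x1 = uniform(rng);
    x2 = uniform(rng);
    w = x1 * x1 + x2 * x2;
  } while (w >= 1.0 || w == 0.0);
  const double factor = std::sqrt(-2.0 * std::log(w) / w);
  return {x1 * factor, x2 * factor};
}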
diff --git a/extern/ceres/internal/ceres/reorder_program.cc b/extern/ceres/internal/ceres/reorder_program.cc
index aa6032a9e9e..5d802365f33 100644
--- a/extern/ceres/internal/ceres/reorder_program.cc
+++ b/extern/ceres/internal/ceres/reorder_program.cc
@@ -35,6 +35,7 @@
#include <numeric>
#include <vector>
+#include "Eigen/SparseCore"
#include "ceres/cxsparse.h"
#include "ceres/internal/port.h"
#include "ceres/ordered_groups.h"
@@ -47,7 +48,6 @@
#include "ceres/suitesparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
-#include "Eigen/SparseCore"
#ifdef CERES_USE_EIGEN_SPARSE
#include "Eigen/OrderingMethods"
@@ -78,8 +78,8 @@ static int MinParameterBlock(const ResidualBlock* residual_block,
CHECK_NE(parameter_block->index(), -1)
<< "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
<< "This is a Ceres bug; please contact the developers!";
- min_parameter_block_position = std::min(parameter_block->index(),
- min_parameter_block_position);
+ min_parameter_block_position =
+ std::min(parameter_block->index(), min_parameter_block_position);
}
}
return min_parameter_block_position;
@@ -117,9 +117,8 @@ void OrderingForSparseNormalCholeskyUsingSuiteSparse(
<< "Please report this error to the developers.";
#else
SuiteSparse ss;
- cholmod_sparse* block_jacobian_transpose =
- ss.CreateSparseMatrix(
- const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+ cholmod_sparse* block_jacobian_transpose = ss.CreateSparseMatrix(
+ const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
// No CAMD or the user did not supply a useful ordering, then just
// use regular AMD.
@@ -129,18 +128,16 @@ void OrderingForSparseNormalCholeskyUsingSuiteSparse(
} else {
vector<int> constraints;
for (int i = 0; i < parameter_blocks.size(); ++i) {
- constraints.push_back(
- parameter_block_ordering.GroupId(
- parameter_blocks[i]->mutable_user_state()));
+ constraints.push_back(parameter_block_ordering.GroupId(
+ parameter_blocks[i]->mutable_user_state()));
}
// Renumber the entries of constraints to be contiguous integers
// as CAMD requires that the group ids be in the range [0,
// parameter_blocks.size() - 1].
MapValuesToContiguousRange(constraints.size(), &constraints[0]);
- ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
- &constraints[0],
- ordering);
+ ss.ConstrainedApproximateMinimumDegreeOrdering(
+ block_jacobian_transpose, &constraints[0], ordering);
}
VLOG(2) << "Block ordering stats: "
@@ -153,20 +150,18 @@ void OrderingForSparseNormalCholeskyUsingSuiteSparse(
}
void OrderingForSparseNormalCholeskyUsingCXSparse(
- const TripletSparseMatrix& tsm_block_jacobian_transpose,
- int* ordering) {
+ const TripletSparseMatrix& tsm_block_jacobian_transpose, int* ordering) {
#ifdef CERES_NO_CXSPARSE
LOG(FATAL) << "Congratulations, you found a Ceres bug! "
<< "Please report this error to the developers.";
-#else // CERES_NO_CXSPARSE
+#else
// CXSparse works with J'J instead of J'. So compute the block
// sparsity for J'J and compute an approximate minimum degree
// ordering.
CXSparse cxsparse;
cs_di* block_jacobian_transpose;
- block_jacobian_transpose =
- cxsparse.CreateSparseMatrix(
- const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
+ block_jacobian_transpose = cxsparse.CreateSparseMatrix(
+ const_cast<TripletSparseMatrix*>(&tsm_block_jacobian_transpose));
cs_di* block_jacobian = cxsparse.TransposeMatrix(block_jacobian_transpose);
cs_di* block_hessian =
cxsparse.MatrixMatrixMultiply(block_jacobian_transpose, block_jacobian);
@@ -178,16 +173,13 @@ void OrderingForSparseNormalCholeskyUsingCXSparse(
#endif // CERES_NO_CXSPARSE
}
-
void OrderingForSparseNormalCholeskyUsingEigenSparse(
- const TripletSparseMatrix& tsm_block_jacobian_transpose,
- int* ordering) {
+ const TripletSparseMatrix& tsm_block_jacobian_transpose, int* ordering) {
#ifndef CERES_USE_EIGEN_SPARSE
- LOG(FATAL) <<
- "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
- "because Ceres was not built with support for "
- "Eigen's SimplicialLDLT decomposition. "
- "This requires enabling building with -DEIGENSPARSE=ON.";
+ LOG(FATAL) << "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+ "because Ceres was not built with support for "
+ "Eigen's SimplicialLDLT decomposition. "
+ "This requires enabling building with -DEIGENSPARSE=ON.";
#else
// This conversion from a TripletSparseMatrix to a Eigen::Triplet
@@ -218,13 +210,14 @@ bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
Program* program,
string* error) {
- const int num_parameter_blocks = program->NumParameterBlocks();
+ const int num_parameter_blocks = program->NumParameterBlocks();
if (ordering.NumElements() != num_parameter_blocks) {
- *error = StringPrintf("User specified ordering does not have the same "
- "number of parameters as the problem. The problem"
- "has %d blocks while the ordering has %d blocks.",
- num_parameter_blocks,
- ordering.NumElements());
+ *error = StringPrintf(
+ "User specified ordering does not have the same "
+ "number of parameters as the problem. The problem"
+ "has %d blocks while the ordering has %d blocks.",
+ num_parameter_blocks,
+ ordering.NumElements());
return false;
}
@@ -238,10 +231,11 @@ bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
for (double* parameter_block_ptr : group) {
auto it = parameter_map.find(parameter_block_ptr);
if (it == parameter_map.end()) {
- *error = StringPrintf("User specified ordering contains a pointer "
- "to a double that is not a parameter block in "
- "the problem. The invalid double is in group: %d",
- p.first);
+ *error = StringPrintf(
+ "User specified ordering contains a pointer "
+ "to a double that is not a parameter block in "
+ "the problem. The invalid double is in group: %d",
+ p.first);
return false;
}
parameter_blocks->push_back(it->second);
@@ -265,8 +259,8 @@ bool LexicographicallyOrderResidualBlocks(
vector<int> min_position_per_residual(residual_blocks->size());
for (int i = 0; i < residual_blocks->size(); ++i) {
ResidualBlock* residual_block = (*residual_blocks)[i];
- int position = MinParameterBlock(residual_block,
- size_of_first_elimination_group);
+ int position =
+ MinParameterBlock(residual_block, size_of_first_elimination_group);
min_position_per_residual[i] = position;
DCHECK_LE(position, size_of_first_elimination_group);
residual_blocks_per_e_block[position]++;
@@ -284,8 +278,8 @@ bool LexicographicallyOrderResidualBlocks(
<< "to the developers.";
CHECK(find(residual_blocks_per_e_block.begin(),
- residual_blocks_per_e_block.end() - 1, 0) !=
- residual_blocks_per_e_block.end())
+ residual_blocks_per_e_block.end() - 1,
+ 0) != residual_blocks_per_e_block.end())
<< "Congratulations, you found a Ceres bug! Please report this error "
<< "to the developers.";
@@ -334,8 +328,7 @@ bool LexicographicallyOrderResidualBlocks(
// Pre-order the columns corresponding to the schur complement if
// possible.
static void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
- const ParameterBlockOrdering& parameter_block_ordering,
- Program* program) {
+ const ParameterBlockOrdering& parameter_block_ordering, Program* program) {
#ifndef CERES_NO_SUITESPARSE
SuiteSparse ss;
if (!SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
@@ -347,9 +340,8 @@ static void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
*(program->mutable_parameter_blocks());
for (int i = 0; i < parameter_blocks.size(); ++i) {
- constraints.push_back(
- parameter_block_ordering.GroupId(
- parameter_blocks[i]->mutable_user_state()));
+ constraints.push_back(parameter_block_ordering.GroupId(
+ parameter_blocks[i]->mutable_user_state()));
}
// Renumber the entries of constraints to be contiguous integers as
@@ -365,9 +357,8 @@ static void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
ss.CreateSparseMatrix(tsm_block_jacobian_transpose.get());
vector<int> ordering(parameter_blocks.size(), 0);
- ss.ConstrainedApproximateMinimumDegreeOrdering(block_jacobian_transpose,
- &constraints[0],
- &ordering[0]);
+ ss.ConstrainedApproximateMinimumDegreeOrdering(
+ block_jacobian_transpose, &constraints[0], &ordering[0]);
ss.Free(block_jacobian_transpose);
const vector<ParameterBlock*> parameter_blocks_copy(parameter_blocks);
@@ -396,10 +387,7 @@ static void MaybeReorderSchurComplementColumnsUsingEigen(
// Vertically partition the jacobian in parameter blocks of type E
// and F.
const SparseMatrix E =
- block_jacobian.block(0,
- 0,
- num_rows,
- size_of_first_elimination_group);
+ block_jacobian.block(0, 0, num_rows, size_of_first_elimination_group);
const SparseMatrix F =
block_jacobian.block(0,
size_of_first_elimination_group,
@@ -482,22 +470,17 @@ bool ReorderProgramForSchurTypeLinearSolver(
// Verify that the first elimination group is an independent set.
const set<double*>& first_elimination_group =
- parameter_block_ordering
- ->group_to_elements()
- .begin()
- ->second;
+ parameter_block_ordering->group_to_elements().begin()->second;
if (!program->IsParameterBlockSetIndependent(first_elimination_group)) {
- *error =
- StringPrintf("The first elimination group in the parameter block "
- "ordering of size %zd is not an independent set",
- first_elimination_group.size());
+ *error = StringPrintf(
+ "The first elimination group in the parameter block "
+ "ordering of size %zd is not an independent set",
+ first_elimination_group.size());
return false;
}
- if (!ApplyOrdering(parameter_map,
- *parameter_block_ordering,
- program,
- error)) {
+ if (!ApplyOrdering(
+ parameter_map, *parameter_block_ordering, program, error)) {
return false;
}
}
@@ -510,13 +493,10 @@ bool ReorderProgramForSchurTypeLinearSolver(
if (linear_solver_type == SPARSE_SCHUR) {
if (sparse_linear_algebra_library_type == SUITE_SPARSE) {
MaybeReorderSchurComplementColumnsUsingSuiteSparse(
- *parameter_block_ordering,
- program);
+ *parameter_block_ordering, program);
} else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
MaybeReorderSchurComplementColumnsUsingEigen(
- size_of_first_elimination_group,
- parameter_map,
- program);
+ size_of_first_elimination_group, parameter_map, program);
}
}
@@ -556,9 +536,8 @@ bool ReorderProgramForSparseCholesky(
parameter_block_ordering,
&ordering[0]);
} else if (sparse_linear_algebra_library_type == CX_SPARSE) {
- OrderingForSparseNormalCholeskyUsingCXSparse(
- *tsm_block_jacobian_transpose,
- &ordering[0]);
+ OrderingForSparseNormalCholeskyUsingCXSparse(*tsm_block_jacobian_transpose,
+ &ordering[0]);
} else if (sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
// Accelerate does not provide a function to perform reordering without
// performing a full symbolic factorisation. As such, we have nothing
@@ -570,8 +549,7 @@ bool ReorderProgramForSparseCholesky(
} else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
OrderingForSparseNormalCholeskyUsingEigenSparse(
- *tsm_block_jacobian_transpose,
- &ordering[0]);
+ *tsm_block_jacobian_transpose, &ordering[0]);
}
// Apply ordering.
@@ -588,11 +566,11 @@ int ReorderResidualBlocksByPartition(
const std::unordered_set<ResidualBlockId>& bottom_residual_blocks,
Program* program) {
auto residual_blocks = program->mutable_residual_blocks();
- auto it = std::partition(
- residual_blocks->begin(), residual_blocks->end(),
- [&bottom_residual_blocks](ResidualBlock* r) {
- return bottom_residual_blocks.count(r) == 0;
- });
+ auto it = std::partition(residual_blocks->begin(),
+ residual_blocks->end(),
+ [&bottom_residual_blocks](ResidualBlock* r) {
+ return bottom_residual_blocks.count(r) == 0;
+ });
return it - residual_blocks->begin();
}
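ReorderResidualBlocksByPartition is only reindented: it still uses std::partition to move residual blocks that are not in bottom_residual_blocks to the front and returns the split index. The same pattern on a plain vector, as a minimal sketch with hypothetical names:

#include <algorithm>
#include <unordered_set>
#include <vector>

// Move every id not contained in `bottom` to the front and return how many
// such ids there are.
int PartitionByMembership(const std::unordered_set<int>& bottom,
                          std::vector<int>* ids) {
  auto it = std::partition(
      ids->begin(), ids->end(),
      [&bottom](int id) { return bottom.count(id) == 0; });
  return static_cast<int>(it - ids->begin());
}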
diff --git a/extern/ceres/internal/ceres/reorder_program.h b/extern/ceres/internal/ceres/reorder_program.h
index 88cbee3af21..2e0c3264377 100644
--- a/extern/ceres/internal/ceres/reorder_program.h
+++ b/extern/ceres/internal/ceres/reorder_program.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_REORDER_PROGRAM_H_
#include <string>
+
#include "ceres/internal/port.h"
#include "ceres/parameter_block_ordering.h"
#include "ceres/problem_impl.h"
@@ -43,17 +44,17 @@ namespace internal {
class Program;
// Reorder the parameter blocks in program using the ordering
-bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
- const ParameterBlockOrdering& ordering,
- Program* program,
- std::string* error);
+CERES_EXPORT_INTERNAL bool ApplyOrdering(
+ const ProblemImpl::ParameterMap& parameter_map,
+ const ParameterBlockOrdering& ordering,
+ Program* program,
+ std::string* error);
// Reorder the residuals for program, if necessary, so that the residuals
// involving each E block occur together. This is a necessary condition for the
// Schur eliminator, which works on these "row blocks" in the jacobian.
-bool LexicographicallyOrderResidualBlocks(int size_of_first_elimination_group,
- Program* program,
- std::string* error);
+CERES_EXPORT_INTERNAL bool LexicographicallyOrderResidualBlocks(
+ int size_of_first_elimination_group, Program* program, std::string* error);
// Schur type solvers require that all parameter blocks eliminated
// by the Schur eliminator occur before others and the residuals be
@@ -71,7 +72,7 @@ bool LexicographicallyOrderResidualBlocks(int size_of_first_elimination_group,
//
// Upon return, ordering contains the parameter block ordering that
// was used to order the program.
-bool ReorderProgramForSchurTypeLinearSolver(
+CERES_EXPORT_INTERNAL bool ReorderProgramForSchurTypeLinearSolver(
LinearSolverType linear_solver_type,
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
const ProblemImpl::ParameterMap& parameter_map,
@@ -89,7 +90,7 @@ bool ReorderProgramForSchurTypeLinearSolver(
// fill-reducing ordering is available in the sparse linear algebra
// library (SuiteSparse version >= 4.2.0) then the fill reducing
// ordering will take it into account, otherwise it will be ignored.
-bool ReorderProgramForSparseCholesky(
+CERES_EXPORT_INTERNAL bool ReorderProgramForSparseCholesky(
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
const ParameterBlockOrdering& parameter_block_ordering,
int start_row_block,
@@ -106,7 +107,7 @@ bool ReorderProgramForSparseCholesky(
// bottom_residual_blocks.size() because we allow
// bottom_residual_blocks to contain residual blocks not present in
// the Program.
-int ReorderResidualBlocksByPartition(
+CERES_EXPORT_INTERNAL int ReorderResidualBlocksByPartition(
const std::unordered_set<ResidualBlockId>& bottom_residual_blocks,
Program* program);
diff --git a/extern/ceres/internal/ceres/residual_block.cc b/extern/ceres/internal/ceres/residual_block.cc
index 0bf30bcf446..067c9efe83d 100644
--- a/extern/ceres/internal/ceres/residual_block.cc
+++ b/extern/ceres/internal/ceres/residual_block.cc
@@ -34,6 +34,7 @@
#include <algorithm>
#include <cstddef>
#include <vector>
+
#include "ceres/corrector.h"
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
@@ -50,8 +51,10 @@ namespace ceres {
namespace internal {
ResidualBlock::ResidualBlock(
- const CostFunction* cost_function, const LossFunction* loss_function,
- const std::vector<ParameterBlock*>& parameter_blocks, int index)
+ const CostFunction* cost_function,
+ const LossFunction* loss_function,
+ const std::vector<ParameterBlock*>& parameter_blocks,
+ int index)
: cost_function_(cost_function),
loss_function_(loss_function),
parameter_blocks_(
@@ -111,22 +114,18 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
return false;
}
- if (!IsEvaluationValid(*this,
- parameters.data(),
- cost,
- residuals,
- eval_jacobians)) {
+ if (!IsEvaluationValid(
+ *this, parameters.data(), cost, residuals, eval_jacobians)) {
+ // clang-format off
std::string message =
"\n\n"
"Error in evaluating the ResidualBlock.\n\n"
"There are two possible reasons. Either the CostFunction did not evaluate and fill all \n" // NOLINT
"residual and jacobians that were requested or there was a non-finite value (nan/infinite)\n" // NOLINT
"generated during the or jacobian computation. \n\n" +
- EvaluationToString(*this,
- parameters.data(),
- cost,
- residuals,
- eval_jacobians);
+ EvaluationToString(
+ *this, parameters.data(), cost, residuals, eval_jacobians);
+ // clang-format on
LOG(WARNING) << message;
return false;
}
@@ -149,7 +148,11 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
parameter_block->LocalParameterizationJacobian(),
parameter_block->Size(),
parameter_block->LocalSize(),
- jacobians[i], 0, 0, num_residuals, parameter_block->LocalSize());
+ jacobians[i],
+ 0,
+ 0,
+ num_residuals,
+ parameter_block->LocalSize());
}
}
}
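The final hunk in this file only rewraps the arguments of the call that maps the residual Jacobian through the local parameterization Jacobian (the arguments visible in the hunk: the parameterization Jacobian, the block's Size and LocalSize, and the output jacobians[i]). Conceptually it is the chain rule J_local = J_global * d(global)/d(local). A minimal Eigen sketch of that product, with hypothetical names and without Ceres' in-place fixed-size kernel:

#include <Eigen/Core>

// jacobian_global is num_residuals x Size, global_wrt_local is
// Size x LocalSize; the result is num_residuals x LocalSize.
Eigen::MatrixXd LocalJacobian(const Eigen::MatrixXd& jacobian_global,
                              const Eigen::MatrixXd& global_wrt_local) {
  return jacobian_global * global_wrt_local;
}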
diff --git a/extern/ceres/internal/ceres/residual_block.h b/extern/ceres/internal/ceres/residual_block.h
index a2e4425b911..f28fd42857c 100644
--- a/extern/ceres/internal/ceres/residual_block.h
+++ b/extern/ceres/internal/ceres/residual_block.h
@@ -65,7 +65,7 @@ class ParameterBlock;
//
// The residual block stores pointers to but does not own the cost functions,
// loss functions, and parameter blocks.
-class ResidualBlock {
+class CERES_EXPORT_INTERNAL ResidualBlock {
public:
// Construct the residual block with the given cost/loss functions. Loss may
// be null. The index is the index of the residual block in the Program's
@@ -105,7 +105,6 @@ class ResidualBlock {
double** jacobians,
double* scratch) const;
-
const CostFunction* cost_function() const { return cost_function_; }
const LossFunction* loss_function() const { return loss_function_; }
diff --git a/extern/ceres/internal/ceres/residual_block_utils.cc b/extern/ceres/internal/ceres/residual_block_utils.cc
index 35e928bbcc1..d5b3fa1fa37 100644
--- a/extern/ceres/internal/ceres/residual_block_utils.cc
+++ b/extern/ceres/internal/ceres/residual_block_utils.cc
@@ -33,6 +33,7 @@
#include <cmath>
#include <cstddef>
#include <limits>
+
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
@@ -75,6 +76,7 @@ string EvaluationToString(const ResidualBlock& block,
const int num_residuals = block.NumResiduals();
string result = "";
+ // clang-format off
StringAppendF(&result,
"Residual Block size: %d parameter blocks x %d residuals\n\n",
num_parameter_blocks, num_residuals);
@@ -85,6 +87,7 @@ string EvaluationToString(const ResidualBlock& block,
"of the Jacobian/residual array was requested but was not written to by user code, it is \n" // NOLINT
"indicated by 'Uninitialized'. This is an error. Residuals or Jacobian values evaluating \n" // NOLINT
"to Inf or NaN is also an error. \n\n"; // NOLINT
+ // clang-format on
string space = "Residuals: ";
result += space;
@@ -102,8 +105,8 @@ string EvaluationToString(const ResidualBlock& block,
for (int k = 0; k < num_residuals; ++k) {
AppendArrayToString(1,
(jacobians != NULL && jacobians[i] != NULL)
- ? jacobians[i] + k * parameter_block_size + j
- : NULL,
+ ? jacobians[i] + k * parameter_block_size + j
+ : NULL,
&result);
}
StringAppendF(&result, "\n");
diff --git a/extern/ceres/internal/ceres/residual_block_utils.h b/extern/ceres/internal/ceres/residual_block_utils.h
index 627337f743c..41ae81abc99 100644
--- a/extern/ceres/internal/ceres/residual_block_utils.h
+++ b/extern/ceres/internal/ceres/residual_block_utils.h
@@ -44,6 +44,7 @@
#define CERES_INTERNAL_RESIDUAL_BLOCK_UTILS_H_
#include <string>
+
#include "ceres/internal/port.h"
namespace ceres {
diff --git a/extern/ceres/internal/ceres/schur_complement_solver.cc b/extern/ceres/internal/ceres/schur_complement_solver.cc
index 0083300b036..65e7854f9e5 100644
--- a/extern/ceres/internal/ceres/schur_complement_solver.cc
+++ b/extern/ceres/internal/ceres/schur_complement_solver.cc
@@ -139,10 +139,8 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
//
// TODO(sameeragarwal): A more scalable template specialization
// mechanism that does not cause binary bloat.
- if (options_.row_block_size == 2 &&
- options_.e_block_size == 3 &&
- options_.f_block_size == 6 &&
- num_f_blocks == 1) {
+ if (options_.row_block_size == 2 && options_.e_block_size == 3 &&
+ options_.f_block_size == 6 && num_f_blocks == 1) {
eliminator_.reset(new SchurEliminatorForOneFBlock<2, 3, 6>);
} else {
eliminator_.reset(SchurEliminatorBase::Create(options_));
diff --git a/extern/ceres/internal/ceres/schur_complement_solver.h b/extern/ceres/internal/ceres/schur_complement_solver.h
index 87f04785794..3bfa22f22e4 100644
--- a/extern/ceres/internal/ceres/schur_complement_solver.h
+++ b/extern/ceres/internal/ceres/schur_complement_solver.h
@@ -46,8 +46,8 @@
#include "ceres/types.h"
#ifdef CERES_USE_EIGEN_SPARSE
-#include "Eigen/SparseCholesky"
#include "Eigen/OrderingMethods"
+#include "Eigen/SparseCholesky"
#endif
namespace ceres {
@@ -107,7 +107,8 @@ class SparseCholesky;
// set to DENSE_SCHUR and SPARSE_SCHUR
// respectively. LinearSolver::Options::elimination_groups[0] should
// be at least 1.
-class SchurComplementSolver : public BlockSparseMatrixSolver {
+class CERES_EXPORT_INTERNAL SchurComplementSolver
+ : public BlockSparseMatrixSolver {
public:
explicit SchurComplementSolver(const LinearSolver::Options& options)
: options_(options) {
@@ -179,8 +180,7 @@ class SparseSchurComplementSolver : public SchurComplementSolver {
const LinearSolver::PerSolveOptions& per_solve_options,
double* solution) final;
LinearSolver::Summary SolveReducedLinearSystemUsingConjugateGradients(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
+ const LinearSolver::PerSolveOptions& per_solve_options, double* solution);
// Size of the blocks in the Schur complement.
std::vector<int> blocks_;
diff --git a/extern/ceres/internal/ceres/schur_eliminator.cc b/extern/ceres/internal/ceres/schur_eliminator.cc
index bc8ced4bc7c..613ae9558e9 100644
--- a/extern/ceres/internal/ceres/schur_eliminator.cc
+++ b/extern/ceres/internal/ceres/schur_eliminator.cc
@@ -41,120 +41,119 @@
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-SchurEliminatorBase*
-SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+SchurEliminatorBase* SchurEliminatorBase::Create(
+ const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
- if ((options.row_block_size == 2) &&
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 2)) {
- return new SchurEliminator<2, 2, 2>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 2, 2>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 3)) {
- return new SchurEliminator<2, 2, 3>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 2, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 4)) {
- return new SchurEliminator<2, 2, 4>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 2, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2)) {
- return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- return new SchurEliminator<2, 3, 3>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 3, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 4)) {
- return new SchurEliminator<2, 3, 4>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 3, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 6)) {
- return new SchurEliminator<2, 3, 6>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 3, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 9)) {
- return new SchurEliminator<2, 3, 9>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 3, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3)) {
- return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- return new SchurEliminator<2, 4, 3>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 4, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- return new SchurEliminator<2, 4, 4>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 4, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 6)) {
- return new SchurEliminator<2, 4, 6>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 4, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 8)) {
- return new SchurEliminator<2, 4, 8>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 4, 8>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 9)) {
- return new SchurEliminator<2, 4, 9>(options);
- }
- if ((options.row_block_size == 2) &&
+ return new SchurEliminator<2, 4, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4)) {
- return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
- }
- if (options.row_block_size == 2){
- return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 3) &&
+ return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
+ }
+ if (options.row_block_size == 2) {
+ return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 3) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- return new SchurEliminator<3, 3, 3>(options);
- }
- if ((options.row_block_size == 4) &&
+ return new SchurEliminator<3, 3, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 2)) {
- return new SchurEliminator<4, 4, 2>(options);
- }
- if ((options.row_block_size == 4) &&
+ return new SchurEliminator<4, 4, 2>(options);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- return new SchurEliminator<4, 4, 3>(options);
- }
- if ((options.row_block_size == 4) &&
+ return new SchurEliminator<4, 4, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- return new SchurEliminator<4, 4, 4>(options);
- }
- if ((options.row_block_size == 4) &&
+ return new SchurEliminator<4, 4, 4>(options);
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4)) {
- return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
- }
+ return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
+ }
#endif
VLOG(1) << "Template specializations not found for <"
- << options.row_block_size << ","
- << options.e_block_size << ","
+ << options.row_block_size << "," << options.e_block_size << ","
<< options.f_block_size << ">";
- return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
+ return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+ options);
}
} // namespace internal
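Note: the Create() factory above changes only in layout; it still matches the detected (row_block_size, e_block_size, f_block_size) triple against the compiled fixed-size specializations and falls back to the fully dynamic SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic> when nothing matches. A minimal sketch of that dispatch pattern, using hypothetical KernelBase/FixedKernel/DynamicKernel types rather than the real Ceres class hierarchy:

#include <memory>

// Hypothetical stand-ins for SchurEliminator<r, e, f>; not the Ceres classes.
struct KernelBase {
  virtual ~KernelBase() = default;
};

template <int kRow, int kE, int kF>
struct FixedKernel : KernelBase {};    // block sizes known at compile time

struct DynamicKernel : KernelBase {};  // block sizes resolved at run time

// Mirrors the shape of SchurEliminatorBase::Create(): exact matches get a
// fixed-size kernel, everything else gets the dynamic fallback.
std::unique_ptr<KernelBase> CreateKernel(int row, int e, int f) {
  if (row == 2 && e == 2 && f == 2) {
    return std::make_unique<FixedKernel<2, 2, 2>>();
  }
  if (row == 2 && e == 3 && f == 3) {
    return std::make_unique<FixedKernel<2, 3, 3>>();
  }
  // ... one branch per generated specialization ...
  return std::make_unique<DynamicKernel>();
}

The fixed-size branches exist because Eigen generates much tighter code when the block dimensions are template parameters; the dynamic fallback keeps the solver correct for any block structure.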
diff --git a/extern/ceres/internal/ceres/schur_eliminator.h b/extern/ceres/internal/ceres/schur_eliminator.h
index 66fcb4d58e8..42c016ee9b0 100644
--- a/extern/ceres/internal/ceres/schur_eliminator.h
+++ b/extern/ceres/internal/ceres/schur_eliminator.h
@@ -164,7 +164,7 @@ namespace internal {
// 2008 for an example of such use].
//
// Example usage: Please see schur_complement_solver.cc
-class SchurEliminatorBase {
+class CERES_EXPORT_INTERNAL SchurEliminatorBase {
public:
virtual ~SchurEliminatorBase() {}
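Note: the only change to schur_eliminator.h is tagging SchurEliminatorBase with CERES_EXPORT_INTERNAL, so the class (and its vtable/typeinfo) stays visible across translation units when Ceres is built as a shared library with hidden-by-default symbol visibility. A hedged sketch of how such an export macro is commonly defined (illustrative only; the real macro comes from ceres/internal/port.h, the header other files in this patch include for it):

// Illustrative visibility macro; not the verbatim ceres/internal/port.h definition.
#if defined(_WIN32)
#  define MY_EXPORT_INTERNAL             // internal symbols are not DLL-exported
#elif defined(__GNUC__) || defined(__clang__)
#  define MY_EXPORT_INTERNAL __attribute__((visibility("default")))
#else
#  define MY_EXPORT_INTERNAL
#endif

class MY_EXPORT_INTERNAL Widget {        // stays visible under -fvisibility=hidden
 public:
  virtual ~Widget() {}
};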
diff --git a/extern/ceres/internal/ceres/schur_eliminator_impl.h b/extern/ceres/internal/ceres/schur_eliminator_impl.h
index bd0881eec1e..1f0b4fa481d 100644
--- a/extern/ceres/internal/ceres/schur_eliminator_impl.h
+++ b/extern/ceres/internal/ceres/schur_eliminator_impl.h
@@ -46,7 +46,9 @@
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include <algorithm>
#include <map>
@@ -152,7 +154,7 @@ void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
++chunk.size;
}
- CHECK_GT(chunk.size, 0); // This check will need to be resolved.
+ CHECK_GT(chunk.size, 0); // This check will need to be resolved.
r += chunk.size;
}
const Chunk& chunk = chunks_.back();
@@ -174,13 +176,12 @@ void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-Eliminate(const BlockSparseMatrixData& A,
- const double* b,
- const double* D,
- BlockRandomAccessMatrix* lhs,
- double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Eliminate(
+ const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) {
if (lhs->num_rows() > 0) {
lhs->SetZero();
if (rhs) {
@@ -193,27 +194,26 @@ Eliminate(const BlockSparseMatrixData& A,
// Add the diagonal to the schur complement.
if (D != NULL) {
- ParallelFor(
- context_,
- num_eliminate_blocks_,
- num_col_blocks,
- num_threads_,
- [&](int i) {
- const int block_id = i - num_eliminate_blocks_;
- int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c,
- &row_stride, &col_stride);
- if (cell_info != NULL) {
- const int block_size = bs->cols[i].size;
- typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
- D + bs->cols[i].position, block_size);
-
- std::lock_guard<std::mutex> l(cell_info->m);
- MatrixRef m(cell_info->values, row_stride, col_stride);
- m.block(r, c, block_size, block_size).diagonal() +=
- diag.array().square().matrix();
- }
- });
+ ParallelFor(context_,
+ num_eliminate_blocks_,
+ num_col_blocks,
+ num_threads_,
+ [&](int i) {
+ const int block_id = i - num_eliminate_blocks_;
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(
+ block_id, block_id, &r, &c, &row_stride, &col_stride);
+ if (cell_info != NULL) {
+ const int block_size = bs->cols[i].size;
+ typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
+ D + bs->cols[i].position, block_size);
+
+ std::lock_guard<std::mutex> l(cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block_size, block_size).diagonal() +=
+ diag.array().square().matrix();
+ }
+ });
}
// Eliminate y blocks one chunk at a time. For each chunk, compute
@@ -242,12 +242,12 @@ Eliminate(const BlockSparseMatrixData& A,
VectorRef(buffer, buffer_size_).setZero();
- typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
- ete(e_block_size, e_block_size);
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
+ e_block_size);
if (D != NULL) {
- const typename EigenTypes<kEBlockSize>::ConstVectorRef
- diag(D + bs->cols[e_block_id].position, e_block_size);
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
+ D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
@@ -299,31 +299,25 @@ Eliminate(const BlockSparseMatrixData& A,
// S -= F'E(E'E)^{-1}E'F
ChunkOuterProduct(
- thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
+ thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
});
// For rows with no e_blocks, the schur complement update reduces to
// S += F'F.
- NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
+ NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs);
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-BackSubstitute(const BlockSparseMatrixData& A,
- const double* b,
- const double* D,
- const double* z,
- double* y) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::BackSubstitute(
+ const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ const double* z,
+ double* y) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
- ParallelFor(
- context_,
- 0,
- int(chunks_.size()),
- num_threads_,
- [&](int i) {
+ ParallelFor(context_, 0, int(chunks_.size()), num_threads_, [&](int i) {
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
@@ -331,11 +325,11 @@ BackSubstitute(const BlockSparseMatrixData& A,
double* y_ptr = y + bs->cols[e_block_id].position;
typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
- typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
- ete(e_block_size, e_block_size);
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size,
+ e_block_size);
if (D != NULL) {
- const typename EigenTypes<kEBlockSize>::ConstVectorRef
- diag(D + bs->cols[e_block_id].position, e_block_size);
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
+ D + bs->cols[e_block_id].position, e_block_size);
ete = diag.array().square().matrix().asDiagonal();
} else {
ete.setZero();
@@ -357,6 +351,7 @@ BackSubstitute(const BlockSparseMatrixData& A,
const int f_block_size = bs->cols[f_block_id].size;
const int r_block = f_block_id - num_eliminate_blocks_;
+ // clang-format off
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
values + row.cells[c].position, row.block.size, f_block_size,
z + lhs_row_layout_[r_block],
@@ -373,6 +368,7 @@ BackSubstitute(const BlockSparseMatrixData& A,
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete.data(), 0, 0, e_block_size, e_block_size);
+ // clang-format on
}
y_block =
@@ -384,14 +380,13 @@ BackSubstitute(const BlockSparseMatrixData& A,
//
// F'b - F'E(E'E)^(-1) E'b
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-UpdateRhs(const Chunk& chunk,
- const BlockSparseMatrixData& A,
- const double* b,
- int row_block_counter,
- const double* inverse_ete_g,
- double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::UpdateRhs(
+ const Chunk& chunk,
+ const BlockSparseMatrixData& A,
+ const double* b,
+ int row_block_counter,
+ const double* inverse_ete_g,
+ double* rhs) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
@@ -403,22 +398,26 @@ UpdateRhs(const Chunk& chunk,
const Cell& e_cell = row.cells.front();
typename EigenTypes<kRowBlockSize>::Vector sj =
- typename EigenTypes<kRowBlockSize>::ConstVectorRef
- (b + b_pos, row.block.size);
+ typename EigenTypes<kRowBlockSize>::ConstVectorRef(b + b_pos,
+ row.block.size);
+ // clang-format off
MatrixVectorMultiply<kRowBlockSize, kEBlockSize, -1>(
values + e_cell.position, row.block.size, e_block_size,
inverse_ete_g, sj.data());
+ // clang-format on
for (int c = 1; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
std::lock_guard<std::mutex> l(*rhs_locks_[block]);
+ // clang-format off
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + row.cells[c].position,
row.block.size, block_size,
sj.data(), rhs + lhs_row_layout_[block]);
+ // clang-format on
}
b_pos += row.block.size;
}
@@ -444,17 +443,16 @@ UpdateRhs(const Chunk& chunk,
//
// and the gradient of the e_block, E'b.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-ChunkDiagonalBlockAndGradient(
- const Chunk& chunk,
- const BlockSparseMatrixData& A,
- const double* b,
- int row_block_counter,
- typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
- double* g,
- double* buffer,
- BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ ChunkDiagonalBlockAndGradient(
+ const Chunk& chunk,
+ const BlockSparseMatrixData& A,
+ const double* b,
+ int row_block_counter,
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
+ double* g,
+ double* buffer,
+ BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
@@ -474,18 +472,22 @@ ChunkDiagonalBlockAndGradient(
// Extract the e_block, ETE += E_i' E_i
const Cell& e_cell = row.cells.front();
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + e_cell.position, row.block.size, e_block_size,
ete->data(), 0, 0, e_block_size, e_block_size);
+ // clang-format on
if (b) {
// g += E_i' b_i
+ // clang-format off
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
b + b_pos,
g);
+ // clang-format on
}
// buffer = E'F. This computation is done by iterating over the
@@ -493,13 +495,14 @@ ChunkDiagonalBlockAndGradient(
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
const int f_block_size = bs->cols[f_block_id].size;
- double* buffer_ptr =
- buffer + FindOrDie(chunk.buffer_layout, f_block_id);
+ double* buffer_ptr = buffer + FindOrDie(chunk.buffer_layout, f_block_id);
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
values + row.cells[c].position, row.block.size, f_block_size,
buffer_ptr, 0, 0, e_block_size, f_block_size);
+ // clang-format on
}
b_pos += row.block.size;
}
@@ -510,14 +513,13 @@ ChunkDiagonalBlockAndGradient(
//
// S -= F'E(E'E)^{-1}E'F.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-ChunkOuterProduct(int thread_id,
- const CompressedRowBlockStructure* bs,
- const Matrix& inverse_ete,
- const double* buffer,
- const BufferLayoutType& buffer_layout,
- BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ ChunkOuterProduct(int thread_id,
+ const CompressedRowBlockStructure* bs,
+ const Matrix& inverse_ete,
+ const double* buffer,
+ const BufferLayoutType& buffer_layout,
+ BlockRandomAccessMatrix* lhs) {
// This is the most computationally expensive part of this
// code. Profiling experiments reveal that the bottleneck is not the
// computation of the right-hand matrix product, but memory
@@ -532,28 +534,31 @@ ChunkOuterProduct(int thread_id,
for (; it1 != buffer_layout.end(); ++it1) {
const int block1 = it1->first - num_eliminate_blocks_;
const int block1_size = bs->cols[it1->first].size;
+ // clang-format off
MatrixTransposeMatrixMultiply
<kEBlockSize, kFBlockSize, kEBlockSize, kEBlockSize, 0>(
buffer + it1->second, e_block_size, block1_size,
inverse_ete.data(), e_block_size, e_block_size,
b1_transpose_inverse_ete, 0, 0, block1_size, e_block_size);
+ // clang-format on
BufferLayoutType::const_iterator it2 = it1;
for (; it2 != buffer_layout.end(); ++it2) {
const int block2 = it2->first - num_eliminate_blocks_;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block1, block2,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[it2->first].size;
std::lock_guard<std::mutex> l(cell_info->m);
+ // clang-format off
MatrixMatrixMultiply
<kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
b1_transpose_inverse_ete, block1_size, e_block_size,
buffer + it2->second, e_block_size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
+ // clang-format on
}
}
}
@@ -563,13 +568,12 @@ ChunkOuterProduct(int thread_id,
// += F'F. This function iterates over the rows of A with no e_block,
// and calls NoEBlockRowOuterProduct on each row.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
- const double* b,
- int row_block_counter,
- BlockRandomAccessMatrix* lhs,
- double* rhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
+ const double* b,
+ int row_block_counter,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
@@ -582,15 +586,16 @@ NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
+ // clang-format off
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[c].position, row.block.size, block_size,
b + row.block.position,
rhs + lhs_row_layout_[block]);
+ // clang-format on
}
}
}
-
// A row r of A, which has no e_blocks gets added to the Schur
// Complement as S += r r'. This function is responsible for computing
// the contribution of a single row r to the Schur complement. It is
@@ -606,11 +611,10 @@ NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
// dynamic. Since the number of rows without e_blocks is small, the
// lack of templating is not an issue.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
- int row_block_index,
- BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
@@ -621,18 +625,19 @@ NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block1, block1,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
if (cell_info != NULL) {
std::lock_guard<std::mutex> l(cell_info->m);
// This multiply currently ignores the fact that this is a
// symmetric outer product.
+ // clang-format off
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
+ // clang-format on
}
for (int j = i + 1; j < row.cells.size(); ++j) {
@@ -640,17 +645,18 @@ NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
DCHECK_GE(block2, 0);
DCHECK_LT(block1, block2);
int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block1, block2,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[row.cells[j].block_id].size;
std::lock_guard<std::mutex> l(cell_info->m);
+ // clang-format off
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
+ // clang-format on
}
}
}
@@ -660,11 +666,10 @@ NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-EBlockRowOuterProduct(const BlockSparseMatrixData& A,
- int row_block_index,
- BlockRandomAccessMatrix* lhs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
+ EBlockRowOuterProduct(const BlockSparseMatrixData& A,
+ int row_block_index,
+ BlockRandomAccessMatrix* lhs) {
const CompressedRowBlockStructure* bs = A.block_structure();
const double* values = A.values();
@@ -675,17 +680,18 @@ EBlockRowOuterProduct(const BlockSparseMatrixData& A,
const int block1_size = bs->cols[row.cells[i].block_id].size;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block1, block1,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride);
if (cell_info != NULL) {
std::lock_guard<std::mutex> l(cell_info->m);
// block += b1.transpose() * b1;
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[i].position, row.block.size, block1_size,
cell_info->values, r, c, row_stride, col_stride);
+ // clang-format on
}
for (int j = i + 1; j < row.cells.size(); ++j) {
@@ -694,17 +700,18 @@ EBlockRowOuterProduct(const BlockSparseMatrixData& A,
DCHECK_LT(block1, block2);
const int block2_size = bs->cols[row.cells[j].block_id].size;
int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block1, block2,
- &r, &c,
- &row_stride, &col_stride);
+ CellInfo* cell_info =
+ lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
if (cell_info != NULL) {
// block += b1.transpose() * b2;
std::lock_guard<std::mutex> l(cell_info->m);
+ // clang-format off
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
values + row.cells[j].position, row.block.size, block2_size,
cell_info->values, r, c, row_stride, col_stride);
+ // clang-format on
}
}
}
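Note: the schur_eliminator_impl.h changes are formatting only, and the comments scattered through Eliminate(), UpdateRhs(), ChunkOuterProduct() and BackSubstitute() (S -= F'E(E'E)^{-1}E'F, F'b - F'E(E'E)^{-1}E'b, ete = E'E) describe the standard block Schur complement. Collected in one place, with the Jacobian split as J = [E F]:

\begin{aligned}
S   &= F^{\top}F \;-\; F^{\top}E\,(E^{\top}E)^{-1}E^{\top}F, \\
r   &= F^{\top}b \;-\; F^{\top}E\,(E^{\top}E)^{-1}E^{\top}b, \\
S\,z &= r \qquad \text{(the reduced system handed to the Schur complement solver),} \\
y   &= (E^{\top}E)^{-1}\bigl(E^{\top}b - E^{\top}F\,z\bigr) \qquad \text{(BackSubstitute()),}
\end{aligned}

where $E^{\top}E$ and $F^{\top}F$ pick up $\operatorname{diag}(D)^{2}$ terms when the regularizing diagonal D is non-null, matching the diag.array().square() lines in the code.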
diff --git a/extern/ceres/internal/ceres/schur_eliminator_template.py b/extern/ceres/internal/ceres/schur_eliminator_template.py
index 2f38cf5ad8f..50515956e89 100644
--- a/extern/ceres/internal/ceres/schur_eliminator_template.py
+++ b/extern/ceres/internal/ceres/schur_eliminator_template.py
@@ -93,9 +93,7 @@ HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
"""
DYNAMIC_FILE = """
-
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
@@ -113,7 +111,6 @@ SPECIALIZATION_FILE = """
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
#include "ceres/schur_eliminator_impl.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
@@ -129,25 +126,24 @@ template class SchurEliminator<%s, %s, %s>;
FACTORY_FILE_HEADER = """
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
-#include "ceres/internal/eigen.h"
namespace ceres {
namespace internal {
-SchurEliminatorBase*
-SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+SchurEliminatorBase* SchurEliminatorBase::Create(
+ const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
-FACTORY = """ return new SchurEliminator<%s, %s, %s>(options);"""
+FACTORY = """ return new SchurEliminator<%s, %s, %s>(options);"""
FACTORY_FOOTER = """
#endif
VLOG(1) << "Template specializations not found for <"
- << options.row_block_size << ","
- << options.e_block_size << ","
+ << options.row_block_size << "," << options.e_block_size << ","
<< options.f_block_size << ">";
- return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
+ return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
+ options);
}
} // namespace internal
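Note: schur_eliminator_template.py generates both the factory shown in schur_eliminator.cc above and one translation unit per specialization; this change drops the ceres/internal/eigen.h include from every emitted file and reflows the factory strings to match the clang-formatted C++. Going by the SPECIALIZATION_FILE template, each generated file reduces to an explicit instantiation of the implementation header, roughly as follows (shape only; the real generated files carry the full license header):

// Sketch of one generated specialization TU, e.g. for the <2, 3, 3> triple.
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION

#include "ceres/schur_eliminator_impl.h"

namespace ceres {
namespace internal {

template class SchurEliminator<2, 3, 3>;  // forces code generation for this size

}  // namespace internal
}  // namespace ceres

#endif  // CERES_RESTRICT_SCHUR_SPECIALIZATION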
diff --git a/extern/ceres/internal/ceres/schur_templates.cc b/extern/ceres/internal/ceres/schur_templates.cc
index 01528619b1b..bcf0d14902d 100644
--- a/extern/ceres/internal/ceres/schur_templates.cc
+++ b/extern/ceres/internal/ceres/schur_templates.cc
@@ -56,168 +56,168 @@ void GetBestSchurTemplateSpecialization(int* row_block_size,
*e_block_size = Eigen::Dynamic;
*f_block_size = Eigen::Dynamic;
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
- if ((options.row_block_size == 2) &&
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 2)) {
- *row_block_size = 2;
- *e_block_size = 2;
- *f_block_size = 2;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 2;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 3)) {
- *row_block_size = 2;
- *e_block_size = 2;
- *f_block_size = 3;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2) &&
(options.f_block_size == 4)) {
- *row_block_size = 2;
- *e_block_size = 2;
- *f_block_size = 4;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 2)) {
- *row_block_size = 2;
- *e_block_size = 2;
- *f_block_size = Eigen::Dynamic;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- *row_block_size = 2;
- *e_block_size = 3;
- *f_block_size = 3;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 4)) {
- *row_block_size = 2;
- *e_block_size = 3;
- *f_block_size = 4;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 6)) {
- *row_block_size = 2;
- *e_block_size = 3;
- *f_block_size = 6;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 6;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3) &&
(options.f_block_size == 9)) {
- *row_block_size = 2;
- *e_block_size = 3;
- *f_block_size = 9;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 9;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 3)) {
- *row_block_size = 2;
- *e_block_size = 3;
- *f_block_size = Eigen::Dynamic;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = 3;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = 4;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 6)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = 6;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 6;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 8)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = 8;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 8;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4) &&
(options.f_block_size == 9)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = 9;
- return;
- }
- if ((options.row_block_size == 2) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 9;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
(options.e_block_size == 4)) {
- *row_block_size = 2;
- *e_block_size = 4;
- *f_block_size = Eigen::Dynamic;
- return;
- }
- if (options.row_block_size == 2){
- *row_block_size = 2;
- *e_block_size = Eigen::Dynamic;
- *f_block_size = Eigen::Dynamic;
- return;
- }
- if ((options.row_block_size == 3) &&
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if (options.row_block_size == 2) {
+ *row_block_size = 2;
+ *e_block_size = Eigen::Dynamic;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 3) &&
(options.e_block_size == 3) &&
(options.f_block_size == 3)) {
- *row_block_size = 3;
- *e_block_size = 3;
- *f_block_size = 3;
- return;
- }
- if ((options.row_block_size == 4) &&
+ *row_block_size = 3;
+ *e_block_size = 3;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 2)) {
- *row_block_size = 4;
- *e_block_size = 4;
- *f_block_size = 2;
- return;
- }
- if ((options.row_block_size == 4) &&
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 2;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 3)) {
- *row_block_size = 4;
- *e_block_size = 4;
- *f_block_size = 3;
- return;
- }
- if ((options.row_block_size == 4) &&
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4) &&
(options.f_block_size == 4)) {
- *row_block_size = 4;
- *e_block_size = 4;
- *f_block_size = 4;
- return;
- }
- if ((options.row_block_size == 4) &&
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
(options.e_block_size == 4)) {
- *row_block_size = 4;
- *e_block_size = 4;
- *f_block_size = Eigen::Dynamic;
- return;
- }
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
#endif
return;
diff --git a/extern/ceres/internal/ceres/scratch_evaluate_preparer.cc b/extern/ceres/internal/ceres/scratch_evaluate_preparer.cc
index f01ef11c26f..9905b220fbf 100644
--- a/extern/ceres/internal/ceres/scratch_evaluate_preparer.cc
+++ b/extern/ceres/internal/ceres/scratch_evaluate_preparer.cc
@@ -37,9 +37,8 @@
namespace ceres {
namespace internal {
-ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(
- const Program &program,
- int num_threads) {
+ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(const Program& program,
+ int num_threads) {
ScratchEvaluatePreparer* preparers = new ScratchEvaluatePreparer[num_threads];
int max_derivatives_per_residual_block =
program.MaxDerivativesPerResidualBlock();
@@ -50,8 +49,7 @@ ScratchEvaluatePreparer* ScratchEvaluatePreparer::Create(
}
void ScratchEvaluatePreparer::Init(int max_derivatives_per_residual_block) {
- jacobian_scratch_.reset(
- new double[max_derivatives_per_residual_block]);
+ jacobian_scratch_.reset(new double[max_derivatives_per_residual_block]);
}
// Point the jacobian blocks into the scratch area of this evaluate preparer.
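Note: the scratch_evaluate_preparer changes are whitespace only; Create() still returns an array of num_threads preparers and Init() still sizes each jacobian scratch buffer to MaxDerivativesPerResidualBlock(), i.e. the worst case over all residual blocks. A minimal sketch of that per-thread scratch pattern, with hypothetical names:

#include <memory>
#include <vector>

// Hypothetical stand-in: one scratch buffer per thread, each big enough for
// the largest residual block, so workers never contend or reallocate.
class ScratchPool {
 public:
  ScratchPool(int num_threads, int max_doubles_per_block)
      : buffers_(num_threads) {
    for (auto& buffer : buffers_) {
      buffer.reset(new double[max_doubles_per_block]);
    }
  }
  double* ScratchFor(int thread_id) { return buffers_[thread_id].get(); }

 private:
  std::vector<std::unique_ptr<double[]>> buffers_;
};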
diff --git a/extern/ceres/internal/ceres/scratch_evaluate_preparer.h b/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
index c8d9b937b47..2d2745d6269 100644
--- a/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
+++ b/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
@@ -47,7 +47,7 @@ class SparseMatrix;
class ScratchEvaluatePreparer {
public:
// Create num_threads ScratchEvaluatePreparers.
- static ScratchEvaluatePreparer* Create(const Program &program,
+ static ScratchEvaluatePreparer* Create(const Program& program,
int num_threads);
// EvaluatePreparer interface
diff --git a/extern/ceres/internal/ceres/single_linkage_clustering.cc b/extern/ceres/internal/ceres/single_linkage_clustering.cc
index 394492cdf23..0e7813140da 100644
--- a/extern/ceres/internal/ceres/single_linkage_clustering.cc
+++ b/extern/ceres/internal/ceres/single_linkage_clustering.cc
@@ -30,8 +30,9 @@
#include "ceres/single_linkage_clustering.h"
-#include <unordered_set>
#include <unordered_map>
+#include <unordered_set>
+
#include "ceres/graph.h"
#include "ceres/graph_algorithms.h"
diff --git a/extern/ceres/internal/ceres/single_linkage_clustering.h b/extern/ceres/internal/ceres/single_linkage_clustering.h
index ccd6f8ea37d..e891a9eec0a 100644
--- a/extern/ceres/internal/ceres/single_linkage_clustering.h
+++ b/extern/ceres/internal/ceres/single_linkage_clustering.h
@@ -32,7 +32,9 @@
#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
#include <unordered_map>
+
#include "ceres/graph.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -53,10 +55,10 @@ struct SingleLinkageClusteringOptions {
//
// The return value of this function is the number of clusters
// identified by the algorithm.
-int ComputeSingleLinkageClustering(
- const SingleLinkageClusteringOptions& options,
- const WeightedGraph<int>& graph,
- std::unordered_map<int, int>* membership);
+int CERES_EXPORT_INTERNAL
+ComputeSingleLinkageClustering(const SingleLinkageClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ std::unordered_map<int, int>* membership);
} // namespace internal
} // namespace ceres
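Note: besides pulling in ceres/internal/port.h for the new CERES_EXPORT_INTERNAL annotation, the contract of ComputeSingleLinkageClustering is unchanged: vertices joined by sufficiently similar edges land in the same cluster, *membership maps each vertex to its cluster, and the return value is the number of clusters. A simplified, self-contained illustration of single-linkage clustering with a union-find over an edge list (hypothetical names; not the Ceres implementation, which operates on its WeightedGraph type):

#include <unordered_map>
#include <vector>

struct Edge { int u, v; double weight; };

// Path-halving find for a simple union-find.
static int Find(std::vector<int>& parent, int x) {
  while (parent[x] != x) {
    x = parent[x] = parent[parent[x]];
  }
  return x;
}

// Merge every pair of vertices whose edge weight reaches the similarity
// threshold, then assign dense cluster ids. Returns the number of clusters.
int SingleLinkageClusters(int num_vertices,
                          const std::vector<Edge>& edges,
                          double min_similarity,
                          std::unordered_map<int, int>* membership) {
  std::vector<int> parent(num_vertices);
  for (int i = 0; i < num_vertices; ++i) parent[i] = i;
  for (const Edge& e : edges) {
    if (e.weight >= min_similarity) {
      parent[Find(parent, e.u)] = Find(parent, e.v);
    }
  }
  std::unordered_map<int, int> cluster_id;  // union-find root -> dense id
  for (int i = 0; i < num_vertices; ++i) {
    const int root = Find(parent, i);
    const int id =
        cluster_id.emplace(root, static_cast<int>(cluster_id.size())).first->second;
    (*membership)[i] = id;
  }
  return static_cast<int>(cluster_id.size());
}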
diff --git a/extern/ceres/internal/ceres/small_blas.h b/extern/ceres/internal/ceres/small_blas.h
index 81c58722d5b..4ee9229f35f 100644
--- a/extern/ceres/internal/ceres/small_blas.h
+++ b/extern/ceres/internal/ceres/small_blas.h
@@ -35,8 +35,8 @@
#ifndef CERES_INTERNAL_SMALL_BLAS_H_
#define CERES_INTERNAL_SMALL_BLAS_H_
-#include "ceres/internal/port.h"
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "glog/logging.h"
#include "small_blas_generic.h"
@@ -46,7 +46,7 @@ namespace internal {
// The following three macros are used to share code and reduce
// template junk across the various GEMM variants.
#define CERES_GEMM_BEGIN(name) \
- template<int kRowA, int kColA, int kRowB, int kColB, int kOperation> \
+ template <int kRowA, int kColA, int kRowB, int kColB, int kOperation> \
inline void name(const double* A, \
const int num_row_a, \
const int num_col_a, \
@@ -59,56 +59,58 @@ namespace internal {
const int row_stride_c, \
const int col_stride_c)
-#define CERES_GEMM_NAIVE_HEADER \
- DCHECK_GT(num_row_a, 0); \
- DCHECK_GT(num_col_a, 0); \
- DCHECK_GT(num_row_b, 0); \
- DCHECK_GT(num_col_b, 0); \
- DCHECK_GE(start_row_c, 0); \
- DCHECK_GE(start_col_c, 0); \
- DCHECK_GT(row_stride_c, 0); \
- DCHECK_GT(col_stride_c, 0); \
- DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a)); \
- DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a)); \
- DCHECK((kRowB == Eigen::Dynamic) || (kRowB == num_row_b)); \
- DCHECK((kColB == Eigen::Dynamic) || (kColB == num_col_b)); \
- const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a); \
- const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a); \
- const int NUM_ROW_B = (kRowB != Eigen::Dynamic ? kRowB : num_row_b); \
+#define CERES_GEMM_NAIVE_HEADER \
+ DCHECK_GT(num_row_a, 0); \
+ DCHECK_GT(num_col_a, 0); \
+ DCHECK_GT(num_row_b, 0); \
+ DCHECK_GT(num_col_b, 0); \
+ DCHECK_GE(start_row_c, 0); \
+ DCHECK_GE(start_col_c, 0); \
+ DCHECK_GT(row_stride_c, 0); \
+ DCHECK_GT(col_stride_c, 0); \
+ DCHECK((kRowA == Eigen::Dynamic) || (kRowA == num_row_a)); \
+ DCHECK((kColA == Eigen::Dynamic) || (kColA == num_col_a)); \
+ DCHECK((kRowB == Eigen::Dynamic) || (kRowB == num_row_b)); \
+ DCHECK((kColB == Eigen::Dynamic) || (kColB == num_col_b)); \
+ const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a); \
+ const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a); \
+ const int NUM_ROW_B = (kRowB != Eigen::Dynamic ? kRowB : num_row_b); \
const int NUM_COL_B = (kColB != Eigen::Dynamic ? kColB : num_col_b);
-#define CERES_GEMM_EIGEN_HEADER \
- const typename EigenTypes<kRowA, kColA>::ConstMatrixRef \
- Aref(A, num_row_a, num_col_a); \
- const typename EigenTypes<kRowB, kColB>::ConstMatrixRef \
- Bref(B, num_row_b, num_col_b); \
- MatrixRef Cref(C, row_stride_c, col_stride_c); \
+#define CERES_GEMM_EIGEN_HEADER \
+ const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref( \
+ A, num_row_a, num_col_a); \
+ const typename EigenTypes<kRowB, kColB>::ConstMatrixRef Bref( \
+ B, num_row_b, num_col_b); \
+ MatrixRef Cref(C, row_stride_c, col_stride_c);
+// clang-format off
#define CERES_CALL_GEMM(name) \
name<kRowA, kColA, kRowB, kColB, kOperation>( \
A, num_row_a, num_col_a, \
B, num_row_b, num_col_b, \
C, start_row_c, start_col_c, row_stride_c, col_stride_c);
-
-#define CERES_GEMM_STORE_SINGLE(p, index, value) \
- if (kOperation > 0) { \
- p[index] += value; \
- } else if (kOperation < 0) { \
- p[index] -= value; \
- } else { \
- p[index] = value; \
+// clang-format on
+
+#define CERES_GEMM_STORE_SINGLE(p, index, value) \
+ if (kOperation > 0) { \
+ p[index] += value; \
+ } else if (kOperation < 0) { \
+ p[index] -= value; \
+ } else { \
+ p[index] = value; \
}
-#define CERES_GEMM_STORE_PAIR(p, index, v1, v2) \
- if (kOperation > 0) { \
- p[index] += v1; \
- p[index + 1] += v2; \
- } else if (kOperation < 0) { \
- p[index] -= v1; \
- p[index + 1] -= v2; \
- } else { \
- p[index] = v1; \
- p[index + 1] = v2; \
+#define CERES_GEMM_STORE_PAIR(p, index, v1, v2) \
+ if (kOperation > 0) { \
+ p[index] += v1; \
+ p[index + 1] += v2; \
+ } else if (kOperation < 0) { \
+ p[index] -= v1; \
+ p[index + 1] -= v2; \
+ } else { \
+ p[index] = v1; \
+ p[index + 1] = v2; \
}
// For the matrix-matrix functions below, there are three variants for
@@ -161,8 +163,8 @@ namespace internal {
//
CERES_GEMM_BEGIN(MatrixMatrixMultiplyEigen) {
CERES_GEMM_EIGEN_HEADER
- Eigen::Block<MatrixRef, kRowA, kColB>
- block(Cref, start_row_c, start_col_c, num_row_a, num_col_b);
+ Eigen::Block<MatrixRef, kRowA, kColB> block(
+ Cref, start_row_c, start_col_c, num_row_a, num_col_b);
if (kOperation > 0) {
block.noalias() += Aref * Bref;
@@ -208,7 +210,7 @@ CERES_GEMM_BEGIN(MatrixMatrixMultiplyNaive) {
// Process the couple columns in remainder if present.
if (NUM_COL_C & 2) {
- int col = NUM_COL_C & (int)(~(span - 1)) ;
+ int col = NUM_COL_C & (int)(~(span - 1));
const double* pa = &A[0];
for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
const double* pb = &B[col];
@@ -234,11 +236,12 @@ CERES_GEMM_BEGIN(MatrixMatrixMultiplyNaive) {
for (int col = 0; col < col_m; col += span) {
for (int row = 0; row < NUM_ROW_C; ++row) {
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ // clang-format off
MMM_mat1x4(NUM_COL_A, &A[row * NUM_COL_A],
&B[col], NUM_COL_B, &C[index], kOperation);
+ // clang-format on
}
}
-
}
CERES_GEMM_BEGIN(MatrixMatrixMultiply) {
@@ -261,9 +264,11 @@ CERES_GEMM_BEGIN(MatrixMatrixMultiply) {
CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyEigen) {
CERES_GEMM_EIGEN_HEADER
+ // clang-format off
Eigen::Block<MatrixRef, kColA, kColB> block(Cref,
start_row_c, start_col_c,
num_col_a, num_col_b);
+ // clang-format on
if (kOperation > 0) {
block.noalias() += Aref.transpose() * Bref;
} else if (kOperation < 0) {
@@ -310,7 +315,7 @@ CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyNaive) {
// Process the couple columns in remainder if present.
if (NUM_COL_C & 2) {
- int col = NUM_COL_C & (int)(~(span - 1)) ;
+ int col = NUM_COL_C & (int)(~(span - 1));
for (int row = 0; row < NUM_ROW_C; ++row) {
const double* pa = &A[row];
const double* pb = &B[col];
@@ -338,11 +343,12 @@ CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyNaive) {
for (int col = 0; col < col_m; col += span) {
for (int row = 0; row < NUM_ROW_C; ++row) {
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ // clang-format off
MTM_mat1x4(NUM_ROW_A, &A[row], NUM_COL_A,
&B[col], NUM_COL_B, &C[index], kOperation);
+ // clang-format on
}
}
-
}
CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiply) {
@@ -376,15 +382,15 @@ CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiply) {
// kOperation = 1 -> c += A' * b
// kOperation = -1 -> c -= A' * b
// kOperation = 0 -> c = A' * b
-template<int kRowA, int kColA, int kOperation>
+template <int kRowA, int kColA, int kOperation>
inline void MatrixVectorMultiply(const double* A,
const int num_row_a,
const int num_col_a,
const double* b,
double* c) {
#ifdef CERES_NO_CUSTOM_BLAS
- const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
- Aref(A, num_row_a, num_col_a);
+ const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref(
+ A, num_row_a, num_col_a);
const typename EigenTypes<kColA>::ConstVectorRef bref(b, num_col_a);
typename EigenTypes<kRowA>::VectorRef cref(c, num_row_a);
@@ -412,7 +418,7 @@ inline void MatrixVectorMultiply(const double* A,
// Process the last odd row if present.
if (NUM_ROW_A & 1) {
- int row = NUM_ROW_A - 1;
+ int row = NUM_ROW_A - 1;
const double* pa = &A[row * NUM_COL_A];
const double* pb = &b[0];
double tmp = 0.0;
@@ -450,8 +456,10 @@ inline void MatrixVectorMultiply(const double* A,
// Calculate the main part with multiples of 4.
int row_m = NUM_ROW_A & (int)(~(span - 1));
for (int row = 0; row < row_m; row += span) {
+ // clang-format off
MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
&b[0], &c[row], kOperation);
+ // clang-format on
}
#endif // CERES_NO_CUSTOM_BLAS
@@ -460,15 +468,15 @@ inline void MatrixVectorMultiply(const double* A,
// Similar to MatrixVectorMultiply, except that A is transposed, i.e.,
//
// c op A' * b;
-template<int kRowA, int kColA, int kOperation>
+template <int kRowA, int kColA, int kOperation>
inline void MatrixTransposeVectorMultiply(const double* A,
const int num_row_a,
const int num_col_a,
const double* b,
double* c) {
#ifdef CERES_NO_CUSTOM_BLAS
- const typename EigenTypes<kRowA, kColA>::ConstMatrixRef
- Aref(A, num_row_a, num_col_a);
+ const typename EigenTypes<kRowA, kColA>::ConstMatrixRef Aref(
+ A, num_row_a, num_col_a);
const typename EigenTypes<kRowA>::ConstVectorRef bref(b, num_row_a);
typename EigenTypes<kColA>::VectorRef cref(c, num_col_a);
@@ -496,7 +504,7 @@ inline void MatrixTransposeVectorMultiply(const double* A,
// Process the last odd column if present.
if (NUM_COL_A & 1) {
- int row = NUM_COL_A - 1;
+ int row = NUM_COL_A - 1;
const double* pa = &A[row];
const double* pb = &b[0];
double tmp = 0.0;
@@ -519,10 +527,12 @@ inline void MatrixTransposeVectorMultiply(const double* A,
const double* pb = &b[0];
double tmp1 = 0.0, tmp2 = 0.0;
for (int col = 0; col < NUM_ROW_A; ++col) {
+ // clang-format off
double bv = *pb++;
tmp1 += *(pa ) * bv;
tmp2 += *(pa + 1) * bv;
pa += NUM_COL_A;
+ // clang-format on
}
CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
@@ -535,8 +545,10 @@ inline void MatrixTransposeVectorMultiply(const double* A,
// Calculate the main part with multiples of 4.
int row_m = NUM_COL_A & (int)(~(span - 1));
for (int row = 0; row < row_m; row += span) {
+ // clang-format off
MTV_mat4x1(NUM_ROW_A, &A[row], NUM_COL_A,
&b[0], &c[row], kOperation);
+ // clang-format on
}
#endif // CERES_NO_CUSTOM_BLAS
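Note: all of the small_blas.h kernels keep the three-way kOperation convention spelled out in the comments above (kOperation = 1 accumulates, -1 subtracts, 0 overwrites) and implemented by CERES_GEMM_STORE_SINGLE / CERES_GEMM_STORE_PAIR. The same convention on a plain dot product, as a self-contained sketch independent of the Ceres macros:

// kOperation > 0: *out += a.b   kOperation < 0: *out -= a.b   kOperation == 0: *out = a.b
template <int kOperation>
inline void StoreDot(const double* a, const double* b, int n, double* out) {
  double dot = 0.0;
  for (int i = 0; i < n; ++i) {
    dot += a[i] * b[i];
  }
  if (kOperation > 0) {
    *out += dot;
  } else if (kOperation < 0) {
    *out -= dot;
  } else {
    *out = dot;
  }
}

// Usage:
//   StoreDot<1>(a, b, n, &c);   // c += a . b
//   StoreDot<-1>(a, b, n, &c);  // c -= a . b
//   StoreDot<0>(a, b, n, &c);   // c  = a . b

Making kOperation a template parameter lets the branch fold away at compile time, which is the point of the macro-heavy style in the real header.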
diff --git a/extern/ceres/internal/ceres/small_blas_generic.h b/extern/ceres/internal/ceres/small_blas_generic.h
index 978c5d56a84..3f3ea424c80 100644
--- a/extern/ceres/internal/ceres/small_blas_generic.h
+++ b/extern/ceres/internal/ceres/small_blas_generic.h
@@ -39,33 +39,33 @@ namespace ceres {
namespace internal {
// The following macros are used to share code
-#define CERES_GEMM_OPT_NAIVE_HEADER \
- double c0 = 0.0; \
- double c1 = 0.0; \
- double c2 = 0.0; \
- double c3 = 0.0; \
- const double* pa = a; \
- const double* pb = b; \
- const int span = 4; \
- int col_r = col_a & (span - 1); \
+#define CERES_GEMM_OPT_NAIVE_HEADER \
+ double c0 = 0.0; \
+ double c1 = 0.0; \
+ double c2 = 0.0; \
+ double c3 = 0.0; \
+ const double* pa = a; \
+ const double* pb = b; \
+ const int span = 4; \
+ int col_r = col_a & (span - 1); \
int col_m = col_a - col_r;
-#define CERES_GEMM_OPT_STORE_MAT1X4 \
- if (kOperation > 0) { \
- *c++ += c0; \
- *c++ += c1; \
- *c++ += c2; \
- *c++ += c3; \
- } else if (kOperation < 0) { \
- *c++ -= c0; \
- *c++ -= c1; \
- *c++ -= c2; \
- *c++ -= c3; \
- } else { \
- *c++ = c0; \
- *c++ = c1; \
- *c++ = c2; \
- *c++ = c3; \
+#define CERES_GEMM_OPT_STORE_MAT1X4 \
+ if (kOperation > 0) { \
+ *c++ += c0; \
+ *c++ += c1; \
+ *c++ += c2; \
+ *c++ += c3; \
+ } else if (kOperation < 0) { \
+ *c++ -= c0; \
+ *c++ -= c1; \
+ *c++ -= c2; \
+ *c++ -= c3; \
+ } else { \
+ *c++ = c0; \
+ *c++ = c1; \
+ *c++ = c2; \
+ *c++ = c3; \
}
// Matrix-Matrix Multiplication
@@ -97,14 +97,14 @@ static inline void MMM_mat1x4(const int col_a,
double av = 0.0;
int bi = 0;
-#define CERES_GEMM_OPT_MMM_MAT1X4_MUL \
- av = pa[k]; \
- pb = b + bi; \
- c0 += av * *pb++; \
- c1 += av * *pb++; \
- c2 += av * *pb++; \
- c3 += av * *pb++; \
- bi += col_stride_b; \
+#define CERES_GEMM_OPT_MMM_MAT1X4_MUL \
+ av = pa[k]; \
+ pb = b + bi; \
+ c0 += av * *pb++; \
+ c1 += av * *pb++; \
+ c2 += av * *pb++; \
+ c3 += av * *pb++; \
+ bi += col_stride_b; \
k++;
for (int k = 0; k < col_m;) {
@@ -164,14 +164,14 @@ static inline void MTM_mat1x4(const int col_a,
int ai = 0;
int bi = 0;
-#define CERES_GEMM_OPT_MTM_MAT1X4_MUL \
- av = pa[ai]; \
- pb = b + bi; \
- c0 += av * *pb++; \
- c1 += av * *pb++; \
- c2 += av * *pb++; \
- c3 += av * *pb++; \
- ai += col_stride_a; \
+#define CERES_GEMM_OPT_MTM_MAT1X4_MUL \
+ av = pa[ai]; \
+ pb = b + bi; \
+ c0 += av * *pb++; \
+ c1 += av * *pb++; \
+ c2 += av * *pb++; \
+ c3 += av * *pb++; \
+ ai += col_stride_a; \
bi += col_stride_b;
for (int k = 0; k < col_m; k += span) {
@@ -218,14 +218,16 @@ static inline void MVM_mat4x1(const int col_a,
CERES_GEMM_OPT_NAIVE_HEADER
double bv = 0.0;
-#define CERES_GEMM_OPT_MVM_MAT4X1_MUL \
- bv = *pb; \
- c0 += *(pa ) * bv; \
- c1 += *(pa + col_stride_a ) * bv; \
- c2 += *(pa + col_stride_a * 2) * bv; \
- c3 += *(pa + col_stride_a * 3) * bv; \
- pa++; \
+ // clang-format off
+#define CERES_GEMM_OPT_MVM_MAT4X1_MUL \
+ bv = *pb; \
+ c0 += *(pa ) * bv; \
+ c1 += *(pa + col_stride_a ) * bv; \
+ c2 += *(pa + col_stride_a * 2) * bv; \
+ c3 += *(pa + col_stride_a * 3) * bv; \
+ pa++; \
pb++;
+ // clang-format on
for (int k = 0; k < col_m; k += span) {
CERES_GEMM_OPT_MVM_MAT4X1_MUL
@@ -281,14 +283,16 @@ static inline void MTV_mat4x1(const int col_a,
CERES_GEMM_OPT_NAIVE_HEADER
double bv = 0.0;
-#define CERES_GEMM_OPT_MTV_MAT4X1_MUL \
- bv = *pb; \
- c0 += *(pa ) * bv; \
- c1 += *(pa + 1) * bv; \
- c2 += *(pa + 2) * bv; \
- c3 += *(pa + 3) * bv; \
- pa += col_stride_a; \
+ // clang-format off
+#define CERES_GEMM_OPT_MTV_MAT4X1_MUL \
+ bv = *pb; \
+ c0 += *(pa ) * bv; \
+ c1 += *(pa + 1) * bv; \
+ c2 += *(pa + 2) * bv; \
+ c3 += *(pa + 3) * bv; \
+ pa += col_stride_a; \
pb++;
+ // clang-format on
for (int k = 0; k < col_m; k += span) {
CERES_GEMM_OPT_MTV_MAT4X1_MUL
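Note: the reindented CERES_GEMM_OPT_* macros preserve the structure set up by CERES_GEMM_OPT_NAIVE_HEADER: the column count is split into a multiple-of-four main part (col_m) and a remainder (col_r = col_a & (span - 1)), the main loop runs the unrolled 1x4 / 4x1 bodies, and the leftover columns are handled separately. The same split applied to a plain dot product, as a sketch:

// 4-way unrolled dot product using the same main-part/remainder split as
// CERES_GEMM_OPT_NAIVE_HEADER (span = 4, remainder = n & 3).
inline double UnrolledDot(const double* a, const double* b, int n) {
  const int span = 4;
  const int col_r = n & (span - 1);  // 0..3 trailing elements
  const int col_m = n - col_r;       // largest multiple of 4 <= n
  double c0 = 0.0, c1 = 0.0, c2 = 0.0, c3 = 0.0;
  for (int k = 0; k < col_m; k += span) {
    c0 += a[k + 0] * b[k + 0];
    c1 += a[k + 1] * b[k + 1];
    c2 += a[k + 2] * b[k + 2];
    c3 += a[k + 3] * b[k + 3];
  }
  double tail = 0.0;
  for (int k = col_m; k < n; ++k) {
    tail += a[k] * b[k];  // remainder columns
  }
  return (c0 + c1) + (c2 + c3) + tail;
}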
diff --git a/extern/ceres/internal/ceres/solver.cc b/extern/ceres/internal/ceres/solver.cc
index 861d8d30485..dfde1221b61 100644
--- a/extern/ceres/internal/ceres/solver.cc
+++ b/extern/ceres/internal/ceres/solver.cc
@@ -56,34 +56,34 @@
namespace ceres {
namespace {
+using internal::StringAppendF;
+using internal::StringPrintf;
using std::map;
using std::string;
using std::vector;
-using internal::StringAppendF;
-using internal::StringPrintf;
-#define OPTION_OP(x, y, OP) \
- if (!(options.x OP y)) { \
- std::stringstream ss; \
- ss << "Invalid configuration. "; \
- ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
- ss << "Violated constraint: "; \
- ss << string("Solver::Options::" #x " " #OP " "#y); \
- *error = ss.str(); \
- return false; \
- }
-
-#define OPTION_OP_OPTION(x, y, OP) \
- if (!(options.x OP options.y)) { \
- std::stringstream ss; \
- ss << "Invalid configuration. "; \
- ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
- ss << string("Solver::Options::" #y " = ") << options.y << ". "; \
- ss << "Violated constraint: "; \
- ss << string("Solver::Options::" #x); \
- ss << string(#OP " Solver::Options::" #y "."); \
- *error = ss.str(); \
- return false; \
+#define OPTION_OP(x, y, OP) \
+ if (!(options.x OP y)) { \
+ std::stringstream ss; \
+ ss << "Invalid configuration. "; \
+ ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+ ss << "Violated constraint: "; \
+ ss << string("Solver::Options::" #x " " #OP " " #y); \
+ *error = ss.str(); \
+ return false; \
+ }
+
+#define OPTION_OP_OPTION(x, y, OP) \
+ if (!(options.x OP options.y)) { \
+ std::stringstream ss; \
+ ss << "Invalid configuration. "; \
+ ss << string("Solver::Options::" #x " = ") << options.x << ". "; \
+ ss << string("Solver::Options::" #y " = ") << options.y << ". "; \
+ ss << "Violated constraint: "; \
+ ss << string("Solver::Options::" #x); \
+ ss << string(#OP " Solver::Options::" #y "."); \
+ *error = ss.str(); \
+ return false; \
}
#define OPTION_GE(x, y) OPTION_OP(x, y, >=);
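Note: the OPTION_OP / OPTION_OP_OPTION rewrite above is alignment only; each use still checks one Solver::Options field against a constraint and, on violation, fills *error with an "Invalid configuration." message and returns false. A stand-alone version of what one such check boils down to (the field name here is just an example of the pattern, not a claim about which options solver.cc validates):

#include <sstream>
#include <string>

// Hand-expanded equivalent of an OPTION_GE(max_num_iterations, 0)-style check.
bool ValidateMaxNumIterations(int max_num_iterations, std::string* error) {
  if (!(max_num_iterations >= 0)) {
    std::stringstream ss;
    ss << "Invalid configuration. ";
    ss << "Solver::Options::max_num_iterations = " << max_num_iterations << ". ";
    ss << "Violated constraint: Solver::Options::max_num_iterations >= 0";
    *error = ss.str();
    return false;
  }
  return true;
}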
@@ -135,7 +135,8 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
if (options.linear_solver_type == ITERATIVE_SCHUR &&
options.use_explicit_schur_complement &&
options.preconditioner_type != SCHUR_JACOBI) {
- *error = "use_explicit_schur_complement only supports "
+ *error =
+ "use_explicit_schur_complement only supports "
"SCHUR_JACOBI as the preconditioner.";
return false;
}
@@ -174,7 +175,8 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
*error = StringPrintf(
"Can't use %s with "
"Solver::Options::sparse_linear_algebra_library_type = %s.",
- name, sparse_linear_algebra_library_name);
+ name,
+ sparse_linear_algebra_library_name);
return false;
} else if (!IsSparseLinearAlgebraLibraryTypeAvailable(
options.sparse_linear_algebra_library_type)) {
@@ -182,7 +184,8 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
"Can't use %s with "
"Solver::Options::sparse_linear_algebra_library_type = %s, "
"because support was not enabled when Ceres Solver was built.",
- name, sparse_linear_algebra_library_name);
+ name,
+ sparse_linear_algebra_library_name);
return false;
}
}
@@ -191,7 +194,8 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
if (options.trust_region_strategy_type == DOGLEG) {
if (options.linear_solver_type == ITERATIVE_SCHUR ||
options.linear_solver_type == CGNR) {
- *error = "DOGLEG only supports exact factorization based linear "
+ *error =
+ "DOGLEG only supports exact factorization based linear "
"solvers. If you want to use an iterative solver please "
"use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
return false;
@@ -207,12 +211,13 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
if (options.dynamic_sparsity) {
if (options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
- *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+ *error =
+ "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
return false;
}
if (options.sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
- *error = "ACCELERATE_SPARSE is not currently supported with dynamic "
- "sparsity.";
+ *error =
+ "ACCELERATE_SPARSE is not currently supported with dynamic sparsity.";
return false;
}
}
@@ -250,10 +255,11 @@ bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
options.line_search_direction_type == ceres::LBFGS) &&
options.line_search_type != ceres::WOLFE) {
*error =
- string("Invalid configuration: Solver::Options::line_search_type = ")
- + string(LineSearchTypeToString(options.line_search_type))
- + string(". When using (L)BFGS, "
- "Solver::Options::line_search_type must be set to WOLFE.");
+ string("Invalid configuration: Solver::Options::line_search_type = ") +
+ string(LineSearchTypeToString(options.line_search_type)) +
+ string(
+ ". When using (L)BFGS, "
+ "Solver::Options::line_search_type must be set to WOLFE.");
return false;
}
@@ -261,17 +267,18 @@ bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
// on max/min step size change during line search prevent bisection scaling
// from occurring. Warn only, as this is likely a user mistake, but one which
// does not prevent us from continuing.
- LOG_IF(WARNING,
- (options.line_search_interpolation_type == ceres::BISECTION &&
- (options.max_line_search_step_contraction > 0.5 ||
- options.min_line_search_step_contraction < 0.5)))
- << "Line search interpolation type is BISECTION, but specified "
- << "max_line_search_step_contraction: "
- << options.max_line_search_step_contraction << ", and "
- << "min_line_search_step_contraction: "
- << options.min_line_search_step_contraction
- << ", prevent bisection (0.5) scaling, continuing with solve regardless.";
-
+ if (options.line_search_interpolation_type == ceres::BISECTION &&
+ (options.max_line_search_step_contraction > 0.5 ||
+ options.min_line_search_step_contraction < 0.5)) {
+ LOG(WARNING)
+ << "Line search interpolation type is BISECTION, but specified "
+ << "max_line_search_step_contraction: "
+ << options.max_line_search_step_contraction << ", and "
+ << "min_line_search_step_contraction: "
+ << options.min_line_search_step_contraction
+ << ", prevent bisection (0.5) scaling, continuing with solve "
+ "regardless.";
+ }
return true;
}
@@ -298,20 +305,24 @@ void StringifyOrdering(const vector<int>& ordering, string* report) {
void SummarizeGivenProgram(const internal::Program& program,
Solver::Summary* summary) {
+ // clang-format off
summary->num_parameter_blocks = program.NumParameterBlocks();
summary->num_parameters = program.NumParameters();
summary->num_effective_parameters = program.NumEffectiveParameters();
summary->num_residual_blocks = program.NumResidualBlocks();
summary->num_residuals = program.NumResiduals();
+ // clang-format on
}
void SummarizeReducedProgram(const internal::Program& program,
Solver::Summary* summary) {
+ // clang-format off
summary->num_parameter_blocks_reduced = program.NumParameterBlocks();
summary->num_parameters_reduced = program.NumParameters();
summary->num_effective_parameters_reduced = program.NumEffectiveParameters();
summary->num_residual_blocks_reduced = program.NumResidualBlocks();
summary->num_residuals_reduced = program.NumResiduals();
+ // clang-format on
}
void PreSolveSummarize(const Solver::Options& options,
@@ -323,6 +334,7 @@ void PreSolveSummarize(const Solver::Options& options,
internal::OrderingToGroupSizes(options.inner_iteration_ordering.get(),
&(summary->inner_iteration_ordering_given));
+ // clang-format off
summary->dense_linear_algebra_library_type = options.dense_linear_algebra_library_type; // NOLINT
summary->dogleg_type = options.dogleg_type;
summary->inner_iteration_time_in_seconds = 0.0;
@@ -344,6 +356,7 @@ void PreSolveSummarize(const Solver::Options& options,
summary->sparse_linear_algebra_library_type = options.sparse_linear_algebra_library_type; // NOLINT
summary->trust_region_strategy_type = options.trust_region_strategy_type; // NOLINT
summary->visibility_clustering_type = options.visibility_clustering_type; // NOLINT
+ // clang-format on
}
void PostSolveSummarize(const internal::PreprocessedProblem& pp,
@@ -353,10 +366,12 @@ void PostSolveSummarize(const internal::PreprocessedProblem& pp,
internal::OrderingToGroupSizes(pp.options.inner_iteration_ordering.get(),
&(summary->inner_iteration_ordering_used));
+ // clang-format off
summary->inner_iterations_used = pp.inner_iteration_minimizer.get() != NULL; // NOLINT
summary->linear_solver_type_used = pp.linear_solver_options.type;
summary->num_threads_used = pp.options.num_threads;
summary->preconditioner_type_used = pp.options.preconditioner_type;
+ // clang-format on
internal::SetSummaryFinalCost(summary);
@@ -402,17 +417,19 @@ void PostSolveSummarize(const internal::PreprocessedProblem& pp,
}
}
-void Minimize(internal::PreprocessedProblem* pp,
- Solver::Summary* summary) {
- using internal::Program;
+void Minimize(internal::PreprocessedProblem* pp, Solver::Summary* summary) {
using internal::Minimizer;
+ using internal::Program;
Program* program = pp->reduced_program.get();
if (pp->reduced_program->NumParameterBlocks() == 0) {
- summary->message = "Function tolerance reached. "
+ summary->message =
+ "Function tolerance reached. "
"No non-constant parameter blocks found.";
summary->termination_type = CONVERGENCE;
- VLOG_IF(1, pp->options.logging_type != SILENT) << summary->message;
+ if (pp->options.logging_type != SILENT) {
+ VLOG(1) << summary->message;
+ }
summary->initial_cost = summary->fixed_cost;
summary->final_cost = summary->fixed_cost;
return;
@@ -421,31 +438,29 @@ void Minimize(internal::PreprocessedProblem* pp,
const Vector original_reduced_parameters = pp->reduced_parameters;
std::unique_ptr<Minimizer> minimizer(
Minimizer::Create(pp->options.minimizer_type));
- minimizer->Minimize(pp->minimizer_options,
- pp->reduced_parameters.data(),
- summary);
+ minimizer->Minimize(
+ pp->minimizer_options, pp->reduced_parameters.data(), summary);
program->StateVectorToParameterBlocks(
- summary->IsSolutionUsable()
- ? pp->reduced_parameters.data()
- : original_reduced_parameters.data());
+ summary->IsSolutionUsable() ? pp->reduced_parameters.data()
+ : original_reduced_parameters.data());
program->CopyParameterBlockStateToUserState();
}
std::string SchurStructureToString(const int row_block_size,
const int e_block_size,
const int f_block_size) {
- const std::string row =
- (row_block_size == Eigen::Dynamic)
- ? "d" : internal::StringPrintf("%d", row_block_size);
+ const std::string row = (row_block_size == Eigen::Dynamic)
+ ? "d"
+ : internal::StringPrintf("%d", row_block_size);
- const std::string e =
- (e_block_size == Eigen::Dynamic)
- ? "d" : internal::StringPrintf("%d", e_block_size);
+ const std::string e = (e_block_size == Eigen::Dynamic)
+ ? "d"
+ : internal::StringPrintf("%d", e_block_size);
- const std::string f =
- (f_block_size == Eigen::Dynamic)
- ? "d" : internal::StringPrintf("%d", f_block_size);
+ const std::string f = (f_block_size == Eigen::Dynamic)
+ ? "d"
+ : internal::StringPrintf("%d", f_block_size);
return internal::StringPrintf("%s,%s,%s", row.c_str(), e.c_str(), f.c_str());
}
@@ -503,12 +518,11 @@ void Solver::Solve(const Solver::Options& options,
Solver::Options modified_options = options;
if (options.check_gradients) {
modified_options.callbacks.push_back(&gradient_checking_callback);
- gradient_checking_problem.reset(
- CreateGradientCheckingProblemImpl(
- problem_impl,
- options.gradient_check_numeric_derivative_relative_step_size,
- options.gradient_check_relative_precision,
- &gradient_checking_callback));
+ gradient_checking_problem.reset(CreateGradientCheckingProblemImpl(
+ problem_impl,
+ options.gradient_check_numeric_derivative_relative_step_size,
+ options.gradient_check_relative_precision,
+ &gradient_checking_callback));
problem_impl = gradient_checking_problem.get();
program = problem_impl->mutable_program();
}
@@ -524,7 +538,8 @@ void Solver::Solve(const Solver::Options& options,
Preprocessor::Create(modified_options.minimizer_type));
PreprocessedProblem pp;
- const bool status = preprocessor->Preprocess(modified_options, problem_impl, &pp);
+ const bool status =
+ preprocessor->Preprocess(modified_options, problem_impl, &pp);
// We check the linear_solver_options.type rather than
// modified_options.linear_solver_type because, depending on the
@@ -538,17 +553,16 @@ void Solver::Solve(const Solver::Options& options,
int e_block_size;
int f_block_size;
DetectStructure(*static_cast<internal::BlockSparseMatrix*>(
- pp.minimizer_options.jacobian.get())
- ->block_structure(),
+ pp.minimizer_options.jacobian.get())
+ ->block_structure(),
pp.linear_solver_options.elimination_groups[0],
&row_block_size,
&e_block_size,
&f_block_size);
summary->schur_structure_given =
SchurStructureToString(row_block_size, e_block_size, f_block_size);
- internal::GetBestSchurTemplateSpecialization(&row_block_size,
- &e_block_size,
- &f_block_size);
+ internal::GetBestSchurTemplateSpecialization(
+ &row_block_size, &e_block_size, &f_block_size);
summary->schur_structure_used =
SchurStructureToString(row_block_size, e_block_size, f_block_size);
}
@@ -595,15 +609,16 @@ void Solve(const Solver::Options& options,
}
string Solver::Summary::BriefReport() const {
- return StringPrintf("Ceres Solver Report: "
- "Iterations: %d, "
- "Initial cost: %e, "
- "Final cost: %e, "
- "Termination: %s",
- num_successful_steps + num_unsuccessful_steps,
- initial_cost,
- final_cost,
- TerminationTypeToString(termination_type));
+ return StringPrintf(
+ "Ceres Solver Report: "
+ "Iterations: %d, "
+ "Initial cost: %e, "
+ "Final cost: %e, "
+ "Termination: %s",
+ num_successful_steps + num_unsuccessful_steps,
+ initial_cost,
+ final_cost,
+ TerminationTypeToString(termination_type));
}
string Solver::Summary::FullReport() const {
@@ -612,28 +627,39 @@ string Solver::Summary::FullReport() const {
string report = string("\nSolver Summary (v " + VersionString() + ")\n\n");
StringAppendF(&report, "%45s %21s\n", "Original", "Reduced");
- StringAppendF(&report, "Parameter blocks % 25d% 25d\n",
- num_parameter_blocks, num_parameter_blocks_reduced);
- StringAppendF(&report, "Parameters % 25d% 25d\n",
- num_parameters, num_parameters_reduced);
+ StringAppendF(&report,
+ "Parameter blocks % 25d% 25d\n",
+ num_parameter_blocks,
+ num_parameter_blocks_reduced);
+ StringAppendF(&report,
+ "Parameters % 25d% 25d\n",
+ num_parameters,
+ num_parameters_reduced);
if (num_effective_parameters_reduced != num_parameters_reduced) {
- StringAppendF(&report, "Effective parameters% 25d% 25d\n",
- num_effective_parameters, num_effective_parameters_reduced);
- }
- StringAppendF(&report, "Residual blocks % 25d% 25d\n",
- num_residual_blocks, num_residual_blocks_reduced);
- StringAppendF(&report, "Residuals % 25d% 25d\n",
- num_residuals, num_residuals_reduced);
+ StringAppendF(&report,
+ "Effective parameters% 25d% 25d\n",
+ num_effective_parameters,
+ num_effective_parameters_reduced);
+ }
+ StringAppendF(&report,
+ "Residual blocks % 25d% 25d\n",
+ num_residual_blocks,
+ num_residual_blocks_reduced);
+ StringAppendF(&report,
+ "Residuals % 25d% 25d\n",
+ num_residuals,
+ num_residuals_reduced);
if (minimizer_type == TRUST_REGION) {
// TRUST_SEARCH HEADER
- StringAppendF(&report, "\nMinimizer %19s\n",
- "TRUST_REGION");
+ StringAppendF(
+ &report, "\nMinimizer %19s\n", "TRUST_REGION");
if (linear_solver_type_used == DENSE_NORMAL_CHOLESKY ||
linear_solver_type_used == DENSE_SCHUR ||
linear_solver_type_used == DENSE_QR) {
- StringAppendF(&report, "\nDense linear algebra library %15s\n",
+ StringAppendF(&report,
+ "\nDense linear algebra library %15s\n",
DenseLinearAlgebraLibraryTypeToString(
dense_linear_algebra_library_type));
}
@@ -643,14 +669,15 @@ string Solver::Summary::FullReport() const {
(linear_solver_type_used == ITERATIVE_SCHUR &&
(preconditioner_type_used == CLUSTER_JACOBI ||
preconditioner_type_used == CLUSTER_TRIDIAGONAL))) {
- StringAppendF(&report, "\nSparse linear algebra library %15s\n",
+ StringAppendF(&report,
+ "\nSparse linear algebra library %15s\n",
SparseLinearAlgebraLibraryTypeToString(
sparse_linear_algebra_library_type));
}
- StringAppendF(&report, "Trust region strategy %19s",
- TrustRegionStrategyTypeToString(
- trust_region_strategy_type));
+ StringAppendF(&report,
+ "Trust region strategy %19s",
+ TrustRegionStrategyTypeToString(trust_region_strategy_type));
if (trust_region_strategy_type == DOGLEG) {
if (dogleg_type == TRADITIONAL_DOGLEG) {
StringAppendF(&report, " (TRADITIONAL)");
@@ -661,28 +688,32 @@ string Solver::Summary::FullReport() const {
StringAppendF(&report, "\n");
StringAppendF(&report, "\n");
- StringAppendF(&report, "%45s %21s\n", "Given", "Used");
- StringAppendF(&report, "Linear solver %25s%25s\n",
+ StringAppendF(&report, "%45s %21s\n", "Given", "Used");
+ StringAppendF(&report,
+ "Linear solver %25s%25s\n",
LinearSolverTypeToString(linear_solver_type_given),
LinearSolverTypeToString(linear_solver_type_used));
if (linear_solver_type_given == CGNR ||
linear_solver_type_given == ITERATIVE_SCHUR) {
- StringAppendF(&report, "Preconditioner %25s%25s\n",
+ StringAppendF(&report,
+ "Preconditioner %25s%25s\n",
PreconditionerTypeToString(preconditioner_type_given),
PreconditionerTypeToString(preconditioner_type_used));
}
if (preconditioner_type_used == CLUSTER_JACOBI ||
preconditioner_type_used == CLUSTER_TRIDIAGONAL) {
- StringAppendF(&report, "Visibility clustering%24s%25s\n",
- VisibilityClusteringTypeToString(
- visibility_clustering_type),
- VisibilityClusteringTypeToString(
- visibility_clustering_type));
+ StringAppendF(
+ &report,
+ "Visibility clustering%24s%25s\n",
+ VisibilityClusteringTypeToString(visibility_clustering_type),
+ VisibilityClusteringTypeToString(visibility_clustering_type));
}
- StringAppendF(&report, "Threads % 25d% 25d\n",
- num_threads_given, num_threads_used);
+ StringAppendF(&report,
+ "Threads % 25d% 25d\n",
+ num_threads_given,
+ num_threads_used);
string given;
StringifyOrdering(linear_solver_ordering_given, &given);
@@ -711,68 +742,71 @@ string Solver::Summary::FullReport() const {
StringifyOrdering(inner_iteration_ordering_given, &given);
string used;
StringifyOrdering(inner_iteration_ordering_used, &used);
- StringAppendF(&report,
- "Inner iteration ordering %20s %24s\n",
- given.c_str(),
- used.c_str());
+ StringAppendF(&report,
+ "Inner iteration ordering %20s %24s\n",
+ given.c_str(),
+ used.c_str());
}
} else {
// LINE_SEARCH HEADER
StringAppendF(&report, "\nMinimizer %19s\n", "LINE_SEARCH");
-
string line_search_direction_string;
if (line_search_direction_type == LBFGS) {
line_search_direction_string = StringPrintf("LBFGS (%d)", max_lbfgs_rank);
} else if (line_search_direction_type == NONLINEAR_CONJUGATE_GRADIENT) {
- line_search_direction_string =
- NonlinearConjugateGradientTypeToString(
- nonlinear_conjugate_gradient_type);
+ line_search_direction_string = NonlinearConjugateGradientTypeToString(
+ nonlinear_conjugate_gradient_type);
} else {
line_search_direction_string =
LineSearchDirectionTypeToString(line_search_direction_type);
}
- StringAppendF(&report, "Line search direction %19s\n",
+ StringAppendF(&report,
+ "Line search direction %19s\n",
line_search_direction_string.c_str());
- const string line_search_type_string =
- StringPrintf("%s %s",
- LineSearchInterpolationTypeToString(
- line_search_interpolation_type),
- LineSearchTypeToString(line_search_type));
- StringAppendF(&report, "Line search type %19s\n",
+ const string line_search_type_string = StringPrintf(
+ "%s %s",
+ LineSearchInterpolationTypeToString(line_search_interpolation_type),
+ LineSearchTypeToString(line_search_type));
+ StringAppendF(&report,
+ "Line search type %19s\n",
line_search_type_string.c_str());
StringAppendF(&report, "\n");
- StringAppendF(&report, "%45s %21s\n", "Given", "Used");
- StringAppendF(&report, "Threads % 25d% 25d\n",
- num_threads_given, num_threads_used);
+ StringAppendF(&report, "%45s %21s\n", "Given", "Used");
+ StringAppendF(&report,
+ "Threads % 25d% 25d\n",
+ num_threads_given,
+ num_threads_used);
}
StringAppendF(&report, "\nCost:\n");
StringAppendF(&report, "Initial % 30e\n", initial_cost);
- if (termination_type != FAILURE &&
- termination_type != USER_FAILURE) {
+ if (termination_type != FAILURE && termination_type != USER_FAILURE) {
StringAppendF(&report, "Final % 30e\n", final_cost);
- StringAppendF(&report, "Change % 30e\n",
- initial_cost - final_cost);
+ StringAppendF(&report, "Change % 30e\n", initial_cost - final_cost);
}
- StringAppendF(&report, "\nMinimizer iterations % 16d\n",
+ StringAppendF(&report,
+ "\nMinimizer iterations % 16d\n",
num_successful_steps + num_unsuccessful_steps);
// Successful/Unsuccessful steps only matter in the case of the
// trust region solver. Line search terminates when it encounters
// the first unsuccessful step.
if (minimizer_type == TRUST_REGION) {
- StringAppendF(&report, "Successful steps % 14d\n",
+ StringAppendF(&report,
+ "Successful steps % 14d\n",
num_successful_steps);
- StringAppendF(&report, "Unsuccessful steps % 14d\n",
+ StringAppendF(&report,
+ "Unsuccessful steps % 14d\n",
num_unsuccessful_steps);
}
if (inner_iterations_used) {
- StringAppendF(&report, "Steps with inner iterations % 14d\n",
+ StringAppendF(&report,
+ "Steps with inner iterations % 14d\n",
num_inner_iteration_steps);
}
@@ -781,53 +815,66 @@ string Solver::Summary::FullReport() const {
(minimizer_type == TRUST_REGION && is_constrained));
if (line_search_used) {
- StringAppendF(&report, "Line search steps % 14d\n",
+ StringAppendF(&report,
+ "Line search steps % 14d\n",
num_line_search_steps);
}
StringAppendF(&report, "\nTime (in seconds):\n");
- StringAppendF(&report, "Preprocessor %25.6f\n",
- preprocessor_time_in_seconds);
+ StringAppendF(
+ &report, "Preprocessor %25.6f\n", preprocessor_time_in_seconds);
- StringAppendF(&report, "\n Residual only evaluation %18.6f (%d)\n",
- residual_evaluation_time_in_seconds, num_residual_evaluations);
+ StringAppendF(&report,
+ "\n Residual only evaluation %18.6f (%d)\n",
+ residual_evaluation_time_in_seconds,
+ num_residual_evaluations);
if (line_search_used) {
- StringAppendF(&report, " Line search cost evaluation %10.6f\n",
+ StringAppendF(&report,
+ " Line search cost evaluation %10.6f\n",
line_search_cost_evaluation_time_in_seconds);
}
- StringAppendF(&report, " Jacobian & residual evaluation %12.6f (%d)\n",
- jacobian_evaluation_time_in_seconds, num_jacobian_evaluations);
+ StringAppendF(&report,
+ " Jacobian & residual evaluation %12.6f (%d)\n",
+ jacobian_evaluation_time_in_seconds,
+ num_jacobian_evaluations);
if (line_search_used) {
- StringAppendF(&report, " Line search gradient evaluation %6.6f\n",
+ StringAppendF(&report,
+ " Line search gradient evaluation %6.6f\n",
line_search_gradient_evaluation_time_in_seconds);
}
if (minimizer_type == TRUST_REGION) {
- StringAppendF(&report, " Linear solver %23.6f (%d)\n",
- linear_solver_time_in_seconds, num_linear_solves);
+ StringAppendF(&report,
+ " Linear solver %23.6f (%d)\n",
+ linear_solver_time_in_seconds,
+ num_linear_solves);
}
if (inner_iterations_used) {
- StringAppendF(&report, " Inner iterations %23.6f\n",
+ StringAppendF(&report,
+ " Inner iterations %23.6f\n",
inner_iteration_time_in_seconds);
}
if (line_search_used) {
- StringAppendF(&report, " Line search polynomial minimization %.6f\n",
+ StringAppendF(&report,
+ " Line search polynomial minimization %.6f\n",
line_search_polynomial_minimization_time_in_seconds);
}
- StringAppendF(&report, "Minimizer %25.6f\n\n",
- minimizer_time_in_seconds);
+ StringAppendF(
+ &report, "Minimizer %25.6f\n\n", minimizer_time_in_seconds);
- StringAppendF(&report, "Postprocessor %24.6f\n",
- postprocessor_time_in_seconds);
+ StringAppendF(
+ &report, "Postprocessor %24.6f\n", postprocessor_time_in_seconds);
- StringAppendF(&report, "Total %25.6f\n\n",
- total_time_in_seconds);
+ StringAppendF(
+ &report, "Total %25.6f\n\n", total_time_in_seconds);
- StringAppendF(&report, "Termination: %25s (%s)\n",
- TerminationTypeToString(termination_type), message.c_str());
+ StringAppendF(&report,
+ "Termination: %25s (%s)\n",
+ TerminationTypeToString(termination_type),
+ message.c_str());
return report;
}
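The report-building code reformatted above is exercised through the public Solver API. A minimal usage sketch, assuming a ceres::Problem named `problem` has already been populated elsewhere:

#include <iostream>
#include "ceres/ceres.h"

// Sketch only: `problem` is assumed to be a fully populated ceres::Problem.
void SolveAndReport(ceres::Problem& problem) {
  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  std::cout << summary.BriefReport() << "\n";  // the single-line report
  std::cout << summary.FullReport() << "\n";   // the multi-section report assembled above
}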
diff --git a/extern/ceres/internal/ceres/solver_utils.cc b/extern/ceres/internal/ceres/solver_utils.cc
index 177a928e090..eb5aafa061c 100644
--- a/extern/ceres/internal/ceres/solver_utils.cc
+++ b/extern/ceres/internal/ceres/solver_utils.cc
@@ -28,22 +28,24 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include <string>
+#include "ceres/solver_utils.h"
-#include "ceres/internal/config.h"
+#include <string>
#include "Eigen/Core"
+#include "ceres/internal/config.h"
#include "ceres/internal/port.h"
-#include "ceres/solver_utils.h"
#include "ceres/version.h"
namespace ceres {
namespace internal {
-#define CERES_EIGEN_VERSION \
- CERES_TO_STRING(EIGEN_WORLD_VERSION) "." \
- CERES_TO_STRING(EIGEN_MAJOR_VERSION) "." \
+// clang-format off
+#define CERES_EIGEN_VERSION \
+ CERES_TO_STRING(EIGEN_WORLD_VERSION) "." \
+ CERES_TO_STRING(EIGEN_MAJOR_VERSION) "." \
CERES_TO_STRING(EIGEN_MINOR_VERSION)
+// clang-format on
std::string VersionString() {
std::string value = std::string(CERES_VERSION_STRING);
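The CERES_EIGEN_VERSION macro above combines two-level stringification with adjacent string-literal concatenation. A self-contained sketch of the same technique; STR/STR_IMPL and the DEMO_* values are illustrative stand-ins, not Ceres or Eigen definitions:

#include <cstdio>

#define STR_IMPL(x) #x
#define STR(x) STR_IMPL(x)  // extra level so macro arguments are expanded first

#define DEMO_WORLD 3
#define DEMO_MAJOR 3
#define DEMO_MINOR 7
#define DEMO_VERSION STR(DEMO_WORLD) "." STR(DEMO_MAJOR) "." STR(DEMO_MINOR)

int main() {
  std::puts(DEMO_VERSION);  // prints 3.3.7
  return 0;
}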
diff --git a/extern/ceres/internal/ceres/sparse_cholesky.cc b/extern/ceres/internal/ceres/sparse_cholesky.cc
index d9d2100d3f9..91cdf671b1a 100644
--- a/extern/ceres/internal/ceres/sparse_cholesky.cc
+++ b/extern/ceres/internal/ceres/sparse_cholesky.cc
@@ -56,7 +56,6 @@ std::unique_ptr<SparseCholesky> SparseCholesky::Create(
}
break;
#else
- (void)ordering_type;
LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
#endif
@@ -90,7 +89,8 @@ std::unique_ptr<SparseCholesky> SparseCholesky::Create(
if (options.use_mixed_precision_solves) {
sparse_cholesky = AppleAccelerateCholesky<float>::Create(ordering_type);
} else {
- sparse_cholesky = AppleAccelerateCholesky<double>::Create(ordering_type);
+ sparse_cholesky =
+ AppleAccelerateCholesky<double>::Create(ordering_type);
}
break;
#else
diff --git a/extern/ceres/internal/ceres/sparse_cholesky.h b/extern/ceres/internal/ceres/sparse_cholesky.h
index bbe42370505..a6af6b2c207 100644
--- a/extern/ceres/internal/ceres/sparse_cholesky.h
+++ b/extern/ceres/internal/ceres/sparse_cholesky.h
@@ -32,9 +32,12 @@
#define CERES_INTERNAL_SPARSE_CHOLESKY_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include <memory>
+
#include "ceres/linear_solver.h"
#include "glog/logging.h"
@@ -64,7 +67,7 @@ namespace internal {
// CHECK_EQ(sparse_cholesky->Solve(rhs.data(), solution.data(), &message),
// LINEAR_SOLVER_SUCCESS);
-class SparseCholesky {
+class CERES_EXPORT_INTERNAL SparseCholesky {
public:
static std::unique_ptr<SparseCholesky> Create(
const LinearSolver::Options& options);
@@ -88,8 +91,8 @@ class SparseCholesky {
// Subsequent calls to Factorize will use that symbolic
// factorization assuming that the sparsity of the matrix has
// remained constant.
- virtual LinearSolverTerminationType Factorize(
- CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+ virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) = 0;
// Computes the solution to the equation
//
@@ -106,22 +109,21 @@ class SparseCholesky {
const double* rhs,
double* solution,
std::string* message);
-
};
class IterativeRefiner;
// Computes an initial solution using the given instance of
// SparseCholesky, and then refines it using the IterativeRefiner.
-class RefinedSparseCholesky : public SparseCholesky {
+class CERES_EXPORT_INTERNAL RefinedSparseCholesky : public SparseCholesky {
public:
RefinedSparseCholesky(std::unique_ptr<SparseCholesky> sparse_cholesky,
std::unique_ptr<IterativeRefiner> iterative_refiner);
virtual ~RefinedSparseCholesky();
virtual CompressedRowSparseMatrix::StorageType StorageType() const;
- virtual LinearSolverTerminationType Factorize(
- CompressedRowSparseMatrix* lhs, std::string* message);
+ virtual LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message);
virtual LinearSolverTerminationType Solve(const double* rhs,
double* solution,
std::string* message);
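A hedged sketch of how the interface declared above is meant to be used, built only from the signatures visible in this header; constructing the CompressedRowSparseMatrix and right-hand side, and choosing a sparse library the build actually supports, are assumed to happen elsewhere:

#include <memory>
#include <string>
#include "ceres/sparse_cholesky.h"
#include "glog/logging.h"

using namespace ceres::internal;

// Sketch only: lhs, rhs and solution are assumed to be prepared by the caller.
void FactorizeAndSolve(CompressedRowSparseMatrix* lhs,
                       const double* rhs,
                       double* solution) {
  LinearSolver::Options options;  // library type must match the build
  std::unique_ptr<SparseCholesky> cholesky = SparseCholesky::Create(options);
  std::string message;
  CHECK_EQ(cholesky->Factorize(lhs, &message), LINEAR_SOLVER_SUCCESS) << message;
  CHECK_EQ(cholesky->Solve(rhs, solution, &message), LINEAR_SOLVER_SUCCESS) << message;
}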
diff --git a/extern/ceres/internal/ceres/sparse_matrix.cc b/extern/ceres/internal/ceres/sparse_matrix.cc
index f95ff3220bd..32388f58fc3 100644
--- a/extern/ceres/internal/ceres/sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/sparse_matrix.cc
@@ -33,8 +33,7 @@
namespace ceres {
namespace internal {
-SparseMatrix::~SparseMatrix() {
-}
+SparseMatrix::~SparseMatrix() {}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/sparse_matrix.h b/extern/ceres/internal/ceres/sparse_matrix.h
index 074d847807e..b57f10890fc 100644
--- a/extern/ceres/internal/ceres/sparse_matrix.h
+++ b/extern/ceres/internal/ceres/sparse_matrix.h
@@ -34,8 +34,10 @@
#define CERES_INTERNAL_SPARSE_MATRIX_H_
#include <cstdio>
-#include "ceres/linear_operator.h"
+
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_operator.h"
#include "ceres/types.h"
namespace ceres {
@@ -62,7 +64,7 @@ namespace internal {
// matrix type dependent and we are at this stage unable to come up
// with an efficient high level interface that spans multiple sparse
// matrix types.
-class SparseMatrix : public LinearOperator {
+class CERES_EXPORT_INTERNAL SparseMatrix : public LinearOperator {
public:
virtual ~SparseMatrix();
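Many classes in this diff (SparseMatrix, ThreadPool, TripletSparseMatrix, ...) gain the CERES_EXPORT_INTERNAL annotation from ceres/internal/port.h so their symbols stay visible across shared-library boundaries. A simplified illustration of the usual shape of such a macro; EXAMPLE_EXPORT is illustrative, not the actual Ceres definition:

#if defined(_MSC_VER) && defined(EXAMPLE_BUILDING_DLL)
#  define EXAMPLE_EXPORT __declspec(dllexport)
#elif defined(__GNUC__)
#  define EXAMPLE_EXPORT __attribute__((visibility("default")))
#else
#  define EXAMPLE_EXPORT
#endif

class EXAMPLE_EXPORT ExampleMatrix {
 public:
  int num_rows() const { return rows_; }

 private:
  int rows_ = 0;
};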
diff --git a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
index cbff2bdb3f6..ef3274323f5 100644
--- a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
@@ -35,9 +35,12 @@
#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
// This include must come before any #ifndef check on Ceres compile options.
+// clang-format off
#include "ceres/internal/port.h"
+// clang-format on
#include <vector>
+
#include "ceres/linear_solver.h"
namespace ceres {
@@ -58,11 +61,10 @@ class SparseNormalCholeskySolver : public BlockSparseMatrixSolver {
virtual ~SparseNormalCholeskySolver();
private:
- LinearSolver::Summary SolveImpl(
- BlockSparseMatrix* A,
- const double* b,
- const LinearSolver::PerSolveOptions& options,
- double* x) final;
+ LinearSolver::Summary SolveImpl(BlockSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x) final;
const LinearSolver::Options options_;
Vector rhs_;
diff --git a/extern/ceres/internal/ceres/split.cc b/extern/ceres/internal/ceres/split.cc
index 3a09e866839..804f4412deb 100644
--- a/extern/ceres/internal/ceres/split.cc
+++ b/extern/ceres/internal/ceres/split.cc
@@ -75,10 +75,9 @@ static int CalculateReserveForVector(const string& full, const char* delim) {
}
template <typename StringType, typename ITR>
-static inline
-void SplitStringToIteratorUsing(const StringType& full,
- const char* delim,
- ITR& result) {
+static inline void SplitStringToIteratorUsing(const StringType& full,
+ const char* delim,
+ ITR& result) {
// Optimize the common case where delim is a single character.
if (delim[0] != '\0' && delim[1] == '\0') {
char c = delim[0];
diff --git a/extern/ceres/internal/ceres/split.h b/extern/ceres/internal/ceres/split.h
index 94b773dee4d..f513023ec69 100644
--- a/extern/ceres/internal/ceres/split.h
+++ b/extern/ceres/internal/ceres/split.h
@@ -33,6 +33,7 @@
#include <string>
#include <vector>
+
#include "ceres/internal/port.h"
namespace ceres {
@@ -41,7 +42,8 @@ namespace internal {
// Split a string using one or more character delimiters, presented as a
// nul-terminated c string. Append the components to 'result'. If there are
// consecutive delimiters, this function skips over all of them.
-void SplitStringUsing(const std::string& full, const char* delim,
+void SplitStringUsing(const std::string& full,
+ const char* delim,
std::vector<std::string>* res);
} // namespace internal
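A hedged usage sketch for the declaration above, following its documented behaviour (consecutive delimiters are skipped); the input string is made up for illustration:

#include <string>
#include <vector>

#include "ceres/split.h"

void SplitExample() {
  std::vector<std::string> parts;
  ceres::internal::SplitStringUsing("x,,y;z", ",;", &parts);
  // parts now holds {"x", "y", "z"}: empty pieces between consecutive
  // delimiters are dropped, per the comment above.
}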
diff --git a/extern/ceres/internal/ceres/stl_util.h b/extern/ceres/internal/ceres/stl_util.h
index 0595a4cf2e9..d3411b73376 100644
--- a/extern/ceres/internal/ceres/stl_util.h
+++ b/extern/ceres/internal/ceres/stl_util.h
@@ -46,8 +46,7 @@ namespace ceres {
// advanced, which could result in the hash function trying to deference a
// stale pointer.
template <class ForwardIterator>
-void STLDeleteContainerPointers(ForwardIterator begin,
- ForwardIterator end) {
+void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
while (begin != end) {
ForwardIterator temp = begin;
++begin;
@@ -80,7 +79,7 @@ void STLDeleteUniqueContainerPointers(ForwardIterator begin,
// ElementDeleter (defined below), which ensures that your container's elements
// are deleted when the ElementDeleter goes out of scope.
template <class T>
-void STLDeleteElements(T *container) {
+void STLDeleteElements(T* container) {
if (!container) return;
STLDeleteContainerPointers(container->begin(), container->end());
container->clear();
diff --git a/extern/ceres/internal/ceres/stringprintf.cc b/extern/ceres/internal/ceres/stringprintf.cc
index 7a21f0e2118..b0e2acce8f9 100644
--- a/extern/ceres/internal/ceres/stringprintf.cc
+++ b/extern/ceres/internal/ceres/stringprintf.cc
@@ -62,7 +62,7 @@ void StringAppendV(string* dst, const char* format, va_list ap) {
return;
}
-#if defined (_MSC_VER)
+#if defined(_MSC_VER)
// Error or MSVC running out of space. MSVC 8.0 and higher
// can be asked about space needed with the special idiom below:
va_copy(backup_ap, ap);
@@ -78,7 +78,7 @@ void StringAppendV(string* dst, const char* format, va_list ap) {
// Increase the buffer size to the size requested by vsnprintf,
// plus one for the closing \0.
- int length = result+1;
+ int length = result + 1;
char* buf = new char[length];
// Restore the va_list before we use it again
@@ -93,7 +93,6 @@ void StringAppendV(string* dst, const char* format, va_list ap) {
delete[] buf;
}
-
string StringPrintf(const char* format, ...) {
va_list ap;
va_start(ap, format);
diff --git a/extern/ceres/internal/ceres/stringprintf.h b/extern/ceres/internal/ceres/stringprintf.h
index feeb9c23430..4d512784905 100644
--- a/extern/ceres/internal/ceres/stringprintf.h
+++ b/extern/ceres/internal/ceres/stringprintf.h
@@ -55,31 +55,36 @@ namespace internal {
// have an implicit 'this' argument, the arguments of such methods
// should be counted from two, not one."
#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check) \
- __attribute__((__format__ (__printf__, string_index, first_to_check)))
+ __attribute__((__format__(__printf__, string_index, first_to_check)))
#define CERES_SCANF_ATTRIBUTE(string_index, first_to_check) \
- __attribute__((__format__ (__scanf__, string_index, first_to_check)))
+ __attribute__((__format__(__scanf__, string_index, first_to_check)))
#else
#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check)
#endif
// Return a C++ string.
-extern std::string StringPrintf(const char* format, ...)
+CERES_EXPORT_INTERNAL extern std::string StringPrintf(const char* format, ...)
// Tell the compiler to do printf format string checking.
CERES_PRINTF_ATTRIBUTE(1, 2);
// Store result into a supplied string and return it.
-extern const std::string& SStringPrintf(std::string* dst, const char* format, ...)
+CERES_EXPORT_INTERNAL extern const std::string& SStringPrintf(
+ std::string* dst, const char* format, ...)
// Tell the compiler to do printf format string checking.
CERES_PRINTF_ATTRIBUTE(2, 3);
// Append result to a supplied string.
-extern void StringAppendF(std::string* dst, const char* format, ...)
+CERES_EXPORT_INTERNAL extern void StringAppendF(std::string* dst,
+ const char* format,
+ ...)
// Tell the compiler to do printf format string checking.
CERES_PRINTF_ATTRIBUTE(2, 3);
// Lower-level routine that takes a va_list and appends to a specified string.
// All other routines are just convenience wrappers around it.
-extern void StringAppendV(std::string* dst, const char* format, va_list ap);
+CERES_EXPORT_INTERNAL extern void StringAppendV(std::string* dst,
+ const char* format,
+ va_list ap);
#undef CERES_PRINTF_ATTRIBUTE
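A hedged usage sketch for the helpers declared above, mirroring how the reformatted solver.cc code assembles its report (the format strings are taken from that file):

#include <string>

#include "ceres/stringprintf.h"

std::string ThreadsLine(int num_threads_given, int num_threads_used) {
  std::string report =
      ceres::internal::StringPrintf("%45s %21s\n", "Given", "Used");
  ceres::internal::StringAppendF(&report,
                                 "Threads % 25d% 25d\n",
                                 num_threads_given,
                                 num_threads_used);
  return report;
}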
diff --git a/extern/ceres/internal/ceres/subset_preconditioner.cc b/extern/ceres/internal/ceres/subset_preconditioner.cc
index 7c24ae9f288..779a34ae741 100644
--- a/extern/ceres/internal/ceres/subset_preconditioner.cc
+++ b/extern/ceres/internal/ceres/subset_preconditioner.cc
@@ -32,6 +32,7 @@
#include <memory>
#include <string>
+
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/inner_product_computer.h"
#include "ceres/linear_solver.h"
@@ -50,8 +51,7 @@ SubsetPreconditioner::SubsetPreconditioner(
LinearSolver::Options sparse_cholesky_options;
sparse_cholesky_options.sparse_linear_algebra_library_type =
options_.sparse_linear_algebra_library_type;
- sparse_cholesky_options.use_postordering =
- options_.use_postordering;
+ sparse_cholesky_options.use_postordering = options_.use_postordering;
sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
}
diff --git a/extern/ceres/internal/ceres/subset_preconditioner.h b/extern/ceres/internal/ceres/subset_preconditioner.h
index 6f3c9ecd052..9844a669f45 100644
--- a/extern/ceres/internal/ceres/subset_preconditioner.h
+++ b/extern/ceres/internal/ceres/subset_preconditioner.h
@@ -32,6 +32,8 @@
#define CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
#include <memory>
+
+#include "ceres/internal/port.h"
#include "ceres/preconditioner.h"
namespace ceres {
@@ -65,7 +67,8 @@ class InnerProductComputer;
// computationally expensive this preconditioner will be.
//
// See the tests for example usage.
-class SubsetPreconditioner : public BlockSparseMatrixPreconditioner {
+class CERES_EXPORT_INTERNAL SubsetPreconditioner
+ : public BlockSparseMatrixPreconditioner {
public:
SubsetPreconditioner(const Preconditioner::Options& options,
const BlockSparseMatrix& A);
diff --git a/extern/ceres/internal/ceres/suitesparse.cc b/extern/ceres/internal/ceres/suitesparse.cc
index 190d1755add..0d6f6bdfb88 100644
--- a/extern/ceres/internal/ceres/suitesparse.cc
+++ b/extern/ceres/internal/ceres/suitesparse.cc
@@ -32,13 +32,12 @@
#include "ceres/internal/port.h"
#ifndef CERES_NO_SUITESPARSE
-#include "ceres/suitesparse.h"
-
#include <vector>
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/linear_solver.h"
+#include "ceres/suitesparse.h"
#include "ceres/triplet_sparse_matrix.h"
#include "cholmod.h"
@@ -353,7 +352,8 @@ bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
std::unique_ptr<SparseCholesky> SuiteSparseCholesky::Create(
const OrderingType ordering_type) {
- return std::unique_ptr<SparseCholesky>(new SuiteSparseCholesky(ordering_type));
+ return std::unique_ptr<SparseCholesky>(
+ new SuiteSparseCholesky(ordering_type));
}
SuiteSparseCholesky::SuiteSparseCholesky(const OrderingType ordering_type)
diff --git a/extern/ceres/internal/ceres/suitesparse.h b/extern/ceres/internal/ceres/suitesparse.h
index b77b296a66e..5dcc53f0167 100644
--- a/extern/ceres/internal/ceres/suitesparse.h
+++ b/extern/ceres/internal/ceres/suitesparse.h
@@ -41,6 +41,7 @@
#include <cstring>
#include <string>
#include <vector>
+
#include "SuiteSparseQR.hpp"
#include "ceres/linear_solver.h"
#include "ceres/sparse_cholesky.h"
@@ -116,20 +117,23 @@ class SuiteSparse {
// for symmetric scaling which scales both the rows and the columns
// - diag(scale) * A * diag(scale).
void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
- cholmod_scale(scale, mode, A, &cc_);
+ cholmod_scale(scale, mode, A, &cc_);
}
// Create and return a matrix m = A * A'. Caller owns the
// result. The matrix A is not modified.
cholmod_sparse* AATranspose(cholmod_sparse* A) {
- cholmod_sparse*m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
+ cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
m->stype = 1; // Pay attention to the upper triangular part.
return m;
}
// y = alpha * A * x + beta * y. Only y is modified.
- void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
- cholmod_dense* x, cholmod_dense* y) {
+ void SparseDenseMultiply(cholmod_sparse* A,
+ double alpha,
+ double beta,
+ cholmod_dense* x,
+ cholmod_dense* y) {
double alpha_[2] = {alpha, 0};
double beta_[2] = {beta, 0};
cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
@@ -195,7 +199,9 @@ class SuiteSparse {
// NULL is returned. Caller owns the result.
//
// message contains an explanation of the failures if any.
- cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, std::string* message);
+ cholmod_dense* Solve(cholmod_factor* L,
+ cholmod_dense* b,
+ std::string* message);
// By virtue of the modeling layer in Ceres being block oriented,
// all the matrices used by Ceres are also block oriented. When
@@ -229,7 +235,6 @@ class SuiteSparse {
// ordering.
bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering);
-
// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
// if SuiteSparse was compiled with Metis support. This makes
// calling and linking into cholmod_camd problematic even though it
@@ -262,7 +267,7 @@ class SuiteSparse {
int* ordering);
void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
- void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
+ void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
void Print(cholmod_sparse* m, const std::string& name) {
@@ -285,17 +290,17 @@ class SuiteSparse {
class SuiteSparseCholesky : public SparseCholesky {
public:
- static std::unique_ptr<SparseCholesky> Create(
- OrderingType ordering_type);
+ static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
// SparseCholesky interface.
virtual ~SuiteSparseCholesky();
CompressedRowSparseMatrix::StorageType StorageType() const final;
- LinearSolverTerminationType Factorize(
- CompressedRowSparseMatrix* lhs, std::string* message) final;
+ LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) final;
LinearSolverTerminationType Solve(const double* rhs,
double* solution,
std::string* message) final;
+
private:
SuiteSparseCholesky(const OrderingType ordering_type);
diff --git a/extern/ceres/internal/ceres/thread_pool.cc b/extern/ceres/internal/ceres/thread_pool.cc
index 5a52c9d06a6..821431cedb4 100644
--- a/extern/ceres/internal/ceres/thread_pool.cc
+++ b/extern/ceres/internal/ceres/thread_pool.cc
@@ -33,11 +33,11 @@
#ifdef CERES_USE_CXX_THREADS
-#include "ceres/thread_pool.h"
-
#include <cmath>
#include <limits>
+#include "ceres/thread_pool.h"
+
namespace ceres {
namespace internal {
namespace {
@@ -53,16 +53,13 @@ int ThreadPool::MaxNumThreadsAvailable() {
const int num_hardware_threads = std::thread::hardware_concurrency();
// hardware_concurrency() can return 0 if the value is not well defined or not
// computable.
- return num_hardware_threads == 0
- ? std::numeric_limits<int>::max()
- : num_hardware_threads;
+ return num_hardware_threads == 0 ? std::numeric_limits<int>::max()
+ : num_hardware_threads;
}
-ThreadPool::ThreadPool() { }
+ThreadPool::ThreadPool() {}
-ThreadPool::ThreadPool(int num_threads) {
- Resize(num_threads);
-}
+ThreadPool::ThreadPool(int num_threads) { Resize(num_threads); }
ThreadPool::~ThreadPool() {
std::lock_guard<std::mutex> lock(thread_pool_mutex_);
@@ -106,11 +103,9 @@ void ThreadPool::ThreadMainLoop() {
}
}
-void ThreadPool::Stop() {
- task_queue_.StopWaiters();
-}
+void ThreadPool::Stop() { task_queue_.StopWaiters(); }
} // namespace internal
} // namespace ceres
-#endif // CERES_USE_CXX_THREADS
+#endif // CERES_USE_CXX_THREADS
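MaxNumThreadsAvailable(), reformatted above, leans on the documented fact that std::thread::hardware_concurrency() may return 0 when the value is not computable. A self-contained sketch of the same fallback:

#include <limits>
#include <thread>

int MaxThreadsOrUnlimited() {
  const int num_hardware_threads =
      static_cast<int>(std::thread::hardware_concurrency());
  // 0 means "unknown", which is treated as "no limit".
  return num_hardware_threads == 0 ? std::numeric_limits<int>::max()
                                   : num_hardware_threads;
}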
diff --git a/extern/ceres/internal/ceres/thread_pool.h b/extern/ceres/internal/ceres/thread_pool.h
index 1ebb52eb6b4..cdf6625e196 100644
--- a/extern/ceres/internal/ceres/thread_pool.h
+++ b/extern/ceres/internal/ceres/thread_pool.h
@@ -37,6 +37,7 @@
#include <vector>
#include "ceres/concurrent_queue.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -57,7 +58,7 @@ namespace internal {
// workers to stop. The workers will finish all of the tasks that have already
// been added to the thread pool.
//
-class ThreadPool {
+class CERES_EXPORT_INTERNAL ThreadPool {
public:
// Returns the maximum number of hardware threads.
static int MaxNumThreadsAvailable();
diff --git a/extern/ceres/internal/ceres/thread_token_provider.cc b/extern/ceres/internal/ceres/thread_token_provider.cc
index b04cf844488..c7ec67f31aa 100644
--- a/extern/ceres/internal/ceres/thread_token_provider.cc
+++ b/extern/ceres/internal/ceres/thread_token_provider.cc
@@ -44,7 +44,6 @@ ThreadTokenProvider::ThreadTokenProvider(int num_threads) {
pool_.Push(i);
}
#endif
-
}
int ThreadTokenProvider::Acquire() {
@@ -61,7 +60,6 @@ int ThreadTokenProvider::Acquire() {
CHECK(pool_.Wait(&thread_id));
return thread_id;
#endif
-
}
void ThreadTokenProvider::Release(int thread_id) {
@@ -69,7 +67,6 @@ void ThreadTokenProvider::Release(int thread_id) {
#ifdef CERES_USE_CXX_THREADS
pool_.Push(thread_id);
#endif
-
}
} // namespace internal
diff --git a/extern/ceres/internal/ceres/triplet_sparse_matrix.cc b/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
index 54b588ba466..5dbf0e7cd3a 100644
--- a/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
@@ -43,11 +43,7 @@ namespace ceres {
namespace internal {
TripletSparseMatrix::TripletSparseMatrix()
- : num_rows_(0),
- num_cols_(0),
- max_num_nonzeros_(0),
- num_nonzeros_(0) {}
-
+ : num_rows_(0), num_cols_(0), max_num_nonzeros_(0), num_nonzeros_(0) {}
TripletSparseMatrix::~TripletSparseMatrix() {}
@@ -111,9 +107,11 @@ TripletSparseMatrix& TripletSparseMatrix::operator=(
bool TripletSparseMatrix::AllTripletsWithinBounds() const {
for (int i = 0; i < num_nonzeros_; ++i) {
+ // clang-format off
if ((rows_[i] < 0) || (rows_[i] >= num_rows_) ||
(cols_[i] < 0) || (cols_[i] >= num_cols_))
return false;
+ // clang-format on
}
return true;
}
@@ -123,8 +121,7 @@ void TripletSparseMatrix::Reserve(int new_max_num_nonzeros) {
<< "Reallocation will cause data loss";
// Nothing to do if we have enough space already.
- if (new_max_num_nonzeros <= max_num_nonzeros_)
- return;
+ if (new_max_num_nonzeros <= max_num_nonzeros_) return;
int* new_rows = new int[new_max_num_nonzeros];
int* new_cols = new int[new_max_num_nonzeros];
@@ -168,15 +165,15 @@ void TripletSparseMatrix::CopyData(const TripletSparseMatrix& orig) {
}
}
-void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
+void TripletSparseMatrix::RightMultiply(const double* x, double* y) const {
for (int i = 0; i < num_nonzeros_; ++i) {
- y[rows_[i]] += values_[i]*x[cols_[i]];
+ y[rows_[i]] += values_[i] * x[cols_[i]];
}
}
void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
for (int i = 0; i < num_nonzeros_; ++i) {
- y[cols_[i]] += values_[i]*x[rows_[i]];
+ y[cols_[i]] += values_[i] * x[rows_[i]];
}
}
@@ -226,10 +223,9 @@ void TripletSparseMatrix::AppendCols(const TripletSparseMatrix& B) {
num_cols_ = num_cols_ + B.num_cols();
}
-
void TripletSparseMatrix::Resize(int new_num_rows, int new_num_cols) {
if ((new_num_rows >= num_rows_) && (new_num_cols >= num_cols_)) {
- num_rows_ = new_num_rows;
+ num_rows_ = new_num_rows;
num_cols_ = new_num_cols;
return;
}
@@ -245,9 +241,9 @@ void TripletSparseMatrix::Resize(int new_num_rows, int new_num_cols) {
for (int i = 0; i < num_nonzeros_; ++i) {
if ((r_ptr[i] < num_rows_) && (c_ptr[i] < num_cols_)) {
if (dropped_terms) {
- r_ptr[i-dropped_terms] = r_ptr[i];
- c_ptr[i-dropped_terms] = c_ptr[i];
- v_ptr[i-dropped_terms] = v_ptr[i];
+ r_ptr[i - dropped_terms] = r_ptr[i];
+ c_ptr[i - dropped_terms] = c_ptr[i];
+ v_ptr[i - dropped_terms] = v_ptr[i];
}
} else {
++dropped_terms;
diff --git a/extern/ceres/internal/ceres/triplet_sparse_matrix.h b/extern/ceres/internal/ceres/triplet_sparse_matrix.h
index 2ee0fa992b5..cc9fee572a2 100644
--- a/extern/ceres/internal/ceres/triplet_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/triplet_sparse_matrix.h
@@ -33,8 +33,10 @@
#include <memory>
#include <vector>
-#include "ceres/sparse_matrix.h"
+
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
+#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {
@@ -44,7 +46,7 @@ namespace internal {
// manipulate sparse matrices in triplet (i,j,s) form. This object is
// inspired by the design of the cholmod_triplet struct used in the
// SuiteSparse package and is memory layout compatible with it.
-class TripletSparseMatrix : public SparseMatrix {
+class CERES_EXPORT_INTERNAL TripletSparseMatrix : public SparseMatrix {
public:
TripletSparseMatrix();
TripletSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
@@ -68,11 +70,13 @@ class TripletSparseMatrix : public SparseMatrix {
void ScaleColumns(const double* scale) final;
void ToDenseMatrix(Matrix* dense_matrix) const final;
void ToTextFile(FILE* file) const final;
+ // clang-format off
int num_rows() const final { return num_rows_; }
int num_cols() const final { return num_cols_; }
int num_nonzeros() const final { return num_nonzeros_; }
const double* values() const final { return values_.get(); }
double* mutable_values() final { return values_.get(); }
+ // clang-format on
void set_num_nonzeros(int num_nonzeros);
// Increase max_num_nonzeros and correspondingly increase the size
@@ -94,11 +98,13 @@ class TripletSparseMatrix : public SparseMatrix {
// bounds are dropped and the num_non_zeros changed accordingly.
void Resize(int new_num_rows, int new_num_cols);
+ // clang-format off
int max_num_nonzeros() const { return max_num_nonzeros_; }
const int* rows() const { return rows_.get(); }
const int* cols() const { return cols_.get(); }
int* mutable_rows() { return rows_.get(); }
int* mutable_cols() { return cols_.get(); }
+ // clang-format on
// Returns true if the entries of the matrix obey the row, column,
// and column size bounds and false otherwise.
diff --git a/extern/ceres/internal/ceres/trust_region_minimizer.cc b/extern/ceres/internal/ceres/trust_region_minimizer.cc
index 7065977ad77..bcf05b3ddfb 100644
--- a/extern/ceres/internal/ceres/trust_region_minimizer.cc
+++ b/extern/ceres/internal/ceres/trust_region_minimizer.cc
@@ -83,6 +83,11 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
while (FinalizeIterationAndCheckIfMinimizerCanContinue()) {
iteration_start_time_in_secs_ = WallTimeInSeconds();
+
+ const double previous_gradient_norm = iteration_summary_.gradient_norm;
+ const double previous_gradient_max_norm =
+ iteration_summary_.gradient_max_norm;
+
iteration_summary_ = IterationSummary();
iteration_summary_.iteration =
solver_summary->iterations.back().iteration + 1;
@@ -113,10 +118,18 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
if (IsStepSuccessful()) {
RETURN_IF_ERROR_AND_LOG(HandleSuccessfulStep());
- continue;
+ } else {
+ // Declare the step unsuccessful and inform the trust region strategy.
+ iteration_summary_.step_is_successful = false;
+ iteration_summary_.cost = candidate_cost_ + solver_summary_->fixed_cost;
+
+ // When the step is unsuccessful, we do not compute the gradient
+ // (or update x), so we preserve its value from the last
+ // successful iteration.
+ iteration_summary_.gradient_norm = previous_gradient_norm;
+ iteration_summary_.gradient_max_norm = previous_gradient_max_norm;
+ strategy_->StepRejected(iteration_summary_.relative_decrease);
}
-
- HandleUnsuccessfulStep();
}
}
@@ -425,10 +438,12 @@ bool TrustRegionMinimizer::ComputeTrustRegionStep() {
num_consecutive_invalid_steps_ = 0;
}
- VLOG_IF(1, is_not_silent_ && !iteration_summary_.step_is_valid)
- << "Invalid step: current_cost: " << x_cost_
- << " absolute model cost change: " << model_cost_change_
- << " relative model cost change: " << (model_cost_change_ / x_cost_);
+ if (is_not_silent_ && !iteration_summary_.step_is_valid) {
+ VLOG(1) << "Invalid step: current_cost: " << x_cost_
+ << " absolute model cost change: " << model_cost_change_
+ << " relative model cost change: "
+ << (model_cost_change_ / x_cost_);
+ }
return true;
}
@@ -496,15 +511,17 @@ void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
nullptr,
nullptr,
nullptr)) {
- VLOG_IF(2, is_not_silent_) << "Inner iteration failed.";
+ if (is_not_silent_) {
+ VLOG(2) << "Inner iteration failed.";
+ }
return;
}
- VLOG_IF(2, is_not_silent_)
- << "Inner iteration succeeded; Current cost: " << x_cost_
- << " Trust region step cost: " << candidate_cost_
- << " Inner iteration cost: " << inner_iteration_cost;
-
+ if (is_not_silent_) {
+ VLOG(2) << "Inner iteration succeeded; Current cost: " << x_cost_
+ << " Trust region step cost: " << candidate_cost_
+ << " Inner iteration cost: " << inner_iteration_cost;
+ }
candidate_x_ = inner_iteration_x_;
// Normally, the quality of a trust region step is measured by
@@ -539,9 +556,10 @@ void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
// drops below tolerance.
inner_iterations_are_enabled_ =
(inner_iteration_relative_progress > options_.inner_iteration_tolerance);
- VLOG_IF(2, is_not_silent_ && !inner_iterations_are_enabled_)
- << "Disabling inner iterations. Progress : "
- << inner_iteration_relative_progress;
+ if (is_not_silent_ && !inner_iterations_are_enabled_) {
+ VLOG(2) << "Disabling inner iterations. Progress : "
+ << inner_iteration_relative_progress;
+ }
candidate_cost_ = inner_iteration_cost;
solver_summary_->inner_iteration_time_in_seconds +=
@@ -619,7 +637,9 @@ bool TrustRegionMinimizer::MaxSolverTimeReached() {
total_solver_time,
options_.max_solver_time_in_seconds);
solver_summary_->termination_type = NO_CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -637,7 +657,9 @@ bool TrustRegionMinimizer::MaxSolverIterationsReached() {
iteration_summary_.iteration);
solver_summary_->termination_type = NO_CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -655,7 +677,9 @@ bool TrustRegionMinimizer::GradientToleranceReached() {
iteration_summary_.gradient_max_norm,
options_.gradient_tolerance);
solver_summary_->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -672,7 +696,9 @@ bool TrustRegionMinimizer::MinTrustRegionRadiusReached() {
iteration_summary_.trust_region_radius,
options_.min_trust_region_radius);
solver_summary_->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -693,7 +719,9 @@ bool TrustRegionMinimizer::ParameterToleranceReached() {
(iteration_summary_.step_norm / (x_norm_ + options_.parameter_tolerance)),
options_.parameter_tolerance);
solver_summary_->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -713,7 +741,9 @@ bool TrustRegionMinimizer::FunctionToleranceReached() {
fabs(iteration_summary_.cost_change) / x_cost_,
options_.function_tolerance);
solver_summary_->termination_type = CONVERGENCE;
- VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
+ if (is_not_silent_) {
+ VLOG(1) << "Terminating: " << solver_summary_->message;
+ }
return true;
}
@@ -730,18 +760,20 @@ bool TrustRegionMinimizer::FunctionToleranceReached() {
// CostFunction objects.
void TrustRegionMinimizer::ComputeCandidatePointAndEvaluateCost() {
if (!evaluator_->Plus(x_.data(), delta_.data(), candidate_x_.data())) {
- LOG_IF(WARNING, is_not_silent_)
- << "x_plus_delta = Plus(x, delta) failed. "
- << "Treating it as a step with infinite cost";
+ if (is_not_silent_) {
+ LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. "
+ << "Treating it as a step with infinite cost";
+ }
candidate_cost_ = std::numeric_limits<double>::max();
return;
}
if (!evaluator_->Evaluate(
candidate_x_.data(), &candidate_cost_, nullptr, nullptr, nullptr)) {
- LOG_IF(WARNING, is_not_silent_)
- << "Step failed to evaluate. "
- << "Treating it as a step with infinite cost";
+ if (is_not_silent_) {
+ LOG(WARNING) << "Step failed to evaluate. "
+ << "Treating it as a step with infinite cost";
+ }
candidate_cost_ = std::numeric_limits<double>::max();
}
}
@@ -793,12 +825,5 @@ bool TrustRegionMinimizer::HandleSuccessfulStep() {
return true;
}
-// Declare the step unsuccessful and inform the trust region strategy.
-void TrustRegionMinimizer::HandleUnsuccessfulStep() {
- iteration_summary_.step_is_successful = false;
- strategy_->StepRejected(iteration_summary_.relative_decrease);
- iteration_summary_.cost = candidate_cost_ + solver_summary_->fixed_cost;
-}
-
} // namespace internal
} // namespace ceres
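Several hunks above replace glog's VLOG_IF stream macro with an explicit branch around VLOG; both forms emit the same record when the condition holds. A minimal sketch of the before/after pattern:

#include <string>

#include "glog/logging.h"

void LogTermination(bool is_not_silent, const std::string& message) {
  // Before: VLOG_IF(1, is_not_silent) << "Terminating: " << message;
  if (is_not_silent) {
    VLOG(1) << "Terminating: " << message;
  }
}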
diff --git a/extern/ceres/internal/ceres/trust_region_minimizer.h b/extern/ceres/internal/ceres/trust_region_minimizer.h
index b5c41225ddc..be4d40653c4 100644
--- a/extern/ceres/internal/ceres/trust_region_minimizer.h
+++ b/extern/ceres/internal/ceres/trust_region_minimizer.h
@@ -32,7 +32,9 @@
#define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
#include <memory>
+
#include "ceres/internal/eigen.h"
+#include "ceres/internal/port.h"
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include "ceres/sparse_matrix.h"
@@ -46,7 +48,7 @@ namespace internal {
// Generic trust region minimization algorithm.
//
// For example usage, see SolverImpl::Minimize.
-class TrustRegionMinimizer : public Minimizer {
+class CERES_EXPORT_INTERNAL TrustRegionMinimizer : public Minimizer {
public:
~TrustRegionMinimizer();
@@ -80,7 +82,6 @@ class TrustRegionMinimizer : public Minimizer {
bool MinTrustRegionRadiusReached();
bool IsStepSuccessful();
- void HandleUnsuccessfulStep();
bool HandleSuccessfulStep();
bool HandleInvalidStep();
diff --git a/extern/ceres/internal/ceres/trust_region_preprocessor.cc b/extern/ceres/internal/ceres/trust_region_preprocessor.cc
index b8c6b49d1ca..0943edbba85 100644
--- a/extern/ceres/internal/ceres/trust_region_preprocessor.cc
+++ b/extern/ceres/internal/ceres/trust_region_preprocessor.cc
@@ -101,8 +101,9 @@ void AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
LinearSolverTypeToString(linear_solver_type_given),
LinearSolverTypeToString(options->linear_solver_type));
}
-
- VLOG_IF(1, options->logging_type != SILENT) << message;
+ if (options->logging_type != SILENT) {
+ VLOG(1) << message;
+ }
}
// Reorder the program to reduce fill-in and increase cache coherency.
diff --git a/extern/ceres/internal/ceres/trust_region_preprocessor.h b/extern/ceres/internal/ceres/trust_region_preprocessor.h
index 9597905ae9a..2655abe4b2e 100644
--- a/extern/ceres/internal/ceres/trust_region_preprocessor.h
+++ b/extern/ceres/internal/ceres/trust_region_preprocessor.h
@@ -31,12 +31,13 @@
#ifndef CERES_INTERNAL_TRUST_REGION_PREPROCESSOR_H_
#define CERES_INTERNAL_TRUST_REGION_PREPROCESSOR_H_
+#include "ceres/internal/port.h"
#include "ceres/preprocessor.h"
namespace ceres {
namespace internal {
-class TrustRegionPreprocessor : public Preprocessor {
+class CERES_EXPORT_INTERNAL TrustRegionPreprocessor : public Preprocessor {
public:
virtual ~TrustRegionPreprocessor();
bool Preprocess(const Solver::Options& options,
diff --git a/extern/ceres/internal/ceres/trust_region_step_evaluator.cc b/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
index 33b0c41ead6..19045ae0070 100644
--- a/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
+++ b/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
@@ -28,17 +28,18 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "ceres/trust_region_step_evaluator.h"
+
#include <algorithm>
#include <limits>
-#include "ceres/trust_region_step_evaluator.h"
+
#include "glog/logging.h"
namespace ceres {
namespace internal {
TrustRegionStepEvaluator::TrustRegionStepEvaluator(
- const double initial_cost,
- const int max_consecutive_nonmonotonic_steps)
+ const double initial_cost, const int max_consecutive_nonmonotonic_steps)
: max_consecutive_nonmonotonic_steps_(max_consecutive_nonmonotonic_steps),
minimum_cost_(initial_cost),
current_cost_(initial_cost),
@@ -46,12 +47,10 @@ TrustRegionStepEvaluator::TrustRegionStepEvaluator(
candidate_cost_(initial_cost),
accumulated_reference_model_cost_change_(0.0),
accumulated_candidate_model_cost_change_(0.0),
- num_consecutive_nonmonotonic_steps_(0){
-}
+ num_consecutive_nonmonotonic_steps_(0) {}
double TrustRegionStepEvaluator::StepQuality(
- const double cost,
- const double model_cost_change) const {
+ const double cost, const double model_cost_change) const {
// If the function evaluation for this step was a failure, in which
// case the TrustRegionMinimizer would have set the cost to
// std::numeric_limits<double>::max(). In this case, the division by
@@ -68,9 +67,8 @@ double TrustRegionStepEvaluator::StepQuality(
return std::max(relative_decrease, historical_relative_decrease);
}
-void TrustRegionStepEvaluator::StepAccepted(
- const double cost,
- const double model_cost_change) {
+void TrustRegionStepEvaluator::StepAccepted(const double cost,
+ const double model_cost_change) {
// Algorithm 10.1.2 from Trust Region Methods by Conn, Gould &
// Toint.
//
diff --git a/extern/ceres/internal/ceres/trust_region_strategy.cc b/extern/ceres/internal/ceres/trust_region_strategy.cc
index 2db6a6c899b..7e429d5e557 100644
--- a/extern/ceres/internal/ceres/trust_region_strategy.cc
+++ b/extern/ceres/internal/ceres/trust_region_strategy.cc
@@ -31,6 +31,7 @@
// keir@google.com (Keir Mierle)
#include "ceres/trust_region_strategy.h"
+
#include "ceres/dogleg_strategy.h"
#include "ceres/levenberg_marquardt_strategy.h"
diff --git a/extern/ceres/internal/ceres/trust_region_strategy.h b/extern/ceres/internal/ceres/trust_region_strategy.h
index 5751691e5ec..176f73a4876 100644
--- a/extern/ceres/internal/ceres/trust_region_strategy.h
+++ b/extern/ceres/internal/ceres/trust_region_strategy.h
@@ -32,6 +32,7 @@
#define CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
#include <string>
+
#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
@@ -53,7 +54,7 @@ class SparseMatrix;
// the LevenbergMarquardtStrategy uses the inverse of the trust region
// radius to scale the damping term, which controls the step size, but
// does not set a hard limit on its size.
-class TrustRegionStrategy {
+class CERES_EXPORT_INTERNAL TrustRegionStrategy {
public:
struct Options {
TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
diff --git a/extern/ceres/internal/ceres/types.cc b/extern/ceres/internal/ceres/types.cc
index 93c4cfcb027..39bb2d8cc4d 100644
--- a/extern/ceres/internal/ceres/types.cc
+++ b/extern/ceres/internal/ceres/types.cc
@@ -28,18 +28,22 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "ceres/types.h"
+
#include <algorithm>
#include <cctype>
#include <string>
-#include "ceres/types.h"
+
#include "glog/logging.h"
namespace ceres {
using std::string;
+// clang-format off
#define CASESTR(x) case x: return #x
-#define STRENUM(x) if (value == #x) { *type = x; return true;}
+#define STRENUM(x) if (value == #x) { *type = x; return true; }
+// clang-format on
static void UpperCase(string* input) {
std::transform(input->begin(), input->end(), input->begin(), ::toupper);
@@ -109,8 +113,7 @@ const char* SparseLinearAlgebraLibraryTypeToString(
}
bool StringToSparseLinearAlgebraLibraryType(
- string value,
- SparseLinearAlgebraLibraryType* type) {
+ string value, SparseLinearAlgebraLibraryType* type) {
UpperCase(&value);
STRENUM(SUITE_SPARSE);
STRENUM(CX_SPARSE);
@@ -131,8 +134,7 @@ const char* DenseLinearAlgebraLibraryTypeToString(
}
bool StringToDenseLinearAlgebraLibraryType(
- string value,
- DenseLinearAlgebraLibraryType* type) {
+ string value, DenseLinearAlgebraLibraryType* type) {
UpperCase(&value);
STRENUM(EIGEN);
STRENUM(LAPACK);
@@ -236,9 +238,8 @@ const char* LineSearchInterpolationTypeToString(
}
}
-bool StringToLineSearchInterpolationType(
- string value,
- LineSearchInterpolationType* type) {
+bool StringToLineSearchInterpolationType(string value,
+ LineSearchInterpolationType* type) {
UpperCase(&value);
STRENUM(BISECTION);
STRENUM(QUADRATIC);
@@ -258,8 +259,7 @@ const char* NonlinearConjugateGradientTypeToString(
}
bool StringToNonlinearConjugateGradientType(
- string value,
- NonlinearConjugateGradientType* type) {
+ string value, NonlinearConjugateGradientType* type) {
UpperCase(&value);
STRENUM(FLETCHER_REEVES);
STRENUM(POLAK_RIBIERE);
@@ -267,8 +267,7 @@ bool StringToNonlinearConjugateGradientType(
return false;
}
-const char* CovarianceAlgorithmTypeToString(
- CovarianceAlgorithmType type) {
+const char* CovarianceAlgorithmTypeToString(CovarianceAlgorithmType type) {
switch (type) {
CASESTR(DENSE_SVD);
CASESTR(SPARSE_QR);
@@ -277,17 +276,15 @@ const char* CovarianceAlgorithmTypeToString(
}
}
-bool StringToCovarianceAlgorithmType(
- string value,
- CovarianceAlgorithmType* type) {
+bool StringToCovarianceAlgorithmType(string value,
+ CovarianceAlgorithmType* type) {
UpperCase(&value);
STRENUM(DENSE_SVD);
STRENUM(SPARSE_QR);
return false;
}
-const char* NumericDiffMethodTypeToString(
- NumericDiffMethodType type) {
+const char* NumericDiffMethodTypeToString(NumericDiffMethodType type) {
switch (type) {
CASESTR(CENTRAL);
CASESTR(FORWARD);
@@ -297,9 +294,7 @@ const char* NumericDiffMethodTypeToString(
}
}
-bool StringToNumericDiffMethodType(
- string value,
- NumericDiffMethodType* type) {
+bool StringToNumericDiffMethodType(string value, NumericDiffMethodType* type) {
UpperCase(&value);
STRENUM(CENTRAL);
STRENUM(FORWARD);
@@ -307,8 +302,7 @@ bool StringToNumericDiffMethodType(
return false;
}
-const char* VisibilityClusteringTypeToString(
- VisibilityClusteringType type) {
+const char* VisibilityClusteringTypeToString(VisibilityClusteringType type) {
switch (type) {
CASESTR(CANONICAL_VIEWS);
CASESTR(SINGLE_LINKAGE);
@@ -317,9 +311,8 @@ const char* VisibilityClusteringTypeToString(
}
}
-bool StringToVisibilityClusteringType(
- string value,
- VisibilityClusteringType* type) {
+bool StringToVisibilityClusteringType(string value,
+ VisibilityClusteringType* type) {
UpperCase(&value);
STRENUM(CANONICAL_VIEWS);
STRENUM(SINGLE_LINKAGE);
@@ -354,9 +347,8 @@ bool StringtoLoggingType(std::string value, LoggingType* type) {
return false;
}
-
const char* DumpFormatTypeToString(DumpFormatType type) {
- switch (type) {
+ switch (type) {
CASESTR(CONSOLE);
CASESTR(TEXTFILE);
default:
@@ -375,9 +367,11 @@ bool StringtoDumpFormatType(std::string value, DumpFormatType* type) {
#undef STRENUM
bool IsSchurType(LinearSolverType type) {
+ // clang-format off
return ((type == SPARSE_SCHUR) ||
(type == DENSE_SCHUR) ||
(type == ITERATIVE_SCHUR));
+ // clang-format on
}
bool IsSparseLinearAlgebraLibraryTypeAvailable(
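
The CASESTR/STRENUM macros reformatted above drive every enum-to-string and string-to-enum helper in types.cc. A minimal self-contained sketch of the same pattern, using a hypothetical MyEnum rather than a real Ceres type:

    #include <algorithm>
    #include <cctype>
    #include <string>

    enum MyEnum { FOO, BAR };

    // clang-format off
    #define CASESTR(x) case x: return #x
    #define STRENUM(x) if (value == #x) { *type = x; return true; }
    // clang-format on

    static void UpperCase(std::string* input) {
      std::transform(input->begin(), input->end(), input->begin(), ::toupper);
    }

    const char* MyEnumToString(MyEnum type) {
      switch (type) {
        CASESTR(FOO);
        CASESTR(BAR);
        default:
          return "UNKNOWN";
      }
    }

    bool StringToMyEnum(std::string value, MyEnum* type) {
      UpperCase(&value);  // comparison is case-insensitive, as in types.cc
      STRENUM(FOO);
      STRENUM(BAR);
      return false;       // unrecognised name
    }

    #undef CASESTR
    #undef STRENUM

The string-to-enum direction takes the value by copy so it can be upper-cased in place before the STRENUM comparisons run.
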
diff --git a/extern/ceres/internal/ceres/visibility.cc b/extern/ceres/internal/ceres/visibility.cc
index 0981eeddcbf..82bf6f170b8 100644
--- a/extern/ceres/internal/ceres/visibility.cc
+++ b/extern/ceres/internal/ceres/visibility.cc
@@ -30,13 +30,14 @@
#include "ceres/visibility.h"
+#include <algorithm>
#include <cmath>
#include <ctime>
-#include <algorithm>
#include <set>
-#include <vector>
#include <unordered_map>
#include <utility>
+#include <vector>
+
#include "ceres/block_structure.h"
#include "ceres/graph.h"
#include "ceres/pair_hash.h"
@@ -138,9 +139,10 @@ WeightedGraph<int>* CreateSchurComplementGraph(
const int count = camera_pair_count.second;
DCHECK_NE(camera1, camera2);
// Static cast necessary for Windows.
- const double weight = static_cast<double>(count) /
- (sqrt(static_cast<double>(
- visibility[camera1].size() * visibility[camera2].size())));
+ const double weight =
+ static_cast<double>(count) /
+ (sqrt(static_cast<double>(visibility[camera1].size() *
+ visibility[camera2].size())));
graph->AddEdge(camera1, camera2, weight);
}
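
The reflowed expression above weights the edge between two cameras by the number of points they co-observe, normalised by the geometric mean of their visibility-set sizes. A standalone sketch of just that arithmetic (hypothetical helper name, not part of the patch):

    #include <cmath>
    #include <cstddef>

    // weight = count / sqrt(|visibility[camera1]| * |visibility[camera2]|)
    double SchurEdgeWeight(int count,
                           std::size_t num_visible1,
                           std::size_t num_visible2) {
      return static_cast<double>(count) /
             std::sqrt(static_cast<double>(num_visible1 * num_visible2));
    }
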
diff --git a/extern/ceres/internal/ceres/visibility.h b/extern/ceres/internal/ceres/visibility.h
index 115d45f7cf6..68c6723fad7 100644
--- a/extern/ceres/internal/ceres/visibility.h
+++ b/extern/ceres/internal/ceres/visibility.h
@@ -37,7 +37,9 @@
#include <set>
#include <vector>
+
#include "ceres/graph.h"
+#include "ceres/internal/port.h"
namespace ceres {
namespace internal {
@@ -52,9 +54,10 @@ struct CompressedRowBlockStructure;
//
// In a structure from motion problem, e_blocks correspond to 3D
// points and f_blocks correspond to cameras.
-void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
- int num_eliminate_blocks,
- std::vector<std::set<int>>* visibility);
+CERES_EXPORT_INTERNAL void ComputeVisibility(
+ const CompressedRowBlockStructure& block_structure,
+ int num_eliminate_blocks,
+ std::vector<std::set<int>>* visibility);
// Given f_block visibility as computed by the ComputeVisibility
// function above, construct and return a graph whose vertices are
@@ -69,7 +72,7 @@ void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
//
// Caller acquires ownership of the returned WeightedGraph pointer
// (heap-allocated).
-WeightedGraph<int>* CreateSchurComplementGraph(
+CERES_EXPORT_INTERNAL WeightedGraph<int>* CreateSchurComplementGraph(
const std::vector<std::set<int>>& visibility);
} // namespace internal
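
With the new CERES_EXPORT_INTERNAL annotations, ComputeVisibility and CreateSchurComplementGraph keep the same calling convention. A minimal usage sketch against the internal API, assuming the internal headers are on the include path; the ownership comment above is why the result goes straight into a unique_ptr:

    #include <memory>
    #include <set>
    #include <vector>

    #include "ceres/block_structure.h"
    #include "ceres/graph.h"
    #include "ceres/visibility.h"

    void BuildCameraGraph(
        const ceres::internal::CompressedRowBlockStructure& bs,
        int num_eliminate_blocks) {
      // One set per f_block (camera), holding the e_blocks (points) it sees.
      std::vector<std::set<int>> visibility;
      ceres::internal::ComputeVisibility(bs, num_eliminate_blocks, &visibility);

      // The caller acquires ownership of the heap-allocated graph.
      std::unique_ptr<ceres::internal::WeightedGraph<int>> graph(
          ceres::internal::CreateSchurComplementGraph(visibility));
    }
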
diff --git a/extern/ceres/internal/ceres/visibility_based_preconditioner.cc b/extern/ceres/internal/ceres/visibility_based_preconditioner.cc
index 3372e82d1e1..0cf4afaae06 100644
--- a/extern/ceres/internal/ceres/visibility_based_preconditioner.cc
+++ b/extern/ceres/internal/ceres/visibility_based_preconditioner.cc
@@ -144,11 +144,11 @@ void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity(
}
// Determine the sparsity structure of the CLUSTER_TRIDIAGONAL
-// preconditioner. It clusters cameras using using the scene
-// visibility and then finds the strongly interacting pairs of
-// clusters by constructing another graph with the clusters as
-// vertices and approximating it with a degree-2 maximum spanning
-// forest. The set of edges in this forest are the cluster pairs.
+// preconditioner. It clusters cameras using the scene visibility and
+// then finds the strongly interacting pairs of clusters by
+// constructing another graph with the clusters as vertices and
+// approximating it with a degree-2 maximum spanning forest. The set
+// of edges in this forest are the cluster pairs.
void VisibilityBasedPreconditioner::ComputeClusterTridiagonalSparsity(
const CompressedRowBlockStructure& bs) {
vector<set<int>> visibility;
diff --git a/extern/ceres/internal/ceres/visibility_based_preconditioner.h b/extern/ceres/internal/ceres/visibility_based_preconditioner.h
index aa582d5e7ef..0457b9a376a 100644
--- a/extern/ceres/internal/ceres/visibility_based_preconditioner.h
+++ b/extern/ceres/internal/ceres/visibility_based_preconditioner.h
@@ -162,8 +162,9 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
std::vector<std::set<int>>* cluster_visibility) const;
WeightedGraph<int>* CreateClusterGraph(
const std::vector<std::set<int>>& visibility) const;
- void ForestToClusterPairs(const WeightedGraph<int>& forest,
- std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
+ void ForestToClusterPairs(
+ const WeightedGraph<int>& forest,
+ std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
void ComputeBlockPairsInPreconditioner(const CompressedRowBlockStructure& bs);
bool IsBlockPairInPreconditioner(int block1, int block2) const;
bool IsBlockPairOffDiagonal(int block1, int block2) const;
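
The comment in the .cc hunk above describes approximating the cluster graph with a degree-2 maximum spanning forest and taking the forest's edges as the cluster pairs; ForestToClusterPairs, re-wrapped in this header, performs that edges-to-pairs step. A self-contained sketch of the idea over a plain adjacency list (the real method walks Ceres' WeightedGraph and fills an unordered_set keyed by pair_hash):

    #include <algorithm>
    #include <set>
    #include <utility>
    #include <vector>

    // Collect every undirected forest edge as an ordered (min, max)
    // cluster pair, so each pair is stored exactly once.
    std::set<std::pair<int, int>> ForestEdgesToClusterPairs(
        const std::vector<std::vector<int>>& adjacency) {
      std::set<std::pair<int, int>> cluster_pairs;
      for (int c1 = 0; c1 < static_cast<int>(adjacency.size()); ++c1) {
        for (int c2 : adjacency[c1]) {
          cluster_pairs.emplace(std::min(c1, c2), std::max(c1, c2));
        }
      }
      return cluster_pairs;
    }
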
diff --git a/extern/ceres/internal/ceres/wall_time.h b/extern/ceres/internal/ceres/wall_time.h
index ed0610f27da..9c92e9e60ef 100644
--- a/extern/ceres/internal/ceres/wall_time.h
+++ b/extern/ceres/internal/ceres/wall_time.h
@@ -33,6 +33,7 @@
#include <map>
#include <string>
+
#include "ceres/internal/port.h"
#include "ceres/stringprintf.h"
#include "glog/logging.h"
@@ -44,7 +45,7 @@ namespace internal {
// OpenMP is available then the high precision openmp_get_wtime()
// function is used. Otherwise on unixes, gettimeofday is used. The
// granularity is in seconds on windows systems.
-double WallTimeInSeconds();
+CERES_EXPORT_INTERNAL double WallTimeInSeconds();
// Log a series of events, recording for each event the time elapsed
// since the last event and since the creation of the object.
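
WallTimeInSeconds, now exported for the internal API, is the clock behind the event logging described above. A minimal timing sketch, assuming the internal header is reachable as it is inside the tree:

    #include "ceres/wall_time.h"
    #include "glog/logging.h"

    void TimeSomething() {
      const double start = ceres::internal::WallTimeInSeconds();
      // ... the work being timed ...
      const double elapsed = ceres::internal::WallTimeInSeconds() - start;
      LOG(INFO) << "Elapsed: " << elapsed << " seconds";
    }
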
diff --git a/extern/ceres/patches/series b/extern/ceres/patches/series
index 7fa3673acac..e69de29bb2d 100644
--- a/extern/ceres/patches/series
+++ b/extern/ceres/patches/series
@@ -1,2 +0,0 @@
-unused_parameter.patch
-unused_variable.patch
diff --git a/extern/ceres/patches/unused_parameter.patch b/extern/ceres/patches/unused_parameter.patch
deleted file mode 100644
index 14969d6a19f..00000000000
--- a/extern/ceres/patches/unused_parameter.patch
+++ /dev/null
@@ -1,13 +0,0 @@
-diff --git a/extern/ceres/include/ceres/internal/autodiff.h b/extern/ceres/include/ceres/internal/autodiff.h
-index 71b7bae4757..cb7b1aca5b9 100644
---- a/include/ceres/internal/autodiff.h
-+++ b/include/ceres/internal/autodiff.h
-@@ -198,7 +198,7 @@ struct Make1stOrderPerturbation {
- template <int N, int Offset, typename T, typename JetT>
- struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
- public:
-- static void Apply(const T* src, JetT* dst) {}
-+ static void Apply(const T* /*src*/, JetT* /*dst*/) {}
- };
-
- // Calls Make1stOrderPerturbation for every parameter block.
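
The deleted unused_parameter.patch silenced an unused-parameter warning in the now-empty Make1stOrderPerturbation specialization by commenting out the parameter names. The same technique in a self-contained form (hypothetical function, not the Ceres one):

    // Parameters that are intentionally ignored keep their names only
    // as comments, which suppresses -Wunused-parameter.
    static void Apply(const double* /*src*/, double* /*dst*/) {}
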
diff --git a/extern/ceres/patches/unused_variable.patch b/extern/ceres/patches/unused_variable.patch
deleted file mode 100644
index 24a4f392962..00000000000
--- a/extern/ceres/patches/unused_variable.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/extern/ceres/internal/ceres/sparse_cholesky.cc b/extern/ceres/internal/ceres/sparse_cholesky.cc
-index 0639ea90664..d9d2100d3f9 100644
---- a/internal/ceres/sparse_cholesky.cc
-+++ b/internal/ceres/sparse_cholesky.cc
-@@ -56,6 +56,7 @@ std::unique_ptr<SparseCholesky> SparseCholesky::Create(
- }
- break;
- #else
-+ (void)ordering_type;
- LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
- #endif
-
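
The deleted unused_variable.patch used the other common idiom: a void-cast marks a value as deliberately unused in a branch that never reads it. A minimal sketch with hypothetical names:

    void Example(int ordering_type) {
      // This branch never reads ordering_type; the void-cast keeps the
      // unused warning quiet without changing behaviour.
      (void)ordering_type;
    }
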