Welcome to mirror list, hosted at ThFree Co, Russian Federation.

git.blender.org/blender.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
path: root/extern
diff options
context:
space:
mode:
authorSergey Sharybin <sergey.vfx@gmail.com>2020-06-18 11:12:01 +0300
committerSergey Sharybin <sergey.vfx@gmail.com>2020-06-19 13:02:21 +0300
commit31ae83381106a7aaee5303710b818c13f42ceddc (patch)
tree6935ebe7e84db7e54c689ea3308fb6df67e03d73 /extern
parent171c4fb238a2a65291540ac5406187bc69f3a6bc (diff)
Ceres: Update to the latest upstream version
Using latest master because of various compilation error fixes. Brings a lot of recent development. From most interesting parts: - New threading model. - Tiny solver. - Compatibility with C++17.
Diffstat (limited to 'extern')
-rw-r--r--extern/ceres/CMakeLists.txt576
-rw-r--r--extern/ceres/ChangeLog839
-rwxr-xr-xextern/ceres/bundle.sh9
-rw-r--r--extern/ceres/files.txt81
-rw-r--r--extern/ceres/include/ceres/autodiff_cost_function.h89
-rw-r--r--extern/ceres/include/ceres/autodiff_first_order_function.h151
-rw-r--r--extern/ceres/include/ceres/autodiff_local_parameterization.h40
-rw-r--r--extern/ceres/include/ceres/c_api.h4
-rw-r--r--extern/ceres/include/ceres/ceres.h9
-rw-r--r--extern/ceres/include/ceres/conditioned_cost_function.h16
-rw-r--r--extern/ceres/include/ceres/context.h56
-rw-r--r--extern/ceres/include/ceres/cost_function.h27
-rw-r--r--extern/ceres/include/ceres/cost_function_to_functor.h612
-rw-r--r--extern/ceres/include/ceres/covariance.h79
-rw-r--r--extern/ceres/include/ceres/crs_matrix.h5
-rw-r--r--extern/ceres/include/ceres/cubic_interpolation.h436
-rw-r--r--extern/ceres/include/ceres/dynamic_autodiff_cost_function.h95
-rw-r--r--extern/ceres/include/ceres/dynamic_cost_function.h56
-rw-r--r--extern/ceres/include/ceres/dynamic_cost_function_to_functor.h62
-rw-r--r--extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h125
-rw-r--r--extern/ceres/include/ceres/evaluation_callback.h80
-rw-r--r--extern/ceres/include/ceres/first_order_function.h54
-rw-r--r--extern/ceres/include/ceres/gradient_checker.h23
-rw-r--r--extern/ceres/include/ceres/gradient_problem.h25
-rw-r--r--extern/ceres/include/ceres/gradient_problem_solver.h128
-rw-r--r--extern/ceres/include/ceres/internal/array_selector.h95
-rw-r--r--extern/ceres/include/ceres/internal/autodiff.h291
-rw-r--r--extern/ceres/include/ceres/internal/disable_warnings.h2
-rw-r--r--extern/ceres/include/ceres/internal/eigen.h41
-rw-r--r--extern/ceres/include/ceres/internal/fixed_array.h573
-rw-r--r--extern/ceres/include/ceres/internal/householder_vector.h (renamed from extern/ceres/internal/ceres/householder_vector.h)19
-rw-r--r--extern/ceres/include/ceres/internal/integer_sequence_algorithm.h165
-rw-r--r--extern/ceres/include/ceres/internal/line_parameterization.h183
-rw-r--r--extern/ceres/include/ceres/internal/macros.h170
-rw-r--r--extern/ceres/include/ceres/internal/manual_constructor.h208
-rw-r--r--extern/ceres/include/ceres/internal/memory.h90
-rw-r--r--extern/ceres/include/ceres/internal/numeric_diff.h198
-rw-r--r--extern/ceres/include/ceres/internal/parameter_dims.h124
-rw-r--r--extern/ceres/include/ceres/internal/port.h62
-rw-r--r--extern/ceres/include/ceres/internal/scoped_ptr.h310
-rw-r--r--extern/ceres/include/ceres/internal/variadic_evaluate.h210
-rw-r--r--extern/ceres/include/ceres/iteration_callback.h64
-rw-r--r--extern/ceres/include/ceres/jet.h693
-rw-r--r--extern/ceres/include/ceres/local_parameterization.h186
-rw-r--r--extern/ceres/include/ceres/loss_function.h85
-rw-r--r--extern/ceres/include/ceres/normal_prior.h12
-rw-r--r--extern/ceres/include/ceres/numeric_diff_cost_function.h147
-rw-r--r--extern/ceres/include/ceres/numeric_diff_options.h22
-rw-r--r--extern/ceres/include/ceres/ordered_groups.h47
-rw-r--r--extern/ceres/include/ceres/problem.h235
-rw-r--r--extern/ceres/include/ceres/rotation.h255
-rw-r--r--extern/ceres/include/ceres/sized_cost_function.h51
-rw-r--r--extern/ceres/include/ceres/solver.h494
-rw-r--r--extern/ceres/include/ceres/tiny_solver.h368
-rw-r--r--extern/ceres/include/ceres/tiny_solver_autodiff_function.h206
-rw-r--r--extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h142
-rw-r--r--extern/ceres/include/ceres/types.h71
-rw-r--r--extern/ceres/include/ceres/version.h6
-rw-r--r--extern/ceres/internal/ceres/accelerate_sparse.cc289
-rw-r--r--extern/ceres/internal/ceres/accelerate_sparse.h147
-rw-r--r--extern/ceres/internal/ceres/array_utils.cc14
-rw-r--r--extern/ceres/internal/ceres/array_utils.h4
-rw-r--r--extern/ceres/internal/ceres/block_jacobi_preconditioner.cc1
-rw-r--r--extern/ceres/internal/ceres/block_jacobi_preconditioner.h18
-rw-r--r--extern/ceres/internal/ceres/block_jacobian_writer.cc3
-rw-r--r--extern/ceres/internal/ceres/block_jacobian_writer.h1
-rw-r--r--extern/ceres/internal/ceres/block_random_access_dense_matrix.cc1
-rw-r--r--extern/ceres/internal/ceres/block_random_access_dense_matrix.h29
-rw-r--r--extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc6
-rw-r--r--extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h34
-rw-r--r--extern/ceres/internal/ceres/block_random_access_matrix.h17
-rw-r--r--extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc53
-rw-r--r--extern/ceres/internal/ceres/block_random_access_sparse_matrix.h46
-rw-r--r--extern/ceres/internal/ceres/block_sparse_matrix.cc182
-rw-r--r--extern/ceres/internal/ceres/block_sparse_matrix.h102
-rw-r--r--extern/ceres/internal/ceres/block_structure.h9
-rw-r--r--extern/ceres/internal/ceres/c_api.cc6
-rw-r--r--extern/ceres/internal/ceres/callbacks.cc24
-rw-r--r--extern/ceres/internal/ceres/callbacks.h19
-rw-r--r--extern/ceres/internal/ceres/canonical_views_clustering.cc232
-rw-r--r--extern/ceres/internal/ceres/canonical_views_clustering.h124
-rw-r--r--extern/ceres/internal/ceres/cgnr_linear_operator.h12
-rw-r--r--extern/ceres/internal/ceres/cgnr_solver.cc39
-rw-r--r--extern/ceres/internal/ceres/cgnr_solver.h13
-rw-r--r--extern/ceres/internal/ceres/collections_port.h196
-rw-r--r--extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc6
-rw-r--r--extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc6
-rw-r--r--extern/ceres/internal/ceres/compressed_row_jacobian_writer.h2
-rw-r--r--extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc682
-rw-r--r--extern/ceres/internal/ceres/compressed_row_sparse_matrix.h173
-rw-r--r--extern/ceres/internal/ceres/concurrent_queue.h159
-rw-r--r--extern/ceres/internal/ceres/conditioned_cost_function.cc2
-rw-r--r--extern/ceres/internal/ceres/conjugate_gradients_solver.cc13
-rw-r--r--extern/ceres/internal/ceres/conjugate_gradients_solver.h10
-rw-r--r--extern/ceres/internal/ceres/context.cc41
-rw-r--r--extern/ceres/internal/ceres/context_impl.cc43
-rw-r--r--extern/ceres/internal/ceres/context_impl.h67
-rw-r--r--extern/ceres/internal/ceres/coordinate_descent_minimizer.cc137
-rw-r--r--extern/ceres/internal/ceres/coordinate_descent_minimizer.h14
-rw-r--r--extern/ceres/internal/ceres/corrector.cc4
-rw-r--r--extern/ceres/internal/ceres/corrector.h2
-rw-r--r--extern/ceres/internal/ceres/covariance.cc10
-rw-r--r--extern/ceres/internal/ceres/covariance_impl.cc257
-rw-r--r--extern/ceres/internal/ceres/covariance_impl.h8
-rw-r--r--extern/ceres/internal/ceres/cxsparse.cc284
-rw-r--r--extern/ceres/internal/ceres/cxsparse.h59
-rw-r--r--extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc1
-rw-r--r--extern/ceres/internal/ceres/dense_normal_cholesky_solver.h6
-rw-r--r--extern/ceres/internal/ceres/dense_qr_solver.cc2
-rw-r--r--extern/ceres/internal/ceres/dense_qr_solver.h6
-rw-r--r--extern/ceres/internal/ceres/dense_sparse_matrix.cc2
-rw-r--r--extern/ceres/internal/ceres/dense_sparse_matrix.h28
-rw-r--r--extern/ceres/internal/ceres/dogleg_strategy.cc12
-rw-r--r--extern/ceres/internal/ceres/dogleg_strategy.h15
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.cc41
-rw-r--r--extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h4
-rw-r--r--extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc286
-rw-r--r--extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h86
-rw-r--r--extern/ceres/internal/ceres/eigensparse.cc190
-rw-r--r--extern/ceres/internal/ceres/eigensparse.h90
-rw-r--r--extern/ceres/internal/ceres/evaluator.cc8
-rw-r--r--extern/ceres/internal/ceres/evaluator.h61
-rw-r--r--extern/ceres/internal/ceres/execution_summary.h38
-rw-r--r--extern/ceres/internal/ceres/float_cxsparse.cc47
-rw-r--r--extern/ceres/internal/ceres/float_cxsparse.h58
-rw-r--r--extern/ceres/internal/ceres/float_suitesparse.cc47
-rw-r--r--extern/ceres/internal/ceres/float_suitesparse.h58
-rw-r--r--extern/ceres/internal/ceres/function_sample.cc (renamed from extern/ceres/internal/ceres/integral_types.h)86
-rw-r--r--extern/ceres/internal/ceres/function_sample.h94
-rw-r--r--extern/ceres/internal/ceres/generate_template_specializations.py246
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc58
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc58
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc58
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc58
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc5
-rw-r--r--extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc5
-rw-r--r--extern/ceres/internal/ceres/gradient_checker.cc19
-rw-r--r--extern/ceres/internal/ceres/gradient_checking_cost_function.cc34
-rw-r--r--extern/ceres/internal/ceres/gradient_checking_cost_function.h7
-rw-r--r--extern/ceres/internal/ceres/gradient_problem_evaluator.h43
-rw-r--r--extern/ceres/internal/ceres/gradient_problem_solver.cc89
-rw-r--r--extern/ceres/internal/ceres/graph.h55
-rw-r--r--extern/ceres/internal/ceres/graph_algorithms.h84
-rw-r--r--extern/ceres/internal/ceres/implicit_schur_complement.cc1
-rw-r--r--extern/ceres/internal/ceres/implicit_schur_complement.h16
-rw-r--r--extern/ceres/internal/ceres/inner_product_computer.cc330
-rw-r--r--extern/ceres/internal/ceres/inner_product_computer.h157
-rw-r--r--extern/ceres/internal/ceres/invert_psd_matrix.h79
-rw-r--r--extern/ceres/internal/ceres/iterative_refiner.cc74
-rw-r--r--extern/ceres/internal/ceres/iterative_refiner.h93
-rw-r--r--extern/ceres/internal/ceres/iterative_schur_complement_solver.cc108
-rw-r--r--extern/ceres/internal/ceres/iterative_schur_complement_solver.h16
-rw-r--r--extern/ceres/internal/ceres/lapack.cc3
-rw-r--r--extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc12
-rw-r--r--extern/ceres/internal/ceres/levenberg_marquardt_strategy.h12
-rw-r--r--extern/ceres/internal/ceres/line_search.cc219
-rw-r--r--extern/ceres/internal/ceres/line_search.h98
-rw-r--r--extern/ceres/internal/ceres/line_search_minimizer.cc122
-rw-r--r--extern/ceres/internal/ceres/line_search_minimizer.h6
-rw-r--r--extern/ceres/internal/ceres/line_search_preprocessor.cc7
-rw-r--r--extern/ceres/internal/ceres/line_search_preprocessor.h6
-rw-r--r--extern/ceres/internal/ceres/linear_least_squares_problems.cc19
-rw-r--r--extern/ceres/internal/ceres/linear_least_squares_problems.h15
-rw-r--r--extern/ceres/internal/ceres/linear_solver.cc15
-rw-r--r--extern/ceres/internal/ceres/linear_solver.h122
-rw-r--r--extern/ceres/internal/ceres/local_parameterization.cc154
-rw-r--r--extern/ceres/internal/ceres/loss_function.cc19
-rw-r--r--extern/ceres/internal/ceres/low_rank_inverse_hessian.cc10
-rw-r--r--extern/ceres/internal/ceres/low_rank_inverse_hessian.h10
-rw-r--r--extern/ceres/internal/ceres/minimizer.h9
-rw-r--r--extern/ceres/internal/ceres/mutex.h329
-rw-r--r--extern/ceres/internal/ceres/normal_prior.cc1
-rw-r--r--extern/ceres/internal/ceres/pair_hash.h112
-rw-r--r--extern/ceres/internal/ceres/parallel_for.h67
-rw-r--r--extern/ceres/internal/ceres/parallel_for_cxx.cc247
-rw-r--r--extern/ceres/internal/ceres/parallel_for_nothreads.cc78
-rw-r--r--extern/ceres/internal/ceres/parallel_for_openmp.cc88
-rw-r--r--extern/ceres/internal/ceres/parallel_utils.cc90
-rw-r--r--extern/ceres/internal/ceres/parallel_utils.h67
-rw-r--r--extern/ceres/internal/ceres/parameter_block.h217
-rw-r--r--extern/ceres/internal/ceres/parameter_block_ordering.cc35
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view.cc224
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view.h28
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view_impl.h2
-rw-r--r--extern/ceres/internal/ceres/partitioned_matrix_view_template.py (renamed from extern/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py)86
-rw-r--r--extern/ceres/internal/ceres/polynomial.cc18
-rw-r--r--extern/ceres/internal/ceres/polynomial.h24
-rw-r--r--extern/ceres/internal/ceres/preconditioner.cc3
-rw-r--r--extern/ceres/internal/ceres/preconditioner.h54
-rw-r--r--extern/ceres/internal/ceres/preprocessor.cc25
-rw-r--r--extern/ceres/internal/ceres/preprocessor.h21
-rw-r--r--extern/ceres/internal/ceres/problem.cc207
-rw-r--r--extern/ceres/internal/ceres/problem_impl.cc629
-rw-r--r--extern/ceres/internal/ceres/problem_impl.h121
-rw-r--r--extern/ceres/internal/ceres/program.cc153
-rw-r--r--extern/ceres/internal/ceres/program.h15
-rw-r--r--extern/ceres/internal/ceres/program_evaluator.h252
-rw-r--r--extern/ceres/internal/ceres/random.h6
-rw-r--r--extern/ceres/internal/ceres/reorder_program.cc87
-rw-r--r--extern/ceres/internal/ceres/reorder_program.h17
-rw-r--r--extern/ceres/internal/ceres/residual_block.cc48
-rw-r--r--extern/ceres/internal/ceres/residual_block.h11
-rw-r--r--extern/ceres/internal/ceres/residual_block_utils.cc4
-rw-r--r--extern/ceres/internal/ceres/schur_complement_solver.cc442
-rw-r--r--extern/ceres/internal/ceres/schur_complement_solver.h102
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator.cc204
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator.h352
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator_impl.h336
-rw-r--r--extern/ceres/internal/ceres/schur_eliminator_template.py (renamed from extern/ceres/internal/ceres/generate_eliminator_specialization.py)86
-rw-r--r--extern/ceres/internal/ceres/schur_jacobi_preconditioner.cc35
-rw-r--r--extern/ceres/internal/ceres/schur_jacobi_preconditioner.h21
-rw-r--r--extern/ceres/internal/ceres/schur_templates.cc227
-rw-r--r--extern/ceres/internal/ceres/schur_templates.h46
-rw-r--r--extern/ceres/internal/ceres/scoped_thread_token.h61
-rw-r--r--extern/ceres/internal/ceres/scratch_evaluate_preparer.h4
-rw-r--r--extern/ceres/internal/ceres/single_linkage_clustering.cc94
-rw-r--r--extern/ceres/internal/ceres/single_linkage_clustering.h (renamed from extern/ceres/include/ceres/fpclassify.h)60
-rw-r--r--extern/ceres/internal/ceres/small_blas.h244
-rw-r--r--extern/ceres/internal/ceres/small_blas_generic.h315
-rw-r--r--extern/ceres/internal/ceres/solver.cc398
-rw-r--r--extern/ceres/internal/ceres/solver_utils.cc6
-rw-r--r--extern/ceres/internal/ceres/sparse_cholesky.cc163
-rw-r--r--extern/ceres/internal/ceres/sparse_cholesky.h138
-rw-r--r--extern/ceres/internal/ceres/sparse_matrix.h2
-rw-r--r--extern/ceres/internal/ceres/sparse_normal_cholesky_solver.cc467
-rw-r--r--extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h83
-rw-r--r--extern/ceres/internal/ceres/split.cc2
-rw-r--r--extern/ceres/internal/ceres/stringprintf.cc22
-rw-r--r--extern/ceres/internal/ceres/subset_preconditioner.cc117
-rw-r--r--extern/ceres/internal/ceres/subset_preconditioner.h91
-rw-r--r--extern/ceres/internal/ceres/suitesparse.cc430
-rw-r--r--extern/ceres/internal/ceres/suitesparse.h44
-rw-r--r--extern/ceres/internal/ceres/thread_pool.cc116
-rw-r--r--extern/ceres/internal/ceres/thread_pool.h120
-rw-r--r--extern/ceres/internal/ceres/thread_token_provider.cc76
-rw-r--r--extern/ceres/internal/ceres/thread_token_provider.h97
-rw-r--r--extern/ceres/internal/ceres/triplet_sparse_matrix.cc77
-rw-r--r--extern/ceres/internal/ceres/triplet_sparse_matrix.h62
-rw-r--r--extern/ceres/internal/ceres/trust_region_minimizer.cc73
-rw-r--r--extern/ceres/internal/ceres/trust_region_minimizer.h12
-rw-r--r--extern/ceres/internal/ceres/trust_region_preprocessor.cc173
-rw-r--r--extern/ceres/internal/ceres/trust_region_preprocessor.h6
-rw-r--r--extern/ceres/internal/ceres/trust_region_step_evaluator.cc10
-rw-r--r--extern/ceres/internal/ceres/trust_region_step_evaluator.h4
-rw-r--r--extern/ceres/internal/ceres/trust_region_strategy.h54
-rw-r--r--extern/ceres/internal/ceres/types.cc51
-rw-r--r--extern/ceres/internal/ceres/visibility.cc152
-rw-r--r--extern/ceres/internal/ceres/visibility.h78
-rw-r--r--extern/ceres/internal/ceres/visibility_based_preconditioner.cc585
-rw-r--r--extern/ceres/internal/ceres/visibility_based_preconditioner.h82
-rw-r--r--extern/ceres/internal/ceres/wall_time.cc31
-rw-r--r--extern/ceres/internal/ceres/wall_time.h2
-rw-r--r--extern/ceres/patches/series2
-rw-r--r--extern/ceres/patches/unused_parameter.patch13
-rw-r--r--extern/ceres/patches/unused_variable.patch12
293 files changed, 17546 insertions, 9409 deletions
diff --git a/extern/ceres/CMakeLists.txt b/extern/ceres/CMakeLists.txt
index 009445ea690..51cf9657319 100644
--- a/extern/ceres/CMakeLists.txt
+++ b/extern/ceres/CMakeLists.txt
@@ -37,223 +37,279 @@ set(INC_SYS
)
set(SRC
- internal/ceres/array_utils.cc
- internal/ceres/blas.cc
- internal/ceres/block_evaluate_preparer.cc
- internal/ceres/block_jacobian_writer.cc
- internal/ceres/block_jacobi_preconditioner.cc
- internal/ceres/block_random_access_dense_matrix.cc
- internal/ceres/block_random_access_diagonal_matrix.cc
- internal/ceres/block_random_access_matrix.cc
- internal/ceres/block_random_access_sparse_matrix.cc
- internal/ceres/block_sparse_matrix.cc
- internal/ceres/block_structure.cc
- internal/ceres/callbacks.cc
- internal/ceres/c_api.cc
- internal/ceres/cgnr_solver.cc
- internal/ceres/compressed_col_sparse_matrix_utils.cc
- internal/ceres/compressed_row_jacobian_writer.cc
- internal/ceres/compressed_row_sparse_matrix.cc
- internal/ceres/conditioned_cost_function.cc
- internal/ceres/conjugate_gradients_solver.cc
- internal/ceres/coordinate_descent_minimizer.cc
- internal/ceres/corrector.cc
- internal/ceres/covariance.cc
- internal/ceres/covariance_impl.cc
- internal/ceres/dense_normal_cholesky_solver.cc
- internal/ceres/dense_qr_solver.cc
- internal/ceres/dense_sparse_matrix.cc
- internal/ceres/detect_structure.cc
- internal/ceres/dogleg_strategy.cc
- internal/ceres/dynamic_compressed_row_jacobian_writer.cc
- internal/ceres/dynamic_compressed_row_sparse_matrix.cc
- internal/ceres/evaluator.cc
- internal/ceres/file.cc
- internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
- internal/ceres/generated/schur_eliminator_d_d_d.cc
- internal/ceres/gradient_checker.cc
- internal/ceres/gradient_checking_cost_function.cc
- internal/ceres/gradient_problem.cc
- internal/ceres/gradient_problem_solver.cc
- internal/ceres/implicit_schur_complement.cc
- internal/ceres/is_close.cc
- internal/ceres/iterative_schur_complement_solver.cc
- internal/ceres/lapack.cc
- internal/ceres/levenberg_marquardt_strategy.cc
- internal/ceres/linear_least_squares_problems.cc
- internal/ceres/linear_operator.cc
- internal/ceres/linear_solver.cc
- internal/ceres/line_search.cc
- internal/ceres/line_search_direction.cc
- internal/ceres/line_search_minimizer.cc
- internal/ceres/line_search_preprocessor.cc
- internal/ceres/local_parameterization.cc
- internal/ceres/loss_function.cc
- internal/ceres/low_rank_inverse_hessian.cc
- internal/ceres/minimizer.cc
- internal/ceres/normal_prior.cc
- internal/ceres/parameter_block_ordering.cc
- internal/ceres/partitioned_matrix_view.cc
- internal/ceres/polynomial.cc
- internal/ceres/preconditioner.cc
- internal/ceres/preprocessor.cc
- internal/ceres/problem.cc
- internal/ceres/problem_impl.cc
- internal/ceres/program.cc
- internal/ceres/reorder_program.cc
- internal/ceres/residual_block.cc
- internal/ceres/residual_block_utils.cc
- internal/ceres/schur_complement_solver.cc
- internal/ceres/schur_eliminator.cc
- internal/ceres/schur_jacobi_preconditioner.cc
- internal/ceres/scratch_evaluate_preparer.cc
- internal/ceres/solver.cc
- internal/ceres/solver_utils.cc
- internal/ceres/sparse_matrix.cc
- internal/ceres/sparse_normal_cholesky_solver.cc
- internal/ceres/split.cc
- internal/ceres/stringprintf.cc
- internal/ceres/triplet_sparse_matrix.cc
- internal/ceres/trust_region_minimizer.cc
- internal/ceres/trust_region_preprocessor.cc
- internal/ceres/trust_region_step_evaluator.cc
- internal/ceres/trust_region_strategy.cc
- internal/ceres/types.cc
- internal/ceres/wall_time.cc
+ internal/ceres/accelerate_sparse.cc
+ internal/ceres/array_utils.cc
+ internal/ceres/blas.cc
+ internal/ceres/block_evaluate_preparer.cc
+ internal/ceres/block_jacobian_writer.cc
+ internal/ceres/block_jacobi_preconditioner.cc
+ internal/ceres/block_random_access_dense_matrix.cc
+ internal/ceres/block_random_access_diagonal_matrix.cc
+ internal/ceres/block_random_access_matrix.cc
+ internal/ceres/block_random_access_sparse_matrix.cc
+ internal/ceres/block_sparse_matrix.cc
+ internal/ceres/block_structure.cc
+ internal/ceres/callbacks.cc
+ internal/ceres/canonical_views_clustering.cc
+ internal/ceres/c_api.cc
+ internal/ceres/cgnr_solver.cc
+ internal/ceres/compressed_col_sparse_matrix_utils.cc
+ internal/ceres/compressed_row_jacobian_writer.cc
+ internal/ceres/compressed_row_sparse_matrix.cc
+ internal/ceres/conditioned_cost_function.cc
+ internal/ceres/conjugate_gradients_solver.cc
+ internal/ceres/context.cc
+ internal/ceres/context_impl.cc
+ internal/ceres/coordinate_descent_minimizer.cc
+ internal/ceres/corrector.cc
+ internal/ceres/covariance.cc
+ internal/ceres/covariance_impl.cc
+ internal/ceres/cxsparse.cc
+ internal/ceres/dense_normal_cholesky_solver.cc
+ internal/ceres/dense_qr_solver.cc
+ internal/ceres/dense_sparse_matrix.cc
+ internal/ceres/detect_structure.cc
+ internal/ceres/dogleg_strategy.cc
+ internal/ceres/dynamic_compressed_row_jacobian_writer.cc
+ internal/ceres/dynamic_compressed_row_sparse_matrix.cc
+ internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+ internal/ceres/eigensparse.cc
+ internal/ceres/evaluator.cc
+ internal/ceres/file.cc
+ internal/ceres/float_cxsparse.cc
+ internal/ceres/float_suitesparse.cc
+ internal/ceres/function_sample.cc
+ internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
+ internal/ceres/generated/schur_eliminator_d_d_d.cc
+ internal/ceres/gradient_checker.cc
+ internal/ceres/gradient_checking_cost_function.cc
+ internal/ceres/gradient_problem.cc
+ internal/ceres/gradient_problem_solver.cc
+ internal/ceres/implicit_schur_complement.cc
+ internal/ceres/inner_product_computer.cc
+ internal/ceres/is_close.cc
+ internal/ceres/iterative_refiner.cc
+ internal/ceres/iterative_schur_complement_solver.cc
+ internal/ceres/lapack.cc
+ internal/ceres/levenberg_marquardt_strategy.cc
+ internal/ceres/linear_least_squares_problems.cc
+ internal/ceres/linear_operator.cc
+ internal/ceres/linear_solver.cc
+ internal/ceres/line_search.cc
+ internal/ceres/line_search_direction.cc
+ internal/ceres/line_search_minimizer.cc
+ internal/ceres/line_search_preprocessor.cc
+ internal/ceres/local_parameterization.cc
+ internal/ceres/loss_function.cc
+ internal/ceres/low_rank_inverse_hessian.cc
+ internal/ceres/minimizer.cc
+ internal/ceres/normal_prior.cc
+ internal/ceres/parallel_for_cxx.cc
+ internal/ceres/parallel_for_nothreads.cc
+ internal/ceres/parallel_for_openmp.cc
+ internal/ceres/parallel_utils.cc
+ internal/ceres/parameter_block_ordering.cc
+ internal/ceres/partitioned_matrix_view.cc
+ internal/ceres/polynomial.cc
+ internal/ceres/preconditioner.cc
+ internal/ceres/preprocessor.cc
+ internal/ceres/problem.cc
+ internal/ceres/problem_impl.cc
+ internal/ceres/program.cc
+ internal/ceres/reorder_program.cc
+ internal/ceres/residual_block.cc
+ internal/ceres/residual_block_utils.cc
+ internal/ceres/schur_complement_solver.cc
+ internal/ceres/schur_eliminator.cc
+ internal/ceres/schur_jacobi_preconditioner.cc
+ internal/ceres/schur_templates.cc
+ internal/ceres/scratch_evaluate_preparer.cc
+ internal/ceres/single_linkage_clustering.cc
+ internal/ceres/solver.cc
+ internal/ceres/solver_utils.cc
+ internal/ceres/sparse_cholesky.cc
+ internal/ceres/sparse_matrix.cc
+ internal/ceres/sparse_normal_cholesky_solver.cc
+ internal/ceres/split.cc
+ internal/ceres/stringprintf.cc
+ internal/ceres/subset_preconditioner.cc
+ internal/ceres/suitesparse.cc
+ internal/ceres/thread_pool.cc
+ internal/ceres/thread_token_provider.cc
+ internal/ceres/triplet_sparse_matrix.cc
+ internal/ceres/trust_region_minimizer.cc
+ internal/ceres/trust_region_preprocessor.cc
+ internal/ceres/trust_region_step_evaluator.cc
+ internal/ceres/trust_region_strategy.cc
+ internal/ceres/types.cc
+ internal/ceres/visibility_based_preconditioner.cc
+ internal/ceres/visibility.cc
+ internal/ceres/wall_time.cc
- include/ceres/autodiff_cost_function.h
- include/ceres/autodiff_local_parameterization.h
- include/ceres/c_api.h
- include/ceres/ceres.h
- include/ceres/conditioned_cost_function.h
- include/ceres/cost_function.h
- include/ceres/cost_function_to_functor.h
- include/ceres/covariance.h
- include/ceres/crs_matrix.h
- include/ceres/dynamic_autodiff_cost_function.h
- include/ceres/dynamic_cost_function_to_functor.h
- include/ceres/dynamic_numeric_diff_cost_function.h
- include/ceres/fpclassify.h
- include/ceres/gradient_checker.h
- include/ceres/gradient_problem.h
- include/ceres/gradient_problem_solver.h
- include/ceres/internal/autodiff.h
- include/ceres/internal/disable_warnings.h
- include/ceres/internal/eigen.h
- include/ceres/internal/fixed_array.h
- include/ceres/internal/macros.h
- include/ceres/internal/manual_constructor.h
- include/ceres/internal/numeric_diff.h
- include/ceres/internal/port.h
- include/ceres/internal/reenable_warnings.h
- include/ceres/internal/scoped_ptr.h
- include/ceres/internal/variadic_evaluate.h
- include/ceres/iteration_callback.h
- include/ceres/jet.h
- include/ceres/local_parameterization.h
- include/ceres/loss_function.h
- include/ceres/normal_prior.h
- include/ceres/numeric_diff_cost_function.h
- include/ceres/numeric_diff_options.h
- include/ceres/ordered_groups.h
- include/ceres/problem.h
- include/ceres/rotation.h
- include/ceres/sized_cost_function.h
- include/ceres/solver.h
- include/ceres/types.h
- include/ceres/version.h
- internal/ceres/array_utils.h
- internal/ceres/blas.h
- internal/ceres/block_evaluate_preparer.h
- internal/ceres/block_jacobian_writer.h
- internal/ceres/block_jacobi_preconditioner.h
- internal/ceres/block_random_access_dense_matrix.h
- internal/ceres/block_random_access_diagonal_matrix.h
- internal/ceres/block_random_access_matrix.h
- internal/ceres/block_random_access_sparse_matrix.h
- internal/ceres/block_sparse_matrix.h
- internal/ceres/block_structure.h
- internal/ceres/callbacks.h
- internal/ceres/casts.h
- internal/ceres/cgnr_linear_operator.h
- internal/ceres/cgnr_solver.h
- internal/ceres/collections_port.h
- internal/ceres/compressed_col_sparse_matrix_utils.h
- internal/ceres/compressed_row_jacobian_writer.h
- internal/ceres/compressed_row_sparse_matrix.h
- internal/ceres/conjugate_gradients_solver.h
- internal/ceres/coordinate_descent_minimizer.h
- internal/ceres/corrector.h
- internal/ceres/covariance_impl.h
- internal/ceres/cxsparse.h
- internal/ceres/dense_jacobian_writer.h
- internal/ceres/dense_normal_cholesky_solver.h
- internal/ceres/dense_qr_solver.h
- internal/ceres/dense_sparse_matrix.h
- internal/ceres/detect_structure.h
- internal/ceres/dogleg_strategy.h
- internal/ceres/dynamic_compressed_row_finalizer.h
- internal/ceres/dynamic_compressed_row_jacobian_writer.h
- internal/ceres/dynamic_compressed_row_sparse_matrix.h
- internal/ceres/evaluator.h
- internal/ceres/execution_summary.h
- internal/ceres/file.h
- internal/ceres/gradient_checking_cost_function.h
- internal/ceres/gradient_problem_evaluator.h
- internal/ceres/graph_algorithms.h
- internal/ceres/graph.h
- internal/ceres/householder_vector.h
- internal/ceres/implicit_schur_complement.h
- internal/ceres/integral_types.h
- internal/ceres/is_close.h
- internal/ceres/iterative_schur_complement_solver.h
- internal/ceres/lapack.h
- internal/ceres/levenberg_marquardt_strategy.h
- internal/ceres/linear_least_squares_problems.h
- internal/ceres/linear_operator.h
- internal/ceres/linear_solver.h
- internal/ceres/line_search_direction.h
- internal/ceres/line_search.h
- internal/ceres/line_search_minimizer.h
- internal/ceres/line_search_preprocessor.h
- internal/ceres/low_rank_inverse_hessian.h
- internal/ceres/map_util.h
- internal/ceres/minimizer.h
- internal/ceres/mutex.h
- internal/ceres/parameter_block.h
- internal/ceres/parameter_block_ordering.h
- internal/ceres/partitioned_matrix_view.h
- internal/ceres/partitioned_matrix_view_impl.h
- internal/ceres/polynomial.h
- internal/ceres/preconditioner.h
- internal/ceres/preprocessor.h
- internal/ceres/problem_impl.h
- internal/ceres/program_evaluator.h
- internal/ceres/program.h
- internal/ceres/random.h
- internal/ceres/reorder_program.h
- internal/ceres/residual_block.h
- internal/ceres/residual_block_utils.h
- internal/ceres/schur_complement_solver.h
- internal/ceres/schur_eliminator.h
- internal/ceres/schur_eliminator_impl.h
- internal/ceres/schur_jacobi_preconditioner.h
- internal/ceres/scratch_evaluate_preparer.h
- internal/ceres/small_blas.h
- internal/ceres/solver_utils.h
- internal/ceres/sparse_matrix.h
- internal/ceres/sparse_normal_cholesky_solver.h
- internal/ceres/split.h
- internal/ceres/stl_util.h
- internal/ceres/stringprintf.h
- internal/ceres/suitesparse.h
- internal/ceres/triplet_sparse_matrix.h
- internal/ceres/trust_region_minimizer.h
- internal/ceres/trust_region_preprocessor.h
- internal/ceres/trust_region_step_evaluator.h
- internal/ceres/trust_region_strategy.h
- internal/ceres/visibility_based_preconditioner.h
- internal/ceres/wall_time.h
+ include/ceres/autodiff_cost_function.h
+ include/ceres/autodiff_first_order_function.h
+ include/ceres/autodiff_local_parameterization.h
+ include/ceres/c_api.h
+ include/ceres/ceres.h
+ include/ceres/conditioned_cost_function.h
+ include/ceres/context.h
+ include/ceres/cost_function.h
+ include/ceres/cost_function_to_functor.h
+ include/ceres/covariance.h
+ include/ceres/crs_matrix.h
+ include/ceres/cubic_interpolation.h
+ include/ceres/dynamic_autodiff_cost_function.h
+ include/ceres/dynamic_cost_function.h
+ include/ceres/dynamic_cost_function_to_functor.h
+ include/ceres/dynamic_numeric_diff_cost_function.h
+ include/ceres/evaluation_callback.h
+ include/ceres/first_order_function.h
+ include/ceres/gradient_checker.h
+ include/ceres/gradient_problem.h
+ include/ceres/gradient_problem_solver.h
+ include/ceres/internal/array_selector.h
+ include/ceres/internal/autodiff.h
+ include/ceres/internal/disable_warnings.h
+ include/ceres/internal/eigen.h
+ include/ceres/internal/fixed_array.h
+ include/ceres/internal/householder_vector.h
+ include/ceres/internal/integer_sequence_algorithm.h
+ include/ceres/internal/line_parameterization.h
+ include/ceres/internal/memory.h
+ include/ceres/internal/numeric_diff.h
+ include/ceres/internal/parameter_dims.h
+ include/ceres/internal/port.h
+ include/ceres/internal/reenable_warnings.h
+ include/ceres/internal/variadic_evaluate.h
+ include/ceres/iteration_callback.h
+ include/ceres/jet.h
+ include/ceres/local_parameterization.h
+ include/ceres/loss_function.h
+ include/ceres/normal_prior.h
+ include/ceres/numeric_diff_cost_function.h
+ include/ceres/numeric_diff_options.h
+ include/ceres/ordered_groups.h
+ include/ceres/problem.h
+ include/ceres/rotation.h
+ include/ceres/sized_cost_function.h
+ include/ceres/solver.h
+ include/ceres/tiny_solver_autodiff_function.h
+ include/ceres/tiny_solver_cost_function_adapter.h
+ include/ceres/tiny_solver.h
+ include/ceres/types.h
+ include/ceres/version.h
+ internal/ceres/accelerate_sparse.h
+ internal/ceres/array_utils.h
+ internal/ceres/blas.h
+ internal/ceres/block_evaluate_preparer.h
+ internal/ceres/block_jacobian_writer.h
+ internal/ceres/block_jacobi_preconditioner.h
+ internal/ceres/block_random_access_dense_matrix.h
+ internal/ceres/block_random_access_diagonal_matrix.h
+ internal/ceres/block_random_access_matrix.h
+ internal/ceres/block_random_access_sparse_matrix.h
+ internal/ceres/block_sparse_matrix.h
+ internal/ceres/block_structure.h
+ internal/ceres/callbacks.h
+ internal/ceres/canonical_views_clustering.h
+ internal/ceres/casts.h
+ internal/ceres/cgnr_linear_operator.h
+ internal/ceres/cgnr_solver.h
+ internal/ceres/compressed_col_sparse_matrix_utils.h
+ internal/ceres/compressed_row_jacobian_writer.h
+ internal/ceres/compressed_row_sparse_matrix.h
+ internal/ceres/concurrent_queue.h
+ internal/ceres/conjugate_gradients_solver.h
+ internal/ceres/context_impl.h
+ internal/ceres/coordinate_descent_minimizer.h
+ internal/ceres/corrector.h
+ internal/ceres/covariance_impl.h
+ internal/ceres/cxsparse.h
+ internal/ceres/dense_jacobian_writer.h
+ internal/ceres/dense_normal_cholesky_solver.h
+ internal/ceres/dense_qr_solver.h
+ internal/ceres/dense_sparse_matrix.h
+ internal/ceres/detect_structure.h
+ internal/ceres/dogleg_strategy.h
+ internal/ceres/dynamic_compressed_row_finalizer.h
+ internal/ceres/dynamic_compressed_row_jacobian_writer.h
+ internal/ceres/dynamic_compressed_row_sparse_matrix.h
+ internal/ceres/dynamic_sparse_normal_cholesky_solver.h
+ internal/ceres/eigensparse.h
+ internal/ceres/evaluator.h
+ internal/ceres/execution_summary.h
+ internal/ceres/file.h
+ internal/ceres/float_cxsparse.h
+ internal/ceres/float_suitesparse.h
+ internal/ceres/function_sample.h
+ internal/ceres/gradient_checking_cost_function.h
+ internal/ceres/gradient_problem_evaluator.h
+ internal/ceres/graph_algorithms.h
+ internal/ceres/graph.h
+ internal/ceres/implicit_schur_complement.h
+ internal/ceres/inner_product_computer.h
+ internal/ceres/invert_psd_matrix.h
+ internal/ceres/is_close.h
+ internal/ceres/iterative_refiner.h
+ internal/ceres/iterative_schur_complement_solver.h
+ internal/ceres/lapack.h
+ internal/ceres/levenberg_marquardt_strategy.h
+ internal/ceres/linear_least_squares_problems.h
+ internal/ceres/linear_operator.h
+ internal/ceres/linear_solver.h
+ internal/ceres/line_search_direction.h
+ internal/ceres/line_search.h
+ internal/ceres/line_search_minimizer.h
+ internal/ceres/line_search_preprocessor.h
+ internal/ceres/low_rank_inverse_hessian.h
+ internal/ceres/map_util.h
+ internal/ceres/minimizer.h
+ internal/ceres/pair_hash.h
+ internal/ceres/parallel_for.h
+ internal/ceres/parallel_utils.h
+ internal/ceres/parameter_block.h
+ internal/ceres/parameter_block_ordering.h
+ internal/ceres/partitioned_matrix_view.h
+ internal/ceres/partitioned_matrix_view_impl.h
+ internal/ceres/polynomial.h
+ internal/ceres/preconditioner.h
+ internal/ceres/preprocessor.h
+ internal/ceres/problem_impl.h
+ internal/ceres/program_evaluator.h
+ internal/ceres/program.h
+ internal/ceres/random.h
+ internal/ceres/reorder_program.h
+ internal/ceres/residual_block.h
+ internal/ceres/residual_block_utils.h
+ internal/ceres/schur_complement_solver.h
+ internal/ceres/schur_eliminator.h
+ internal/ceres/schur_eliminator_impl.h
+ internal/ceres/schur_jacobi_preconditioner.h
+ internal/ceres/schur_templates.h
+ internal/ceres/scoped_thread_token.h
+ internal/ceres/scratch_evaluate_preparer.h
+ internal/ceres/single_linkage_clustering.h
+ internal/ceres/small_blas_generic.h
+ internal/ceres/small_blas.h
+ internal/ceres/solver_utils.h
+ internal/ceres/sparse_cholesky.h
+ internal/ceres/sparse_matrix.h
+ internal/ceres/sparse_normal_cholesky_solver.h
+ internal/ceres/split.h
+ internal/ceres/stl_util.h
+ internal/ceres/stringprintf.h
+ internal/ceres/subset_preconditioner.h
+ internal/ceres/suitesparse.h
+ internal/ceres/thread_pool.h
+ internal/ceres/thread_token_provider.h
+ internal/ceres/triplet_sparse_matrix.h
+ internal/ceres/trust_region_minimizer.h
+ internal/ceres/trust_region_preprocessor.h
+ internal/ceres/trust_region_step_evaluator.h
+ internal/ceres/trust_region_strategy.h
+ internal/ceres/visibility_based_preconditioner.h
+ internal/ceres/visibility.h
+ internal/ceres/wall_time.h
)
set(LIB
@@ -263,44 +319,48 @@ set(LIB
if(WITH_LIBMV_SCHUR_SPECIALIZATIONS)
list(APPEND SRC
- internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
- internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
- internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
- internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
- internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
- internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
- internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
- internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
- internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
- internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
- internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
- internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
- internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
- internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
- internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
- internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
- internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
- internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
- internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
- internal/ceres/generated/schur_eliminator_2_2_2.cc
- internal/ceres/generated/schur_eliminator_2_2_3.cc
- internal/ceres/generated/schur_eliminator_2_2_4.cc
- internal/ceres/generated/schur_eliminator_2_2_d.cc
- internal/ceres/generated/schur_eliminator_2_3_3.cc
- internal/ceres/generated/schur_eliminator_2_3_4.cc
- internal/ceres/generated/schur_eliminator_2_3_6.cc
- internal/ceres/generated/schur_eliminator_2_3_9.cc
- internal/ceres/generated/schur_eliminator_2_3_d.cc
- internal/ceres/generated/schur_eliminator_2_4_3.cc
- internal/ceres/generated/schur_eliminator_2_4_4.cc
- internal/ceres/generated/schur_eliminator_2_4_8.cc
- internal/ceres/generated/schur_eliminator_2_4_9.cc
- internal/ceres/generated/schur_eliminator_2_4_d.cc
- internal/ceres/generated/schur_eliminator_2_d_d.cc
- internal/ceres/generated/schur_eliminator_4_4_2.cc
- internal/ceres/generated/schur_eliminator_4_4_3.cc
- internal/ceres/generated/schur_eliminator_4_4_4.cc
- internal/ceres/generated/schur_eliminator_4_4_d.cc
+ internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
+ internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
+ internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
+ internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
+ internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
+ internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
+ internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
+ internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
+ internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
+ internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
+ internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
+ internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
+ internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
+ internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
+ internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
+ internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
+ internal/ceres/generated/schur_eliminator_2_2_2.cc
+ internal/ceres/generated/schur_eliminator_2_2_3.cc
+ internal/ceres/generated/schur_eliminator_2_2_4.cc
+ internal/ceres/generated/schur_eliminator_2_2_d.cc
+ internal/ceres/generated/schur_eliminator_2_3_3.cc
+ internal/ceres/generated/schur_eliminator_2_3_4.cc
+ internal/ceres/generated/schur_eliminator_2_3_6.cc
+ internal/ceres/generated/schur_eliminator_2_3_9.cc
+ internal/ceres/generated/schur_eliminator_2_3_d.cc
+ internal/ceres/generated/schur_eliminator_2_4_3.cc
+ internal/ceres/generated/schur_eliminator_2_4_4.cc
+ internal/ceres/generated/schur_eliminator_2_4_6.cc
+ internal/ceres/generated/schur_eliminator_2_4_8.cc
+ internal/ceres/generated/schur_eliminator_2_4_9.cc
+ internal/ceres/generated/schur_eliminator_2_4_d.cc
+ internal/ceres/generated/schur_eliminator_2_d_d.cc
+ internal/ceres/generated/schur_eliminator_3_3_3.cc
+ internal/ceres/generated/schur_eliminator_4_4_2.cc
+ internal/ceres/generated/schur_eliminator_4_4_3.cc
+ internal/ceres/generated/schur_eliminator_4_4_4.cc
+ internal/ceres/generated/schur_eliminator_4_4_d.cc
)
else()
add_definitions(-DCERES_RESTRICT_SCHUR_SPECIALIZATION)
@@ -315,13 +375,9 @@ add_definitions(
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
+ -DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
+ -DCERES_USE_CXX_THREADS
)
-if(WITH_OPENMP)
- add_definitions(
- -DCERES_USE_OPENMP
- )
-endif()
-
blender_add_lib(extern_ceres "${SRC}" "${INC}" "${INC_SYS}" "${LIB}")
diff --git a/extern/ceres/ChangeLog b/extern/ceres/ChangeLog
index ae8d42a7c95..64c75e572f4 100644
--- a/extern/ceres/ChangeLog
+++ b/extern/ceres/ChangeLog
@@ -1,588 +1,587 @@
-commit 8590e6e8e057adba4ec0083446d00268565bb444
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Oct 27 12:29:37 2016 -0700
+commit e39d9ed1d60dfeb58dd2a0df4622c683f87b28e3
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Tue Jun 16 09:02:05 2020 +0200
- Remove two checks from rotation.h
-
- This allows rotation.h to remove its dependency on glog.
+ Add a missing term and remove a superfluous word
- Change-Id: Ia6aede93ee51a4bd4039570dc8edd100a7045329
+ Change-Id: I25f40f0bf241302b975e6fc14690aa863c0728b0
-commit e892499e8d8977b9178a760348bdd201ec5f3489
-Author: Je Hyeong Hong <jhh37@outlook.com>
-Date: Tue Oct 18 22:49:11 2016 +0100
+commit 27cab77b699a1a2b5354820c57a91c92eaeb21e3
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Mon Jun 15 20:01:18 2020 +0200
- Relax the tolerance in QuaternionParameterizationTestHelper.
-
- This commit relaxes the tolerance value for comparing between the actual
- local matrix and the expected local matrix. Without this fix,
- EigenQuaternionParameterization.ZeroTest could fail as the difference
- exactly matches the value of std::numeric_limits<double>::epsilon().
+ Reformulate some sentences
- Change-Id: Ic4d3f26c0acdf5f16fead80dfdc53df9e7dabbf9
+ Change-Id: I4841aa8e8522008dd816261d9ad98e5fb8ad1758
-commit 7ed9e2fb7f1dff264c5e4fbaa89ee1c4c99df269
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed Oct 19 04:45:23 2016 -0700
+commit 8ac6655ce85a4462f2882fcb9e9118a7057ebe09
+Author: Carl Dehlin <carl@dehlin.com>
+Date: Mon Jun 15 19:10:12 2020 +0200
- Occured -> Occurred.
-
- Thanks to Phillip Huebner for reporting this.
+ Fix documentation formatting issues
- Change-Id: I9cddfbb373aeb496961d08e434fe661bff4abd29
+ Change-Id: Iea3a6e75dc3a7376eda866ab24e535a6df84f8ea
-commit b82f97279682962d8c8ae1b6d9e801ba072a0ab1
-Author: Je Hyeong Hong <jhh37@outlook.com>
-Date: Tue Oct 18 21:18:32 2016 +0100
+commit 7ef83e07592ead74eeacc227b642df1959d2a246
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sat May 30 11:30:01 2020 +0100
- Fix a test error in autodiff_test.cc.
+ Update minimum required C++ version for Ceres to C++14
- Previously, the test for the projective camera model would fail as no
- tolerance is set in line 144. To resolve this, this commit changes
- assert_equal to assert_near.
+ - Removes all workarounds for pre-C++14 versions
+ - Removes '11' qualifier from C++ threading option and associated
+ defines.
+ - Fix missing inclusion of 'Multithreading' in reported Ceres components
+ when C++ threading model is enabled.
+ - Update Sphinx documentation to specify C++14 as minimum requirement.
- Change-Id: I6cd3379083b1a10c7cd0a9cc83fd6962bb993cc9
+ Change-Id: I706c8b367b3221e3c4d1a0aaf669a8f9c911e438
-commit 5690b447de5beed6bdda99b7f30f372283c2fb1a
+commit 1d75e7568172dc5a4dc97937dcf66e0f5d28272c
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Oct 13 09:52:02 2016 -0700
+Date: Mon May 25 18:09:50 2020 -0700
- Fix documentation source for templated functions in rotation.h
+ Improve documentation for LocalParameterization
- Change-Id: Ic1b2e6f0e6eb9914f419fd0bb5af77b66252e57c
+ Change-Id: I63fa81206e67bfac56cc42bf2bb4915a3a11332b
-commit 2f8f98f7e8940e465de126fb51282396f42bea20
+commit 763398ca4ed56952f48c48df6a98e277e3e05381
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Oct 13 09:35:18 2016 -0700
+Date: Mon May 25 12:12:03 2020 -0700
- Prepare for 1.12.0RC1
+ Update the section on Preconditioners
- Change-Id: I23eaf0b46117a01440143001b74dacfa5e57cbf0
-
-commit 55c12d2e9569fe4aeac3ba688ac36810935a37ba
-Author: Damon Kohler <damonkohler@google.com>
-Date: Wed Oct 5 16:30:31 2016 +0200
-
- Adds package.xml to support Catkin.
+ Re-organize the section, add some more references and details for
+ existing preconditioners and add documentation for the SUBSET
+ precondition.
- Change-Id: I8ad4d36a8b036417604a54644e0bb70dd1615feb
-
-commit 0bcce6565202f5476e40f12afc0a99eb44bd9dfb
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon Oct 10 23:30:42 2016 -0700
-
- Fix tabs in Android.mk
+ https://github.com/ceres-solver/ceres-solver/issues/490
- Change-Id: Ie5ab9a8ba2b727721565e1ded242609b6df5f8f5
+ Change-Id: I93d0af819c160f5e4ce48b18202f629ddb92ca7b
-commit e6ffe2667170d2fc435443685c0163396fc52d7b
+commit a614f788a34ea86dd9f679b779ffbf920db45aa6
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon Oct 10 22:47:08 2016 -0700
+Date: Fri May 22 13:52:53 2020 -0700
- Update the version history.
+ Call EvaluationCallback before evaluating the fixed cost.
- Change-Id: I9a57b0541d6cebcb695ecb364a1d4ca04ea4e06c
-
-commit 0a4ccb7ee939ab35b22e26758401e039b033b176
-Author: David Gossow <dgossow@google.com>
-Date: Wed Sep 7 21:38:12 2016 +0200
-
- Relaxing Jacobian matching in Gradient Checker test.
+ Fixe a subtle bug in Program::RemoveFixedBlocks, where we call
+ ResidualBlock::Evaluate on residual blocks with all constant parameter
+ blocks without paying attention to the presence of an
+ EvaluationCallback.
+
+ In the process also run clang-format on some of the files touched by
+ this change.
- Any result of an arithmetic operation on floating-point matrices
- should never be checked for strict equality with some expected
- value, due to limited floating point precision on different machines.
- This fixes some occurences of exact checks in the gradient checker
- unit test that were causing problems on some platforms.
+ https://github.com/ceres-solver/ceres-solver/issues/482
- Change-Id: I48e804c9c705dc485ce74ddfe51037d4957c8fcb
+ Change-Id: I342b66f6f975fdee2eef139a31f24d4a3e568e84
-commit ee44fc91b59584921c1d1c8db153fda6d633b092
-Author: Je Hyeong Hong <jhh37@outlook.com>
-Date: Mon Oct 3 12:19:30 2016 +0100
+commit 70308f7bb9cac560db250262079c0f8b030b9d6b
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue May 26 06:12:13 2020 -0700
- Fix an Intel compiler error in covariance_impl.cc.
+ Simplify documentation generation.
- Intel C compiler strictly asks for parallel loops with collapse to be
- perfectly nested. Otherwise, compiling Ceres with ICC will throw an
- error at line 348 of covariance_impl.cc.
+ 1. The MathJax font configuration is moved into conf.py and removed
+ from make_docs.py along with better font sizing.
+ 2. Remove the bread crumb replacement as it is not working anymore.
+ 3. Fix a parsing error in nnls_modeling.rst which the new version of
+ sphinx barfed on.
- Change-Id: I1ecb68e89b7faf79e4153dfe6675c390d1780db4
+ Change-Id: Ia3c2e732323a8b5cabafe851ac5ca0f0c82da071
-commit 9026d69d1ce1e0bcd21debd54a38246d85c7c6e4
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Sep 22 17:20:14 2016 -0700
-
- Allow SubsetParameterization to hold all parameters constant
-
- 1. SubsetParameterization can now be constructed such that all
- parameters are constant. This is required for it be used as part
- of a ProductParameterization to hold a part of parameter block
- constant. For example, a parameter block consisting of a rotation
- as a quaternion and a translation vector can now have a local
- parameterization where the translation part is constant and the
- quaternion part has a QuaternionParameterization associated with it.
-
- 2. The check for the tangent space of a parameterization being
- positive dimensional. We were not doing this check up till now
- and the user could accidentally create parameterizations like this
- and create a problem for themselves. This will ensure that even
- though one can construct a SubsetParameterization where all
- parameters are constant, you cannot actually use it as a local
- parameterization for an entire parameter block. Which is how
- it was before, but the check was inside the SubsetParameterization
- constructor.
-
- 3. Added more tests and refactored existing tests to be more
- granular.
-
- Change-Id: Ic0184a1f30e3bd8a416b02341781a9d98e855ff7
-
-commit a36693f83da7a3fd19dce473d060231d4cc97499
+commit e886d7e65368e73e9d35c2ead895d81ced677977
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sat Sep 17 16:31:41 2016 -0700
+Date: Mon May 25 13:09:39 2020 -0700
- Update version history
+ Reduce the number of minimizer iterations in evaluation_callback_test.cc
- Change-Id: Ib2f0138ed7a1879ca3b2173e54092f7ae8dd5c9d
+ This should reduce the probability of the test heuristic failing due
+ to floating point issues.
+
+ https://github.com/ceres-solver/ceres-solver/issues/562
+ https://github.com/ceres-solver/ceres-solver/issues/392
+
+ Change-Id: I8ccf4164a8d595f5930d378f464313d4a2cae419
-commit 01e23e3d33178fdd050973666505c1080cfe04c3
-Author: David Gossow <dgossow@google.com>
-Date: Thu Sep 8 12:22:28 2016 +0200
+commit 9483e6f2f57bf51bad7cefd155cd5b48ca672c63
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue May 12 05:16:32 2020 -0700
- Removing duplicate include directive.
+ Simplify DynamicCompressedRowJacobianWriter::Write
- Change-Id: I729ae6501497746d1bb615cb893ad592e16ddf3f
+ Change-Id: I67aa2959bd479909b5cada79359c5cfdb8a37ef7
-commit 99b8210cee92cb972267537fb44bebf56f812d52
+commit 323cc55bb92a513924e566f487b54556052a716f
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed Sep 7 15:31:30 2016 -0700
+Date: Mon May 25 10:38:35 2020 -0700
- Update Android.mk to include new files.
+ Update the version in package.xml to 2.0.0.
- Change-Id: Id543ee7d2a65b65c868554a17f593c0a4958e873
+ Change-Id: Ibac053916520e8c597c875a8c7f5668bb35b6ba1
-commit 195d8d13a6a3962ac39ef7fcdcc6add0216eb8bc
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue Sep 6 07:12:23 2016 -0700
+commit 303b078b50bd3311a9c86fc256be3e9f2f334411
+Author: Bayes Nie <niebayes@gmail.com>
+Date: Sun May 24 16:08:52 2020 +0800
- Remove two DCHECKs from CubicHermiteSpline.
+ Fix few typos and alter a NULL to nullptr.
- They were present as debugging checks but were causing problems
- with the build on 32bit i386 due to numerical cancellation issues,
- where x ~ -epsilon.
+ Fix typos in docs/source/features.rst and examples/helloworld.cc. Alter a NULL to nullptr in include/ceres/autodiff_cost_function.h
- Removing these checks only changes the behaviour in Debug mode.
- We are already handling such small negative numbers in production
- if they occur. All that this change does is to remove the crash.
+ Change-Id: Ibcf00b6ef665ad6be9af14b3add2dd4f3852e7e6
+
+commit cca93fed63dd4117f3d6dd5339131fc7674e6e0a
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun May 24 18:05:05 2020 +0100
+
+ Bypass Ceres' FindGlog.cmake in CeresConfig.cmake if possible
- https://github.com/ceres-solver/ceres-solver/issues/212
+ - If the version of glog detected and used to build Ceres was built with
+ CMake (i.e. Config mode) then we now use Config mode directly in
+ CeresConfig.cmake and do not install Ceres' FindGlog.cmake module.
+ - This has the benefit of removing any hard-coded paths from
+ CeresConfig.cmake provided that all dependencies were also built with
+ CMake.
- Thanks to @NeroBurner and @debalance for reporting this.
+ Change-Id: I85af8a953fd6d300e8bc0cdeb0b3636fec182f68
+
+commit 77fc1d0fc4159ebb3a0a84a16651564eb2ce3c9d
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun May 24 19:07:26 2020 +0100
+
+ Use build_depend for private dependencies in Catkin package.xml
- Change-Id: I66480e86d4fa0a4b621204f2ff44cc3ff8d01c04
+ Change-Id: If0c0569e7ebbf37c0d8e8daaf7765e20a6282531
-commit 83041ac84f2d67c28559c67515e0e596a3f3aa20
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Fri Sep 2 19:10:35 2016 -0700
+commit a09682f00d8e50ada3c7ed16f8c48fa71a423f60
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun May 24 16:49:28 2020 +0100
- Fix some compiler warnings.
+ Fix MSVC version check to support use of clang-cl front-end
- Reported by Richard Trieu.
+ - Raised as issue: #521
- Change-Id: I202b7a7df09cc19c92582d276ccf171edf88a9fb
+ Change-Id: Iaea6b43484b90ec8789bda0447c8a90759974ec1
-commit 8c4623c63a2676e79e7917bb0561f903760f19b9
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Sep 1 00:05:09 2016 -0700
+commit b70687fcc86624c7d5520d25734938fa95d2af73
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun May 24 20:28:12 2020 +0100
- Update ExpectArraysClose to use ExpectClose instead of EXPECT_NEAR
+ Add namespace qualified Ceres::ceres CMake target
- The documentation for ExpectArraysClose and its implementation
- did not match.
+ - This reflects modern CMake style, and also provides a measure of
+ protection against missing find_package() imports in downstream
+ clients resulting in linker errors when 'ceres' matches the compiled
+ library and not the imported target.
+ - The original 'ceres' target remains, as a local imported interface
+ target created by CeresConfig for backwards compatibility.
- This change makes the polynomial_test not fail on 64bit AMD builds.
+ Change-Id: Ie9ed8de9b7059bc0cae1ae5002bb94d8fe617188
+
+commit 99efa54bdb4e14c3f4382a166baf6772113f74a8
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Sun May 24 19:18:38 2020 +0100
+
+ Replace type aliases deprecated/removed in C++17/C++20 from FixedArray
- Thanks to Phillip Huebner for reporting this.
+ - Raised as issue #551
+ - Also adds C++20 to the set of ALLOWED_CXX_STANDARDS, although this
+ will require a version of CMake >= 3.12.
- Change-Id: I503f2d3317a28d5885a34f8bdbccd49d20ae9ba2
+ Change-Id: I0f13c72e93a35391fd2d18590b4243a329a2322c
-commit 2fd39fcecb47eebce727081c9ffb8edf86c33669
+commit adb973e4a337c372aa81ca1a4f3bb704068c08b7
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu Sep 1 16:05:06 2016 -0700
+Date: Thu May 21 14:45:28 2020 -0700
- FindWithDefault returns by value rather than reference.
-
- Returning by reference leads to lifetime issues with the default
- value which may go out of scope by the time it is used.
-
- Thanks to @Ardavel for reporting this, as this causes graph_test
- to fail on VS2015x64.
-
- https://github.com/ceres-solver/ceres-solver/issues/216
+ NULL -> nullptr
- Change-Id: I596481219cfbf7622d49a6511ea29193b82c8ba3
+ Change-Id: Iaeea2ef7912d328653a76b65976adc8025a5be35
-commit 716f049a7b91a8f3a4632c367d9534d1d9190a81
-Author: Mike Vitus <vitus@google.com>
-Date: Wed Aug 31 13:38:30 2016 -0700
+commit 27b717951b58c134b3a5a9f664a66c7480364d6c
+Author: Alastair Harrison <aharrison24@gmail.com>
+Date: Fri May 15 10:10:12 2020 +0100
- Convert pose graph 2D example to glog and gflags.
+ Respect FIND_QUIETLY flag in cmake config file
- Change-Id: I0ed75a60718ef95199bb36f33d9eb99157d11d40
+ Ensure that Ceres does not print any log messages when somebody has
+ used 'find_package(Ceres QUIET)' in their CMake project.
+
+ Change-Id: Id6b68859cc8a5857f3fa78f29736cb82fd5a0943
-commit 46c5ce89dda308088a5fdc238d0c126fdd2c2b58
-Author: David Gossow <dgossow@google.com>
-Date: Wed Aug 31 18:40:57 2016 +0200
+commit 646959ef118a1f10bf93741d97cf64265d42f8c6
+Author: huangqinjin <huangqinjin@gmail.com>
+Date: Sat Apr 25 02:03:11 2020 +0800
- Fix compiler errors on some systems
+ Do not export class template LineParameterization
- This fixes some signed-unsigned comparisons and a missing header
- include.
+ For MSVC, instantiating a dllimport class template will cause error C2491:
+ definition of dllimport function not allowed.
- Change-Id: Ieb2bf6e905faa74851bc4ac4658d2f1da24b6ecc
+ Change-Id: Icc7f7ea84598df0a5436f48ffc2bab5cfab93921
-commit b102d53e1dd7dab132e58411183b6fffc2090590
-Author: David Gossow <dgossow@google.com>
-Date: Wed Aug 31 10:21:20 2016 +0200
+commit 1f128d070a24224d12eb901bc74ba393ccdbd0c3
+Author: huangqinjin <huangqinjin@gmail.com>
+Date: Mon Mar 4 13:14:43 2019 +0800
- Gradient checker multithreading bugfix.
+ Change the type of parameter index/offset to match their getter/setter
- This is a follow-up on c/7470. GradientCheckingCostFunction calls
- callback_->SetGradientErrorDetected() in its Evaluate method,
- which will run in multiple threads simultaneously when enabling
- this option in the solver. Thus, the string append operation
- inside that method has to be protected by a mutex.
+ Change-Id: If28b795e792f39db9775ada105e9038570195329
+
+commit 072c8f070e16cb32f211473c40196c6b5618d5a9
+Author: huangqinjin <huangqinjin@gmail.com>
+Date: Sat Apr 25 00:04:58 2020 +0800
+
+ Initialize integer variables with integer instead of double
- Change-Id: I314ef1df2be52595370d9af05851bf6da39bb45e
+ Change-Id: I652aca4ceb3129706a5d5e38afe9f16b61200a5b
-commit 79a28d1e49af53f67af7f3387d07e7c9b7339433
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed Aug 31 06:47:45 2016 -0700
+commit 8c36bcc81fbd4f78a2faa2c914ef40af264f4c31
+Author: Alex Stewart <alexs.mac@gmail.com>
+Date: Mon Apr 27 18:33:25 2020 +0100
- Rename a confusingly named member of Solver::Options
+ Use inline & -inlinehint-threshold in auto-diff benchmarks
- Solver::Options::numeric_derivative_relative_step_size to
- Solver::Options::gradient_check_numeric_derivative_relative_step_size
+ - This results in the same performance as the original solution of
+ increasing -inline-threshold, but this approach is more viable to
+ incorporate in a large code base as its effects are more targeted.
- Change-Id: Ib89ae3f87e588d4aba2a75361770d2cec26f07aa
+ Change-Id: Id798dbca7d3050de0ea847a5ecc69484ac78a2cf
-commit 358ae741c8c4545b03d95c91fa546d9a36683677
+commit 57cf20aa5d3c1b2f25d255814f4fff5260db81c6
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed Aug 31 06:58:41 2016 -0700
+Date: Tue Apr 21 10:10:01 2020 -0700
- Note that Problem::Evaluate cannot be called from an IterationCallback
+ static const -> static constexpr where we can.
- Change-Id: Ieabdc2d40715e6b547ab22156ba32e9c8444b7ed
+ Change-Id: I8a6d26a89c4377dd440fa6dcf23513b7556533fc
-commit 44044e25b14d7e623baae4505a17c913bdde59f8
+commit 40b27482a202c8b0a5f9e8f2b4be0192d34195f5
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed Aug 31 05:50:58 2016 -0700
+Date: Tue Apr 21 09:35:30 2020 -0700
- Update the NumTraits for Jets
+ Add std::numeric_limit specialization for Jets
- 1. Use AVX if EIGEN_VECTORIZE_AVX is defined.
- 2. Make the cost of division same as the cost of multiplication.
+ This allows users to use std::numeric_limits on templated functors.
- These are updates to the original numtraits update needed for eigen 3.3
- that Shaheen Gandhi sent out.
-
- Change-Id: Ic1e3ed7d05a659c7badc79a894679b2dd61c51b9
+ Change-Id: I403cec5c9826033ce7dfd6047deb64f66c35f806
-commit 4b6ad5d88e45ce8638c882d3e8f08161089b6dba
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sat Aug 27 23:21:55 2016 -0700
+commit e751d6e4f0daa9f691c5ed25ca8dc564875d8bef
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Wed Apr 8 10:43:53 2020 +0200
- Use ProductParameterization in bundle_adjuster.cc
-
- Previously, when using a quaternion to parameterize the camera
- orientation, the camera parameter block was split into two
- parameter blocks. One for the rotation and another for the
- translation and intrinsics. This was to enable the use of the
- Quaternion parameterization.
+ Remove AutodiffCodegen
- Now that we have a ProductParameterization which allows us
- to compose multiple parameterizations, this is no longer needed
- and we use a size 10 parameter block instead.
+ - Remove Codegen files
+ - Revert Jet and Rotation
- This leads to a more than 2x improvements in the linear solver time.
-
- Change-Id: I78b8f06696f81fee54cfe1a4ae193ee8a5f8e920
+ Change-Id: I005c5f98f2b6dfa5c7fd88d998b6aa83e47dab60
-commit bfc916cf1cf753b85c1e2ac037e2019ee891f6f9
-Author: Shaheen Gandhi <visigoth@gmail.com>
-Date: Thu Aug 4 12:10:14 2016 -0700
+commit e9eb76f8ef9835940659cfb3a312ed6822c48152
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Mon Apr 6 11:11:43 2020 +0200
- Allow ceres to be used with the latest version of Eigen
+ Remove AutodiffCodegen CMake integration
- Change-Id: Ief3b0f6b405484ec04ecd9ab6a1e1e5409a594c2
+ Change-Id: I403597540df8429378336626b8f748b7821fe6f5
-commit edbd48ab502aa418ad9700ee5c3ada5f9268b90a
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun Jul 10 14:13:51 2016 +0100
+commit 9435e08a7a7c903897e18e1dc24d801caf4f62a4
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Fri Apr 3 11:50:09 2020 -0700
- Enable support for OpenMP in Clang if detected.
-
- - Previously we disabled OpenMP if Clang was detected, as it did not
- support it. However as of Clang 3.8 (and potentially Xcode 8) OpenMP
- is supported.
+ More clang-tidy and wjr@ comment fixes
- Change-Id: Ia39dac9fe746f1fc6310e08553f85f3c37349707
+ Change-Id: I5736ae482f736fc56c00d21c659b1f8d41da68e9
-commit f6df6c05dd83b19fa90044106ebaca40957998ae
-Author: Mike Vitus <vitus@google.com>
-Date: Thu Aug 18 19:27:43 2016 -0700
+commit d93fac4b7ab670a936ce821284a0b9d099b4688c
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Fri Apr 3 09:33:17 2020 +0200
- Add an example for modeling and solving a 3D pose graph SLAM problem.
+ Remove AutodiffCodegen Tests
- Change-Id: I750ca5f20c495edfee5f60ffedccc5bd8ba2bb37
+ Change-Id: Icd194db7b22add518844f1b507d0fdd3e0fe17fe
-commit ac3b8e82175122e38bafaaa9cd419ba3cee11087
-Author: David Gossow <dgossow@google.com>
-Date: Fri Apr 29 16:07:11 2016 +0200
+commit 2281c6ed24d2c12f133fa6039f224b3da18cebe3
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Apr 2 16:43:42 2020 -0700
- Gradient checking cleanup and local parameterization bugfix
-
- Change the Ceres gradient checking API to make is useful for
- unit testing, clean up code duplication and fix interaction between
- gradient checking and local parameterizations.
-
- There were two gradient checking implementations, one being used
- when using the check_gradients flag in the Solver, the other
- being a standalone class. The standalone version was restricted
- to cost functions with fixed parameter sizes at compile time, which
- is being lifted here. This enables it to be used inside the
- GradientCheckingCostFunction as well.
+ Fixes for comments from William Rucklidge
- In addition, this installs new hooks in the Solver to ensure
- that Solve will fail if any incorrect gradients are detected. This
- way, you can set the check_gradient flags to true and detect
- errors in an automated way, instead of just printing error information
- to the log. The error log is now also returned in the Solver summary
- instead of being printed directly. The user can then decide what to
- do with it. The existing hooks for user callbacks are used for
- this purpose to keep the internal API changes minimal and non-invasive.
-
- The last and biggest change is the way the the interaction between
- local parameterizations and the gradient checker works. Before,
- local parameterizations would be ignored by the checker. However,
- if a cost function does not compute its Jacobian along the null
- space of the local parameterization, this wil not have any effect
- on the solver, but would result in a gradient checker error.
- With this change, the Jacobians are multiplied by the Jacobians
- of the respective local parameterization and thus being compared
- in the tangent space only.
-
- The typical use case for this are quaternion parameters, where
- a cost function will typically assume that the quaternion is
- always normalized, skipping the correct computation of the Jacobian
- along the normal to save computation cost.
-
- Change-Id: I5e1bb97b8a899436cea25101efe5011b0bb13282
+ Change-Id: I64fcc25532cc66dc4cb7e2ea7ccfb220b0cb7e1f
-commit d4264ec10d9a270b53b5db86c0245ae8cbd2cf18
-Author: Mike Vitus <vitus@google.com>
-Date: Wed Aug 17 13:39:05 2016 -0700
+commit d797a87a4091af6ae0063e3c8291429c15318bdc
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Apr 2 13:57:56 2020 -0700
- Add a quaternion local parameterization for Eigen's quaternion element convention.
+ Use Ridders' method in GradientChecker.
- Change-Id: I7046e8b24805313c5fb6a767de581d0054fcdb83
-
-commit fd7cab65ef30fbc33612220abed52dd5073413c4
-Author: Mike Vitus <vitus@google.com>
-Date: Wed Aug 10 09:29:12 2016 -0700
-
- Fix typos in the pose graph 2D example.
+ Using Ridders' method gives orders of magnitude more accuracy compared
+ to central differences. This will make things slower, but this is
+ primarily a testing/debugging feature and the speed hit is not a
+ concern. This should also reduce the false positive rates when users
+ enable check_gradients. This is reflected in the increased sensitivity of
+ the tests for GradientChecker.
+
+ https://github.com/ceres-solver/ceres-solver/issues/554
- Change-Id: Ie024ff6b6cab9f2e8011d21121a91931bd987bd1
+ Change-Id: I6b871c72df55be1c31175ba062cf3c1e94e4b662
-commit 375dc348745081f89693607142d8b6744a7fb6b4
-Author: Mike Vitus <vitus@google.com>
-Date: Wed Aug 3 18:51:16 2016 -0700
+commit 41675682dc9df836bf15845064cfe1087619c79d
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Apr 2 07:28:38 2020 -0700
- Remove duplicate entry for the NIST example in the docs.
+ Fix a MSVC type deduction bug in ComputeHouseholderVector
- Change-Id: Ic4e8f9b68b77b5235b5c96fe588cc56866dab759
+ A recent change made this function templated and MSVC 16 has trouble
+ doing automatic argument deduction, so the type of the template is
+ simplified and all callsites are explicitly annotated with the type
+ of the arguments.
+
+ Change-Id: I83cd0269e6e82c4a8f4e391f5fc03b92c942f74d
-commit f554681bf22d769abc12dd6d346ef65f9bb22431
-Author: Mike Vitus <vitus@google.com>
-Date: Mon Jul 25 18:30:48 2016 -0700
+commit 947ec0c1fa0f67c89e21daaf8d1648822ae5293a
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Thu Apr 2 09:52:53 2020 +0200
- Add an example for modeling and solving a 2D pose graph SLAM problem.
+ Remove AutodiffCodegen autodiff benchmarks
- Change-Id: Ia89b12af7afa33e7b1b9a68d69cf2a0b53416737
+ Change-Id: If1eaad31710cc91d40323ea6cae7cabe6fa64b1f
-commit e1bcc6e0f51512f43aa7bfb7b0d62f7ac1d0cd4b
+commit 27183d661ecae246dbce6d03cacf84f39fba1f1e
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed May 18 07:52:48 2016 -0700
+Date: Thu Jul 11 16:30:59 2019 +0200
- Add additional logging for analyzing orderings
+ Allow LocalParameterizations to have zero local size.
+
+ Local parameterizations with zero tangent/local size will cause the
+ corresponding parameter block to be treated as constant.
- Change-Id: Ic68d2959db35254e2895f11294fb25de4d4b8a81
+ https://github.com/ceres-solver/ceres-solver/issues/347
+
+ Change-Id: I554a2acc420f5dd9d0cc7f97b691877eb057b2c0
-commit 16980b4fec846f86910c18772b8145bcb55f4728
-Author: Mike Vitus <vitus@google.com>
-Date: Fri Jul 15 13:37:49 2016 -0700
+commit 7ac7d79dca2ac6b482da50fd9ad0227ba8d6c632
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Wed Apr 1 14:51:12 2020 +0200
- Delete the remove_definitons command from sampled_functions
- CMakeLists.txt because it will be inherited from the top level examples
- CMakeLists.txt.
+ Remove HelloWorldCodegen example
- Change-Id: I25593587df0ae84fd8ddddc589bc2a13f3777427
+ Change-Id: I2584f41d591a5d648b4832385c2a779bb25fc04d
-commit a04490be97800e78e59db5eb67fa46226738ffba
-Author: Mike Vitus <vitus@google.com>
-Date: Thu Jul 14 10:10:13 2016 -0700
+commit 8c8738bf832f0fc27f0d4a9585fc59b2eaa6a828
+Author: Nikolaus Demmel <nikolaus@nikolaus-demmel.de>
+Date: Sun Mar 29 13:29:02 2020 +0200
- Add readme for the sampled_function example.
+ Add photometric and relative-pose residuals to autodiff benchmarks
- Change-Id: I9468b6a7b9f2ffdd2bf9f0dd1f4e1d5f894e540c
+ Change-Id: Id100ff2656ab63bb4fd19a51b95e78281cfd8b4a
-commit ff11d0e63d4678188e8cabd40a532ba06912fe5a
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Wed Jun 29 09:31:45 2016 +0100
+commit 9f7fb66d62014ed62ba6aa617364e8591211c797
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Wed Mar 25 11:41:39 2020 +0100
- Use _j[0,1,n]() Bessel functions on MSVC to avoid deprecation errors.
+ Add a constant cost function to the autodiff benchmarks
+
+ The constant cost function is run with a variable number of
+ parameters to test at which point, different compilers fail
+ to optimize the autodiff code.
- - Microsoft deprecated the POSIX Bessel functions: j[0,1,n]() in favour
- of _j[0,1,n](), it appears since at least MSVC 2005:
- https://msdn.microsoft.com/en-us/library/ms235384(v=vs.100).aspx.
- - As this occurs in jet.h (templated public header), although Ceres
- suppresses the warning when it itself is built (to suppress a warning
- about the insecurity of using std::copy), it will crop up again in
- client code (without this fix) unless it is explicitly suppressed
- there also.
- - Raised as Issue #190:
- https://github.com/ceres-solver/ceres-solver/issues/190.
+ Clang achieves expected performance which fails at >50 parameters.
+ G++ fails already at 20 parameters
- Change-Id: If7ac5dbb856748f9900be93ec0452a40c0b00524
+ Change-Id: I75d8c683ef0011d813ec6d966d7ad58f86530f44
-commit 8ea86e1614cf77644ce782e43cde6565a54444f5
-Author: Nicolai Wojke <nwojke@uni-koblenz.de>
-Date: Mon Apr 25 14:24:41 2016 +0200
+commit ab0d373e465f46ce483db640d0fb2f244f48702d
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Tue Mar 24 12:30:46 2020 -0700
- Fix: Copy minimizer option 'is_silent' to LinSearchDirection::Options
+ Fix a comment in autodiff.h
- Change-Id: I23b4c3383cad30033c539ac93883d77c8dd4ba1a
+ Change-Id: I613e537c834e3f29cd92808c65ddb74f112974cc
-commit 080ca4c5f2ac42620971a07f06d2d13deb7befa8
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun Apr 24 22:46:54 2016 -0700
+commit 27bb997144d00dd4494d440627f1e782bf4adf43
+Author: Johannes Beck <Jodebo_Beck@gmx.de>
+Date: Tue Mar 24 08:05:43 2020 +0100
- Fix typos in users.rst
+ Change SVD algorithm in covariance computation.
- Change-Id: Ifdc67638a39403354bc9589f42a1b42cb9984dd2
+ Switch from JacobiSVD to BDCSVD in
+ ComputeCovarianceValuesUsingDenseSVD. This should increase
+ the performance for larger covariance matrices. See
+ https://eigen.tuxfamily.org/dox/classEigen_1_1BDCSVD.html
+
+ Change-Id: Icde4dec89f506b638b0f9f1aee3b7cfc9e4d72fc
-commit 21ab397dc55335c147fdd795899b1f8981037b09
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun Apr 24 21:13:00 2016 -0700
+commit 84fdac38e033c8f9a63c6e6fca7b44219110f7df
+Author: Johannes Beck <Jodebo_Beck@gmx.de>
+Date: Tue Mar 24 08:02:21 2020 +0100
- Make some Jet comparisons exact.
+ Add const to GetCovarianceMatrix*
+
+ This CL adds const to the functions GetCovarianceMatrix and
+ GetCovarianceMatrixInTangentSpace.
- Change-Id: Ia08c72f3b8779df96f5c0d5a954b2c0a1dd3a061
+ Change-Id: Ibe2cafebede47977a9aabcac8d245f30af184fd1
-commit ee40f954cf464087eb8943abf4d9db8917a33fbe
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun Apr 24 07:49:55 2016 -0700
+commit 6bde61d6be9d81a2cd759a6bbb4a8cd3c24a529c
+Author: Johannes Beck <Jodebo_Beck@gmx.de>
+Date: Sat Dec 28 13:29:19 2019 +0100
- Add colmap to users.rst
+ Add line local parameterization.
+
+ This CL adds a local parameterization for a n-dimensional
+ line, which is represented as an origin point and a direction.
+ The line direction is updated in the same way as a
+ homogeneous vector and the origin point is updated
+ perpendicular to the line direction.
- Change-Id: I452a8c1dc6a3bc55734b2fc3a4002ff7939ba863
+ Change-Id: I733f395e5cc4250abf9778c26fe0a5ae1de6b624
-commit 9665e099022bd06e53b0779550e9aebded7f274d
+commit 2c1c0932e9d3f91691e5c5fce46b4440e181a8bc
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Mon Apr 18 06:00:58 2016 -0700
+Date: Mon Mar 23 11:15:32 2020 -0700
- Fix step norm evaluation in LineSearchMinimizer
+ Update documentation in autodiff.h
- TrustRegionMinimizer evaluates the size of the step
- taken in the ambient space, where as the LineSearchMinimizer
- was using the norm in the tangent space. This change fixes
- this discrepancy.
-
- Change-Id: I9fef64cbb5622c9769c0413003cfb1dc6e89cfa3
+ Change-Id: Icc2753b4f5be95022ffd92e479cdd3d9d7959d4c
-commit 620ca9d0668cd4a00402264fddca3cf6bd2e7265
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Mon Apr 18 15:14:11 2016 +0100
+commit 8904fa4887ed7b3e6d110ad5a98efbc2df48595e
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Mon Mar 23 14:59:26 2020 +0100
- Remove use of -Werror when compiling Ceres.
+ Inline Jet initialization in Autodiff
+
+ Inlining the Jet initialization is mandatory for good performance
+ in autodiff, because all the constants in the dual part can be
+ propagated into the cost functor.
- - As noted in Issue #193 (in that case for GCC 6), Ceres' use of -Werror
- when compiling on *nix can prevent compilation on new compilers that
- add new warnings and there is an inevitable delay between new compiler
- versions and Ceres versions.
- - Removing the explicit use of -Werror, and relying on indirect
- verification by maintainers should fix build issues for Ceres releases
- on newer compilers.
+ This patch unrolls the initialization loop with templates and adds
+ EIGEN_ALWAYS_INLINE to the constructors.
- Change-Id: I38e9ade28d4a90e53dcd918a7d470f1a1debd7b4
+ Change-Id: Ic89d645984f3e1df6c63948236da823ba60d9620
-commit 0c63bd3efbf1d41151c9fab41d4b77dc64c572c8
-Author: Mike Vitus <vitus@google.com>
-Date: Thu Apr 14 10:25:52 2016 -0700
+commit 18a464d4e566e17930005876af19e32cc8796fa3
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Mar 23 07:12:12 2020 -0700
- Add floor and ceil functions to the Jet implementation.
+ Remove an errant CR from local_parameterization.cc
- Change-Id: I72ebfb0e9ade2964dbf3a014225ead345d5ae352
+ Change-Id: Iff98a96f06de5755062a1c79523604dca78b298e
-commit 9843f3280356c158d23c06a16085c6c5ba35e053
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Mon Mar 7 21:24:32 2016 +0000
+commit 5c85f21799804d39cbfd20ec451aa219511e4212
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Mon Mar 23 10:12:00 2020 +0100
- Report Ceres compile options as components in find_package().
+ Use ArraySelector in Autodiff
- - Users can now specify particular components from Ceres, such as
- SuiteSparse support) that must be present in a detected version of
- Ceres in order for it to be reported as found by find_package().
- - This allows users to specify for example that they require a version
- of Ceres with SuiteSparse support at configure time, rather than
- finding out only at run time that Ceres was not compiled with the
- options they require.
- - The list of available components are built directly from the Ceres
- compile options.
- - The meta-module SparseLinearAlgebraLibrary is present if at least
- one sparse linear algebra backend is available.
+ The class ArraySelector is now used in autodiff to store the
+ parameters and residuals. This reduces overhead of FixedArray
+ for fixed-sized residuals and allows more optimizations due
+ to inlining and unrolling.
- Change-Id: I65f1ddfd7697e6dd25bb4ac7e54f5097d3ca6266
+ Change-Id: Ibadc5644e64d672f7a555e250fb1f8da262f9d4f
-commit e4d4d88bbe51b9cc0f7450171511abbea0779790
-Author: Timer <linyicx@126.com>
-Date: Fri Apr 8 15:42:18 2016 +0800
+commit 80477ff073ab7af03cfb248cab4ef41a87f913d0
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Fri Mar 20 16:26:55 2020 +0100
- Fix a spelling error in nnls_modeling.rst
+ Add class ArraySelector
- Change-Id: I341d901d3df993bc5397ed15e6cb330b0c38fd72
+ The ArraySelector selects the best array implementation based on
+ template arguments.
+
+ Change-Id: I93c6db1a638e924b85292e63bca9525610ec2e2f
-commit 5512f58536e1be0d92010d8325b606e7b4733a08
-Author: Keir Mierle <mierle@gmail.com>
-Date: Thu Apr 7 12:03:16 2016 -0700
+commit e7a30359ee754057f9bd7b349c98c291138d91f4
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Fri Mar 20 15:50:37 2020 +0100
- Only use collapse() directive with OpenMP 3.0 or higher
+ Pass kNumResiduals to Autodiff
+
+ The compile-time constant kNumResiduals is now passed to the
+ autodiff functions as a template parameter. This will be used
+ by future patches to optimize autodiff performance.
- Change-Id: Icba544c0494763c57eb6dc61e98379312ca15972
+ Change-Id: Ia2b2cc99b88752e8f12f4ce2542b1963bda552f5
-commit d61e94da5225217cab7b4f93b72f97055094681f
-Author: Thomas Schneider <schneith@ethz.ch>
-Date: Wed Apr 6 10:40:29 2016 +0200
+commit f339d71dd64e4d871cc883f278a153f212f0d1f0
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Mar 19 12:08:28 2020 -0700
+
+ Refactor the automatic differentiation benchmarks.
+
+ 1. Merge them into a single binary.
+ 2. All benchmarks now do the Residual and the Residual + Jacobian
+ evaluation.
+ 3. Re-organize and simplify the CMake file in this directory.
+ 4. Fix a bug in the file where the Clang compiler was not being matched.
+
+ autodiff_benchmarks
+ ---------------------------------------------------------------------------
+ Benchmark Time CPU Iterations
+ ---------------------------------------------------------------------------
+ BM_Linear1CodeGen/0 3.02 ns 3.01 ns 233870456
+ BM_Linear1CodeGen/1 3.02 ns 3.01 ns 233059100
+ BM_Linear1AutoDiff/0 3.78 ns 3.77 ns 185791712
+ BM_Linear1AutoDiff/1 14.0 ns 13.8 ns 53927875
+ BM_Linear10CodeGen/0 5.10 ns 5.10 ns 126745007
+ BM_Linear10CodeGen/1 29.1 ns 29.1 ns 23949310
+ BM_Linear10AutoDiff/0 6.50 ns 6.49 ns 107516972
+ BM_Linear10AutoDiff/1 169 ns 169 ns 4153218
+ BM_Rat43AutoDiff/0 52.7 ns 51.2 ns 16444586
+ BM_Rat43AutoDiff/1 91.8 ns 91.5 ns 7302316
+ BM_SnavelyReprojectionCodeGen/0 38.0 ns 36.2 ns 21131501
+ BM_SnavelyReprojectionCodeGen/1 113 ns 112 ns 5627779
+ BM_SnavelyReprojectionAutoDiff/0 34.4 ns 34.3 ns 20476937
+ BM_SnavelyReprojectionAutoDiff/1 242 ns 240 ns 2930611
+ BM_BrdfCodeGen/0 53.9 ns 53.7 ns 11950083
+ BM_BrdfCodeGen/1 507 ns 505 ns 1396732
+ BM_BrdfAutoDiff/0 58.3 ns 57.8 ns 12220670
+ BM_BrdfAutoDiff/1 2034 ns 1999 ns 257003
+
+ autodiff_benchmarks_fast_math
+ ---------------------------------------------------------------------------
+ Benchmark Time CPU Iterations
+ ---------------------------------------------------------------------------
+ BM_Linear1CodeGen/0 3.19 ns 3.16 ns 215313065
+ BM_Linear1CodeGen/1 2.78 ns 2.76 ns 201497994
+ BM_Linear1AutoDiff/0 3.27 ns 3.26 ns 206154598
+ BM_Linear1AutoDiff/1 13.2 ns 13.1 ns 57257840
+ BM_Linear10CodeGen/0 5.70 ns 5.51 ns 121849325
+ BM_Linear10CodeGen/1 33.9 ns 33.3 ns 21829295
+ BM_Linear10AutoDiff/0 6.85 ns 6.78 ns 106813153
+ BM_Linear10AutoDiff/1 173 ns 171 ns 3849877
+ BM_Rat43AutoDiff/0 44.8 ns 44.2 ns 15577017
+ BM_Rat43AutoDiff/1 96.2 ns 94.6 ns 7374864
+ BM_SnavelyReprojectionCodeGen/0 33.9 ns 33.5 ns 20508373
+ BM_SnavelyReprojectionCodeGen/1 89.7 ns 88.4 ns 7620624
+ BM_SnavelyReprojectionAutoDiff/0 36.5 ns 35.8 ns 20546176
+ BM_SnavelyReprojectionAutoDiff/1 257 ns 252 ns 3044325
+ BM_BrdfCodeGen/0 61.1 ns 58.5 ns 11334013
+ BM_BrdfCodeGen/1 265 ns 265 ns 2625459
+ BM_BrdfAutoDiff/0 52.5 ns 52.5 ns 12938763
+ BM_BrdfAutoDiff/1 1560 ns 1560 ns 440909
+
+ Change-Id: I2d1a4293d3245a50f73af6cf5e5138084321ae6f
+
+commit d37b4cb150c4af65268f9ce5739d1c67e73cb358
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Mar 19 07:36:58 2020 -0700
- Add IsParameterBlockConstant to the ceres::Problem class.
+ Fix some include headers in codegen/test_utils.cc/h
- Change-Id: I7d0e828e81324443209c17fa54dd1d37605e5bfe
+ Change-Id: I769029ce2797eba0de6c7baeb76dc3f2782b6305
-commit 77d94b34741574e958a417561702d6093fba87fb
-Author: Alex Stewart <alexs.mac@gmail.com>
-Date: Sun Feb 14 16:54:03 2016 +0000
+commit 550766e6da49dca895a6e2056b0872c557157c5b
+Author: Darius Rueckert <darius.rueckert@fau.de>
+Date: Wed Mar 18 20:09:20 2020 +0100
- Fix install path for CeresConfig.cmake to be architecture-aware.
+ Add Autodiff Brdf Benchmark
- - Previously we were auto-detecting a "64" suffix for the install path
- for the Ceres library on non-Debian/Arch Linux distributions, but
- we were installing CeresConfig.cmake to an architecture independent
- location.
- - We now install CeresConfig.cmake to lib${LIB_SUFFIX}/cmake/Ceres.
- - Also make LIB_SUFFIX visible to the user in the CMake GUI s/t they can
- easily override the auto-detected value if desired.
- - Reported by jpgr87@gmail.com as Issue #194.
+ The disney brdf is a good benchmark cost functor, because it has
+ - 28 parameters in 7 blocks
+ - 3 residuals
+ - Lots of low-level arithmetic
- Change-Id: If126260d7af685779487c01220ae178ac31f7aea
+ Change-Id: I62c8a717d0aecb64639158f971bdccf6afdfae36
diff --git a/extern/ceres/bundle.sh b/extern/ceres/bundle.sh
index 02af59faf28..573db5a0a2f 100755
--- a/extern/ceres/bundle.sh
+++ b/extern/ceres/bundle.sh
@@ -9,7 +9,6 @@ fi
repo="https://ceres-solver.googlesource.com/ceres-solver"
branch="master"
-#tag="1.4.0"
tag=""
tmp=`mktemp -d`
checkout="$tmp/ceres"
@@ -157,14 +156,10 @@ add_definitions(
-DCERES_NO_SUITESPARSE
-DCERES_NO_CXSPARSE
-DCERES_NO_LAPACK
+ -DCERES_NO_ACCELERATE_SPARSE
-DCERES_HAVE_RWLOCK
+ -DCERES_USE_CXX_THREADS
)
-if(WITH_OPENMP)
- add_definitions(
- -DCERES_USE_OPENMP
- )
-endif()
-
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}" "\${LIB}")
EOF
diff --git a/extern/ceres/files.txt b/extern/ceres/files.txt
index 4d973bbcdc2..bfbd57090c9 100644
--- a/extern/ceres/files.txt
+++ b/extern/ceres/files.txt
@@ -1,29 +1,37 @@
include/ceres/autodiff_cost_function.h
+include/ceres/autodiff_first_order_function.h
include/ceres/autodiff_local_parameterization.h
include/ceres/c_api.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
+include/ceres/context.h
include/ceres/cost_function.h
include/ceres/cost_function_to_functor.h
include/ceres/covariance.h
include/ceres/crs_matrix.h
+include/ceres/cubic_interpolation.h
include/ceres/dynamic_autodiff_cost_function.h
+include/ceres/dynamic_cost_function.h
include/ceres/dynamic_cost_function_to_functor.h
include/ceres/dynamic_numeric_diff_cost_function.h
-include/ceres/fpclassify.h
+include/ceres/evaluation_callback.h
+include/ceres/first_order_function.h
include/ceres/gradient_checker.h
include/ceres/gradient_problem.h
include/ceres/gradient_problem_solver.h
+include/ceres/internal/array_selector.h
include/ceres/internal/autodiff.h
include/ceres/internal/disable_warnings.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
-include/ceres/internal/macros.h
-include/ceres/internal/manual_constructor.h
+include/ceres/internal/householder_vector.h
+include/ceres/internal/integer_sequence_algorithm.h
+include/ceres/internal/line_parameterization.h
+include/ceres/internal/memory.h
include/ceres/internal/numeric_diff.h
+include/ceres/internal/parameter_dims.h
include/ceres/internal/port.h
include/ceres/internal/reenable_warnings.h
-include/ceres/internal/scoped_ptr.h
include/ceres/internal/variadic_evaluate.h
include/ceres/iteration_callback.h
include/ceres/jet.h
@@ -37,8 +45,13 @@ include/ceres/problem.h
include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
+include/ceres/tiny_solver_autodiff_function.h
+include/ceres/tiny_solver_cost_function_adapter.h
+include/ceres/tiny_solver.h
include/ceres/types.h
include/ceres/version.h
+internal/ceres/accelerate_sparse.cc
+internal/ceres/accelerate_sparse.h
internal/ceres/array_utils.cc
internal/ceres/array_utils.h
internal/ceres/blas.cc
@@ -63,21 +76,26 @@ internal/ceres/block_structure.cc
internal/ceres/block_structure.h
internal/ceres/callbacks.cc
internal/ceres/callbacks.h
+internal/ceres/canonical_views_clustering.cc
+internal/ceres/canonical_views_clustering.h
internal/ceres/c_api.cc
internal/ceres/casts.h
internal/ceres/cgnr_linear_operator.h
internal/ceres/cgnr_solver.cc
internal/ceres/cgnr_solver.h
-internal/ceres/collections_port.h
internal/ceres/compressed_col_sparse_matrix_utils.cc
internal/ceres/compressed_col_sparse_matrix_utils.h
internal/ceres/compressed_row_jacobian_writer.cc
internal/ceres/compressed_row_jacobian_writer.h
internal/ceres/compressed_row_sparse_matrix.cc
internal/ceres/compressed_row_sparse_matrix.h
+internal/ceres/concurrent_queue.h
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
+internal/ceres/context.cc
+internal/ceres/context_impl.cc
+internal/ceres/context_impl.h
internal/ceres/coordinate_descent_minimizer.cc
internal/ceres/coordinate_descent_minimizer.h
internal/ceres/corrector.cc
@@ -85,6 +103,7 @@ internal/ceres/corrector.h
internal/ceres/covariance.cc
internal/ceres/covariance_impl.cc
internal/ceres/covariance_impl.h
+internal/ceres/cxsparse.cc
internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
internal/ceres/dense_normal_cholesky_solver.cc
@@ -102,11 +121,21 @@ internal/ceres/dynamic_compressed_row_jacobian_writer.cc
internal/ceres/dynamic_compressed_row_jacobian_writer.h
internal/ceres/dynamic_compressed_row_sparse_matrix.cc
internal/ceres/dynamic_compressed_row_sparse_matrix.h
+internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
+internal/ceres/dynamic_sparse_normal_cholesky_solver.h
+internal/ceres/eigensparse.cc
+internal/ceres/eigensparse.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/execution_summary.h
internal/ceres/file.cc
internal/ceres/file.h
+internal/ceres/float_cxsparse.cc
+internal/ceres/float_cxsparse.h
+internal/ceres/float_suitesparse.cc
+internal/ceres/float_suitesparse.h
+internal/ceres/function_sample.cc
+internal/ceres/function_sample.h
internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -118,10 +147,12 @@ internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
+internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
+internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -138,17 +169,18 @@ internal/ceres/generated/schur_eliminator_2_3_9.cc
internal/ceres/generated/schur_eliminator_2_3_d.cc
internal/ceres/generated/schur_eliminator_2_4_3.cc
internal/ceres/generated/schur_eliminator_2_4_4.cc
+internal/ceres/generated/schur_eliminator_2_4_6.cc
internal/ceres/generated/schur_eliminator_2_4_8.cc
internal/ceres/generated/schur_eliminator_2_4_9.cc
internal/ceres/generated/schur_eliminator_2_4_d.cc
internal/ceres/generated/schur_eliminator_2_d_d.cc
+internal/ceres/generated/schur_eliminator_3_3_3.cc
internal/ceres/generated/schur_eliminator_4_4_2.cc
internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
-internal/ceres/generate_eliminator_specialization.py
-internal/ceres/generate_partitioned_matrix_view_specializations.py
+internal/ceres/generate_template_specializations.py
internal/ceres/gradient_checker.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
@@ -157,12 +189,15 @@ internal/ceres/gradient_problem_evaluator.h
internal/ceres/gradient_problem_solver.cc
internal/ceres/graph_algorithms.h
internal/ceres/graph.h
-internal/ceres/householder_vector.h
internal/ceres/implicit_schur_complement.cc
internal/ceres/implicit_schur_complement.h
-internal/ceres/integral_types.h
+internal/ceres/inner_product_computer.cc
+internal/ceres/inner_product_computer.h
+internal/ceres/invert_psd_matrix.h
internal/ceres/is_close.cc
internal/ceres/is_close.h
+internal/ceres/iterative_refiner.cc
+internal/ceres/iterative_refiner.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
internal/ceres/lapack.cc
@@ -190,14 +225,21 @@ internal/ceres/low_rank_inverse_hessian.h
internal/ceres/map_util.h
internal/ceres/minimizer.cc
internal/ceres/minimizer.h
-internal/ceres/mutex.h
internal/ceres/normal_prior.cc
+internal/ceres/pair_hash.h
+internal/ceres/parallel_for_cxx.cc
+internal/ceres/parallel_for.h
+internal/ceres/parallel_for_nothreads.cc
+internal/ceres/parallel_for_openmp.cc
+internal/ceres/parallel_utils.cc
+internal/ceres/parallel_utils.h
internal/ceres/parameter_block.h
internal/ceres/parameter_block_ordering.cc
internal/ceres/parameter_block_ordering.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
internal/ceres/partitioned_matrix_view_impl.h
+internal/ceres/partitioned_matrix_view_template.py
internal/ceres/polynomial.cc
internal/ceres/polynomial.h
internal/ceres/preconditioner.cc
@@ -222,14 +264,23 @@ internal/ceres/schur_complement_solver.h
internal/ceres/schur_eliminator.cc
internal/ceres/schur_eliminator.h
internal/ceres/schur_eliminator_impl.h
+internal/ceres/schur_eliminator_template.py
internal/ceres/schur_jacobi_preconditioner.cc
internal/ceres/schur_jacobi_preconditioner.h
+internal/ceres/schur_templates.cc
+internal/ceres/schur_templates.h
+internal/ceres/scoped_thread_token.h
internal/ceres/scratch_evaluate_preparer.cc
internal/ceres/scratch_evaluate_preparer.h
+internal/ceres/single_linkage_clustering.cc
+internal/ceres/single_linkage_clustering.h
+internal/ceres/small_blas_generic.h
internal/ceres/small_blas.h
internal/ceres/solver.cc
internal/ceres/solver_utils.cc
internal/ceres/solver_utils.h
+internal/ceres/sparse_cholesky.cc
+internal/ceres/sparse_cholesky.h
internal/ceres/sparse_matrix.cc
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
@@ -239,7 +290,14 @@ internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
+internal/ceres/subset_preconditioner.cc
+internal/ceres/subset_preconditioner.h
+internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
+internal/ceres/thread_pool.cc
+internal/ceres/thread_pool.h
+internal/ceres/thread_token_provider.cc
+internal/ceres/thread_token_provider.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
internal/ceres/trust_region_minimizer.cc
@@ -251,7 +309,10 @@ internal/ceres/trust_region_step_evaluator.h
internal/ceres/trust_region_strategy.cc
internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
+internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
+internal/ceres/visibility.cc
+internal/ceres/visibility.h
internal/ceres/wall_time.cc
internal/ceres/wall_time.h
config/ceres/internal/config.h
diff --git a/extern/ceres/include/ceres/autodiff_cost_function.h b/extern/ceres/include/ceres/autodiff_cost_function.h
index e7893e4828e..5e6e9c55db5 100644
--- a/extern/ceres/include/ceres/autodiff_cost_function.h
+++ b/extern/ceres/include/ceres/autodiff_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
//
// Create CostFunctions as needed by the least squares framework, with
// Jacobians computed via automatic differentiation. For more
-// information on automatic differentation, see the wikipedia article
+// information on automatic differentiation, see the wikipedia article
// at http://en.wikipedia.org/wiki/Automatic_differentiation
//
// To get an auto differentiated cost function, you must define a class with a
@@ -54,7 +54,7 @@
// for a series of measurements, where there is an instance of the cost function
// for each measurement k.
//
-// The actual cost added to the total problem is e^2, or (k - x'k)^2; however,
+// The actual cost added to the total problem is e^2, or (k - x'y)^2; however,
// the squaring is implicitly done by the optimization framework.
//
// To write an auto-differentiable cost function for the above model, first
@@ -90,7 +90,7 @@
// Dimension of x ---------------+ |
// Dimension of y ------------------+
//
-// In this example, there is usually an instance for each measumerent of k.
+// In this example, there is usually an instance for each measurement of k.
//
// In the instantiation above, the template parameters following
// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a
@@ -110,12 +110,8 @@
// Dimension of x ------------------------------------+ |
// Dimension of y ---------------------------------------+
//
-// The framework can currently accommodate cost functions of up to 10
-// independent variables, and there is no limit on the dimensionality
-// of each of them.
-//
// WARNING #1: Since the functor will get instantiated with different types for
-// T, you must to convert from other numeric types to T before mixing
+// T, you must convert from other numeric types to T before mixing
// computations with other variables of type T. In the example above, this is
// seen where instead of using k_ directly, k_ is wrapped with T(k_).
//
@@ -129,8 +125,9 @@
#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_
+#include <memory>
+
#include "ceres/internal/autodiff.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -138,7 +135,7 @@
namespace ceres {
// A cost function which computes the derivative of the cost with respect to
-// the parameters (a.k.a. the jacobian) using an autodifferentiation framework.
+// the parameters (a.k.a. the jacobian) using an auto differentiation framework.
// The first template argument is the functor object, described in the header
// comment. The second argument is the dimension of the residual (or
// ceres::DYNAMIC to indicate it will be set at runtime), and subsequent
@@ -153,27 +150,15 @@ namespace ceres {
// of residuals for a single autodiff cost function at runtime.
template <typename CostFunctor,
int kNumResiduals, // Number of residuals, or ceres::DYNAMIC.
- int N0, // Number of parameters in block 0.
- int N1 = 0, // Number of parameters in block 1.
- int N2 = 0, // Number of parameters in block 2.
- int N3 = 0, // Number of parameters in block 3.
- int N4 = 0, // Number of parameters in block 4.
- int N5 = 0, // Number of parameters in block 5.
- int N6 = 0, // Number of parameters in block 6.
- int N7 = 0, // Number of parameters in block 7.
- int N8 = 0, // Number of parameters in block 8.
- int N9 = 0> // Number of parameters in block 9.
-class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
- N0, N1, N2, N3, N4,
- N5, N6, N7, N8, N9> {
+ int... Ns> // Number of parameters in each parameter block.
+class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
public:
// Takes ownership of functor. Uses the template-provided value for the
// number of residuals ("kNumResiduals").
- explicit AutoDiffCostFunction(CostFunctor* functor)
- : functor_(functor) {
- CHECK_NE(kNumResiduals, DYNAMIC)
- << "Can't run the fixed-size constructor if the "
- << "number of residuals is set to ceres::DYNAMIC.";
+ explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) {
+ static_assert(kNumResiduals != DYNAMIC,
+ "Can't run the fixed-size constructor if the number of "
+ "residuals is set to ceres::DYNAMIC.");
}
// Takes ownership of functor. Ignores the template-provided
@@ -183,13 +168,10 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
// numbers of residuals at runtime.
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
- CHECK_EQ(kNumResiduals, DYNAMIC)
- << "Can't run the dynamic-size constructor if the "
- << "number of residuals is not ceres::DYNAMIC.";
- SizedCostFunction<kNumResiduals,
- N0, N1, N2, N3, N4,
- N5, N6, N7, N8, N9>
- ::set_num_residuals(num_residuals);
+ static_assert(kNumResiduals == DYNAMIC,
+ "Can't run the dynamic-size constructor if the number of "
+ "residuals is not ceres::DYNAMIC.");
+ SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
virtual ~AutoDiffCostFunction() {}
@@ -197,29 +179,28 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals,
// Implementation details follow; clients of the autodiff cost function should
// not have to examine below here.
//
- // To handle varardic cost functions, some template magic is needed. It's
+ // To handle variadic cost functions, some template magic is needed. It's
// mostly hidden inside autodiff.h.
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override {
+ using ParameterDims =
+ typename SizedCostFunction<kNumResiduals, Ns...>::ParameterDims;
+
if (!jacobians) {
- return internal::VariadicEvaluate<
- CostFunctor, double, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>
- ::Call(*functor_, parameters, residuals);
+ return internal::VariadicEvaluate<ParameterDims>(
+ *functor_, parameters, residuals);
}
- return internal::AutoDiff<CostFunctor, double,
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate(
- *functor_,
- parameters,
- SizedCostFunction<kNumResiduals,
- N0, N1, N2, N3, N4,
- N5, N6, N7, N8, N9>::num_residuals(),
- residuals,
- jacobians);
- }
+ return internal::AutoDifferentiate<kNumResiduals, ParameterDims>(
+ *functor_,
+ parameters,
+ SizedCostFunction<kNumResiduals, Ns...>::num_residuals(),
+ residuals,
+ jacobians);
+ };
private:
- internal::scoped_ptr<CostFunctor> functor_;
+ std::unique_ptr<CostFunctor> functor_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/autodiff_first_order_function.h b/extern/ceres/include/ceres/autodiff_first_order_function.h
new file mode 100644
index 00000000000..b98d845655b
--- /dev/null
+++ b/extern/ceres/include/ceres/autodiff_first_order_function.h
@@ -0,0 +1,151 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_
+#define CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_
+
+#include <memory>
+
+#include "ceres/first_order_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/jet.h"
+#include "ceres/types.h"
+
+namespace ceres {
+
+// Create FirstOrderFunctions as needed by the GradientProblem
+// framework, with gradients computed via automatic
+// differentiation. For more information on automatic differentiation,
+// see the wikipedia article at
+// http://en.wikipedia.org/wiki/Automatic_differentiation
+//
+// To get an auto differentiated function, you must define a class
+// with a templated operator() (a functor) that computes the cost
+// function in terms of the template parameter T. The autodiff
+// framework substitutes appropriate "jet" objects for T in order to
+// compute the derivative when necessary, but this is hidden, and you
+// should write the function as if T were a scalar type (e.g. a
+// double-precision floating point number).
+//
+// The function must write the computed value in the last argument
+// (the only non-const one) and return true to indicate
+// success.
+//
+// For example, consider a scalar error e = x'y - a, where both x and y are
+// two-dimensional column vector parameters, the prime sign indicates
+// transposition, and a is a constant.
+//
+// To write an auto-differentiable FirstOrderFunction for the above model, first
+// define the object
+//
+// class QuadraticCostFunctor {
+// public:
+// explicit QuadraticCostFunctor(double a) : a_(a) {}
+// template <typename T>
+// bool operator()(const T* const xy, T* cost) const {
+// const T* const x = xy;
+// const T* const y = xy + 2;
+// *cost = x[0] * y[0] + x[1] * y[1] - T(a_);
+// return true;
+// }
+//
+// private:
+// double a_;
+// };
+//
+// Note that in the declaration of operator() the input parameters xy come
+// first, and are passed as const pointers to arrays of T. The
+// output is the last parameter.
+//
+// Then given this class definition, the auto differentiated FirstOrderFunction
+// for it can be constructed as follows.
+//
+// FirstOrderFunction* function =
+// new AutoDiffFirstOrderFunction<QuadraticCostFunctor, 4>(
+// new QuadraticCostFunctor(1.0)));
+//
+// In the instantiation above, the template parameters following
+// "QuadraticCostFunctor", "4", describe the functor as computing a
+// 1-dimensional output from a four dimensional vector.
+//
+// WARNING: Since the functor will get instantiated with different types for
+// T, you must convert from other numeric types to T before mixing
+// computations with other variables of type T. In the example above, this is
+// seen where instead of using a_ directly, a_ is wrapped with T(a_).
+
+template <typename FirstOrderFunctor, int kNumParameters>
+class AutoDiffFirstOrderFunction : public FirstOrderFunction {
+ public:
+ // Takes ownership of functor.
+ explicit AutoDiffFirstOrderFunction(FirstOrderFunctor* functor)
+ : functor_(functor) {
+ static_assert(kNumParameters > 0, "kNumParameters must be positive");
+ }
+
+ virtual ~AutoDiffFirstOrderFunction() {}
+
+ bool Evaluate(const double* const parameters,
+ double* cost,
+ double* gradient) const override {
+ if (gradient == nullptr) {
+ return (*functor_)(parameters, cost);
+ }
+
+ typedef Jet<double, kNumParameters> JetT;
+ internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(kNumParameters);
+ for (int i = 0; i < kNumParameters; ++i) {
+ x[i].a = parameters[i];
+ x[i].v.setZero();
+ x[i].v[i] = 1.0;
+ }
+
+ JetT output;
+ output.a = kImpossibleValue;
+ output.v.setConstant(kImpossibleValue);
+
+ if (!(*functor_)(x.data(), &output)) {
+ return false;
+ }
+
+ *cost = output.a;
+ VectorRef(gradient, kNumParameters) = output.v;
+ return true;
+ }
+
+ int NumParameters() const override { return kNumParameters; }
+
+ private:
+ std::unique_ptr<FirstOrderFunctor> functor_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_AUTODIFF_FIRST_ORDER_FUNCTION_H_
diff --git a/extern/ceres/include/ceres/autodiff_local_parameterization.h b/extern/ceres/include/ceres/autodiff_local_parameterization.h
index 27397e20d3b..d694376fdd1 100644
--- a/extern/ceres/include/ceres/autodiff_local_parameterization.h
+++ b/extern/ceres/include/ceres/autodiff_local_parameterization.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,9 +33,10 @@
#ifndef CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
#define CERES_PUBLIC_AUTODIFF_LOCAL_PARAMETERIZATION_H_
-#include "ceres/local_parameterization.h"
+#include <memory>
+
#include "ceres/internal/autodiff.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/local_parameterization.h"
namespace ceres {
@@ -107,21 +108,20 @@ namespace ceres {
template <typename Functor, int kGlobalSize, int kLocalSize>
class AutoDiffLocalParameterization : public LocalParameterization {
public:
- AutoDiffLocalParameterization() :
- functor_(new Functor()) {}
+ AutoDiffLocalParameterization() : functor_(new Functor()) {}
// Takes ownership of functor.
- explicit AutoDiffLocalParameterization(Functor* functor) :
- functor_(functor) {}
+ explicit AutoDiffLocalParameterization(Functor* functor)
+ : functor_(functor) {}
virtual ~AutoDiffLocalParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const {
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override {
return (*functor_)(x, delta, x_plus_delta);
}
- virtual bool ComputeJacobian(const double* x, double* jacobian) const {
+ bool ComputeJacobian(const double* x, double* jacobian) const override {
double zero_delta[kLocalSize];
for (int i = 0; i < kLocalSize; ++i) {
zero_delta[i] = 0.0;
@@ -133,20 +133,18 @@ class AutoDiffLocalParameterization : public LocalParameterization {
}
const double* parameter_ptrs[2] = {x, zero_delta};
- double* jacobian_ptrs[2] = { NULL, jacobian };
- return internal::AutoDiff<Functor, double, kGlobalSize, kLocalSize>
- ::Differentiate(*functor_,
- parameter_ptrs,
- kGlobalSize,
- x_plus_delta,
- jacobian_ptrs);
+ double* jacobian_ptrs[2] = {NULL, jacobian};
+ return internal::AutoDifferentiate<
+ kGlobalSize,
+ internal::StaticParameterDims<kGlobalSize, kLocalSize>>(
+ *functor_, parameter_ptrs, kGlobalSize, x_plus_delta, jacobian_ptrs);
}
- virtual int GlobalSize() const { return kGlobalSize; }
- virtual int LocalSize() const { return kLocalSize; }
+ int GlobalSize() const override { return kGlobalSize; }
+ int LocalSize() const override { return kLocalSize; }
private:
- internal::scoped_ptr<Functor> functor_;
+ std::unique_ptr<Functor> functor_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/c_api.h b/extern/ceres/include/ceres/c_api.h
index df7c9b6d671..0e6e590d0f5 100644
--- a/extern/ceres/include/ceres/c_api.h
+++ b/extern/ceres/include/ceres/c_api.h
@@ -1,5 +1,5 @@
/* Ceres Solver - A fast non-linear least squares minimizer
- * Copyright 2015 Google Inc. All rights reserved.
+ * Copyright 2019 Google Inc. All rights reserved.
* http://ceres-solver.org/
*
* Redistribution and use in source and binary forms, with or without
@@ -143,4 +143,4 @@ CERES_EXPORT void ceres_solve(ceres_problem_t* problem);
#include "ceres/internal/reenable_warnings.h"
-#endif /* CERES_PUBLIC_C_API_H_ */
+#endif /* CERES_PUBLIC_C_API_H_ */
diff --git a/extern/ceres/include/ceres/ceres.h b/extern/ceres/include/ceres/ceres.h
index 64ffb99798a..d249351694c 100644
--- a/extern/ceres/include/ceres/ceres.h
+++ b/extern/ceres/include/ceres/ceres.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -36,12 +36,18 @@
#include "ceres/autodiff_cost_function.h"
#include "ceres/autodiff_local_parameterization.h"
+#include "ceres/conditioned_cost_function.h"
+#include "ceres/context.h"
#include "ceres/cost_function.h"
#include "ceres/cost_function_to_functor.h"
#include "ceres/covariance.h"
#include "ceres/crs_matrix.h"
#include "ceres/dynamic_autodiff_cost_function.h"
+#include "ceres/dynamic_cost_function.h"
+#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
+#include "ceres/evaluation_callback.h"
+#include "ceres/gradient_checker.h"
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_solver.h"
#include "ceres/iteration_callback.h"
@@ -49,6 +55,7 @@
#include "ceres/local_parameterization.h"
#include "ceres/loss_function.h"
#include "ceres/numeric_diff_cost_function.h"
+#include "ceres/numeric_diff_options.h"
#include "ceres/ordered_groups.h"
#include "ceres/problem.h"
#include "ceres/sized_cost_function.h"
diff --git a/extern/ceres/include/ceres/conditioned_cost_function.h b/extern/ceres/include/ceres/conditioned_cost_function.h
index 29597d935cb..a57ee209b80 100644
--- a/extern/ceres/include/ceres/conditioned_cost_function.h
+++ b/extern/ceres/include/ceres/conditioned_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,12 +34,12 @@
#ifndef CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
#define CERES_PUBLIC_CONDITIONED_COST_FUNCTION_H_
+#include <memory>
#include <vector>
#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
+#include "ceres/types.h"
namespace ceres {
@@ -77,17 +77,19 @@ class CERES_EXPORT ConditionedCostFunction : public CostFunction {
// per-residual conditioner. Takes ownership of all of the wrapped cost
// functions, or not, depending on the ownership parameter. Conditioners
// may be NULL, in which case the corresponding residual is not modified.
+ //
+ // The conditioners can repeat.
ConditionedCostFunction(CostFunction* wrapped_cost_function,
const std::vector<CostFunction*>& conditioners,
Ownership ownership);
virtual ~ConditionedCostFunction();
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const;
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override;
private:
- internal::scoped_ptr<CostFunction> wrapped_cost_function_;
+ std::unique_ptr<CostFunction> wrapped_cost_function_;
std::vector<CostFunction*> conditioners_;
Ownership ownership_;
};
diff --git a/extern/ceres/include/ceres/context.h b/extern/ceres/include/ceres/context.h
new file mode 100644
index 00000000000..d08e32b31a8
--- /dev/null
+++ b/extern/ceres/include/ceres/context.h
@@ -0,0 +1,56 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_PUBLIC_CONTEXT_H_
+#define CERES_PUBLIC_CONTEXT_H_
+
+namespace ceres {
+
+// A global context for processing data in Ceres. This provides a mechanism to
+// allow Ceres to reuse items that are expensive to create between multiple
+// calls; for example, thread pools. The same Context can be used on multiple
+// Problems, either serially or in parallel. When using it with multiple
+// Problems at the same time, they may end up contending for resources
+// (e.g. threads) managed by the Context.
+class Context {
+ public:
+ Context() {}
+ Context(const Context&) = delete;
+ void operator=(const Context&) = delete;
+
+ virtual ~Context() {}
+
+ // Creates a context object and the caller takes ownership.
+ static Context* Create();
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_CONTEXT_H_
diff --git a/extern/ceres/include/ceres/cost_function.h b/extern/ceres/include/ceres/cost_function.h
index f051a897c0d..d1550c119e8 100644
--- a/extern/ceres/include/ceres/cost_function.h
+++ b/extern/ceres/include/ceres/cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -44,18 +44,18 @@
#ifndef CERES_PUBLIC_COST_FUNCTION_H_
#define CERES_PUBLIC_COST_FUNCTION_H_
+#include <cstdint>
#include <vector>
-#include "ceres/internal/macros.h"
-#include "ceres/internal/port.h"
-#include "ceres/types.h"
+
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/port.h"
namespace ceres {
// This class implements the computation of the cost (a.k.a. residual) terms as
// a function of the input (control) variables, and is the interface for users
// to describe their least squares problem to Ceres. In other words, this is the
-// modelling layer between users and the Ceres optimizer. The signature of the
+// modeling layer between users and the Ceres optimizer. The signature of the
// function (number and sizes of input parameter blocks and number of outputs)
// is stored in parameter_block_sizes_ and num_residuals_ respectively. User
// code inheriting from this class is expected to set these two members with the
@@ -64,6 +64,8 @@ namespace ceres {
class CERES_EXPORT CostFunction {
public:
CostFunction() : num_residuals_(0) {}
+ CostFunction(const CostFunction&) = delete;
+ void operator=(const CostFunction&) = delete;
virtual ~CostFunction() {}
@@ -115,29 +117,24 @@ class CERES_EXPORT CostFunction {
double* residuals,
double** jacobians) const = 0;
- const std::vector<int32>& parameter_block_sizes() const {
+ const std::vector<int32_t>& parameter_block_sizes() const {
return parameter_block_sizes_;
}
- int num_residuals() const {
- return num_residuals_;
- }
+ int num_residuals() const { return num_residuals_; }
protected:
- std::vector<int32>* mutable_parameter_block_sizes() {
+ std::vector<int32_t>* mutable_parameter_block_sizes() {
return &parameter_block_sizes_;
}
- void set_num_residuals(int num_residuals) {
- num_residuals_ = num_residuals;
- }
+ void set_num_residuals(int num_residuals) { num_residuals_ = num_residuals; }
private:
// Cost function signature metadata: number of inputs & their sizes,
// number of outputs (residuals).
- std::vector<int32> parameter_block_sizes_;
+ std::vector<int32_t> parameter_block_sizes_;
int num_residuals_;
- CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/cost_function_to_functor.h b/extern/ceres/include/ceres/cost_function_to_functor.h
index d2dc94725e4..1beeb906063 100644
--- a/extern/ceres/include/ceres/cost_function_to_functor.h
+++ b/extern/ceres/include/ceres/cost_function_to_functor.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,7 @@
//
// CostFunctionToFunctor is an adapter class that allows users to use
// SizedCostFunction objects in templated functors which are to be used for
-// automatic differentiation. This allows the user to seamlessly mix
+// automatic differentiation. This allows the user to seamlessly mix
// analytic, numeric and automatic differentiation.
//
// For example, let us assume that
@@ -38,16 +38,15 @@
// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> {
// public:
// IntrinsicProjection(const double* observation);
-// virtual bool Evaluate(double const* const* parameters,
-// double* residuals,
-// double** jacobians) const;
+// bool Evaluate(double const* const* parameters,
+// double* residuals,
+// double** jacobians) const override;
// };
//
// is a cost function that implements the projection of a point in its
// local coordinate system onto its image plane and subtracts it from
// the observed point projection. It can compute its residual and
-// either via analytic or numerical differentiation can compute its
-// jacobians.
+// jacobians either via analytic or numerical differentiation.
//
// Now we would like to compose the action of this CostFunction with
// the action of camera extrinsics, i.e., rotation and
@@ -87,594 +86,83 @@
#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
#define CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_
+#include <cstdint>
#include <numeric>
+#include <tuple>
+#include <utility>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/dynamic_cost_function_to_functor.h"
#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
namespace ceres {
-template <int kNumResiduals,
- int N0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
- int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
+template <int kNumResiduals, int... Ns>
class CostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit CostFunctionToFunctor(CostFunction* cost_function)
: cost_functor_(cost_function) {
- CHECK_NOTNULL(cost_function);
+ CHECK(cost_function != nullptr);
CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC);
- // This block breaks the 80 column rule to keep it somewhat readable.
- CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
- << "Zero block cannot precede a non-zero block. Block sizes are "
- << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
- << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
- << N8 << ", " << N9;
-
- const std::vector<int32>& parameter_block_sizes =
+ const std::vector<int32_t>& parameter_block_sizes =
cost_function->parameter_block_sizes();
- const int num_parameter_blocks =
- (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
- (N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
+ const int num_parameter_blocks = ParameterDims::kNumParameterBlocks;
CHECK_EQ(static_cast<int>(parameter_block_sizes.size()),
num_parameter_blocks);
- CHECK_EQ(N0, parameter_block_sizes[0]);
- if (parameter_block_sizes.size() > 1) CHECK_EQ(N1, parameter_block_sizes[1]); // NOLINT
- if (parameter_block_sizes.size() > 2) CHECK_EQ(N2, parameter_block_sizes[2]); // NOLINT
- if (parameter_block_sizes.size() > 3) CHECK_EQ(N3, parameter_block_sizes[3]); // NOLINT
- if (parameter_block_sizes.size() > 4) CHECK_EQ(N4, parameter_block_sizes[4]); // NOLINT
- if (parameter_block_sizes.size() > 5) CHECK_EQ(N5, parameter_block_sizes[5]); // NOLINT
- if (parameter_block_sizes.size() > 6) CHECK_EQ(N6, parameter_block_sizes[6]); // NOLINT
- if (parameter_block_sizes.size() > 7) CHECK_EQ(N7, parameter_block_sizes[7]); // NOLINT
- if (parameter_block_sizes.size() > 8) CHECK_EQ(N8, parameter_block_sizes[8]); // NOLINT
- if (parameter_block_sizes.size() > 9) CHECK_EQ(N9, parameter_block_sizes[9]); // NOLINT
-
- CHECK_EQ(accumulate(parameter_block_sizes.begin(),
- parameter_block_sizes.end(), 0),
- N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9);
- }
-
- bool operator()(const double* x0, double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_EQ(N1, 0);
- CHECK_EQ(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
-
- return cost_functor_(&x0, residuals);
- }
-
- bool operator()(const double* x0,
- const double* x1,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_EQ(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(2);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
-
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(3);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
+ if (parameter_block_sizes.size() == num_parameter_blocks) {
+ for (int block = 0; block < num_parameter_blocks; ++block) {
+ CHECK_EQ(ParameterDims::GetDim(block), parameter_block_sizes[block])
+ << "Parameter block size missmatch. The specified static parameter "
+ "block dimension does not match the one from the cost function.";
+ }
+ }
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(4);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- return cost_functor_(parameter_blocks.get(), residuals);
+ CHECK_EQ(accumulate(
+ parameter_block_sizes.begin(), parameter_block_sizes.end(), 0),
+ ParameterDims::kNumParameters);
}
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(5);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
+ template <typename T, typename... Ts>
+ bool operator()(const T* p1, Ts*... ps) const {
+ // Add one because of residual block.
+ static_assert(sizeof...(Ts) + 1 == ParameterDims::kNumParameterBlocks + 1,
+ "Invalid number of parameter blocks specified.");
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- const double* x5,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(6);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- parameter_blocks[5] = x5;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
+ auto params = std::make_tuple(p1, ps...);
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- const double* x5,
- const double* x6,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(7);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- parameter_blocks[5] = x5;
- parameter_blocks[6] = x6;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
+ // Extract residual pointer from params. The residual pointer is the
+ // last pointer.
+ constexpr int kResidualIndex = ParameterDims::kNumParameterBlocks;
+ T* residuals = std::get<kResidualIndex>(params);
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- const double* x5,
- const double* x6,
- const double* x7,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(8);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- parameter_blocks[5] = x5;
- parameter_blocks[6] = x6;
- parameter_blocks[7] = x7;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
-
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- const double* x5,
- const double* x6,
- const double* x7,
- const double* x8,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_NE(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(9);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- parameter_blocks[5] = x5;
- parameter_blocks[6] = x6;
- parameter_blocks[7] = x7;
- parameter_blocks[8] = x8;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
-
- bool operator()(const double* x0,
- const double* x1,
- const double* x2,
- const double* x3,
- const double* x4,
- const double* x5,
- const double* x6,
- const double* x7,
- const double* x8,
- const double* x9,
- double* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_NE(N8, 0);
- CHECK_NE(N9, 0);
- internal::FixedArray<const double*> parameter_blocks(10);
- parameter_blocks[0] = x0;
- parameter_blocks[1] = x1;
- parameter_blocks[2] = x2;
- parameter_blocks[3] = x3;
- parameter_blocks[4] = x4;
- parameter_blocks[5] = x5;
- parameter_blocks[6] = x6;
- parameter_blocks[7] = x7;
- parameter_blocks[8] = x8;
- parameter_blocks[9] = x9;
- return cost_functor_(parameter_blocks.get(), residuals);
- }
+ // Extract parameter block pointers from params.
+ using Indices =
+ std::make_integer_sequence<int,
+ ParameterDims::kNumParameterBlocks>;
+ std::array<const T*, ParameterDims::kNumParameterBlocks> parameter_blocks =
+ GetParameterPointers<T>(params, Indices());
- template <typename JetT>
- bool operator()(const JetT* x0, JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_EQ(N1, 0);
- CHECK_EQ(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- return cost_functor_(&x0, residuals);
+ return cost_functor_(parameter_blocks.data(), residuals);
}
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_EQ(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(2);
- jets[0] = x0;
- jets[1] = x1;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_EQ(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(3);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_EQ(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(4);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_EQ(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(5);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- const JetT* x5,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_EQ(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(6);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- jets[5] = x5;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- const JetT* x5,
- const JetT* x6,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_EQ(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(7);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- jets[5] = x5;
- jets[6] = x6;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- const JetT* x5,
- const JetT* x6,
- const JetT* x7,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_EQ(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(8);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- jets[5] = x5;
- jets[6] = x6;
- jets[7] = x7;
- return cost_functor_(jets.get(), residuals);
- }
-
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- const JetT* x5,
- const JetT* x6,
- const JetT* x7,
- const JetT* x8,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_NE(N8, 0);
- CHECK_EQ(N9, 0);
- internal::FixedArray<const JetT*> jets(9);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- jets[5] = x5;
- jets[6] = x6;
- jets[7] = x7;
- jets[8] = x8;
- return cost_functor_(jets.get(), residuals);
- }
+ private:
+ using ParameterDims = internal::StaticParameterDims<Ns...>;
- template <typename JetT>
- bool operator()(const JetT* x0,
- const JetT* x1,
- const JetT* x2,
- const JetT* x3,
- const JetT* x4,
- const JetT* x5,
- const JetT* x6,
- const JetT* x7,
- const JetT* x8,
- const JetT* x9,
- JetT* residuals) const {
- CHECK_NE(N0, 0);
- CHECK_NE(N1, 0);
- CHECK_NE(N2, 0);
- CHECK_NE(N3, 0);
- CHECK_NE(N4, 0);
- CHECK_NE(N5, 0);
- CHECK_NE(N6, 0);
- CHECK_NE(N7, 0);
- CHECK_NE(N8, 0);
- CHECK_NE(N9, 0);
- internal::FixedArray<const JetT*> jets(10);
- jets[0] = x0;
- jets[1] = x1;
- jets[2] = x2;
- jets[3] = x3;
- jets[4] = x4;
- jets[5] = x5;
- jets[6] = x6;
- jets[7] = x7;
- jets[8] = x8;
- jets[9] = x9;
- return cost_functor_(jets.get(), residuals);
+ template <typename T, typename Tuple, int... Indices>
+ static std::array<const T*, ParameterDims::kNumParameterBlocks>
+ GetParameterPointers(const Tuple& paramPointers,
+ std::integer_sequence<int, Indices...>) {
+ return std::array<const T*, ParameterDims::kNumParameterBlocks>{
+ {std::get<Indices>(paramPointers)...}};
}
- private:
DynamicCostFunctionToFunctor cost_functor_;
};
diff --git a/extern/ceres/include/ceres/covariance.h b/extern/ceres/include/ceres/covariance.h
index 930f96cf3ae..99825c425ad 100644
--- a/extern/ceres/include/ceres/covariance.h
+++ b/extern/ceres/include/ceres/covariance.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,12 +31,13 @@
#ifndef CERES_PUBLIC_COVARIANCE_H_
#define CERES_PUBLIC_COVARIANCE_H_
+#include <memory>
#include <utility>
#include <vector>
+
+#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
-#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -60,7 +61,7 @@ class CovarianceImpl;
// Background
// ==========
// One way to assess the quality of the solution returned by a
-// non-linear least squares solve is to analyze the covariance of the
+// non-linear least squares solver is to analyze the covariance of the
// solution.
//
// Let us consider the non-linear regression problem
@@ -158,7 +159,7 @@ class CovarianceImpl;
// Gauge Invariance
// ----------------
// In structure from motion (3D reconstruction) problems, the
-// reconstruction is ambiguous upto a similarity transform. This is
+// reconstruction is ambiguous up to a similarity transform. This is
// known as a Gauge Ambiguity. Handling Gauges correctly requires the
// use of SVD or custom inversion algorithms. For small problems the
// user can use the dense algorithm. For more details see
@@ -183,7 +184,7 @@ class CovarianceImpl;
// Covariance::Options options;
// Covariance covariance(options);
//
-// std::vector<std::pair<const double*, const double*> > covariance_blocks;
+// std::vector<std::pair<const double*, const double*>> covariance_blocks;
// covariance_blocks.push_back(make_pair(x, x));
// covariance_blocks.push_back(make_pair(y, y));
// covariance_blocks.push_back(make_pair(x, y));
@@ -200,19 +201,19 @@ class CovarianceImpl;
class CERES_EXPORT Covariance {
public:
struct CERES_EXPORT Options {
- Options()
-#ifndef CERES_NO_SUITESPARSE
- : algorithm_type(SUITE_SPARSE_QR),
+ // Sparse linear algebra library to use when a sparse matrix
+ // factorization is being used to compute the covariance matrix.
+ //
+ // Currently this only applies to SPARSE_QR.
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+#if !defined(CERES_NO_SUITESPARSE)
+ SUITE_SPARSE;
#else
- : algorithm_type(EIGEN_SPARSE_QR),
+ // Eigen's QR factorization is always available.
+ EIGEN_SPARSE;
#endif
- min_reciprocal_condition_number(1e-14),
- null_space_rank(0),
- num_threads(1),
- apply_loss_function(true) {
- }
- // Ceres supports three different algorithms for covariance
+ // Ceres supports two different algorithms for covariance
// estimation, which represent different tradeoffs in speed,
// accuracy and reliability.
//
@@ -229,23 +230,20 @@ class CERES_EXPORT Covariance {
// for small to moderate sized problems. It can handle
// full-rank as well as rank deficient Jacobians.
//
- // 2. EIGEN_SPARSE_QR uses the sparse QR factorization algorithm
- // in Eigen to compute the decomposition
+ // 2. SPARSE_QR uses the sparse QR factorization algorithm
+ // to compute the decomposition
//
// Q * R = J
//
// [J'J]^-1 = [R*R']^-1
//
- // It is a moderately fast algorithm for sparse matrices.
- //
- // 3. SUITE_SPARSE_QR uses the SuiteSparseQR sparse QR
- // factorization algorithm. It uses dense linear algebra and is
- // multi threaded, so for large sparse sparse matrices it is
- // significantly faster than EIGEN_SPARSE_QR.
- //
- // Neither EIGEN_SPARSE_QR not SUITE_SPARSE_QR are capable of
- // computing the covariance if the Jacobian is rank deficient.
- CovarianceAlgorithmType algorithm_type;
+ // SPARSE_QR is not capable of computing the covariance if the
+ // Jacobian is rank deficient. Depending on the value of
+ // Covariance::Options::sparse_linear_algebra_library_type, either
+ // Eigen's Sparse QR factorization algorithm will be used or
+ // SuiteSparse's high performance SuiteSparseQR algorithm will be
+ // used.
+ CovarianceAlgorithmType algorithm_type = SPARSE_QR;
// If the Jacobian matrix is near singular, then inverting J'J
// will result in unreliable results, e.g, if
@@ -270,7 +268,7 @@ class CERES_EXPORT Covariance {
// where min_sigma and max_sigma are the minimum and maxiumum
// singular values of J respectively.
//
- // 2. SUITE_SPARSE_QR and EIGEN_SPARSE_QR
+ // 2. SPARSE_QR
//
// rank(J) < num_col(J)
//
@@ -278,7 +276,7 @@ class CERES_EXPORT Covariance {
// sparse QR factorization algorithm. It is a fairly reliable
// indication of rank deficiency.
//
- double min_reciprocal_condition_number;
+ double min_reciprocal_condition_number = 1e-14;
// When using DENSE_SVD, the user has more control in dealing with
// singular and near singular covariance matrices.
@@ -313,9 +311,9 @@ class CERES_EXPORT Covariance {
//
// This option has no effect on the SUITE_SPARSE_QR and
// EIGEN_SPARSE_QR algorithms.
- int null_space_rank;
+ int null_space_rank = 0;
- int num_threads;
+ int num_threads = 1;
// Even though the residual blocks in the problem may contain loss
// functions, setting apply_loss_function to false will turn off
@@ -323,7 +321,7 @@ class CERES_EXPORT Covariance {
// function and in turn its effect on the covariance.
//
// TODO(sameergaarwal): Expand this based on Jim's experiments.
- bool apply_loss_function;
+ bool apply_loss_function = true;
};
explicit Covariance(const Options& options);
@@ -352,10 +350,9 @@ class CERES_EXPORT Covariance {
// covariance computation. Please see the documentation for
// Covariance::Options for more on the conditions under which this
// function returns false.
- bool Compute(
- const std::vector<std::pair<const double*,
- const double*> >& covariance_blocks,
- Problem* problem);
+ bool Compute(const std::vector<std::pair<const double*, const double*>>&
+ covariance_blocks,
+ Problem* problem);
// Compute a part of the covariance matrix.
//
@@ -428,8 +425,8 @@ class CERES_EXPORT Covariance {
// a square matrix whose row and column count is equal to the sum of
// the sizes of the individual parameter blocks. The covariance
// matrix will be a row-major matrix.
- bool GetCovarianceMatrix(const std::vector<const double *> &parameter_blocks,
- double *covariance_matrix);
+ bool GetCovarianceMatrix(const std::vector<const double*>& parameter_blocks,
+ double* covariance_matrix) const;
// Return the covariance matrix corresponding to parameter_blocks
// in the tangent space if a local parameterization is associated
@@ -448,10 +445,10 @@ class CERES_EXPORT Covariance {
// blocks. The covariance matrix will be a row-major matrix.
bool GetCovarianceMatrixInTangentSpace(
const std::vector<const double*>& parameter_blocks,
- double* covariance_matrix);
+ double* covariance_matrix) const;
private:
- internal::scoped_ptr<internal::CovarianceImpl> impl_;
+ std::unique_ptr<internal::CovarianceImpl> impl_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/crs_matrix.h b/extern/ceres/include/ceres/crs_matrix.h
index 23687c4670e..bc618fa0905 100644
--- a/extern/ceres/include/ceres/crs_matrix.h
+++ b/extern/ceres/include/ceres/crs_matrix.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,8 +32,9 @@
#define CERES_PUBLIC_CRS_MATRIX_H_
#include <vector>
-#include "ceres/internal/port.h"
+
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/port.h"
namespace ceres {
diff --git a/extern/ceres/include/ceres/cubic_interpolation.h b/extern/ceres/include/ceres/cubic_interpolation.h
new file mode 100644
index 00000000000..9b9ea4a942c
--- /dev/null
+++ b/extern/ceres/include/ceres/cubic_interpolation.h
@@ -0,0 +1,436 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_CUBIC_INTERPOLATION_H_
+#define CERES_PUBLIC_CUBIC_INTERPOLATION_H_
+
+#include "Eigen/Core"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+// Given samples from a function sampled at four equally spaced points,
+//
+// p0 = f(-1)
+// p1 = f(0)
+// p2 = f(1)
+// p3 = f(2)
+//
+// Evaluate the cubic Hermite spline (also known as the Catmull-Rom
+// spline) at a point x that lies in the interval [0, 1].
+//
+// This is also the interpolation kernel (for the case of a = 0.5) as
+// proposed by R. Keys, in:
+//
+// "Cubic convolution interpolation for digital image processing".
+// IEEE Transactions on Acoustics, Speech, and Signal Processing
+// 29 (6): 1153-1160.
+//
+// For more details see
+//
+// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
+// http://en.wikipedia.org/wiki/Bicubic_interpolation
+//
+// f if not NULL will contain the interpolated function values.
+// dfdx if not NULL will contain the interpolated derivative values.
+template <int kDataDimension>
+void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0,
+ const Eigen::Matrix<double, kDataDimension, 1>& p1,
+ const Eigen::Matrix<double, kDataDimension, 1>& p2,
+ const Eigen::Matrix<double, kDataDimension, 1>& p3,
+ const double x,
+ double* f,
+ double* dfdx) {
+ typedef Eigen::Matrix<double, kDataDimension, 1> VType;
+ const VType a = 0.5 * (-p0 + 3.0 * p1 - 3.0 * p2 + p3);
+ const VType b = 0.5 * (2.0 * p0 - 5.0 * p1 + 4.0 * p2 - p3);
+ const VType c = 0.5 * (-p0 + p2);
+ const VType d = p1;
+
+ // Use Horner's rule to evaluate the function value and its
+ // derivative.
+
+ // f = ax^3 + bx^2 + cx + d
+ if (f != NULL) {
+ Eigen::Map<VType>(f, kDataDimension) = d + x * (c + x * (b + x * a));
+ }
+
+ // dfdx = 3ax^2 + 2bx + c
+ if (dfdx != NULL) {
+ Eigen::Map<VType>(dfdx, kDataDimension) = c + x * (2.0 * b + 3.0 * a * x);
+ }
+}
+
+// Given as input an infinite one dimensional grid, which provides the
+// following interface.
+//
+// class Grid {
+// public:
+//     enum { DATA_DIMENSION = 2 };
+// void GetValue(int n, double* f) const;
+// };
+//
+// Here, GetValue gives the value of a function f (possibly vector
+// valued) for any integer n.
+//
+// The enum DATA_DIMENSION indicates the dimensionality of the
+// function being interpolated. For example if you are interpolating
+// rotations in axis-angle format over time, then DATA_DIMENSION = 3.
+//
+// CubicInterpolator uses cubic Hermite splines to produce a smooth
+// approximation to it that can be used to evaluate the f(x) and f'(x)
+// at any point on the real number line.
+//
+// For more details on cubic interpolation see
+//
+// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
+//
+// Example usage:
+//
+// const double data[] = {1.0, 2.0, 5.0, 6.0};
+// Grid1D<double, 1> grid(data, 0, 4);
+// CubicInterpolator<Grid1D<double, 1>> interpolator(grid);
+// double f, dfdx;
+//  interpolator.Evaluate(1.5, &f, &dfdx);
+template <typename Grid>
+class CubicInterpolator {
+ public:
+ explicit CubicInterpolator(const Grid& grid) : grid_(grid) {
+ // The + casts the enum into an int before doing the
+ // comparison. It is needed to prevent
+ // "-Wunnamed-type-template-args" related errors.
+ CHECK_GE(+Grid::DATA_DIMENSION, 1);
+ }
+
+ void Evaluate(double x, double* f, double* dfdx) const {
+ const int n = std::floor(x);
+ Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> p0, p1, p2, p3;
+ grid_.GetValue(n - 1, p0.data());
+ grid_.GetValue(n, p1.data());
+ grid_.GetValue(n + 1, p2.data());
+ grid_.GetValue(n + 2, p3.data());
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(p0, p1, p2, p3, x - n, f, dfdx);
+ }
+
+ // The following two Evaluate overloads are needed for interfacing
+ // with automatic differentiation. The first is for when a scalar
+ // evaluation is done, and the second one is for when Jets are used.
+ void Evaluate(const double& x, double* f) const { Evaluate(x, f, NULL); }
+
+ template <typename JetT>
+ void Evaluate(const JetT& x, JetT* f) const {
+ double fx[Grid::DATA_DIMENSION], dfdx[Grid::DATA_DIMENSION];
+ Evaluate(x.a, fx, dfdx);
+ for (int i = 0; i < Grid::DATA_DIMENSION; ++i) {
+ f[i].a = fx[i];
+ f[i].v = dfdx[i] * x.v;
+ }
+ }
+
+ private:
+ const Grid& grid_;
+};
+
+// An object that implements an infinite one dimensional grid needed
+// by the CubicInterpolator where the source of the function values is
+// an array of type T on the interval
+//
+// [begin, ..., end - 1]
+//
+// Since the input array is finite and the grid is infinite, values
+// outside this interval need to be computed. Grid1D uses the value
+// from the nearest edge.
+//
+// The function being provided can be vector valued, in which case
+// kDataDimension > 1. The dimensional slices of the function may be
+// interleaved, or they may be stacked, i.e., if the function has
+// kDataDimension = 2, if kInterleaved = true, then it is stored as
+//
+// f01, f02, f11, f12 ....
+//
+// and if kInterleaved = false, then it is stored as
+//
+// f01, f11, .. fn1, f02, f12, .. , fn2
+//
+template <typename T, int kDataDimension = 1, bool kInterleaved = true>
+struct Grid1D {
+ public:
+ enum { DATA_DIMENSION = kDataDimension };
+
+ Grid1D(const T* data, const int begin, const int end)
+ : data_(data), begin_(begin), end_(end), num_values_(end - begin) {
+ CHECK_LT(begin, end);
+ }
+
+ EIGEN_STRONG_INLINE void GetValue(const int n, double* f) const {
+ const int idx = std::min(std::max(begin_, n), end_ - 1) - begin_;
+ if (kInterleaved) {
+ for (int i = 0; i < kDataDimension; ++i) {
+ f[i] = static_cast<double>(data_[kDataDimension * idx + i]);
+ }
+ } else {
+ for (int i = 0; i < kDataDimension; ++i) {
+ f[i] = static_cast<double>(data_[i * num_values_ + idx]);
+ }
+ }
+ }
+
+ private:
+ const T* data_;
+ const int begin_;
+ const int end_;
+ const int num_values_;
+};
+
+// Given as input an infinite two dimensional grid like object, which
+// provides the following interface:
+//
+// struct Grid {
+// enum { DATA_DIMENSION = 1 };
+// void GetValue(int row, int col, double* f) const;
+// };
+//
+// Where, GetValue gives us the value of a function f (possibly vector
+// valued) for any pairs of integers (row, col), and the enum
+// DATA_DIMENSION indicates the dimensionality of the function being
+// interpolated. For example if you are interpolating a color image
+// with three channels (Red, Green & Blue), then DATA_DIMENSION = 3.
+//
+// BiCubicInterpolator uses the cubic convolution interpolation
+// algorithm of R. Keys, to produce a smooth approximation to it that
+// can be used to evaluate the f(r,c), df(r, c)/dr and df(r,c)/dc at
+// any point in the real plane.
+//
+// For more details on the algorithm used here see:
+//
+// "Cubic convolution interpolation for digital image processing".
+// Robert G. Keys, IEEE Trans. on Acoustics, Speech, and Signal
+// Processing 29 (6): 1153-1160, 1981.
+//
+// http://en.wikipedia.org/wiki/Cubic_Hermite_spline
+// http://en.wikipedia.org/wiki/Bicubic_interpolation
+//
+// Example usage:
+//
+// const double data[] = {1.0, 3.0, -1.0, 4.0,
+// 3.6, 2.1, 4.2, 2.0,
+// 2.0, 1.0, 3.1, 5.2};
+// Grid2D<double, 1> grid(data, 3, 4);
+// BiCubicInterpolator<Grid2D<double, 1>> interpolator(grid);
+// double f, dfdr, dfdc;
+// interpolator.Evaluate(1.2, 2.5, &f, &dfdr, &dfdc);
+
+template <typename Grid>
+class BiCubicInterpolator {
+ public:
+ explicit BiCubicInterpolator(const Grid& grid) : grid_(grid) {
+ // The + casts the enum into an int before doing the
+ // comparison. It is needed to prevent
+ // "-Wunnamed-type-template-args" related errors.
+ CHECK_GE(+Grid::DATA_DIMENSION, 1);
+ }
+
+ // Evaluate the interpolated function value and/or its
+ // derivative. Uses the nearest point on the grid boundary if r or
+ // c is out of bounds.
+ void Evaluate(
+ double r, double c, double* f, double* dfdr, double* dfdc) const {
+ // BiCubic interpolation requires 16 values around the point being
+ // evaluated. We will use pij, to indicate the elements of the
+ // 4x4 grid of values.
+ //
+ // col
+ // p00 p01 p02 p03
+ // row p10 p11 p12 p13
+ // p20 p21 p22 p23
+ // p30 p31 p32 p33
+ //
+ // The point (r,c) being evaluated is assumed to lie in the square
+ // defined by p11, p12, p22 and p21.
+
+ const int row = std::floor(r);
+ const int col = std::floor(c);
+
+ Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> p0, p1, p2, p3;
+
+ // Interpolate along each of the four rows, evaluating the function
+ // value and the horizontal derivative in each row.
+ Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> f0, f1, f2, f3;
+ Eigen::Matrix<double, Grid::DATA_DIMENSION, 1> df0dc, df1dc, df2dc, df3dc;
+
+ grid_.GetValue(row - 1, col - 1, p0.data());
+ grid_.GetValue(row - 1, col, p1.data());
+ grid_.GetValue(row - 1, col + 1, p2.data());
+ grid_.GetValue(row - 1, col + 2, p3.data());
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(
+ p0, p1, p2, p3, c - col, f0.data(), df0dc.data());
+
+ grid_.GetValue(row, col - 1, p0.data());
+ grid_.GetValue(row, col, p1.data());
+ grid_.GetValue(row, col + 1, p2.data());
+ grid_.GetValue(row, col + 2, p3.data());
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(
+ p0, p1, p2, p3, c - col, f1.data(), df1dc.data());
+
+ grid_.GetValue(row + 1, col - 1, p0.data());
+ grid_.GetValue(row + 1, col, p1.data());
+ grid_.GetValue(row + 1, col + 1, p2.data());
+ grid_.GetValue(row + 1, col + 2, p3.data());
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(
+ p0, p1, p2, p3, c - col, f2.data(), df2dc.data());
+
+ grid_.GetValue(row + 2, col - 1, p0.data());
+ grid_.GetValue(row + 2, col, p1.data());
+ grid_.GetValue(row + 2, col + 1, p2.data());
+ grid_.GetValue(row + 2, col + 2, p3.data());
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(
+ p0, p1, p2, p3, c - col, f3.data(), df3dc.data());
+
+ // Interpolate vertically the interpolated value from each row and
+ // compute the derivative along the columns.
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(f0, f1, f2, f3, r - row, f, dfdr);
+ if (dfdc != NULL) {
+ // Interpolate vertically the derivative along the columns.
+ CubicHermiteSpline<Grid::DATA_DIMENSION>(
+ df0dc, df1dc, df2dc, df3dc, r - row, dfdc, NULL);
+ }
+ }
+
+ // The following two Evaluate overloads are needed for interfacing
+ // with automatic differentiation. The first is for when a scalar
+ // evaluation is done, and the second one is for when Jets are used.
+ void Evaluate(const double& r, const double& c, double* f) const {
+ Evaluate(r, c, f, NULL, NULL);
+ }
+
+ template <typename JetT>
+ void Evaluate(const JetT& r, const JetT& c, JetT* f) const {
+ double frc[Grid::DATA_DIMENSION];
+ double dfdr[Grid::DATA_DIMENSION];
+ double dfdc[Grid::DATA_DIMENSION];
+ Evaluate(r.a, c.a, frc, dfdr, dfdc);
+ for (int i = 0; i < Grid::DATA_DIMENSION; ++i) {
+ f[i].a = frc[i];
+ f[i].v = dfdr[i] * r.v + dfdc[i] * c.v;
+ }
+ }
+
+ private:
+ const Grid& grid_;
+};
+
+// An object that implements an infinite two dimensional grid needed
+// by the BiCubicInterpolator where the source of the function values
+// is a grid of type T on the grid
+//
+// [(row_start, col_start), ..., (row_start, col_end - 1)]
+// [ ... ]
+// [(row_end - 1, col_start), ..., (row_end - 1, col_end - 1)]
+//
+// Since the input grid is finite and the grid is infinite, values
+// outside this interval need to be computed. Grid2D uses the value
+// from the nearest edge.
+//
+// The function being provided can be vector valued, in which case
+// kDataDimension > 1. The data may be stored in row or column major
+// format and the various dimensional slices of the function may be
+// interleaved, or they may be stacked, i.e., if the function has
+// kDataDimension = 2, is stored in row-major format and if
+// kInterleaved = true, then it is stored as
+//
+// f001, f002, f011, f012, ...
+//
+// A commonly occurring example is color images (RGB) where the three
+// channels are stored interleaved.
+//
+// If kInterleaved = false, then it is stored as
+//
+// f001, f011, ..., fnm1, f002, f012, ...
+template <typename T,
+ int kDataDimension = 1,
+ bool kRowMajor = true,
+ bool kInterleaved = true>
+struct Grid2D {
+ public:
+ enum { DATA_DIMENSION = kDataDimension };
+
+ Grid2D(const T* data,
+ const int row_begin,
+ const int row_end,
+ const int col_begin,
+ const int col_end)
+ : data_(data),
+ row_begin_(row_begin),
+ row_end_(row_end),
+ col_begin_(col_begin),
+ col_end_(col_end),
+ num_rows_(row_end - row_begin),
+ num_cols_(col_end - col_begin),
+ num_values_(num_rows_ * num_cols_) {
+ CHECK_GE(kDataDimension, 1);
+ CHECK_LT(row_begin, row_end);
+ CHECK_LT(col_begin, col_end);
+ }
+
+ EIGEN_STRONG_INLINE void GetValue(const int r, const int c, double* f) const {
+ const int row_idx =
+ std::min(std::max(row_begin_, r), row_end_ - 1) - row_begin_;
+ const int col_idx =
+ std::min(std::max(col_begin_, c), col_end_ - 1) - col_begin_;
+
+ const int n = (kRowMajor) ? num_cols_ * row_idx + col_idx
+ : num_rows_ * col_idx + row_idx;
+
+ if (kInterleaved) {
+ for (int i = 0; i < kDataDimension; ++i) {
+ f[i] = static_cast<double>(data_[kDataDimension * n + i]);
+ }
+ } else {
+ for (int i = 0; i < kDataDimension; ++i) {
+ f[i] = static_cast<double>(data_[i * num_values_ + n]);
+ }
+ }
+ }
+
+ private:
+ const T* data_;
+ const int row_begin_;
+ const int row_end_;
+ const int col_begin_;
+ const int col_end_;
+ const int num_rows_;
+ const int num_cols_;
+ const int num_values_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_CUBIC_INTERPOLATOR_H_
diff --git a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
index e6d26111f18..7b75150b5ce 100644
--- a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
+++ b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -28,7 +28,22 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
-//
+
+#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
+
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "ceres/dynamic_cost_function.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/jet.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
// This autodiff implementation differs from the one found in
// autodiff_cost_function.h by supporting autodiff on cost functions
// with variable numbers of parameters with variable sizes. With the
@@ -43,7 +58,7 @@
// bool operator()(T const* const* parameters, T* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
-// }
+// };
//
// Since the sizing of the parameters is done at runtime, you must
// also specify the sizes after creating the dynamic autodiff cost
@@ -60,40 +75,17 @@
// default, controlled by the Stride template parameter) with each
// pass. There is a tradeoff with the size of the passes; you may want
// to experiment with the stride.
-
-#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
-#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_
-
-#include <cmath>
-#include <numeric>
-#include <vector>
-
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/jet.h"
-#include "glog/logging.h"
-
-namespace ceres {
-
template <typename CostFunctor, int Stride = 4>
-class DynamicAutoDiffCostFunction : public CostFunction {
+class DynamicAutoDiffCostFunction : public DynamicCostFunction {
public:
explicit DynamicAutoDiffCostFunction(CostFunctor* functor)
- : functor_(functor) {}
+ : functor_(functor) {}
virtual ~DynamicAutoDiffCostFunction() {}
- void AddParameterBlock(int size) {
- mutable_parameter_block_sizes()->push_back(size);
- }
-
- void SetNumResiduals(int num_residuals) {
- set_num_residuals(num_residuals);
- }
-
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override {
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicAutoDiffCostFunction::SetNumResiduals() "
<< "before DynamicAutoDiffCostFunction::Evaluate().";
@@ -112,20 +104,23 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// depends on.
//
// To work around this issue, the solution here is to evaluate the
- // jacobians in a series of passes, each one computing Stripe *
+ // jacobians in a series of passes, each one computing Stride *
// num_residuals() derivatives. This is done with small, fixed-size jets.
- const int num_parameter_blocks = parameter_block_sizes().size();
- const int num_parameters = std::accumulate(parameter_block_sizes().begin(),
- parameter_block_sizes().end(),
- 0);
+ const int num_parameter_blocks =
+ static_cast<int>(parameter_block_sizes().size());
+ const int num_parameters = std::accumulate(
+ parameter_block_sizes().begin(), parameter_block_sizes().end(), 0);
// Allocate scratch space for the strided evaluation.
- std::vector<Jet<double, Stride> > input_jets(num_parameters);
- std::vector<Jet<double, Stride> > output_jets(num_residuals());
+ using JetT = Jet<double, Stride>;
+ internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> input_jets(
+ num_parameters);
+ internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> output_jets(
+ num_residuals());
// Make the parameter pack that is sent to the functor (reused).
- std::vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks,
- static_cast<Jet<double, Stride>* >(NULL));
+ internal::FixedArray<Jet<double, Stride>*> jet_parameters(
+ num_parameter_blocks, nullptr);
int num_active_parameters = 0;
// To handle constant parameters between non-constant parameter blocks, the
@@ -172,8 +167,8 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// Evaluate all of the strides. Each stride is a chunk of the derivative to
// evaluate, typically some size proportional to the size of the SIMD
// registers of the CPU.
- int num_strides = static_cast<int>(ceil(num_active_parameters /
- static_cast<float>(Stride)));
+ int num_strides = static_cast<int>(
+ ceil(num_active_parameters / static_cast<float>(Stride)));
int current_derivative_section = 0;
int current_derivative_section_cursor = 0;
@@ -183,7 +178,7 @@ class DynamicAutoDiffCostFunction : public CostFunction {
// non-constant #Stride parameters.
const int initial_derivative_section = current_derivative_section;
const int initial_derivative_section_cursor =
- current_derivative_section_cursor;
+ current_derivative_section_cursor;
int active_parameter_count = 0;
parameter_cursor = 0;
@@ -193,9 +188,9 @@ class DynamicAutoDiffCostFunction : public CostFunction {
++j, parameter_cursor++) {
input_jets[parameter_cursor].v.setZero();
if (active_parameter_count < Stride &&
- parameter_cursor >= (
- start_derivative_section[current_derivative_section] +
- current_derivative_section_cursor)) {
+ parameter_cursor >=
+ (start_derivative_section[current_derivative_section] +
+ current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
input_jets[parameter_cursor].v[active_parameter_count] = 1.0;
++active_parameter_count;
@@ -222,9 +217,9 @@ class DynamicAutoDiffCostFunction : public CostFunction {
for (int j = 0; j < parameter_block_sizes()[i];
++j, parameter_cursor++) {
if (active_parameter_count < Stride &&
- parameter_cursor >= (
- start_derivative_section[current_derivative_section] +
- current_derivative_section_cursor)) {
+ parameter_cursor >=
+ (start_derivative_section[current_derivative_section] +
+ current_derivative_section_cursor)) {
if (jacobians[i] != NULL) {
for (int k = 0; k < num_residuals(); ++k) {
jacobians[i][k * parameter_block_sizes()[i] + j] =
@@ -252,7 +247,7 @@ class DynamicAutoDiffCostFunction : public CostFunction {
}
private:
- internal::scoped_ptr<CostFunctor> functor_;
+ std::unique_ptr<CostFunctor> functor_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/dynamic_cost_function.h b/extern/ceres/include/ceres/dynamic_cost_function.h
new file mode 100644
index 00000000000..6e8a076ecd0
--- /dev/null
+++ b/extern/ceres/include/ceres/dynamic_cost_function.h
@@ -0,0 +1,56 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
+#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
+
+#include "ceres/cost_function.h"
+
+namespace ceres {
+
+// A common base class for DynamicAutoDiffCostFunction and
+// DynamicNumericDiffCostFunction which depend on methods that can add
+// parameter blocks and set the number of residuals at run time.
+class CERES_EXPORT DynamicCostFunction : public CostFunction {
+ public:
+ ~DynamicCostFunction() {}
+
+ virtual void AddParameterBlock(int size) {
+ mutable_parameter_block_sizes()->push_back(size);
+ }
+
+ virtual void SetNumResiduals(int num_residuals) {
+ set_num_residuals(num_residuals);
+ }
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_
diff --git a/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h b/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h
index 9339a503ea0..8d174d8ecc2 100644
--- a/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h
+++ b/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -28,7 +28,20 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// dgossow@google.com (David Gossow)
-//
+
+#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
+#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
+
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "ceres/dynamic_cost_function.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
// DynamicCostFunctionToFunctor allows users to use CostFunction
// objects in templated functors which are to be used for automatic
// differentiation. It works similar to CostFunctionToFunctor, with the
@@ -40,9 +53,9 @@
// class IntrinsicProjection : public CostFunction {
// public:
// IntrinsicProjection(const double* observation);
-// virtual bool Evaluate(double const* const* parameters,
-// double* residuals,
-// double** jacobians) const;
+// bool Evaluate(double const* const* parameters,
+// double* residuals,
+// double** jacobians) const override;
// };
//
// is a cost function that implements the projection of a point in its
@@ -87,26 +100,12 @@
// private:
// DynamicCostFunctionToFunctor intrinsic_projection_;
// };
-
-#ifndef CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
-#define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_
-
-#include <numeric>
-#include <vector>
-
-#include "ceres/cost_function.h"
-#include "ceres/internal/fixed_array.h"
-#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
-
-namespace ceres {
-
class DynamicCostFunctionToFunctor {
public:
// Takes ownership of cost_function.
explicit DynamicCostFunctionToFunctor(CostFunction* cost_function)
: cost_function_(cost_function) {
- CHECK_NOTNULL(cost_function);
+ CHECK(cost_function != nullptr);
}
bool operator()(double const* const* parameters, double* residuals) const {
@@ -115,12 +114,13 @@ class DynamicCostFunctionToFunctor {
template <typename JetT>
bool operator()(JetT const* const* inputs, JetT* output) const {
- const std::vector<int32>& parameter_block_sizes =
+ const std::vector<int32_t>& parameter_block_sizes =
cost_function_->parameter_block_sizes();
- const int num_parameter_blocks = parameter_block_sizes.size();
+ const int num_parameter_blocks =
+ static_cast<int>(parameter_block_sizes.size());
const int num_residuals = cost_function_->num_residuals();
- const int num_parameters = std::accumulate(parameter_block_sizes.begin(),
- parameter_block_sizes.end(), 0);
+ const int num_parameters = std::accumulate(
+ parameter_block_sizes.begin(), parameter_block_sizes.end(), 0);
internal::FixedArray<double> parameters(num_parameters);
internal::FixedArray<double*> parameter_blocks(num_parameter_blocks);
@@ -130,8 +130,8 @@ class DynamicCostFunctionToFunctor {
// Build a set of arrays to get the residuals and jacobians from
// the CostFunction wrapped by this functor.
- double* parameter_ptr = parameters.get();
- double* jacobian_ptr = jacobians.get();
+ double* parameter_ptr = parameters.data();
+ double* jacobian_ptr = jacobians.data();
for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_blocks[i] = parameter_ptr;
jacobian_blocks[i] = jacobian_ptr;
@@ -141,9 +141,9 @@ class DynamicCostFunctionToFunctor {
jacobian_ptr += num_residuals * parameter_block_sizes[i];
}
- if (!cost_function_->Evaluate(parameter_blocks.get(),
- residuals.get(),
- jacobian_blocks.get())) {
+ if (!cost_function_->Evaluate(parameter_blocks.data(),
+ residuals.data(),
+ jacobian_blocks.data())) {
return false;
}
@@ -170,7 +170,7 @@ class DynamicCostFunctionToFunctor {
output[i].v.setZero();
for (int j = 0; j < num_parameter_blocks; ++j) {
- const int32 block_size = parameter_block_sizes[j];
+ const int32_t block_size = parameter_block_sizes[j];
for (int k = 0; k < parameter_block_sizes[j]; ++k) {
output[i].v +=
jacobian_blocks[j][i * block_size + k] * inputs[j][k].v;
@@ -182,7 +182,7 @@ class DynamicCostFunctionToFunctor {
}
private:
- internal::scoped_ptr<CostFunction> cost_function_;
+ std::unique_ptr<CostFunction> cost_function_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
index 5770946a115..119b3f85e8e 100644
--- a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
+++ b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,7 +30,24 @@
// sameeragarwal@google.com (Sameer Agarwal)
// thadh@gmail.com (Thad Hughes)
// tbennun@gmail.com (Tal Ben-Nun)
-//
+
+#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
+
+#include <cmath>
+#include <memory>
+#include <numeric>
+#include <vector>
+
+#include "ceres/dynamic_cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/numeric_diff.h"
+#include "ceres/internal/parameter_dims.h"
+#include "ceres/numeric_diff_options.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
// This numeric diff implementation differs from the one found in
// numeric_diff_cost_function.h by supporting numericdiff on cost
// functions with variable numbers of parameters with variable
@@ -42,7 +59,9 @@
// numeric diff; the expected interface for the cost functors is:
//
// struct MyCostFunctor {
-// bool operator()(double const* const* parameters, double* residuals) const {
+// bool operator()(double const*
+// const* parameters,
+// double* residuals) const {
// // Use parameters[i] to access the i'th parameter block.
// }
// }
@@ -56,34 +75,14 @@
// cost_function.AddParameterBlock(5);
// cost_function.AddParameterBlock(10);
// cost_function.SetNumResiduals(21);
-
-#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
-#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_
-
-#include <cmath>
-#include <numeric>
-#include <vector>
-
-#include "ceres/cost_function.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/numeric_diff.h"
-#include "ceres/numeric_diff_options.h"
-#include "glog/logging.h"
-
-namespace ceres {
-
template <typename CostFunctor, NumericDiffMethodType method = CENTRAL>
-class DynamicNumericDiffCostFunction : public CostFunction {
+class DynamicNumericDiffCostFunction : public DynamicCostFunction {
public:
explicit DynamicNumericDiffCostFunction(
const CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
const NumericDiffOptions& options = NumericDiffOptions())
- : functor_(functor),
- ownership_(ownership),
- options_(options) {
- }
+ : functor_(functor), ownership_(ownership), options_(options) {}
virtual ~DynamicNumericDiffCostFunction() {
if (ownership_ != TAKE_OWNERSHIP) {
@@ -91,28 +90,22 @@ class DynamicNumericDiffCostFunction : public CostFunction {
}
}
- void AddParameterBlock(int size) {
- mutable_parameter_block_sizes()->push_back(size);
- }
-
- void SetNumResiduals(int num_residuals) {
- set_num_residuals(num_residuals);
- }
-
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override {
using internal::NumericDiff;
CHECK_GT(num_residuals(), 0)
<< "You must call DynamicNumericDiffCostFunction::SetNumResiduals() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
- const std::vector<int32>& block_sizes = parameter_block_sizes();
+ const std::vector<int32_t>& block_sizes = parameter_block_sizes();
CHECK(!block_sizes.empty())
<< "You must call DynamicNumericDiffCostFunction::AddParameterBlock() "
<< "before DynamicNumericDiffCostFunction::Evaluate().";
- const bool status = EvaluateCostFunctor(parameters, residuals);
+ const bool status =
+ internal::VariadicEvaluate<internal::DynamicParameterDims>(
+ *functor_.get(), parameters, residuals);
if (jacobians == NULL || !status) {
return status;
}
@@ -123,8 +116,8 @@ class DynamicNumericDiffCostFunction : public CostFunction {
std::vector<double*> parameters_references_copy(block_sizes.size());
parameters_references_copy[0] = &parameters_copy[0];
for (size_t block = 1; block < block_sizes.size(); ++block) {
- parameters_references_copy[block] = parameters_references_copy[block - 1]
- + block_sizes[block - 1];
+ parameters_references_copy[block] =
+ parameters_references_copy[block - 1] + block_sizes[block - 1];
}
// Copy the parameters into the local temp space.
@@ -136,18 +129,20 @@ class DynamicNumericDiffCostFunction : public CostFunction {
for (size_t block = 0; block < block_sizes.size(); ++block) {
if (jacobians[block] != NULL &&
- !NumericDiff<CostFunctor, method, DYNAMIC,
- DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
- DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC, DYNAMIC,
- DYNAMIC, DYNAMIC>::EvaluateJacobianForParameterBlock(
- functor_.get(),
- residuals,
- options_,
- this->num_residuals(),
- block,
- block_sizes[block],
- &parameters_references_copy[0],
- jacobians[block])) {
+ !NumericDiff<CostFunctor,
+ method,
+ ceres::DYNAMIC,
+ internal::DynamicParameterDims,
+ ceres::DYNAMIC,
+ ceres::DYNAMIC>::
+ EvaluateJacobianForParameterBlock(functor_.get(),
+ residuals,
+ options_,
+ this->num_residuals(),
+ block,
+ block_sizes[block],
+ &parameters_references_copy[0],
+ jacobians[block])) {
return false;
}
}
@@ -155,31 +150,7 @@ class DynamicNumericDiffCostFunction : public CostFunction {
}
private:
- bool EvaluateCostFunctor(double const* const* parameters,
- double* residuals) const {
- return EvaluateCostFunctorImpl(functor_.get(),
- parameters,
- residuals,
- functor_.get());
- }
-
- // Helper templates to allow evaluation of a functor or a
- // CostFunction.
- bool EvaluateCostFunctorImpl(const CostFunctor* functor,
- double const* const* parameters,
- double* residuals,
- const void* /* NOT USED */) const {
- return (*functor)(parameters, residuals);
- }
-
- bool EvaluateCostFunctorImpl(const CostFunctor* functor,
- double const* const* parameters,
- double* residuals,
- const CostFunction* /* NOT USED */) const {
- return functor->Evaluate(parameters, residuals, NULL);
- }
-
- internal::scoped_ptr<const CostFunctor> functor_;
+ std::unique_ptr<const CostFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};
diff --git a/extern/ceres/include/ceres/evaluation_callback.h b/extern/ceres/include/ceres/evaluation_callback.h
new file mode 100644
index 00000000000..b9f5bbb5194
--- /dev/null
+++ b/extern/ceres/include/ceres/evaluation_callback.h
@@ -0,0 +1,80 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef CERES_PUBLIC_EVALUATION_CALLBACK_H_
+#define CERES_PUBLIC_EVALUATION_CALLBACK_H_
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Using this callback interface, Ceres can notify you when it is
+// about to evaluate the residuals or jacobians. With the callback,
+// you can share computation between residual blocks by doing the
+// shared computation in PrepareForEvaluation() before Ceres calls
+// CostFunction::Evaluate(). It also enables caching results between a
+// pure residual evaluation and a residual & jacobian evaluation, via
+// the new_evaluation_point argument.
+//
+// One use case for this callback is if the cost function compute is
+// moved to the GPU. In that case, the prepare call does the actual
+// cost function evaluation, and subsequent calls from Ceres to the
+// actual cost functions merely copy the results from the GPU onto the
+// corresponding blocks for Ceres to plug into the solver.
+//
+// NOTE: Ceres provides no mechanism to share data other than the
+// notification from the callback. Users must provide access to
+// pre-computed shared data to their cost functions behind the scenes;
+// this all happens without Ceres knowing.
+//
+// One approach is to put a pointer to the shared data in each cost
+// function (recommended) or to use a global shared variable
+// (discouraged; bug-prone). As far as Ceres is concerned, it is
+// evaluating cost functions like any other; it just so happens that
+// behind the scenes the cost functions reuse pre-computed data to
+// execute faster.
+class CERES_EXPORT EvaluationCallback {
+ public:
+ virtual ~EvaluationCallback() {}
+
+ // Called before Ceres requests residuals or jacobians for a given setting of
+ // the parameters. User parameters (the double* values provided to the cost
+ // functions) are fixed until the next call to PrepareForEvaluation(). If
+ // new_evaluation_point == true, then this is a new point that is different
+ // from the last evaluated point. Otherwise, it is the same point that was
+ // evaluated previously (either jacobian or residual) and the user can use
+ // cached results from previous evaluations.
+ virtual void PrepareForEvaluation(bool evaluate_jacobians,
+ bool new_evaluation_point) = 0;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_EVALUATION_CALLBACK_H_
diff --git a/extern/ceres/include/ceres/first_order_function.h b/extern/ceres/include/ceres/first_order_function.h
new file mode 100644
index 00000000000..1420153b2aa
--- /dev/null
+++ b/extern/ceres/include/ceres/first_order_function.h
@@ -0,0 +1,54 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
+#define CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// A FirstOrderFunction object implements the evaluation of a function
+// and its gradient.
+class CERES_EXPORT FirstOrderFunction {
+ public:
+ virtual ~FirstOrderFunction() {}
+
+ // cost is never null. gradient may be null. The return value
+ // indicates whether the evaluation was successful or not.
+ virtual bool Evaluate(const double* const parameters,
+ double* cost,
+ double* gradient) const = 0;
+ virtual int NumParameters() const = 0;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_
diff --git a/extern/ceres/include/ceres/gradient_checker.h b/extern/ceres/include/ceres/gradient_checker.h
index 6d285daf1d9..b79cf86b314 100644
--- a/extern/ceres/include/ceres/gradient_checker.h
+++ b/extern/ceres/include/ceres/gradient_checker.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,15 +34,14 @@
#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_
#define CERES_PUBLIC_GRADIENT_CHECKER_H_
-#include <vector>
+#include <memory>
#include <string>
+#include <vector>
#include "ceres/cost_function.h"
#include "ceres/dynamic_numeric_diff_cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
#include "glog/logging.h"
@@ -64,13 +63,13 @@ namespace ceres {
//
// How to use: Fill in an array of pointers to parameter blocks for your
// CostFunction, and then call Probe(). Check that the return value is 'true'.
-class GradientChecker {
+class CERES_EXPORT GradientChecker {
public:
// This will not take ownership of the cost function or local
// parameterizations.
//
// function: The cost function to probe.
- // local_parameterization: A vector of local parameterizations for each
+ // local_parameterizations: A vector of local parameterizations for each
// parameter. May be NULL or contain NULL pointers to indicate that the
// respective parameter does not have a local parameterization.
// options: Options to use for numerical differentiation.
@@ -80,7 +79,7 @@ class GradientChecker {
const NumericDiffOptions& options);
// Contains results from a call to Probe for later inspection.
- struct ProbeResults {
+ struct CERES_EXPORT ProbeResults {
// The return value of the cost function.
bool return_value;
@@ -100,10 +99,10 @@ class GradientChecker {
// Derivatives as computed by the cost function in local space.
std::vector<Matrix> local_jacobians;
- // Derivatives as computed by nuerical differentiation in local space.
+ // Derivatives as computed by numerical differentiation in local space.
std::vector<Matrix> numeric_jacobians;
- // Derivatives as computed by nuerical differentiation in local space.
+ // Derivatives as computed by numerical differentiation in local space.
std::vector<Matrix> local_numeric_jacobians;
// Contains the maximum relative error found in the local Jacobians.
@@ -137,11 +136,13 @@ class GradientChecker {
ProbeResults* results) const;
private:
- CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker);
+ GradientChecker() = delete;
+ GradientChecker(const GradientChecker&) = delete;
+ void operator=(const GradientChecker&) = delete;
std::vector<const LocalParameterization*> local_parameterizations_;
const CostFunction* function_;
- internal::scoped_ptr<CostFunction> finite_diff_cost_function_;
+ std::unique_ptr<CostFunction> finite_diff_cost_function_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/gradient_problem.h b/extern/ceres/include/ceres/gradient_problem.h
index 1226a4cd895..49d605ea2d6 100644
--- a/extern/ceres/include/ceres/gradient_problem.h
+++ b/extern/ceres/include/ceres/gradient_problem.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,9 +31,10 @@
#ifndef CERES_PUBLIC_GRADIENT_PROBLEM_H_
#define CERES_PUBLIC_GRADIENT_PROBLEM_H_
-#include "ceres/internal/macros.h"
+#include <memory>
+
+#include "ceres/first_order_function.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
namespace ceres {
@@ -105,21 +106,9 @@ class CERES_EXPORT GradientProblem {
bool Plus(const double* x, const double* delta, double* x_plus_delta) const;
private:
- internal::scoped_ptr<FirstOrderFunction> function_;
- internal::scoped_ptr<LocalParameterization> parameterization_;
- internal::scoped_array<double> scratch_;
-};
-
-// A FirstOrderFunction object implements the evaluation of a function
-// and its gradient.
-class CERES_EXPORT FirstOrderFunction {
- public:
- virtual ~FirstOrderFunction() {}
- // cost is never NULL. gradient may be null.
- virtual bool Evaluate(const double* const parameters,
- double* cost,
- double* gradient) const = 0;
- virtual int NumParameters() const = 0;
+ std::unique_ptr<FirstOrderFunction> function_;
+ std::unique_ptr<LocalParameterization> parameterization_;
+ std::unique_ptr<double[]> scratch_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/gradient_problem_solver.h b/extern/ceres/include/ceres/gradient_problem_solver.h
index a7d0121ea0c..181699d8fd4 100644
--- a/extern/ceres/include/ceres/gradient_problem_solver.h
+++ b/extern/ceres/include/ceres/gradient_problem_solver.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,11 +34,11 @@
#include <cmath>
#include <string>
#include <vector>
-#include "ceres/internal/macros.h"
+
+#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/types.h"
-#include "ceres/internal/disable_warnings.h"
namespace ceres {
@@ -54,39 +54,15 @@ class CERES_EXPORT GradientProblemSolver {
//
// The constants are defined inside types.h
struct CERES_EXPORT Options {
- // Default constructor that sets up a generic sparse problem.
- Options() {
- line_search_direction_type = LBFGS;
- line_search_type = WOLFE;
- nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
- max_lbfgs_rank = 20;
- use_approximate_eigenvalue_bfgs_scaling = false;
- line_search_interpolation_type = CUBIC;
- min_line_search_step_size = 1e-9;
- line_search_sufficient_function_decrease = 1e-4;
- max_line_search_step_contraction = 1e-3;
- min_line_search_step_contraction = 0.6;
- max_num_line_search_step_size_iterations = 20;
- max_num_line_search_direction_restarts = 5;
- line_search_sufficient_curvature_decrease = 0.9;
- max_line_search_step_expansion = 10.0;
- max_num_iterations = 50;
- max_solver_time_in_seconds = 1e9;
- function_tolerance = 1e-6;
- gradient_tolerance = 1e-10;
- logging_type = PER_MINIMIZER_ITERATION;
- minimizer_progress_to_stdout = false;
- }
-
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
bool IsValid(std::string* error) const;
// Minimizer options ----------------------------------------
- LineSearchDirectionType line_search_direction_type;
- LineSearchType line_search_type;
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+ LineSearchDirectionType line_search_direction_type = LBFGS;
+ LineSearchType line_search_type = WOLFE;
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
// The LBFGS hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
@@ -111,8 +87,8 @@ class CERES_EXPORT GradientProblemSolver {
// method, please see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
- // Limited Storage". Mathematics of Computation 35 (151): 773–782.
- int max_lbfgs_rank;
+ // Limited Storage". Mathematics of Computation 35 (151): 773-782.
+ int max_lbfgs_rank = 20;
// As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS),
// the initial inverse Hessian approximation is taken to be the Identity.
@@ -134,18 +110,18 @@ class CERES_EXPORT GradientProblemSolver {
// Oren S.S., Self-scaling variable metric (SSVM) algorithms
// Part II: Implementation and experiments, Management Science,
// 20(5), 863-874, 1974.
- bool use_approximate_eigenvalue_bfgs_scaling;
+ bool use_approximate_eigenvalue_bfgs_scaling = false;
// Degree of the polynomial used to approximate the objective
// function. Valid values are BISECTION, QUADRATIC and CUBIC.
//
// BISECTION corresponds to pure backtracking search with no
// interpolation.
- LineSearchInterpolationType line_search_interpolation_type;
+ LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If during the line search, the step_size falls below this
// value, it is truncated to zero.
- double min_line_search_step_size;
+ double min_line_search_step_size = 1e-9;
// Line search parameters.
@@ -159,7 +135,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
//
- double line_search_sufficient_function_decrease;
+ double line_search_sufficient_function_decrease = 1e-4;
// In each iteration of the line search,
//
@@ -169,7 +145,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double max_line_search_step_contraction;
+ double max_line_search_step_contraction = 1e-3;
// In each iteration of the line search,
//
@@ -179,19 +155,19 @@ class CERES_EXPORT GradientProblemSolver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double min_line_search_step_contraction;
+ double min_line_search_step_contraction = 0.6;
// Maximum number of trial step size iterations during each line search,
// if a step size satisfying the search conditions cannot be found within
// this number of trials, the line search will terminate.
- int max_num_line_search_step_size_iterations;
+ int max_num_line_search_step_size_iterations = 20;
// Maximum number of restarts of the line search direction algorithm before
// terminating the optimization. Restarts of the line search direction
// algorithm occur when the current algorithm fails to produce a new descent
// direction. This typically indicates a numerical failure, or a breakdown
// in the validity of the approximations used.
- int max_num_line_search_direction_restarts;
+ int max_num_line_search_direction_restarts = 5;
// The strong Wolfe conditions consist of the Armijo sufficient
// decrease condition, and an additional requirement that the
@@ -204,7 +180,7 @@ class CERES_EXPORT GradientProblemSolver {
//
// Where f() is the line search objective and f'() is the derivative
// of f w.r.t step_size (d f / d step_size).
- double line_search_sufficient_curvature_decrease;
+ double line_search_sufficient_curvature_decrease = 0.9;
// During the bracketing phase of the Wolfe search, the step size is
// increased until either a point satisfying the Wolfe conditions is
@@ -215,36 +191,49 @@ class CERES_EXPORT GradientProblemSolver {
// new_step_size <= max_step_expansion * step_size.
//
// By definition for expansion, max_step_expansion > 1.0.
- double max_line_search_step_expansion;
+ double max_line_search_step_expansion = 10.0;
// Maximum number of iterations for the minimizer to run for.
- int max_num_iterations;
+ int max_num_iterations = 50;
// Maximum time for which the minimizer should run for.
- double max_solver_time_in_seconds;
+ double max_solver_time_in_seconds = 1e9;
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
//
- double function_tolerance;
+ double function_tolerance = 1e-6;
// Minimizer terminates when
//
// max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
- double gradient_tolerance;
+ double gradient_tolerance = 1e-10;
+
+ // Minimizer terminates when
+ //
+ // |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
+ //
+ double parameter_tolerance = 1e-8;
// Logging options ---------------------------------------------------------
- LoggingType logging_type;
+ LoggingType logging_type = PER_MINIMIZER_ITERATION;
// By default the Minimizer progress is logged to VLOG(1), which
// is sent to STDERR depending on the vlog level. If this flag is
// set to true, and logging_type is not SILENT, the logging output
// is sent to STDOUT.
- bool minimizer_progress_to_stdout;
+ bool minimizer_progress_to_stdout = false;
+
+ // If true, the user's parameter blocks are updated at the end of
+ // every Minimizer iteration, otherwise they are updated when the
+ // Minimizer terminates. This is useful if, for example, the user
+ // wishes to visualize the state of the optimization every
+ // iteration.
+ bool update_state_every_iteration = false;
// Callbacks that are executed at the end of each iteration of the
// Minimizer. An iteration may terminate midway, either due to
@@ -265,8 +254,6 @@ class CERES_EXPORT GradientProblemSolver {
};
struct CERES_EXPORT Summary {
- Summary();
-
// A brief one line description of the state of the solver after
// termination.
std::string BriefReport() const;
@@ -278,65 +265,72 @@ class CERES_EXPORT GradientProblemSolver {
bool IsSolutionUsable() const;
// Minimizer summary -------------------------------------------------
- TerminationType termination_type;
+ TerminationType termination_type = FAILURE;
// Reason why the solver terminated.
- std::string message;
+ std::string message = "ceres::GradientProblemSolve was not called.";
// Cost of the problem (value of the objective function) before
// the optimization.
- double initial_cost;
+ double initial_cost = -1.0;
// Cost of the problem (value of the objective function) after the
// optimization.
- double final_cost;
+ double final_cost = -1.0;
// IterationSummary for each minimizer iteration in order.
std::vector<IterationSummary> iterations;
+ // Number of times the cost (and not the gradient) was evaluated.
+ int num_cost_evaluations = -1;
+
+ // Number of times the gradient (and the cost) were evaluated.
+ int num_gradient_evaluations = -1;
+
// Sum total of all time spent inside Ceres when Solve is called.
- double total_time_in_seconds;
+ double total_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the cost.
- double cost_evaluation_time_in_seconds;
+ double cost_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the gradient.
- double gradient_evaluation_time_in_seconds;
+ double gradient_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
- double line_search_polynomial_minimization_time_in_seconds;
+ double line_search_polynomial_minimization_time_in_seconds = -1.0;
- // Number of parameters in the probem.
- int num_parameters;
+ // Number of parameters in the problem.
+ int num_parameters = -1;
// Dimension of the tangent space of the problem.
- int num_local_parameters;
+ int num_local_parameters = -1;
// Type of line search direction used.
- LineSearchDirectionType line_search_direction_type;
+ LineSearchDirectionType line_search_direction_type = LBFGS;
// Type of the line search algorithm used.
- LineSearchType line_search_type;
+ LineSearchType line_search_type = WOLFE;
// When performing line search, the degree of the polynomial used
// to approximate the objective function.
- LineSearchInterpolationType line_search_interpolation_type;
+ LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
// then this indicates the particular variant of non-linear
// conjugate gradient used.
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
+ FLETCHER_REEVES;
// If the type of the line search direction is LBFGS, then this
// indicates the rank of the Hessian approximation.
- int max_lbfgs_rank;
+ int max_lbfgs_rank = -1;
};
// Once a least squares problem has been built, this function takes
// the problem and optimizes it based on the values of the options
// parameters. Upon return, a detailed summary of the work performed
- // by the preprocessor, the non-linear minmizer and the linear
+ // by the preprocessor, the non-linear minimizer and the linear
// solver are reported in the summary object.
virtual void Solve(const GradientProblemSolver::Options& options,
const GradientProblem& problem,
diff --git a/extern/ceres/include/ceres/internal/array_selector.h b/extern/ceres/include/ceres/internal/array_selector.h
new file mode 100644
index 00000000000..841797f4c69
--- /dev/null
+++ b/extern/ceres/include/ceres/internal/array_selector.h
@@ -0,0 +1,95 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2020 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: darius.rueckert@fau.de (Darius Rueckert)
+//
+
+#ifndef CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_
+#define CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_
+
+#include <array>
+#include <vector>
+
+#include "ceres/internal/fixed_array.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// StaticFixedArray selects the best array implementation based on template
+// arguments. If the size is not known at compile-time, pass
+// ceres::DYNAMIC as a size-template argument.
+//
+// Three different containers are selected in different scenarios:
+//
+// num_elements == DYNAMIC:
+// -> ceres::internal::FixedArray<T, max_stack_size>(size)
+
+// num_elements != DYNAMIC && num_elements <= max_stack_size
+// -> std::array<T,num_elements>
+
+// num_elements != DYNAMIC && num_elements > max_stack_size
+// -> std::vector<T>(num_elements)
+//
+template <typename T,
+ int num_elements,
+ int max_num_elements_on_stack,
+ bool dynamic = (num_elements == DYNAMIC),
+ bool fits_on_stack = (num_elements <= max_num_elements_on_stack)>
+struct ArraySelector {};
+
+template <typename T,
+ int num_elements,
+ int max_num_elements_on_stack,
+ bool fits_on_stack>
+struct ArraySelector<T,
+ num_elements,
+ max_num_elements_on_stack,
+ true,
+ fits_on_stack>
+ : ceres::internal::FixedArray<T, max_num_elements_on_stack> {
+ ArraySelector(int s)
+ : ceres::internal::FixedArray<T, max_num_elements_on_stack>(s) {}
+};
+
+template <typename T, int num_elements, int max_num_elements_on_stack>
+struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, true>
+ : std::array<T, num_elements> {
+ ArraySelector(int s) { CHECK_EQ(s, num_elements); }
+};
+
+template <typename T, int num_elements, int max_num_elements_on_stack>
+struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, false>
+ : std::vector<T> {
+ ArraySelector(int s) : std::vector<T>(s) { CHECK_EQ(s, num_elements); }
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_ARRAY_SELECTOR_H_
diff --git a/extern/ceres/include/ceres/internal/autodiff.h b/extern/ceres/include/ceres/internal/autodiff.h
index 136152a36cd..cb7b1aca5b9 100644
--- a/extern/ceres/include/ceres/internal/autodiff.h
+++ b/extern/ceres/include/ceres/internal/autodiff.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -30,10 +30,10 @@
//
// Computation of the Jacobian matrix for vector-valued functions of multiple
// variables, using automatic differentiation based on the implementation of
-// dual numbers in jet.h. Before reading the rest of this file, it is adivsable
+// dual numbers in jet.h. Before reading the rest of this file, it is advisable
// to read jet.h's header comment in detail.
//
-// The helper wrapper AutoDiff::Differentiate() computes the jacobian of
+// The helper wrapper AutoDifferentiate() computes the jacobian of
// functors with templated operator() taking this form:
//
// struct F {
@@ -57,7 +57,7 @@
// [ * ]
//
// Similar to the 2-parameter example for f described in jet.h, computing the
-// jacobian dy/dx is done by substutiting a suitable jet object for x and all
+// jacobian dy/dx is done by substituting a suitable jet object for x and all
// intermediate steps of the computation of F. Since x is has 4 dimensions, use
// a Jet<double, 4>.
//
@@ -142,16 +142,33 @@
#include <stddef.h>
-#include "ceres/jet.h"
+#include <array>
+#include <utility>
+
+#include "ceres/internal/array_selector.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/parameter_dims.h"
#include "ceres/internal/variadic_evaluate.h"
+#include "ceres/jet.h"
+#include "ceres/types.h"
#include "glog/logging.h"
+// If the number of parameters exceeds this value, the corresponding jets are
+// placed on the heap. This will reduce performance by a factor of 2-5 on
+// current compilers.
+#ifndef CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK
+#define CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK 50
+#endif
+
+#ifndef CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK
+#define CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK 20
+#endif
+
namespace ceres {
namespace internal {
-// Extends src by a 1st order pertubation for every dimension and puts it in
+// Extends src by a 1st order perturbation for every dimension and puts it in
// dst. The size of src is N. Since this is also used for perturbations in
// blocked arrays, offset is used to shift which part of the jet the
// perturbation occurs. This is used to set up the extended x augmented by an
@@ -165,21 +182,62 @@ namespace internal {
//
// is what would get put in dst if N was 3, offset was 3, and the jet type JetT
// was 8-dimensional.
-template <typename JetT, typename T, int N>
-inline void Make1stOrderPerturbation(int offset, const T* src, JetT* dst) {
- DCHECK(src);
- DCHECK(dst);
- for (int j = 0; j < N; ++j) {
- dst[j].a = src[j];
- dst[j].v.setZero();
- dst[j].v[offset + j] = T(1.0);
+template <int j, int N, int Offset, typename T, typename JetT>
+struct Make1stOrderPerturbation {
+ public:
+ inline static void Apply(const T* src, JetT* dst) {
+ if (j == 0) {
+ DCHECK(src);
+ DCHECK(dst);
+ }
+ dst[j] = JetT(src[j], j + Offset);
+ Make1stOrderPerturbation<j + 1, N, Offset, T, JetT>::Apply(src, dst);
}
-}
+};
+
+template <int N, int Offset, typename T, typename JetT>
+struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
+ public:
+ static void Apply(const T* /*src*/, JetT* /*dst*/) {}
+};
+
+// Calls Make1stOrderPerturbation for every parameter block.
+//
+// Example:
+// If one having three parameter blocks with dimensions (3, 2, 4), the call
+//   Make1stOrderPerturbations<std::integer_sequence<int, 3, 2, 4>>::Apply(params, x);
+// will result in the following calls to Make1stOrderPerturbation:
+// Make1stOrderPerturbation<0, 3, 0>::Apply(params[0], x + 0);
+// Make1stOrderPerturbation<0, 2, 3>::Apply(params[1], x + 3);
+// Make1stOrderPerturbation<0, 4, 5>::Apply(params[2], x + 5);
+template <typename Seq, int ParameterIdx = 0, int Offset = 0>
+struct Make1stOrderPerturbations;
+
+template <int N, int... Ns, int ParameterIdx, int Offset>
+struct Make1stOrderPerturbations<std::integer_sequence<int, N, Ns...>,
+ ParameterIdx,
+ Offset> {
+ template <typename T, typename JetT>
+ inline static void Apply(T const* const* parameters, JetT* x) {
+ Make1stOrderPerturbation<0, N, Offset, T, JetT>::Apply(
+ parameters[ParameterIdx], x + Offset);
+ Make1stOrderPerturbations<std::integer_sequence<int, Ns...>,
+ ParameterIdx + 1,
+ Offset + N>::Apply(parameters, x);
+ }
+};
+
+// End of 'recursion'. Nothing more to do.
+template <int ParameterIdx, int Total>
+struct Make1stOrderPerturbations<std::integer_sequence<int>, ParameterIdx, Total> {
+ template <typename T, typename JetT>
+ static void Apply(T const* const* /* NOT USED */, JetT* /* NOT USED */) {}
+};
// Takes the 0th order part of src, assumed to be a Jet type, and puts it in
// dst. This is used to pick out the "vector" part of the extended y.
template <typename JetT, typename T>
-inline void Take0thOrderPart(int M, const JetT *src, T dst) {
+inline void Take0thOrderPart(int M, const JetT* src, T dst) {
DCHECK(src);
for (int i = 0; i < M; ++i) {
dst[i] = src[i].a;
@@ -188,128 +246,117 @@ inline void Take0thOrderPart(int M, const JetT *src, T dst) {
// Takes N 1st order parts, starting at index N0, and puts them in the M x N
// matrix 'dst'. This is used to pick out the "matrix" parts of the extended y.
-template <typename JetT, typename T, int N0, int N>
-inline void Take1stOrderPart(const int M, const JetT *src, T *dst) {
+template <int N0, int N, typename JetT, typename T>
+inline void Take1stOrderPart(const int M, const JetT* src, T* dst) {
DCHECK(src);
DCHECK(dst);
for (int i = 0; i < M; ++i) {
- Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) =
+ Eigen::Map<Eigen::Matrix<T, N, 1>>(dst + N * i, N) =
src[i].v.template segment<N>(N0);
}
}
-// This is in a struct because default template parameters on a
-// function are not supported in C++03 (though it is available in
-// C++0x). N0 through N5 are the dimension of the input arguments to
-// the user supplied functor.
-template <typename Functor, typename T,
- int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
- int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
-struct AutoDiff {
- static bool Differentiate(const Functor& functor,
- T const *const *parameters,
- int num_outputs,
- T *function_value,
- T **jacobians) {
- // This block breaks the 80 column rule to keep it somewhat readable.
- DCHECK_GT(num_outputs, 0);
- DCHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
- << "Zero block cannot precede a non-zero block. Block sizes are "
- << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
- << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
- << N8 << ", " << N9;
+// Calls Take1stOrderPart for every parameter block.
+//
+// Example:
+// If one having three parameter blocks with dimensions (3, 2, 4), the call
+// Take1stOrderParts<integer_sequence<3, 2, 4>::Apply(num_outputs,
+// output,
+// jacobians);
+// will result in the following calls to Take1stOrderPart:
+// if (jacobians[0]) {
+// Take1stOrderPart<0, 3>(num_outputs, output, jacobians[0]);
+// }
+// if (jacobians[1]) {
+// Take1stOrderPart<3, 2>(num_outputs, output, jacobians[1]);
+// }
+// if (jacobians[2]) {
+// Take1stOrderPart<5, 4>(num_outputs, output, jacobians[2]);
+// }
+template <typename Seq, int ParameterIdx = 0, int Offset = 0>
+struct Take1stOrderParts;
- typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9> JetT;
- FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(
- N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9 + num_outputs);
+template <int N, int... Ns, int ParameterIdx, int Offset>
+struct Take1stOrderParts<std::integer_sequence<int, N, Ns...>,
+ ParameterIdx,
+ Offset> {
+ template <typename JetT, typename T>
+ inline static void Apply(int num_outputs, JetT* output, T** jacobians) {
+ if (jacobians[ParameterIdx]) {
+ Take1stOrderPart<Offset, N>(num_outputs, output, jacobians[ParameterIdx]);
+ }
+ Take1stOrderParts<std::integer_sequence<int, Ns...>,
+ ParameterIdx + 1,
+ Offset + N>::Apply(num_outputs, output, jacobians);
+ }
+};
- // These are the positions of the respective jets in the fixed array x.
- const int jet0 = 0;
- const int jet1 = N0;
- const int jet2 = N0 + N1;
- const int jet3 = N0 + N1 + N2;
- const int jet4 = N0 + N1 + N2 + N3;
- const int jet5 = N0 + N1 + N2 + N3 + N4;
- const int jet6 = N0 + N1 + N2 + N3 + N4 + N5;
- const int jet7 = N0 + N1 + N2 + N3 + N4 + N5 + N6;
- const int jet8 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7;
- const int jet9 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8;
+// End of 'recursion'. Nothing more to do.
+template <int ParameterIdx, int Offset>
+struct Take1stOrderParts<std::integer_sequence<int>, ParameterIdx, Offset> {
+ template <typename T, typename JetT>
+ static void Apply(int /* NOT USED*/,
+ JetT* /* NOT USED*/,
+ T** /* NOT USED */) {}
+};
- const JetT *unpacked_parameters[10] = {
- x.get() + jet0,
- x.get() + jet1,
- x.get() + jet2,
- x.get() + jet3,
- x.get() + jet4,
- x.get() + jet5,
- x.get() + jet6,
- x.get() + jet7,
- x.get() + jet8,
- x.get() + jet9,
- };
+template <int kNumResiduals,
+ typename ParameterDims,
+ typename Functor,
+ typename T>
+inline bool AutoDifferentiate(const Functor& functor,
+ T const* const* parameters,
+ int dynamic_num_outputs,
+ T* function_value,
+ T** jacobians) {
+ typedef Jet<T, ParameterDims::kNumParameters> JetT;
+ using Parameters = typename ParameterDims::Parameters;
- JetT* output = x.get() + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
+ if (kNumResiduals != DYNAMIC) {
+ DCHECK_EQ(kNumResiduals, dynamic_num_outputs);
+ }
-#define CERES_MAKE_1ST_ORDER_PERTURBATION(i) \
- if (N ## i) { \
- internal::Make1stOrderPerturbation<JetT, T, N ## i>( \
- jet ## i, \
- parameters[i], \
- x.get() + jet ## i); \
- }
- CERES_MAKE_1ST_ORDER_PERTURBATION(0);
- CERES_MAKE_1ST_ORDER_PERTURBATION(1);
- CERES_MAKE_1ST_ORDER_PERTURBATION(2);
- CERES_MAKE_1ST_ORDER_PERTURBATION(3);
- CERES_MAKE_1ST_ORDER_PERTURBATION(4);
- CERES_MAKE_1ST_ORDER_PERTURBATION(5);
- CERES_MAKE_1ST_ORDER_PERTURBATION(6);
- CERES_MAKE_1ST_ORDER_PERTURBATION(7);
- CERES_MAKE_1ST_ORDER_PERTURBATION(8);
- CERES_MAKE_1ST_ORDER_PERTURBATION(9);
-#undef CERES_MAKE_1ST_ORDER_PERTURBATION
+ ArraySelector<JetT,
+ ParameterDims::kNumParameters,
+ CERES_AUTODIFF_MAX_PARAMETERS_ON_STACK>
+ parameters_as_jets(ParameterDims::kNumParameters);
- if (!VariadicEvaluate<Functor, JetT,
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
- functor, unpacked_parameters, output)) {
- return false;
- }
+ // Pointers to the beginning of each parameter block
+ std::array<JetT*, ParameterDims::kNumParameterBlocks> unpacked_parameters =
+ ParameterDims::GetUnpackedParameters(parameters_as_jets.data());
- internal::Take0thOrderPart(num_outputs, output, function_value);
+ // If the number of residuals is fixed, we use the template argument as the
+ // number of outputs. Otherwise we use the num_outputs parameter. Note: The
+ // ?-operator here is compile-time evaluated, therefore num_outputs is also
+ // a compile-time constant for functors with fixed residuals.
+ const int num_outputs =
+ kNumResiduals == DYNAMIC ? dynamic_num_outputs : kNumResiduals;
+ DCHECK_GT(num_outputs, 0);
-#define CERES_TAKE_1ST_ORDER_PERTURBATION(i) \
- if (N ## i) { \
- if (jacobians[i]) { \
- internal::Take1stOrderPart<JetT, T, \
- jet ## i, \
- N ## i>(num_outputs, \
- output, \
- jacobians[i]); \
- } \
- }
- CERES_TAKE_1ST_ORDER_PERTURBATION(0);
- CERES_TAKE_1ST_ORDER_PERTURBATION(1);
- CERES_TAKE_1ST_ORDER_PERTURBATION(2);
- CERES_TAKE_1ST_ORDER_PERTURBATION(3);
- CERES_TAKE_1ST_ORDER_PERTURBATION(4);
- CERES_TAKE_1ST_ORDER_PERTURBATION(5);
- CERES_TAKE_1ST_ORDER_PERTURBATION(6);
- CERES_TAKE_1ST_ORDER_PERTURBATION(7);
- CERES_TAKE_1ST_ORDER_PERTURBATION(8);
- CERES_TAKE_1ST_ORDER_PERTURBATION(9);
-#undef CERES_TAKE_1ST_ORDER_PERTURBATION
- return true;
+ ArraySelector<JetT, kNumResiduals, CERES_AUTODIFF_MAX_RESIDUALS_ON_STACK>
+ residuals_as_jets(num_outputs);
+
+ // Invalidate the output Jets, so that we can detect if the user
+ // did not assign values to all of them.
+ for (int i = 0; i < num_outputs; ++i) {
+ residuals_as_jets[i].a = kImpossibleValue;
+ residuals_as_jets[i].v.setConstant(kImpossibleValue);
}
-};
+
+ Make1stOrderPerturbations<Parameters>::Apply(parameters,
+ parameters_as_jets.data());
+
+ if (!VariadicEvaluate<ParameterDims>(
+ functor, unpacked_parameters.data(), residuals_as_jets.data())) {
+ return false;
+ }
+
+ Take0thOrderPart(num_outputs, residuals_as_jets.data(), function_value);
+ Take1stOrderParts<Parameters>::Apply(
+ num_outputs, residuals_as_jets.data(), jacobians);
+
+ return true;
+}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/include/ceres/internal/disable_warnings.h b/extern/ceres/include/ceres/internal/disable_warnings.h
index 094124f7159..fd848feec0f 100644
--- a/extern/ceres/include/ceres/internal/disable_warnings.h
+++ b/extern/ceres/include/ceres/internal/disable_warnings.h
@@ -35,7 +35,7 @@
#ifdef _MSC_VER
#pragma warning( push )
-// Disable the warning C4251 which is trigerred by stl classes in
+// Disable the warning C4251 which is triggered by stl classes in
// Ceres' public interface. To quote MSDN: "C4251 can be ignored "
// "if you are deriving from a type in the Standard C++ Library"
#pragma warning( disable : 4251 )
diff --git a/extern/ceres/include/ceres/internal/eigen.h b/extern/ceres/include/ceres/internal/eigen.h
index 7138804ace4..59545dfd9c9 100644
--- a/extern/ceres/include/ceres/internal/eigen.h
+++ b/extern/ceres/include/ceres/internal/eigen.h
@@ -52,40 +52,27 @@ typedef Eigen::Matrix<double,
Eigen::ColMajor> ColMajorMatrix;
typedef Eigen::Map<ColMajorMatrix, 0,
- Eigen::Stride<Eigen::Dynamic, 1> > ColMajorMatrixRef;
+ Eigen::Stride<Eigen::Dynamic, 1>> ColMajorMatrixRef;
typedef Eigen::Map<const ColMajorMatrix,
0,
- Eigen::Stride<Eigen::Dynamic, 1> > ConstColMajorMatrixRef;
-
-
+ Eigen::Stride<Eigen::Dynamic, 1>> ConstColMajorMatrixRef;
// C++ does not support templated typdefs, thus the need for this
// struct so that we can support statically sized Matrix and Maps.
-template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
+ template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic>
struct EigenTypes {
- typedef Eigen::Matrix <double, num_rows, num_cols, Eigen::RowMajor>
- Matrix;
-
- typedef Eigen::Map<
- Eigen::Matrix<double, num_rows, num_cols, Eigen::RowMajor> >
- MatrixRef;
-
- typedef Eigen::Matrix <double, num_rows, 1>
- Vector;
-
- typedef Eigen::Map <
- Eigen::Matrix<double, num_rows, 1> >
- VectorRef;
-
-
- typedef Eigen::Map<
- const Eigen::Matrix<double, num_rows, num_cols, Eigen::RowMajor> >
- ConstMatrixRef;
-
- typedef Eigen::Map <
- const Eigen::Matrix<double, num_rows, 1> >
- ConstVectorRef;
+ typedef Eigen::Matrix<double,
+ num_rows,
+ num_cols,
+ num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor>
+ Matrix;
+
+ typedef Eigen::Map<Matrix> MatrixRef;
+ typedef Eigen::Map<const Matrix> ConstMatrixRef;
+ typedef Eigen::Matrix<double, num_rows, 1> Vector;
+ typedef Eigen::Map<Eigen::Matrix<double, num_rows, 1>> VectorRef;
+ typedef Eigen::Map<const Eigen::Matrix<double, num_rows, 1>> ConstVectorRef;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/internal/fixed_array.h b/extern/ceres/include/ceres/internal/fixed_array.h
index 387298c58d0..f8ef02d40e8 100644
--- a/extern/ceres/include/ceres/internal/fixed_array.h
+++ b/extern/ceres/include/ceres/internal/fixed_array.h
@@ -1,189 +1,466 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
+// Copyright 2018 The Abseil Authors.
//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
+// https://www.apache.org/licenses/LICENSE-2.0
//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//
-// Author: rennie@google.com (Jeffrey Rennie)
-// Author: sanjay@google.com (Sanjay Ghemawat) -- renamed to FixedArray
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
+// the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable length arrays
+// within the GCC extension. (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
#ifndef CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
#define CERES_PUBLIC_INTERNAL_FIXED_ARRAY_H_
+#include <algorithm>
+#include <array>
#include <cstddef>
-#include "Eigen/Core"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/manual_constructor.h"
+#include <memory>
+#include <tuple>
+#include <type_traits>
+
+#include <Eigen/Core> // For Eigen::aligned_allocator
+
+#include "ceres/internal/memory.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
-// A FixedArray<T> represents a non-resizable array of T where the
-// length of the array does not need to be a compile time constant.
-//
-// FixedArray allocates small arrays inline, and large arrays on
-// the heap. It is a good replacement for non-standard and deprecated
-// uses of alloca() and variable length arrays (a GCC extension).
-//
-// FixedArray keeps performance fast for small arrays, because it
-// avoids heap operations. It also helps reduce the chances of
-// accidentally overflowing your stack if large input is passed to
-// your function.
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// The default fixed array allocator.
//
-// Also, FixedArray is useful for writing portable code. Not all
-// compilers support arrays of dynamic size.
+// As one can not easily detect if a struct contains or inherits from a fixed
+// size Eigen type, to be safe the Eigen::aligned_allocator is used by default.
+// But trivial types can never contain Eigen types, so std::allocator is used to
+// safe some heap memory.
+template <typename T>
+using FixedArrayDefaultAllocator =
+ typename std::conditional<std::is_trivial<T>::value,
+ std::allocator<T>,
+ Eigen::aligned_allocator<T>>::type;
-// Most users should not specify an inline_elements argument and let
-// FixedArray<> automatically determine the number of elements
-// to store inline based on sizeof(T).
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating a small array
+// inline for efficiency.
//
-// If inline_elements is specified, the FixedArray<> implementation
-// will store arrays of length <= inline_elements inline.
+// Most users should not specify an `inline_elements` argument and let
+// `FixedArray` automatically determine the number of elements
+// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
+// `FixedArray` implementation will use inline storage for arrays with a
+// length <= `inline_elements`.
//
-// Finally note that unlike vector<T> FixedArray<T> will not zero-initialize
-// simple types like int, double, bool, etc.
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of c-style arrays and `std::array`, but not
+// `std::vector`.
//
-// Non-POD types will be default-initialized just like regular vectors or
-// arrays.
+// Note that `FixedArray` does not provide a public allocator; if it requires a
+// heap allocation, it will do so with global `::operator new[]()` and
+// `::operator delete[]()`, even if T provides class-scope overrides for these
+// operators.
+template <typename T,
+ size_t N = kFixedArrayUseDefault,
+ typename A = FixedArrayDefaultAllocator<T>>
+class FixedArray {
+ static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
+ "Arrays with unknown bounds cannot be used with FixedArray.");
-#if defined(_WIN64)
- typedef __int64 ssize_t;
-#elif defined(_WIN32)
- typedef __int32 ssize_t;
-#endif
+ static constexpr size_t kInlineBytesDefault = 256;
+
+ using AllocatorTraits = std::allocator_traits<A>;
+ // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+ // but this seems to be mostly pedantic.
+ template <typename Iterator>
+ using EnableIfForwardIterator = typename std::enable_if<std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>::value>::type;
+ static constexpr bool DefaultConstructorIsNonTrivial() {
+ return !std::is_trivially_default_constructible<StorageElement>::value;
+ }
-template <typename T, ssize_t inline_elements = -1>
-class FixedArray {
public:
- // For playing nicely with stl:
- typedef T value_type;
- typedef T* iterator;
- typedef T const* const_iterator;
- typedef T& reference;
- typedef T const& const_reference;
- typedef T* pointer;
- typedef std::ptrdiff_t difference_type;
- typedef size_t size_type;
-
- // REQUIRES: n >= 0
- // Creates an array object that can store "n" elements.
- //
- // FixedArray<T> will not zero-initialiaze POD (simple) types like int,
- // double, bool, etc.
- // Non-POD types will be default-initialized just like regular vectors or
- // arrays.
- explicit FixedArray(size_type n);
-
- // Releases any resources.
- ~FixedArray();
-
- // Returns the length of the array.
- inline size_type size() const { return size_; }
-
- // Returns the memory size of the array in bytes.
- inline size_t memsize() const { return size_ * sizeof(T); }
-
- // Returns a pointer to the underlying element array.
- inline const T* get() const { return &array_[0].element; }
- inline T* get() { return &array_[0].element; }
+ using allocator_type = typename AllocatorTraits::allocator_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ static constexpr size_type inline_elements =
+ (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
+ : static_cast<size_type>(N));
+
+ FixedArray(const FixedArray& other,
+ const allocator_type& a = allocator_type())
+ : FixedArray(other.begin(), other.end(), a) {}
+
+ FixedArray(FixedArray&& other, const allocator_type& a = allocator_type())
+ : FixedArray(std::make_move_iterator(other.begin()),
+ std::make_move_iterator(other.end()),
+ a) {}
+
+ // Creates an array object that can store `n` elements.
+ // Note that trivially constructible elements will be uninitialized.
+ explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ if (DefaultConstructorIsNonTrivial()) {
+ ConstructRange(storage_.alloc(), storage_.begin(), storage_.end());
+ }
+ }
+ // Creates an array initialized with `n` copies of `val`.
+ FixedArray(size_type n,
+ const value_type& val,
+ const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ ConstructRange(storage_.alloc(), storage_.begin(), storage_.end(), val);
+ }
+
+ // Creates an array initialized with the size and contents of `init_list`.
+ FixedArray(std::initializer_list<value_type> init_list,
+ const allocator_type& a = allocator_type())
+ : FixedArray(init_list.begin(), init_list.end(), a) {}
+
+ // Creates an array initialized with the elements from the input
+ // range. The array's size will always be `std::distance(first, last)`.
+ // REQUIRES: Iterator must be a forward_iterator or better.
+ template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
+ FixedArray(Iterator first,
+ Iterator last,
+ const allocator_type& a = allocator_type())
+ : storage_(std::distance(first, last), a) {
+ CopyRange(storage_.alloc(), storage_.begin(), first, last);
+ }
+
+ ~FixedArray() noexcept {
+ for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
+ AllocatorTraits::destroy(storage_.alloc(), cur);
+ }
+ }
+
+ // Assignments are deleted because they break the invariant that the size of a
+ // `FixedArray` never changes.
+ void operator=(FixedArray&&) = delete;
+ void operator=(const FixedArray&) = delete;
+
+ // FixedArray::size()
+ //
+ // Returns the length of the fixed array.
+ size_type size() const { return storage_.size(); }
+
+ // FixedArray::max_size()
+ //
+ // Returns the largest possible value of `std::distance(begin(), end())` for a
+ // `FixedArray<T>`. This is equivalent to the most possible addressable bytes
+ // over the number of bytes taken by T.
+ constexpr size_type max_size() const {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ // FixedArray::empty()
+ //
+ // Returns whether or not the fixed array is empty.
+ bool empty() const { return size() == 0; }
+
+ // FixedArray::memsize()
+ //
+ // Returns the memory size of the fixed array in bytes.
+ size_t memsize() const { return size() * sizeof(value_type); }
+
+ // FixedArray::data()
+ //
+ // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+ // can be used to access (but not modify) the contained elements.
+ const_pointer data() const { return AsValueType(storage_.begin()); }
+
+ // Overload of FixedArray::data() to return a T* pointer to elements of the
+ // fixed array. This pointer can be used to access and modify the contained
+ // elements.
+ pointer data() { return AsValueType(storage_.begin()); }
+
+ // FixedArray::operator[]
+ //
+ // Returns a reference the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- // Returns a reference to the "i"th element.
- inline T& operator[](size_type i) {
- DCHECK_LT(i, size_);
- return array_[i].element;
+ reference operator[](size_type i) {
+ DCHECK_LT(i, size());
+ return data()[i];
}
+ // Overload of FixedArray::operator()[] to return a const reference to the
+ // ith element of the fixed array.
// REQUIRES: 0 <= i < size()
- // Returns a reference to the "i"th element.
- inline const T& operator[](size_type i) const {
- DCHECK_LT(i, size_);
- return array_[i].element;
+ const_reference operator[](size_type i) const {
+ DCHECK_LT(i, size());
+ return data()[i];
+ }
+
+ // FixedArray::front()
+ //
+ // Returns a reference to the first element of the fixed array.
+ reference front() { return *begin(); }
+
+ // Overload of FixedArray::front() to return a reference to the first element
+ // of a fixed array of const values.
+ const_reference front() const { return *begin(); }
+
+ // FixedArray::back()
+ //
+ // Returns a reference to the last element of the fixed array.
+ reference back() { return *(end() - 1); }
+
+ // Overload of FixedArray::back() to return a reference to the last element
+ // of a fixed array of const values.
+ const_reference back() const { return *(end() - 1); }
+
+ // FixedArray::begin()
+ //
+ // Returns an iterator to the beginning of the fixed array.
+ iterator begin() { return data(); }
+
+ // Overload of FixedArray::begin() to return a const iterator to the
+ // beginning of the fixed array.
+ const_iterator begin() const { return data(); }
+
+ // FixedArray::cbegin()
+ //
+ // Returns a const iterator to the beginning of the fixed array.
+ const_iterator cbegin() const { return begin(); }
+
+ // FixedArray::end()
+ //
+ // Returns an iterator to the end of the fixed array.
+ iterator end() { return data() + size(); }
+
+ // Overload of FixedArray::end() to return a const iterator to the end of the
+ // fixed array.
+ const_iterator end() const { return data() + size(); }
+
+ // FixedArray::cend()
+ //
+ // Returns a const iterator to the end of the fixed array.
+ const_iterator cend() const { return end(); }
+
+ // FixedArray::rbegin()
+ //
+ // Returns a reverse iterator from the end of the fixed array.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+
+ // Overload of FixedArray::rbegin() to return a const reverse iterator from
+ // the end of the fixed array.
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
}
- inline iterator begin() { return &array_[0].element; }
- inline iterator end() { return &array_[size_].element; }
+ // FixedArray::crbegin()
+ //
+ // Returns a const reverse iterator from the end of the fixed array.
+ const_reverse_iterator crbegin() const { return rbegin(); }
- inline const_iterator begin() const { return &array_[0].element; }
- inline const_iterator end() const { return &array_[size_].element; }
+ // FixedArray::rend()
+ //
+ // Returns a reverse iterator from the beginning of the fixed array.
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+
+ // Overload of FixedArray::rend() for returning a const reverse iterator
+ // from the beginning of the fixed array.
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // FixedArray::crend()
+ //
+ // Returns a reverse iterator from the beginning of the fixed array.
+ const_reverse_iterator crend() const { return rend(); }
+
+ // FixedArray::fill()
+ //
+ // Assigns the given `value` to all elements in the fixed array.
+ void fill(const value_type& val) { std::fill(begin(), end(), val); }
+
+ // Relational operators. Equality operators are elementwise using
+ // `operator==`, while order operators order FixedArrays lexicographically.
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+ return std::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ }
+
+ friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+ return std::lexicographical_compare(
+ lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ }
+
+ friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+ return rhs < lhs;
+ }
+
+ friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(rhs < lhs);
+ }
+
+ friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs < rhs);
+ }
private:
- // Container to hold elements of type T. This is necessary to handle
- // the case where T is a a (C-style) array. The size of InnerContainer
- // and T must be the same, otherwise callers' assumptions about use
- // of this code will be broken.
- struct InnerContainer {
- T element;
+ // StorageElement
+ //
+ // For FixedArrays with a C-style-array value_type, StorageElement is a POD
+ // wrapper struct called StorageElementWrapper that holds the value_type
+ // instance inside. This is needed for construction and destruction of the
+ // entire array regardless of how many dimensions it has. For all other cases,
+ // StorageElement is just an alias of value_type.
+ //
+ // Maintainer's Note: The simpler solution would be to simply wrap value_type
+ // in a struct whether it's an array or not. That causes some paranoid
+ // diagnostics to misfire, believing that 'data()' returns a pointer to a
+ // single element, rather than the packed array that it really is.
+ // e.g.:
+ //
+ // FixedArray<char> buf(1);
+ // sprintf(buf.data(), "foo");
+ //
+ // error: call to int __builtin___sprintf_chk(etc...)
+ // will always overflow destination buffer [-Werror]
+ //
+ template <typename OuterT,
+ typename InnerT = typename std::remove_extent<OuterT>::type,
+ size_t InnerN = std::extent<OuterT>::value>
+ struct StorageElementWrapper {
+ InnerT array[InnerN];
};
- // How many elements should we store inline?
- // a. If not specified, use a default of 256 bytes (256 bytes
- // seems small enough to not cause stack overflow or unnecessary
- // stack pollution, while still allowing stack allocation for
- // reasonably long character arrays.
- // b. Never use 0 length arrays (not ISO C++)
- static const size_type S1 = ((inline_elements < 0)
- ? (256/sizeof(T)) : inline_elements);
- static const size_type S2 = (S1 <= 0) ? 1 : S1;
- static const size_type kInlineElements = S2;
-
- size_type const size_;
- InnerContainer* const array_;
-
- // Allocate some space, not an array of elements of type T, so that we can
- // skip calling the T constructors and destructors for space we never use.
- ManualConstructor<InnerContainer> inline_space_[kInlineElements];
-};
+ using StorageElement =
+ typename std::conditional<std::is_array<value_type>::value,
+ StorageElementWrapper<value_type>,
+ value_type>::type;
-// Implementation details follow
-
-template <class T, ssize_t S>
-inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n)
- : size_(n),
- array_((n <= kInlineElements
- ? reinterpret_cast<InnerContainer*>(inline_space_)
- : new InnerContainer[n])) {
- // Construct only the elements actually used.
- if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) {
- for (size_t i = 0; i != size_; ++i) {
- inline_space_[i].Init();
- }
+ static pointer AsValueType(pointer ptr) { return ptr; }
+ static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
+ return std::addressof(ptr->array);
}
-}
-template <class T, ssize_t S>
-inline FixedArray<T, S>::~FixedArray() {
- if (array_ != reinterpret_cast<InnerContainer*>(inline_space_)) {
- delete[] array_;
- } else {
- for (size_t i = 0; i != size_; ++i) {
- inline_space_[i].Destroy();
+ static_assert(sizeof(StorageElement) == sizeof(value_type), "");
+ static_assert(alignof(StorageElement) == alignof(value_type), "");
+
+ class NonEmptyInlinedStorage {
+ public:
+ StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
+ void AnnotateConstruct(size_type) {}
+ void AnnotateDestruct(size_type) {}
+
+ // #ifdef ADDRESS_SANITIZER
+ // void* RedzoneBegin() { return &redzone_begin_; }
+ // void* RedzoneEnd() { return &redzone_end_ + 1; }
+ // #endif // ADDRESS_SANITIZER
+
+ private:
+ // ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
+ // ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ };
+
+ class EmptyInlinedStorage {
+ public:
+ StorageElement* data() { return nullptr; }
+ void AnnotateConstruct(size_type) {}
+ void AnnotateDestruct(size_type) {}
+ };
+
+ using InlinedStorage =
+ typename std::conditional<inline_elements == 0,
+ EmptyInlinedStorage,
+ NonEmptyInlinedStorage>::type;
+
+ // Storage
+ //
+ // An instance of Storage manages the inline and out-of-line memory for
+ // instances of FixedArray. This guarantees that even when construction of
+ // individual elements fails in the FixedArray constructor body, the
+ // destructor for Storage will still be called and out-of-line memory will be
+ // properly deallocated.
+ //
+ class Storage : public InlinedStorage {
+ public:
+ Storage(size_type n, const allocator_type& a)
+ : size_alloc_(n, a), data_(InitializeData()) {}
+
+ ~Storage() noexcept {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateDestruct(size());
+ } else {
+ AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
+ }
}
- }
-}
+
+ size_type size() const { return std::get<0>(size_alloc_); }
+ StorageElement* begin() const { return data_; }
+ StorageElement* end() const { return begin() + size(); }
+ allocator_type& alloc() { return std::get<1>(size_alloc_); }
+
+ private:
+ static bool UsingInlinedStorage(size_type n) {
+ return n <= inline_elements;
+ }
+
+ StorageElement* InitializeData() {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateConstruct(size());
+ return InlinedStorage::data();
+ } else {
+ return reinterpret_cast<StorageElement*>(
+ AllocatorTraits::allocate(alloc(), size()));
+ }
+ }
+
+ // Using std::tuple and not absl::CompressedTuple, as it has a lot of
+ // dependencies to other absl headers.
+ std::tuple<size_type, allocator_type> size_alloc_;
+ StorageElement* data_;
+ };
+
+ Storage storage_;
+};
+
+template <typename T, size_t N, typename A>
+constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
+
+template <typename T, size_t N, typename A>
+constexpr typename FixedArray<T, N, A>::size_type
+ FixedArray<T, N, A>::inline_elements;
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/householder_vector.h b/extern/ceres/include/ceres/internal/householder_vector.h
index f54feea054d..55f68e526a0 100644
--- a/extern/ceres/internal/ceres/householder_vector.h
+++ b/extern/ceres/include/ceres/internal/householder_vector.h
@@ -28,8 +28,8 @@
//
// Author: vitus@google.com (Michael Vitus)
-#ifndef CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
-#define CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
+#ifndef CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_
+#define CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_
#include "Eigen/Core"
#include "glog/logging.h"
@@ -42,12 +42,15 @@ namespace internal {
// vector as pivot instead of first. This computes the vector v with v(n) = 1
// and beta such that H = I - beta * v * v^T is orthogonal and
// H * x = ||x||_2 * e_n.
-template <typename Scalar>
-void ComputeHouseholderVector(const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& x,
- Eigen::Matrix<Scalar, Eigen::Dynamic, 1>* v,
+//
+// NOTE: Some versions of MSVC have trouble deducing the type of v if
+// you do not specify all the template arguments explicitly.
+template <typename XVectorType, typename Scalar, int N>
+void ComputeHouseholderVector(const XVectorType& x,
+ Eigen::Matrix<Scalar, N, 1>* v,
Scalar* beta) {
- CHECK_NOTNULL(beta);
- CHECK_NOTNULL(v);
+ CHECK(beta != nullptr);
+ CHECK(v != nullptr);
CHECK_GT(x.rows(), 1);
CHECK_EQ(x.rows(), v->rows());
@@ -82,4 +85,4 @@ void ComputeHouseholderVector(const Eigen::Matrix<Scalar, Eigen::Dynamic, 1>& x,
} // namespace internal
} // namespace ceres
-#endif // CERES_PUBLIC_HOUSEHOLDER_VECTOR_H_
+#endif // CERES_PUBLIC_INTERNAL_HOUSEHOLDER_VECTOR_H_
diff --git a/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h
new file mode 100644
index 00000000000..170acac2832
--- /dev/null
+++ b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h
@@ -0,0 +1,165 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+//
+// Algorithms to be used together with integer_sequence, like computing the sum
+// or the exclusive scan (sometimes called exclusive prefix sum) at compile
+// time.
+
+#ifndef CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_
+#define CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_
+
+#include <utility>
+
+namespace ceres {
+namespace internal {
+
+// Implementation of calculating the sum of an integer sequence.
+// Recursively instantiate SumImpl and calculate the sum of the N first
+// numbers. This reduces the number of instantiations and speeds up
+// compilation.
+//
+// Examples:
+// 1) integer_sequence<int, 5>:
+// Value = 5
+//
+// 2) integer_sequence<int, 4, 2>:
+// Value = 4 + 2 + SumImpl<integer_sequence<int>>::Value
+// Value = 4 + 2 + 0
+//
+// 3) integer_sequence<int, 2, 1, 4>:
+// Value = 2 + 1 + SumImpl<integer_sequence<int, 4>>::Value
+// Value = 2 + 1 + 4
+template <typename Seq>
+struct SumImpl;
+
+// Strip of and sum the first number.
+template <typename T, T N, T... Ns>
+struct SumImpl<std::integer_sequence<T, N, Ns...>> {
+ static constexpr T Value = N + SumImpl<std::integer_sequence<T, Ns...>>::Value;
+};
+
+// Strip of and sum the first two numbers.
+template <typename T, T N1, T N2, T... Ns>
+struct SumImpl<std::integer_sequence<T, N1, N2, Ns...>> {
+ static constexpr T Value =
+ N1 + N2 + SumImpl<std::integer_sequence<T, Ns...>>::Value;
+};
+
+// Strip of and sum the first four numbers.
+template <typename T, T N1, T N2, T N3, T N4, T... Ns>
+struct SumImpl<std::integer_sequence<T, N1, N2, N3, N4, Ns...>> {
+ static constexpr T Value =
+ N1 + N2 + N3 + N4 + SumImpl<std::integer_sequence<T, Ns...>>::Value;
+};
+
+// Only one number is left. 'Value' is just that number ('recursion' ends).
+template <typename T, T N>
+struct SumImpl<std::integer_sequence<T, N>> {
+ static constexpr T Value = N;
+};
+
+// No number is left. 'Value' is the identity element (for sum this is zero).
+template <typename T>
+struct SumImpl<std::integer_sequence<T>> {
+ static constexpr T Value = T(0);
+};
+
+// Calculate the sum of an integer sequence. The resulting sum will be stored in
+// 'Value'.
+template <typename Seq>
+class Sum {
+ using T = typename Seq::value_type;
+
+ public:
+ static constexpr T Value = SumImpl<Seq>::Value;
+};
+
+// Implementation of calculating an exclusive scan (exclusive prefix sum) of an
+// integer sequence. Exclusive means that the i-th input element is not included
+// in the i-th sum. Calculating the exclusive scan for an input array I results
+// in the following output R:
+//
+// R[0] = 0
+// R[1] = I[0];
+// R[2] = I[0] + I[1];
+// R[3] = I[0] + I[1] + I[2];
+// ...
+//
+// In C++17 std::exclusive_scan does the same operation at runtime (but
+// cannot be used to calculate the prefix sum at compile time). See
+// https://en.cppreference.com/w/cpp/algorithm/exclusive_scan for a more
+// detailed description.
+//
+// Example for integer_sequence<int, 1, 4, 3> (seq := integer_sequence):
+// T , Sum, Ns... , Rs...
+// ExclusiveScanImpl<int, 0, seq<int, 1, 4, 3>, seq<int >>
+// ExclusiveScanImpl<int, 1, seq<int, 4, 3>, seq<int, 0 >>
+// ExclusiveScanImpl<int, 5, seq<int, 3>, seq<int, 0, 1 >>
+// ExclusiveScanImpl<int, 8, seq<int >, seq<int, 0, 1, 5>>
+// ^^^^^^^^^^^^^^^^^
+// resulting sequence
+template <typename T, T Sum, typename SeqIn, typename SeqOut>
+struct ExclusiveScanImpl;
+
+template <typename T, T Sum, T N, T... Ns, T... Rs>
+struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T, N, Ns...>,
+ std::integer_sequence<T, Rs...>> {
+ using Type =
+ typename ExclusiveScanImpl<T, Sum + N, std::integer_sequence<T, Ns...>,
+ std::integer_sequence<T, Rs..., Sum>>::Type;
+};
+
+// End of 'recursion'. The resulting type is SeqOut.
+template <typename T, T Sum, typename SeqOut>
+struct ExclusiveScanImpl<T, Sum, std::integer_sequence<T>, SeqOut> {
+ using Type = SeqOut;
+};
+
+// Calculates the exclusive scan of the specified integer sequence. The last
+// element (the total) is not included in the resulting sequence so they have
+// same length. This means the exclusive scan of integer_sequence<int, 1, 2, 3>
+// will be integer_sequence<int, 0, 1, 3>.
+template <typename Seq>
+class ExclusiveScanT {
+ using T = typename Seq::value_type;
+
+ public:
+ using Type =
+ typename ExclusiveScanImpl<T, T(0), Seq, std::integer_sequence<T>>::Type;
+};
+
+// Helper to use exclusive scan without typename.
+template <typename Seq>
+using ExclusiveScan = typename ExclusiveScanT<Seq>::Type;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_INTEGER_SEQUENCE_ALGORITHM_H_
diff --git a/extern/ceres/include/ceres/internal/line_parameterization.h b/extern/ceres/include/ceres/internal/line_parameterization.h
new file mode 100644
index 00000000000..eda390148df
--- /dev/null
+++ b/extern/ceres/include/ceres/internal/line_parameterization.h
@@ -0,0 +1,183 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2020 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+//
+
+#ifndef CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_
+#define CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_
+
+#include "householder_vector.h"
+
+namespace ceres {
+
+template <int AmbientSpaceDimension>
+bool LineParameterization<AmbientSpaceDimension>::Plus(
+ const double* x_ptr,
+ const double* delta_ptr,
+ double* x_plus_delta_ptr) const {
+ // We seek a box plus operator of the form
+ //
+ // [o*, d*] = Plus([o, d], [delta_o, delta_d])
+ //
+ // where o is the origin point, d is the direction vector, delta_o is
+ // the delta of the origin point and delta_d the delta of the direction and
+ // o* and d* is the updated origin point and direction.
+ //
+ // We separate the Plus operator into the origin point and directional part
+ // d* = Plus_d(d, delta_d)
+ // o* = Plus_o(o, d, delta_o)
+ //
+ // The direction update function Plus_d is the same as for the homogeneous
+ // vector parameterization:
+ //
+ // d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T
+ //
+ // where H is the householder matrix
+ // H_{v} = I - (2 / |v|^2) v v^T
+ // and
+ // v(d) = d - sign(d_n) |d| e_n.
+ //
+ // The origin point update function Plus_o is defined as
+ //
+ // o* = o + H_{v(d)} [0.5 delta_o, 0]^T.
+
+ static constexpr int kDim = AmbientSpaceDimension;
+ using AmbientVector = Eigen::Matrix<double, kDim, 1>;
+ using AmbientVectorRef = Eigen::Map<Eigen::Matrix<double, kDim, 1>>;
+ using ConstAmbientVectorRef =
+ Eigen::Map<const Eigen::Matrix<double, kDim, 1>>;
+ using ConstTangentVectorRef =
+ Eigen::Map<const Eigen::Matrix<double, kDim - 1, 1>>;
+
+ ConstAmbientVectorRef o(x_ptr);
+ ConstAmbientVectorRef d(x_ptr + kDim);
+
+ ConstTangentVectorRef delta_o(delta_ptr);
+ ConstTangentVectorRef delta_d(delta_ptr + kDim - 1);
+ AmbientVectorRef o_plus_delta(x_plus_delta_ptr);
+ AmbientVectorRef d_plus_delta(x_plus_delta_ptr + kDim);
+
+ const double norm_delta_d = delta_d.norm();
+
+ o_plus_delta = o;
+
+ // Shortcut for zero delta direction.
+ if (norm_delta_d == 0.0) {
+ d_plus_delta = d;
+
+ if (delta_o.isZero(0.0)) {
+ return true;
+ }
+ }
+
+ // Calculate the householder transformation which is needed for f_d and f_o.
+ AmbientVector v;
+ double beta;
+
+ // NOTE: The explicit template arguments are needed here because
+ // ComputeHouseholderVector is templated and some versions of MSVC
+ // have trouble deducing the type of v automatically.
+ internal::ComputeHouseholderVector<ConstAmbientVectorRef, double, kDim>(
+ d, &v, &beta);
+
+ if (norm_delta_d != 0.0) {
+ // Map the delta from the minimum representation to the over parameterized
+ // homogeneous vector. See section A6.9.2 on page 624 of Hartley & Zisserman
+ // (2nd Edition) for a detailed description. Note there is a typo on Page
+ // 625, line 4 so check the book errata.
+ const double norm_delta_div_2 = 0.5 * norm_delta_d;
+ const double sin_delta_by_delta =
+ std::sin(norm_delta_div_2) / norm_delta_div_2;
+
+ // Apply the delta update to remain on the unit sphere. See section A6.9.3
+ // on page 625 of Hartley & Zisserman (2nd Edition) for a detailed
+ // description.
+ AmbientVector y;
+ y.template head<kDim - 1>() = 0.5 * sin_delta_by_delta * delta_d;
+ y[kDim - 1] = std::cos(norm_delta_div_2);
+
+ d_plus_delta = d.norm() * (y - v * (beta * (v.transpose() * y)));
+ }
+
+ // The null space is in the direction of the line, so the tangent space is
+ // perpendicular to the line direction. This is achieved by using the
+ // householder matrix of the direction and allow only movements
+ // perpendicular to e_n.
+ //
+ // The factor of 0.5 is used to be consistent with the line direction
+ // update.
+ AmbientVector y;
+ y << 0.5 * delta_o, 0;
+ o_plus_delta += y - v * (beta * (v.transpose() * y));
+
+ return true;
+}
+
+template <int AmbientSpaceDimension>
+bool LineParameterization<AmbientSpaceDimension>::ComputeJacobian(
+ const double* x_ptr, double* jacobian_ptr) const {
+ static constexpr int kDim = AmbientSpaceDimension;
+ using AmbientVector = Eigen::Matrix<double, kDim, 1>;
+ using ConstAmbientVectorRef =
+ Eigen::Map<const Eigen::Matrix<double, kDim, 1>>;
+ using MatrixRef = Eigen::Map<
+ Eigen::Matrix<double, 2 * kDim, 2 * (kDim - 1), Eigen::RowMajor>>;
+
+ ConstAmbientVectorRef d(x_ptr + kDim);
+ MatrixRef jacobian(jacobian_ptr);
+
+ // Clear the Jacobian as only half of the matrix is not zero.
+ jacobian.setZero();
+
+ AmbientVector v;
+ double beta;
+
+ // NOTE: The explicit template arguments are needed here because
+ // ComputeHouseholderVector is templated and some versions of MSVC
+ // have trouble deducing the type of v automatically.
+ internal::ComputeHouseholderVector<ConstAmbientVectorRef, double, kDim>(
+ d, &v, &beta);
+
+ // The Jacobian is equal to J = 0.5 * H.leftCols(kDim - 1) where H is
+ // the Householder matrix (H = I - beta * v * v') for the origin point. For
+ // the line direction part the Jacobian is scaled by the norm of the
+ // direction.
+ for (int i = 0; i < kDim - 1; ++i) {
+ jacobian.block(0, i, kDim, 1) = -0.5 * beta * v(i) * v;
+ jacobian.col(i)(i) += 0.5;
+ }
+
+ jacobian.template block<kDim, kDim - 1>(kDim, kDim - 1) =
+ jacobian.template block<kDim, kDim - 1>(0, 0) * d.norm();
+ return true;
+}
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_LINE_PARAMETERIZATION_H_
diff --git a/extern/ceres/include/ceres/internal/macros.h b/extern/ceres/include/ceres/internal/macros.h
deleted file mode 100644
index bebb965e25b..00000000000
--- a/extern/ceres/include/ceres/internal/macros.h
+++ /dev/null
@@ -1,170 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-//
-// Various Google-specific macros.
-//
-// This code is compiled directly on many platforms, including client
-// platforms like Windows, Mac, and embedded systems. Before making
-// any changes here, make sure that you're not breaking any platforms.
-
-#ifndef CERES_PUBLIC_INTERNAL_MACROS_H_
-#define CERES_PUBLIC_INTERNAL_MACROS_H_
-
-#include <cstddef> // For size_t.
-
-// A macro to disallow the copy constructor and operator= functions
-// This should be used in the private: declarations for a class
-//
-// For disallowing only assign or copy, write the code directly, but declare
-// the intend in a comment, for example:
-//
-// void operator=(const TypeName&); // _DISALLOW_ASSIGN
-
-// Note, that most uses of CERES_DISALLOW_ASSIGN and CERES_DISALLOW_COPY
-// are broken semantically, one should either use disallow both or
-// neither. Try to avoid these in new code.
-#define CERES_DISALLOW_COPY_AND_ASSIGN(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
-
-// A macro to disallow all the implicit constructors, namely the
-// default constructor, copy constructor and operator= functions.
-//
-// This should be used in the private: declarations for a class
-// that wants to prevent anyone from instantiating it. This is
-// especially useful for classes containing only static methods.
-#define CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
- TypeName(); \
- CERES_DISALLOW_COPY_AND_ASSIGN(TypeName)
-
-// The arraysize(arr) macro returns the # of elements in an array arr.
-// The expression is a compile-time constant, and therefore can be
-// used in defining new arrays, for example. If you use arraysize on
-// a pointer by mistake, you will get a compile-time error.
-//
-// One caveat is that arraysize() doesn't accept any array of an
-// anonymous type or a type defined inside a function. In these rare
-// cases, you have to use the unsafe ARRAYSIZE() macro below. This is
-// due to a limitation in C++'s template system. The limitation might
-// eventually be removed, but it hasn't happened yet.
-
-// This template function declaration is used in defining arraysize.
-// Note that the function doesn't need an implementation, as we only
-// use its type.
-template <typename T, size_t N>
-char (&ArraySizeHelper(T (&array)[N]))[N];
-
-// That gcc wants both of these prototypes seems mysterious. VC, for
-// its part, can't decide which to use (another mystery). Matching of
-// template overloads: the final frontier.
-#ifndef _WIN32
-template <typename T, size_t N>
-char (&ArraySizeHelper(const T (&array)[N]))[N];
-#endif
-
-#define arraysize(array) (sizeof(ArraySizeHelper(array)))
-
-// ARRAYSIZE performs essentially the same calculation as arraysize,
-// but can be used on anonymous types or types defined inside
-// functions. It's less safe than arraysize as it accepts some
-// (although not all) pointers. Therefore, you should use arraysize
-// whenever possible.
-//
-// The expression ARRAYSIZE(a) is a compile-time constant of type
-// size_t.
-//
-// ARRAYSIZE catches a few type errors. If you see a compiler error
-//
-// "warning: division by zero in ..."
-//
-// when using ARRAYSIZE, you are (wrongfully) giving it a pointer.
-// You should only use ARRAYSIZE on statically allocated arrays.
-//
-// The following comments are on the implementation details, and can
-// be ignored by the users.
-//
-// ARRAYSIZE(arr) works by inspecting sizeof(arr) (the # of bytes in
-// the array) and sizeof(*(arr)) (the # of bytes in one array
-// element). If the former is divisible by the latter, perhaps arr is
-// indeed an array, in which case the division result is the # of
-// elements in the array. Otherwise, arr cannot possibly be an array,
-// and we generate a compiler error to prevent the code from
-// compiling.
-//
-// Since the size of bool is implementation-defined, we need to cast
-// !(sizeof(a) & sizeof(*(a))) to size_t in order to ensure the final
-// result has type size_t.
-//
-// This macro is not perfect as it wrongfully accepts certain
-// pointers, namely where the pointer size is divisible by the pointee
-// size. Since all our code has to go through a 32-bit compiler,
-// where a pointer is 4 bytes, this means all pointers to a type whose
-// size is 3 or greater than 4 will be (righteously) rejected.
-//
-// Kudos to Jorg Brown for this simple and elegant implementation.
-//
-// - wan 2005-11-16
-//
-// Starting with Visual C++ 2005, WinNT.h includes ARRAYSIZE. However,
-// the definition comes from the over-broad windows.h header that
-// introduces a macro, ERROR, that conflicts with the logging framework
-// that Ceres uses. Instead, rename ARRAYSIZE to CERES_ARRAYSIZE.
-#define CERES_ARRAYSIZE(a) \
- ((sizeof(a) / sizeof(*(a))) / \
- static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
-
-// Tell the compiler to warn about unused return values for functions
-// declared with this macro. The macro should be used on function
-// declarations following the argument list:
-//
-// Sprocket* AllocateSprocket() MUST_USE_RESULT;
-//
-#if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \
- && !defined(COMPILER_ICC)
-#define CERES_MUST_USE_RESULT __attribute__ ((warn_unused_result))
-#else
-#define CERES_MUST_USE_RESULT
-#endif
-
-// Platform independent macros to get aligned memory allocations.
-// For example
-//
-// MyFoo my_foo CERES_ALIGN_ATTRIBUTE(16);
-//
-// Gives us an instance of MyFoo which is aligned at a 16 byte
-// boundary.
-#if defined(_MSC_VER)
-#define CERES_ALIGN_ATTRIBUTE(n) __declspec(align(n))
-#define CERES_ALIGN_OF(T) __alignof(T)
-#elif defined(__GNUC__)
-#define CERES_ALIGN_ATTRIBUTE(n) __attribute__((aligned(n)))
-#define CERES_ALIGN_OF(T) __alignof(T)
-#endif
-
-#endif // CERES_PUBLIC_INTERNAL_MACROS_H_
diff --git a/extern/ceres/include/ceres/internal/manual_constructor.h b/extern/ceres/include/ceres/internal/manual_constructor.h
deleted file mode 100644
index 0d7633cef83..00000000000
--- a/extern/ceres/include/ceres/internal/manual_constructor.h
+++ /dev/null
@@ -1,208 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: kenton@google.com (Kenton Varda)
-//
-// ManualConstructor statically-allocates space in which to store some
-// object, but does not initialize it. You can then call the constructor
-// and destructor for the object yourself as you see fit. This is useful
-// for memory management optimizations, where you want to initialize and
-// destroy an object multiple times but only allocate it once.
-//
-// (When I say ManualConstructor statically allocates space, I mean that
-// the ManualConstructor object itself is forced to be the right size.)
-
-#ifndef CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
-#define CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
-
-#include <new>
-
-namespace ceres {
-namespace internal {
-
-// ------- Define CERES_ALIGNED_CHAR_ARRAY --------------------------------
-
-#ifndef CERES_ALIGNED_CHAR_ARRAY
-
-// Because MSVC and older GCCs require that the argument to their alignment
-// construct to be a literal constant integer, we use a template instantiated
-// at all the possible powers of two.
-template<int alignment, int size> struct AlignType { };
-template<int size> struct AlignType<0, size> { typedef char result[size]; };
-
-#if !defined(CERES_ALIGN_ATTRIBUTE)
-#define CERES_ALIGNED_CHAR_ARRAY you_must_define_CERES_ALIGNED_CHAR_ARRAY_for_your_compiler
-#else // !defined(CERES_ALIGN_ATTRIBUTE)
-
-#define CERES_ALIGN_TYPE_TEMPLATE(X) \
- template<int size> struct AlignType<X, size> { \
- typedef CERES_ALIGN_ATTRIBUTE(X) char result[size]; \
- }
-
-CERES_ALIGN_TYPE_TEMPLATE(1);
-CERES_ALIGN_TYPE_TEMPLATE(2);
-CERES_ALIGN_TYPE_TEMPLATE(4);
-CERES_ALIGN_TYPE_TEMPLATE(8);
-CERES_ALIGN_TYPE_TEMPLATE(16);
-CERES_ALIGN_TYPE_TEMPLATE(32);
-CERES_ALIGN_TYPE_TEMPLATE(64);
-CERES_ALIGN_TYPE_TEMPLATE(128);
-CERES_ALIGN_TYPE_TEMPLATE(256);
-CERES_ALIGN_TYPE_TEMPLATE(512);
-CERES_ALIGN_TYPE_TEMPLATE(1024);
-CERES_ALIGN_TYPE_TEMPLATE(2048);
-CERES_ALIGN_TYPE_TEMPLATE(4096);
-CERES_ALIGN_TYPE_TEMPLATE(8192);
-// Any larger and MSVC++ will complain.
-
-#undef CERES_ALIGN_TYPE_TEMPLATE
-
-#define CERES_ALIGNED_CHAR_ARRAY(T, Size) \
- typename AlignType<CERES_ALIGN_OF(T), sizeof(T) * Size>::result
-
-#endif // !defined(CERES_ALIGN_ATTRIBUTE)
-
-#endif // CERES_ALIGNED_CHAR_ARRAY
-
-template <typename Type>
-class ManualConstructor {
- public:
- // No constructor or destructor because one of the most useful uses of
- // this class is as part of a union, and members of a union cannot have
- // constructors or destructors. And, anyway, the whole point of this
- // class is to bypass these.
-
- inline Type* get() {
- return reinterpret_cast<Type*>(space_);
- }
- inline const Type* get() const {
- return reinterpret_cast<const Type*>(space_);
- }
-
- inline Type* operator->() { return get(); }
- inline const Type* operator->() const { return get(); }
-
- inline Type& operator*() { return *get(); }
- inline const Type& operator*() const { return *get(); }
-
- // This is needed to get around the strict aliasing warning GCC generates.
- inline void* space() {
- return reinterpret_cast<void*>(space_);
- }
-
- // You can pass up to four constructor arguments as arguments of Init().
- inline void Init() {
- new(space()) Type;
- }
-
- template <typename T1>
- inline void Init(const T1& p1) {
- new(space()) Type(p1);
- }
-
- template <typename T1, typename T2>
- inline void Init(const T1& p1, const T2& p2) {
- new(space()) Type(p1, p2);
- }
-
- template <typename T1, typename T2, typename T3>
- inline void Init(const T1& p1, const T2& p2, const T3& p3) {
- new(space()) Type(p1, p2, p3);
- }
-
- template <typename T1, typename T2, typename T3, typename T4>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) {
- new(space()) Type(p1, p2, p3, p4);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5) {
- new(space()) Type(p1, p2, p3, p4, p5);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6) {
- new(space()) Type(p1, p2, p3, p4, p5, p6);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6, const T7& p7) {
- new(space()) Type(p1, p2, p3, p4, p5, p6, p7);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6, const T7& p7, const T8& p8) {
- new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6, const T7& p7, const T8& p8,
- const T9& p9) {
- new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6, const T7& p7, const T8& p8,
- const T9& p9, const T10& p10) {
- new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10);
- }
-
- template <typename T1, typename T2, typename T3, typename T4, typename T5,
- typename T6, typename T7, typename T8, typename T9, typename T10,
- typename T11>
- inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4,
- const T5& p5, const T6& p6, const T7& p7, const T8& p8,
- const T9& p9, const T10& p10, const T11& p11) {
- new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11);
- }
-
- inline void Destroy() {
- get()->~Type();
- }
-
- private:
- CERES_ALIGNED_CHAR_ARRAY(Type, 1) space_;
-};
-
-#undef CERES_ALIGNED_CHAR_ARRAY
-
-} // namespace internal
-} // namespace ceres
-
-#endif // CERES_PUBLIC_INTERNAL_MANUAL_CONSTRUCTOR_H_
diff --git a/extern/ceres/include/ceres/internal/memory.h b/extern/ceres/include/ceres/internal/memory.h
new file mode 100644
index 00000000000..45c5b67c353
--- /dev/null
+++ b/extern/ceres/include/ceres/internal/memory.h
@@ -0,0 +1,90 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: memory.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains utility functions for managing the creation and
+// conversion of smart pointers. This file is an extension to the C++
+// standard <memory> library header file.
+
+#ifndef CERES_PUBLIC_INTERNAL_MEMORY_H_
+#define CERES_PUBLIC_INTERNAL_MEMORY_H_
+
+#include <memory>
+
+#ifdef CERES_HAVE_EXCEPTIONS
+#define CERES_INTERNAL_TRY try
+#define CERES_INTERNAL_CATCH_ANY catch (...)
+#define CERES_INTERNAL_RETHROW \
+ do { \
+ throw; \
+ } while (false)
+#else // CERES_HAVE_EXCEPTIONS
+#define CERES_INTERNAL_TRY if (true)
+#define CERES_INTERNAL_CATCH_ANY else if (false)
+#define CERES_INTERNAL_RETHROW \
+ do { \
+ } while (false)
+#endif // CERES_HAVE_EXCEPTIONS
+
+namespace ceres {
+namespace internal {
+
+template <typename Allocator, typename Iterator, typename... Args>
+void ConstructRange(Allocator& alloc,
+ Iterator first,
+ Iterator last,
+ const Args&... args) {
+ for (Iterator cur = first; cur != last; ++cur) {
+ CERES_INTERNAL_TRY {
+ std::allocator_traits<Allocator>::construct(
+ alloc, std::addressof(*cur), args...);
+ }
+ CERES_INTERNAL_CATCH_ANY {
+ while (cur != first) {
+ --cur;
+ std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
+ }
+ CERES_INTERNAL_RETHROW;
+ }
+ }
+}
+
+template <typename Allocator, typename Iterator, typename InputIterator>
+void CopyRange(Allocator& alloc,
+ Iterator destination,
+ InputIterator first,
+ InputIterator last) {
+ for (Iterator cur = destination; first != last;
+ static_cast<void>(++cur), static_cast<void>(++first)) {
+ CERES_INTERNAL_TRY {
+ std::allocator_traits<Allocator>::construct(
+ alloc, std::addressof(*cur), *first);
+ }
+ CERES_INTERNAL_CATCH_ANY {
+ while (cur != destination) {
+ --cur;
+ std::allocator_traits<Allocator>::destroy(alloc, std::addressof(*cur));
+ }
+ CERES_INTERNAL_RETHROW;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_MEMORY_H_
diff --git a/extern/ceres/include/ceres/internal/numeric_diff.h b/extern/ceres/include/ceres/internal/numeric_diff.h
index 11e8275b1d3..fb2e00baca5 100644
--- a/extern/ceres/include/ceres/internal/numeric_diff.h
+++ b/extern/ceres/include/ceres/internal/numeric_diff.h
@@ -36,12 +36,12 @@
#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_
#include <cstring>
+#include <utility>
#include "Eigen/Dense"
#include "Eigen/StdVector"
#include "ceres/cost_function.h"
#include "ceres/internal/fixed_array.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/variadic_evaluate.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/types.h"
@@ -51,42 +51,11 @@
namespace ceres {
namespace internal {
-// Helper templates that allow evaluation of a variadic functor or a
-// CostFunction object.
-template <typename CostFunctor,
- int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8, int N9 >
-bool EvaluateImpl(const CostFunctor* functor,
- double const* const* parameters,
- double* residuals,
- const void* /* NOT USED */) {
- return VariadicEvaluate<CostFunctor,
- double,
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call(
- *functor,
- parameters,
- residuals);
-}
-
-template <typename CostFunctor,
- int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8, int N9 >
-bool EvaluateImpl(const CostFunctor* functor,
- double const* const* parameters,
- double* residuals,
- const CostFunction* /* NOT USED */) {
- return functor->Evaluate(parameters, residuals, NULL);
-}
-
// This is split from the main class because C++ doesn't allow partial template
// specializations for member functions. The alternative is to repeat the main
// class for differing numbers of parameters, which is also unfortunate.
-template <typename CostFunctor,
- NumericDiffMethodType kMethod,
- int kNumResiduals,
- int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8, int N9,
- int kParameterBlock,
+template <typename CostFunctor, NumericDiffMethodType kMethod,
+ int kNumResiduals, typename ParameterDims, int kParameterBlock,
int kParameterBlockSize>
struct NumericDiff {
// Mutates parameters but must restore them before return.
@@ -104,6 +73,8 @@ struct NumericDiff {
using Eigen::RowMajor;
using Eigen::ColMajor;
+ DCHECK(jacobian);
+
const int num_residuals_internal =
(kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals);
const int parameter_block_index_internal =
@@ -155,7 +126,7 @@ struct NumericDiff {
// compute the derivative for that parameter.
FixedArray<double> temp_residual_array(num_residuals_internal);
FixedArray<double> residual_array(num_residuals_internal);
- Map<ResidualVector> residuals(residual_array.get(),
+ Map<ResidualVector> residuals(residual_array.data(),
num_residuals_internal);
for (int j = 0; j < parameter_block_size_internal; ++j) {
@@ -170,8 +141,8 @@ struct NumericDiff {
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
- temp_residual_array.get(),
- residual_array.get())) {
+ temp_residual_array.data(),
+ residual_array.data())) {
return false;
}
} else {
@@ -182,8 +153,8 @@ struct NumericDiff {
residuals_at_eval_point,
parameters,
x_plus_delta.data(),
- temp_residual_array.get(),
- residual_array.get())) {
+ temp_residual_array.data(),
+ residual_array.data())) {
return false;
}
}
@@ -220,8 +191,9 @@ struct NumericDiff {
// Mutate 1 element at a time and then restore.
x_plus_delta(parameter_index) = x(parameter_index) + delta;
- if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
- functor, parameters, residuals.data(), functor)) {
+ if (!VariadicEvaluate<ParameterDims>(*functor,
+ parameters,
+ residuals.data())) {
return false;
}
@@ -234,8 +206,9 @@ struct NumericDiff {
// Compute the function on the other side of x(parameter_index).
x_plus_delta(parameter_index) = x(parameter_index) - delta;
- if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
- functor, parameters, temp_residuals.data(), functor)) {
+ if (!VariadicEvaluate<ParameterDims>(*functor,
+ parameters,
+ temp_residuals.data())) {
return false;
}
@@ -407,35 +380,116 @@ struct NumericDiff {
}
};
-template <typename CostFunctor,
- NumericDiffMethodType kMethod,
- int kNumResiduals,
- int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8, int N9,
- int kParameterBlock>
-struct NumericDiff<CostFunctor, kMethod, kNumResiduals,
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9,
- kParameterBlock, 0> {
- // Mutates parameters but must restore them before return.
- static bool EvaluateJacobianForParameterBlock(
- const CostFunctor* functor,
- const double* residuals_at_eval_point,
- const NumericDiffOptions& options,
- const int num_residuals,
- const int parameter_block_index,
- const int parameter_block_size,
- double **parameters,
- double *jacobian) {
- // Silence unused parameter compiler warnings.
- (void)functor;
- (void)residuals_at_eval_point;
- (void)options;
- (void)num_residuals;
- (void)parameter_block_index;
- (void)parameter_block_size;
- (void)parameters;
- (void)jacobian;
- LOG(FATAL) << "Control should never reach here.";
+// This function calls NumericDiff<...>::EvaluateJacobianForParameterBlock for
+// each parameter block.
+//
+// Example:
+// A call to
+// EvaluateJacobianForParameterBlocks<StaticParameterDims<2, 3>>(
+// functor,
+// residuals_at_eval_point,
+// options,
+// num_residuals,
+// parameters,
+// jacobians);
+// will result in the following calls to
+// NumericDiff<...>::EvaluateJacobianForParameterBlock:
+//
+// if (jacobians[0] != nullptr) {
+// if (!NumericDiff<
+// CostFunctor,
+// method,
+// kNumResiduals,
+// StaticParameterDims<2, 3>,
+// 0,
+// 2>::EvaluateJacobianForParameterBlock(functor,
+// residuals_at_eval_point,
+// options,
+// num_residuals,
+// 0,
+// 2,
+// parameters,
+// jacobians[0])) {
+// return false;
+// }
+// }
+// if (jacobians[1] != nullptr) {
+// if (!NumericDiff<
+// CostFunctor,
+// method,
+// kNumResiduals,
+// StaticParameterDims<2, 3>,
+// 1,
+// 3>::EvaluateJacobianForParameterBlock(functor,
+// residuals_at_eval_point,
+// options,
+// num_residuals,
+// 1,
+// 3,
+// parameters,
+// jacobians[1])) {
+// return false;
+// }
+// }
+template <typename ParameterDims,
+ typename Parameters = typename ParameterDims::Parameters,
+ int ParameterIdx = 0>
+struct EvaluateJacobianForParameterBlocks;
+
+template <typename ParameterDims, int N, int... Ns, int ParameterIdx>
+struct EvaluateJacobianForParameterBlocks<ParameterDims,
+ std::integer_sequence<int, N, Ns...>,
+ ParameterIdx> {
+ template <NumericDiffMethodType method,
+ int kNumResiduals,
+ typename CostFunctor>
+ static bool Apply(const CostFunctor* functor,
+ const double* residuals_at_eval_point,
+ const NumericDiffOptions& options,
+ int num_residuals,
+ double** parameters,
+ double** jacobians) {
+ if (jacobians[ParameterIdx] != nullptr) {
+ if (!NumericDiff<
+ CostFunctor,
+ method,
+ kNumResiduals,
+ ParameterDims,
+ ParameterIdx,
+ N>::EvaluateJacobianForParameterBlock(functor,
+ residuals_at_eval_point,
+ options,
+ num_residuals,
+ ParameterIdx,
+ N,
+ parameters,
+ jacobians[ParameterIdx])) {
+ return false;
+ }
+ }
+
+ return EvaluateJacobianForParameterBlocks<ParameterDims,
+ std::integer_sequence<int, Ns...>,
+ ParameterIdx + 1>::
+ template Apply<method, kNumResiduals>(functor,
+ residuals_at_eval_point,
+ options,
+ num_residuals,
+ parameters,
+ jacobians);
+ }
+};
+
+// End of 'recursion'. Nothing more to do.
+template <typename ParameterDims, int ParameterIdx>
+struct EvaluateJacobianForParameterBlocks<ParameterDims, std::integer_sequence<int>,
+ ParameterIdx> {
+ template <NumericDiffMethodType method, int kNumResiduals,
+ typename CostFunctor>
+ static bool Apply(const CostFunctor* /* NOT USED*/,
+ const double* /* NOT USED*/,
+ const NumericDiffOptions& /* NOT USED*/, int /* NOT USED*/,
+ double** /* NOT USED*/, double** /* NOT USED*/) {
return true;
}
};
diff --git a/extern/ceres/include/ceres/internal/parameter_dims.h b/extern/ceres/include/ceres/internal/parameter_dims.h
new file mode 100644
index 00000000000..24021061416
--- /dev/null
+++ b/extern/ceres/include/ceres/internal/parameter_dims.h
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: jodebo_beck@gmx.de (Johannes Beck)
+
+#ifndef CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_
+#define CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_
+
+#include <array>
+#include <utility>
+
+#include "ceres/internal/integer_sequence_algorithm.h"
+
+namespace ceres {
+namespace internal {
+
+// Checks, whether the given parameter block sizes are valid. Valid means every
+// dimension is bigger than zero.
+constexpr bool IsValidParameterDimensionSequence(std::integer_sequence<int>) {
+ return true;
+}
+
+template <int N, int... Ts>
+constexpr bool IsValidParameterDimensionSequence(
+ std::integer_sequence<int, N, Ts...>) {
+ return (N <= 0) ? false
+ : IsValidParameterDimensionSequence(
+ std::integer_sequence<int, Ts...>());
+}
+
+// Helper class that represents the parameter dimensions. The parameter
+// dimensions are either dynamic or the sizes are known at compile time. It is
+// used to pass parameter block dimensions around (e.g. between functions or
+// classes).
+//
+// As an example if one have three parameter blocks with dimensions (2, 4, 1),
+// one would use 'StaticParameterDims<2, 4, 1>' which is a synonym for
+// 'ParameterDims<false, 2, 4, 1>'.
+// For dynamic parameter dims, one would just use 'DynamicParameterDims', which
+// is a synonym for 'ParameterDims<true>'.
+template <bool IsDynamic, int... Ns>
+class ParameterDims {
+ public:
+ using Parameters = std::integer_sequence<int, Ns...>;
+
+ // The parameter dimensions are only valid if all parameter block dimensions
+ // are greater than zero.
+ static constexpr bool kIsValid =
+ IsValidParameterDimensionSequence(Parameters());
+ static_assert(kIsValid,
+ "Invalid parameter block dimension detected. Each parameter "
+ "block dimension must be bigger than zero.");
+
+ static constexpr bool kIsDynamic = IsDynamic;
+ static constexpr int kNumParameterBlocks = sizeof...(Ns);
+ static_assert(kIsDynamic || kNumParameterBlocks > 0,
+ "At least one parameter block must be specified.");
+
+ static constexpr int kNumParameters =
+ Sum<std::integer_sequence<int, Ns...>>::Value;
+
+ static constexpr int GetDim(int dim) { return params_[dim]; }
+
+ // If one has all parameters packed into a single array this function unpacks
+ // the parameters.
+ template <typename T>
+ static inline std::array<T*, kNumParameterBlocks> GetUnpackedParameters(
+ T* ptr) {
+ using Offsets = ExclusiveScan<Parameters>;
+ return GetUnpackedParameters(ptr, Offsets());
+ }
+
+ private:
+ template <typename T, int... Indices>
+ static inline std::array<T*, kNumParameterBlocks> GetUnpackedParameters(
+ T* ptr, std::integer_sequence<int, Indices...>) {
+ return std::array<T*, kNumParameterBlocks>{{ptr + Indices...}};
+ }
+
+ static constexpr std::array<int, kNumParameterBlocks> params_{Ns...};
+};
+
+// Even static constexpr member variables needs to be defined (not only
+// declared). As the ParameterDims class is tempalted this definition must
+// be in the header file.
+template <bool IsDynamic, int... Ns>
+constexpr std::array<int, ParameterDims<IsDynamic, Ns...>::kNumParameterBlocks>
+ ParameterDims<IsDynamic, Ns...>::params_;
+
+// Using declarations for static and dynamic parameter dims. This makes client
+// code easier to read.
+template <int... Ns>
+using StaticParameterDims = ParameterDims<false, Ns...>;
+using DynamicParameterDims = ParameterDims<true>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_PUBLIC_INTERNAL_PARAMETER_DIMS_H_
diff --git a/extern/ceres/include/ceres/internal/port.h b/extern/ceres/include/ceres/internal/port.h
index f4dcaee7bd8..958b0d15cb7 100644
--- a/extern/ceres/include/ceres/internal/port.h
+++ b/extern/ceres/include/ceres/internal/port.h
@@ -32,45 +32,41 @@
#define CERES_PUBLIC_INTERNAL_PORT_H_
// This file needs to compile as c code.
-#ifdef __cplusplus
-#include <cstddef>
#include "ceres/internal/config.h"
-#if defined(CERES_TR1_MEMORY_HEADER)
-#include <tr1/memory>
-#else
-#include <memory>
-#endif
-namespace ceres {
-
-#if defined(CERES_TR1_SHARED_PTR)
-using std::tr1::shared_ptr;
+#if defined(CERES_USE_OPENMP)
+# if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS)
+# error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS
+# endif
+#elif defined(CERES_USE_CXX_THREADS)
+# if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS)
+# error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS
+# endif
+#elif defined(CERES_NO_THREADS)
+# if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS)
+# error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS
+# endif
#else
-using std::shared_ptr;
+# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined.
#endif
-// We allocate some Eigen objects on the stack and other places they
-// might not be aligned to 16-byte boundaries. If we have C++11, we
-// can specify their alignment anyway, and thus can safely enable
-// vectorization on those matrices; in C++99, we are out of luck. Figure out
-// what case we're in and write macros that do the right thing.
-#ifdef CERES_USE_CXX11
-namespace port_constants {
-static constexpr size_t kMaxAlignBytes =
- // Work around a GCC 4.8 bug
- // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56019) where
- // std::max_align_t is misplaced.
-#if defined (__GNUC__) && __GNUC__ == 4 && __GNUC_MINOR__ == 8
- alignof(::max_align_t);
-#else
- alignof(std::max_align_t);
+// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was
+// compiled without any sparse back-end. Verify that it has not subsequently
+// been inconsistently redefined.
+#if defined(CERES_NO_SPARSE)
+# if !defined(CERES_NO_SUITESPARSE)
+# error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE.
+# endif
+# if !defined(CERES_NO_CXSPARSE)
+# error CERES_NO_SPARSE requires CERES_NO_CXSPARSE
+# endif
+# if !defined(CERES_NO_ACCELERATE_SPARSE)
+# error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE
+# endif
+# if defined(CERES_USE_EIGEN_SPARSE)
+# error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE
+# endif
#endif
-} // namespace port_constants
-#endif
-
-} // namespace ceres
-
-#endif // __cplusplus
// A macro to signal which functions and classes are exported when
// building a DLL with MSVC.
diff --git a/extern/ceres/include/ceres/internal/scoped_ptr.h b/extern/ceres/include/ceres/internal/scoped_ptr.h
deleted file mode 100644
index fa0ac25a031..00000000000
--- a/extern/ceres/include/ceres/internal/scoped_ptr.h
+++ /dev/null
@@ -1,310 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: jorg@google.com (Jorg Brown)
-//
-// This is an implementation designed to match the anticipated future TR2
-// implementation of the scoped_ptr class, and its closely-related brethren,
-// scoped_array, scoped_ptr_malloc, and make_scoped_ptr.
-
-#ifndef CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
-#define CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
-
-#include <assert.h>
-#include <stdlib.h>
-#include <cstddef>
-#include <algorithm>
-
-namespace ceres {
-namespace internal {
-
-template <class C> class scoped_ptr;
-template <class C, class Free> class scoped_ptr_malloc;
-template <class C> class scoped_array;
-
-template <class C>
-scoped_ptr<C> make_scoped_ptr(C *);
-
-// A scoped_ptr<T> is like a T*, except that the destructor of
-// scoped_ptr<T> automatically deletes the pointer it holds (if
-// any). That is, scoped_ptr<T> owns the T object that it points
-// to. Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to
-// a T object. Also like T*, scoped_ptr<T> is thread-compatible, and
-// once you dereference it, you get the threadsafety guarantees of T.
-//
-// The size of a scoped_ptr is small: sizeof(scoped_ptr<C>) == sizeof(C*)
-template <class C>
-class scoped_ptr {
- public:
- // The element type
- typedef C element_type;
-
- // Constructor. Defaults to intializing with NULL.
- // There is no way to create an uninitialized scoped_ptr.
- // The input parameter must be allocated with new.
- explicit scoped_ptr(C* p = NULL) : ptr_(p) { }
-
- // Destructor. If there is a C object, delete it.
- // We don't need to test ptr_ == NULL because C++ does that for us.
- ~scoped_ptr() {
- enum { type_must_be_complete = sizeof(C) };
- delete ptr_;
- }
-
- // Reset. Deletes the current owned object, if any.
- // Then takes ownership of a new object, if given.
- // this->reset(this->get()) works.
- void reset(C* p = NULL) {
- if (p != ptr_) {
- enum { type_must_be_complete = sizeof(C) };
- delete ptr_;
- ptr_ = p;
- }
- }
-
- // Accessors to get the owned object.
- // operator* and operator-> will assert() if there is no current object.
- C& operator*() const {
- assert(ptr_ != NULL);
- return *ptr_;
- }
- C* operator->() const {
- assert(ptr_ != NULL);
- return ptr_;
- }
- C* get() const { return ptr_; }
-
- // Comparison operators.
- // These return whether a scoped_ptr and a raw pointer refer to
- // the same object, not just to two different but equal objects.
- bool operator==(const C* p) const { return ptr_ == p; }
- bool operator!=(const C* p) const { return ptr_ != p; }
-
- // Swap two scoped pointers.
- void swap(scoped_ptr& p2) {
- C* tmp = ptr_;
- ptr_ = p2.ptr_;
- p2.ptr_ = tmp;
- }
-
- // Release a pointer.
- // The return value is the current pointer held by this object.
- // If this object holds a NULL pointer, the return value is NULL.
- // After this operation, this object will hold a NULL pointer,
- // and will not own the object any more.
- C* release() {
- C* retVal = ptr_;
- ptr_ = NULL;
- return retVal;
- }
-
- private:
- C* ptr_;
-
- // google3 friend class that can access copy ctor (although if it actually
- // calls a copy ctor, there will be a problem) see below
- friend scoped_ptr<C> make_scoped_ptr<C>(C *p);
-
- // Forbid comparison of scoped_ptr types. If C2 != C, it totally doesn't
- // make sense, and if C2 == C, it still doesn't make sense because you should
- // never have the same object owned by two different scoped_ptrs.
- template <class C2> bool operator==(scoped_ptr<C2> const& p2) const;
- template <class C2> bool operator!=(scoped_ptr<C2> const& p2) const;
-
- // Disallow evil constructors
- scoped_ptr(const scoped_ptr&);
- void operator=(const scoped_ptr&);
-};
-
-// Free functions
-template <class C>
-inline void swap(scoped_ptr<C>& p1, scoped_ptr<C>& p2) {
- p1.swap(p2);
-}
-
-template <class C>
-inline bool operator==(const C* p1, const scoped_ptr<C>& p2) {
- return p1 == p2.get();
-}
-
-template <class C>
-inline bool operator==(const C* p1, const scoped_ptr<const C>& p2) {
- return p1 == p2.get();
-}
-
-template <class C>
-inline bool operator!=(const C* p1, const scoped_ptr<C>& p2) {
- return p1 != p2.get();
-}
-
-template <class C>
-inline bool operator!=(const C* p1, const scoped_ptr<const C>& p2) {
- return p1 != p2.get();
-}
-
-template <class C>
-scoped_ptr<C> make_scoped_ptr(C *p) {
- // This does nothing but to return a scoped_ptr of the type that the passed
- // pointer is of. (This eliminates the need to specify the name of T when
- // making a scoped_ptr that is used anonymously/temporarily.) From an
- // access control point of view, we construct an unnamed scoped_ptr here
- // which we return and thus copy-construct. Hence, we need to have access
- // to scoped_ptr::scoped_ptr(scoped_ptr const &). However, it is guaranteed
- // that we never actually call the copy constructor, which is a good thing
- // as we would call the temporary's object destructor (and thus delete p)
- // if we actually did copy some object, here.
- return scoped_ptr<C>(p);
-}
-
-// scoped_array<C> is like scoped_ptr<C>, except that the caller must allocate
-// with new [] and the destructor deletes objects with delete [].
-//
-// As with scoped_ptr<C>, a scoped_array<C> either points to an object
-// or is NULL. A scoped_array<C> owns the object that it points to.
-// scoped_array<T> is thread-compatible, and once you index into it,
-// the returned objects have only the threadsafety guarantees of T.
-//
-// Size: sizeof(scoped_array<C>) == sizeof(C*)
-template <class C>
-class scoped_array {
- public:
- // The element type
- typedef C element_type;
-
- // Constructor. Defaults to intializing with NULL.
- // There is no way to create an uninitialized scoped_array.
- // The input parameter must be allocated with new [].
- explicit scoped_array(C* p = NULL) : array_(p) { }
-
- // Destructor. If there is a C object, delete it.
- // We don't need to test ptr_ == NULL because C++ does that for us.
- ~scoped_array() {
- enum { type_must_be_complete = sizeof(C) };
- delete[] array_;
- }
-
- // Reset. Deletes the current owned object, if any.
- // Then takes ownership of a new object, if given.
- // this->reset(this->get()) works.
- void reset(C* p = NULL) {
- if (p != array_) {
- enum { type_must_be_complete = sizeof(C) };
- delete[] array_;
- array_ = p;
- }
- }
-
- // Get one element of the current object.
- // Will assert() if there is no current object, or index i is negative.
- C& operator[](std::ptrdiff_t i) const {
- assert(i >= 0);
- assert(array_ != NULL);
- return array_[i];
- }
-
- // Get a pointer to the zeroth element of the current object.
- // If there is no current object, return NULL.
- C* get() const {
- return array_;
- }
-
- // Comparison operators.
- // These return whether a scoped_array and a raw pointer refer to
- // the same array, not just to two different but equal arrays.
- bool operator==(const C* p) const { return array_ == p; }
- bool operator!=(const C* p) const { return array_ != p; }
-
- // Swap two scoped arrays.
- void swap(scoped_array& p2) {
- C* tmp = array_;
- array_ = p2.array_;
- p2.array_ = tmp;
- }
-
- // Release an array.
- // The return value is the current pointer held by this object.
- // If this object holds a NULL pointer, the return value is NULL.
- // After this operation, this object will hold a NULL pointer,
- // and will not own the object any more.
- C* release() {
- C* retVal = array_;
- array_ = NULL;
- return retVal;
- }
-
- private:
- C* array_;
-
- // Forbid comparison of different scoped_array types.
- template <class C2> bool operator==(scoped_array<C2> const& p2) const;
- template <class C2> bool operator!=(scoped_array<C2> const& p2) const;
-
- // Disallow evil constructors
- scoped_array(const scoped_array&);
- void operator=(const scoped_array&);
-};
-
-// Free functions
-template <class C>
-inline void swap(scoped_array<C>& p1, scoped_array<C>& p2) {
- p1.swap(p2);
-}
-
-template <class C>
-inline bool operator==(const C* p1, const scoped_array<C>& p2) {
- return p1 == p2.get();
-}
-
-template <class C>
-inline bool operator==(const C* p1, const scoped_array<const C>& p2) {
- return p1 == p2.get();
-}
-
-template <class C>
-inline bool operator!=(const C* p1, const scoped_array<C>& p2) {
- return p1 != p2.get();
-}
-
-template <class C>
-inline bool operator!=(const C* p1, const scoped_array<const C>& p2) {
- return p1 != p2.get();
-}
-
-// This class wraps the c library function free() in a class that can be
-// passed as a template argument to scoped_ptr_malloc below.
-class ScopedPtrMallocFree {
- public:
- inline void operator()(void* x) const {
- free(x);
- }
-};
-
-} // namespace internal
-} // namespace ceres
-
-#endif // CERES_PUBLIC_INTERNAL_SCOPED_PTR_H_
diff --git a/extern/ceres/include/ceres/internal/variadic_evaluate.h b/extern/ceres/include/ceres/internal/variadic_evaluate.h
index b3515b96d18..046832c0bb4 100644
--- a/extern/ceres/include/ceres/internal/variadic_evaluate.h
+++ b/extern/ceres/include/ceres/internal/variadic_evaluate.h
@@ -28,165 +28,77 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
// mierle@gmail.com (Keir Mierle)
+// jodebo_beck@gmx.de (Johannes Beck)
#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_
#include <stddef.h>
-#include "ceres/jet.h"
-#include "ceres/types.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/fixed_array.h"
-#include "glog/logging.h"
+#include <type_traits>
+#include <utility>
+
+#include "ceres/cost_function.h"
+#include "ceres/internal/parameter_dims.h"
namespace ceres {
namespace internal {
-// This block of quasi-repeated code calls the user-supplied functor, which may
-// take a variable number of arguments. This is accomplished by specializing the
-// struct based on the size of the trailing parameters; parameters with 0 size
-// are assumed missing.
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8, int N9>
-struct VariadicEvaluate {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- input[5],
- input[6],
- input[7],
- input[8],
- input[9],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7, int N8>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, N8, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- input[5],
- input[6],
- input[7],
- input[8],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
- int N5, int N6, int N7>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- input[5],
- input[6],
- input[7],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
- int N5, int N6>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- input[5],
- input[6],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4,
- int N5>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- input[5],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- input[4],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2, int N3>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- input[3],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1, int N2>
-struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- input[2],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0, int N1>
-struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- input[1],
- output);
- }
-};
-
-template<typename Functor, typename T, int N0>
-struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input[0],
- output);
- }
-};
-
-// Template instantiation for dynamically-sized functors.
-template<typename Functor, typename T>
-struct VariadicEvaluate<Functor, T, ceres::DYNAMIC, ceres::DYNAMIC,
- ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
- ceres::DYNAMIC, ceres::DYNAMIC, ceres::DYNAMIC,
- ceres::DYNAMIC, ceres::DYNAMIC> {
- static bool Call(const Functor& functor, T const *const *input, T* output) {
- return functor(input, output);
- }
-};
+// For fixed size cost functors
+template <typename Functor, typename T, int... Indices>
+inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
+ T* output, std::false_type /*is_dynamic*/,
+ std::integer_sequence<int, Indices...>) {
+ static_assert(sizeof...(Indices),
+ "Invalid number of parameter blocks. At least one parameter "
+ "block must be specified.");
+ return functor(input[Indices]..., output);
+}
+
+// For dynamic sized cost functors
+template <typename Functor, typename T>
+inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
+ T* output, std::true_type /*is_dynamic*/,
+ std::integer_sequence<int>) {
+ return functor(input, output);
+}
+
+// For ceres cost functors (not ceres::CostFunction)
+template <typename ParameterDims, typename Functor, typename T>
+inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
+ T* output, const void* /* NOT USED */) {
+ using ParameterBlockIndices =
+ std::make_integer_sequence<int, ParameterDims::kNumParameterBlocks>;
+ using IsDynamic = std::integral_constant<bool, ParameterDims::kIsDynamic>;
+ return VariadicEvaluateImpl(functor, input, output, IsDynamic(),
+ ParameterBlockIndices());
+}
+
+// For ceres::CostFunction
+template <typename ParameterDims, typename Functor, typename T>
+inline bool VariadicEvaluateImpl(const Functor& functor, T const* const* input,
+ T* output,
+ const CostFunction* /* NOT USED */) {
+ return functor.Evaluate(input, output, nullptr);
+}
+
+// Variadic evaluate is a helper function to evaluate ceres cost function or
+// functors using an input, output and the parameter dimensions. There are
+// several different possibilities:
+// 1) If the passed functor is a 'ceres::CostFunction' its evaluate method is
+// called.
+// 2) If the functor is not a 'ceres::CostFunction' and the specified parameter
+// dims is dynamic, the functor must have the following signature
+// 'bool(T const* const* input, T* output)'.
+// 3) If the functor is not a 'ceres::CostFunction' and the specified parameter
+// dims is not dynamic, the input is expanded by using the number of parameter
+// blocks. The signature of the functor must have the following signature
+// 'bool()(const T* i_1, const T* i_2, ... const T* i_n, T* output)'.
+template <typename ParameterDims, typename Functor, typename T>
+inline bool VariadicEvaluate(const Functor& functor, T const* const* input,
+ T* output) {
+ return VariadicEvaluateImpl<ParameterDims>(functor, input, output, &functor);
+}
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/include/ceres/iteration_callback.h b/extern/ceres/include/ceres/iteration_callback.h
index db5d0efe53a..0a743ecc26f 100644
--- a/extern/ceres/include/ceres/iteration_callback.h
+++ b/extern/ceres/include/ceres/iteration_callback.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,42 +35,22 @@
#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
-#include "ceres/types.h"
#include "ceres/internal/disable_warnings.h"
+#include "ceres/types.h"
namespace ceres {
// This struct describes the state of the optimizer after each
// iteration of the minimization.
struct CERES_EXPORT IterationSummary {
- IterationSummary()
- : iteration(0),
- step_is_valid(false),
- step_is_nonmonotonic(false),
- step_is_successful(false),
- cost(0.0),
- cost_change(0.0),
- gradient_max_norm(0.0),
- gradient_norm(0.0),
- step_norm(0.0),
- eta(0.0),
- step_size(0.0),
- line_search_function_evaluations(0),
- line_search_gradient_evaluations(0),
- line_search_iterations(0),
- linear_solver_iterations(0),
- iteration_time_in_seconds(0.0),
- step_solver_time_in_seconds(0.0),
- cumulative_time_in_seconds(0.0) {}
-
// Current iteration number.
- int32 iteration;
+ int iteration = 0;
// Step was numerically valid, i.e., all values are finite and the
// step reduces the value of the linearized model.
//
// Note: step_is_valid is always true when iteration = 0.
- bool step_is_valid;
+ bool step_is_valid = false;
// Step did not reduce the value of the objective function
// sufficiently, but it was accepted because of the relaxed
@@ -78,7 +58,7 @@ struct CERES_EXPORT IterationSummary {
// algorithm.
//
// Note: step_is_nonmonotonic is always false when iteration = 0;
- bool step_is_nonmonotonic;
+ bool step_is_nonmonotonic = false;
// Whether or not the minimizer accepted this step or not. If the
// ordinary trust region algorithm is used, this means that the
@@ -90,68 +70,68 @@ struct CERES_EXPORT IterationSummary {
// step and the step is declared successful.
//
// Note: step_is_successful is always true when iteration = 0.
- bool step_is_successful;
+ bool step_is_successful = false;
// Value of the objective function.
- double cost;
+ double cost = 0.0;
// Change in the value of the objective function in this
// iteration. This can be positive or negative.
- double cost_change;
+ double cost_change = 0.0;
// Infinity norm of the gradient vector.
- double gradient_max_norm;
+ double gradient_max_norm = 0.0;
// 2-norm of the gradient vector.
- double gradient_norm;
+ double gradient_norm = 0.0;
// 2-norm of the size of the step computed by the optimization
// algorithm.
- double step_norm;
+ double step_norm = 0.0;
// For trust region algorithms, the ratio of the actual change in
// cost and the change in the cost of the linearized approximation.
- double relative_decrease;
+ double relative_decrease = 0.0;
// Size of the trust region at the end of the current iteration. For
// the Levenberg-Marquardt algorithm, the regularization parameter
// mu = 1.0 / trust_region_radius.
- double trust_region_radius;
+ double trust_region_radius = 0.0;
// For the inexact step Levenberg-Marquardt algorithm, this is the
// relative accuracy with which the Newton(LM) step is solved. This
// number affects only the iterative solvers capable of solving
// linear systems inexactly. Factorization-based exact solvers
// ignore it.
- double eta;
+ double eta = 0.0;
// Step sized computed by the line search algorithm.
- double step_size;
+ double step_size = 0.0;
// Number of function value evaluations used by the line search algorithm.
- int line_search_function_evaluations;
+ int line_search_function_evaluations = 0;
// Number of function gradient evaluations used by the line search algorithm.
- int line_search_gradient_evaluations;
+ int line_search_gradient_evaluations = 0;
// Number of iterations taken by the line search algorithm.
- int line_search_iterations;
+ int line_search_iterations = 0;
// Number of iterations taken by the linear solver to solve for the
// Newton step.
- int linear_solver_iterations;
+ int linear_solver_iterations = 0;
// All times reported below are wall times.
// Time (in seconds) spent inside the minimizer loop in the current
// iteration.
- double iteration_time_in_seconds;
+ double iteration_time_in_seconds = 0.0;
// Time (in seconds) spent inside the trust region step solver.
- double step_solver_time_in_seconds;
+ double step_solver_time_in_seconds = 0.0;
// Time (in seconds) since the user called Solve().
- double cumulative_time_in_seconds;
+ double cumulative_time_in_seconds = 0.0;
};
// Interface for specifying callbacks that are executed at the end of
diff --git a/extern/ceres/include/ceres/jet.h b/extern/ceres/include/ceres/jet.h
index a104707298c..7aafaa01d30 100644
--- a/extern/ceres/include/ceres/jet.h
+++ b/extern/ceres/include/ceres/jet.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,7 +31,7 @@
// A simple implementation of N-dimensional dual numbers, for automatically
// computing exact derivatives of functions.
//
-// While a complete treatment of the mechanics of automatic differentation is
+// While a complete treatment of the mechanics of automatic differentiation is
// beyond the scope of this header (see
// http://en.wikipedia.org/wiki/Automatic_differentiation for details), the
// basic idea is to extend normal arithmetic with an extra element, "e," often
@@ -49,7 +49,7 @@
// f(x) = x^2 ,
//
// evaluated at 10. Using normal arithmetic, f(10) = 100, and df/dx(10) = 20.
-// Next, augument 10 with an infinitesimal to get:
+// Next, augment 10 with an infinitesimal to get:
//
// f(10 + e) = (10 + e)^2
// = 100 + 2 * 10 * e + e^2
@@ -102,8 +102,9 @@
// }
//
// // The "2" means there should be 2 dual number components.
-// Jet<double, 2> x(0); // Pick the 0th dual number for x.
-// Jet<double, 2> y(1); // Pick the 1st dual number for y.
+// // It computes the partial derivative at x=10, y=20.
+// Jet<double, 2> x(10, 0); // Pick the 0th dual number for x.
+// Jet<double, 2> y(20, 1); // Pick the 1st dual number for y.
// Jet<double, 2> z = f(x, y);
//
// LOG(INFO) << "df/dx = " << z.v[0]
@@ -124,7 +125,7 @@
//
// x = a + \sum_i v[i] t_i
//
-// A shorthand is to write an element as x = a + u, where u is the pertubation.
+// A shorthand is to write an element as x = a + u, where u is the perturbation.
// Then, the main point about the arithmetic of jets is that the product of
// perturbations is zero:
//
@@ -163,7 +164,6 @@
#include <string>
#include "Eigen/Core"
-#include "ceres/fpclassify.h"
#include "ceres/internal/port.h"
namespace ceres {
@@ -171,26 +171,25 @@ namespace ceres {
template <typename T, int N>
struct Jet {
enum { DIMENSION = N };
+ typedef T Scalar;
// Default-construct "a" because otherwise this can lead to false errors about
// uninitialized uses when other classes relying on default constructed T
// (where T is a Jet<T, N>). This usually only happens in opt mode. Note that
// the C++ standard mandates that e.g. default constructed doubles are
// initialized to 0.0; see sections 8.5 of the C++03 standard.
- Jet() : a() {
- v.setZero();
- }
+ Jet() : a() { v.setConstant(Scalar()); }
// Constructor from scalar: a + 0.
explicit Jet(const T& value) {
a = value;
- v.setZero();
+ v.setConstant(Scalar());
}
// Constructor from scalar plus variable: a + t_i.
Jet(const T& value, int k) {
a = value;
- v.setZero();
+ v.setConstant(Scalar());
v[k] = T(1.0);
}
@@ -198,58 +197,66 @@ struct Jet {
// The use of Eigen::DenseBase allows Eigen expressions
// to be passed in without being fully evaluated until
// they are assigned to v
- template<typename Derived>
- EIGEN_STRONG_INLINE Jet(const T& a, const Eigen::DenseBase<Derived> &v)
- : a(a), v(v) {
- }
+ template <typename Derived>
+ EIGEN_STRONG_INLINE Jet(const T& a, const Eigen::DenseBase<Derived>& v)
+ : a(a), v(v) {}
// Compound operators
- Jet<T, N>& operator+=(const Jet<T, N> &y) {
+ Jet<T, N>& operator+=(const Jet<T, N>& y) {
*this = *this + y;
return *this;
}
- Jet<T, N>& operator-=(const Jet<T, N> &y) {
+ Jet<T, N>& operator-=(const Jet<T, N>& y) {
*this = *this - y;
return *this;
}
- Jet<T, N>& operator*=(const Jet<T, N> &y) {
+ Jet<T, N>& operator*=(const Jet<T, N>& y) {
*this = *this * y;
return *this;
}
- Jet<T, N>& operator/=(const Jet<T, N> &y) {
+ Jet<T, N>& operator/=(const Jet<T, N>& y) {
*this = *this / y;
return *this;
}
+ // Compound with scalar operators.
+ Jet<T, N>& operator+=(const T& s) {
+ *this = *this + s;
+ return *this;
+ }
+
+ Jet<T, N>& operator-=(const T& s) {
+ *this = *this - s;
+ return *this;
+ }
+
+ Jet<T, N>& operator*=(const T& s) {
+ *this = *this * s;
+ return *this;
+ }
+
+ Jet<T, N>& operator/=(const T& s) {
+ *this = *this / s;
+ return *this;
+ }
+
// The scalar part.
T a;
// The infinitesimal part.
+ Eigen::Matrix<T, N, 1> v;
- // We allocate Jets on the stack and other places they
- // might not be aligned to 16-byte boundaries. If we have C++11, we
- // can specify their alignment anyway, and thus can safely enable
- // vectorization on those matrices; in C++99, we are out of luck. Figure out
- // what case we're in and do the right thing.
-#ifndef CERES_USE_CXX11
- // fall back to safe version:
- Eigen::Matrix<T, N, 1, Eigen::DontAlign> v;
-#else
- static constexpr bool kShouldAlignMatrix =
- 16 <= ::ceres::port_constants::kMaxAlignBytes;
- static constexpr int kAlignHint = kShouldAlignMatrix ?
- Eigen::AutoAlign : Eigen::DontAlign;
- static constexpr size_t kAlignment = kShouldAlignMatrix ? 16 : 1;
- alignas(kAlignment) Eigen::Matrix<T, N, 1, kAlignHint> v;
-#endif
+ // This struct needs to have an Eigen aligned operator new as it contains
+ // fixed-size Eigen types.
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
};
// Unary +
-template<typename T, int N> inline
-Jet<T, N> const& operator+(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> const& operator+(const Jet<T, N>& f) {
return f;
}
@@ -257,72 +264,68 @@ Jet<T, N> const& operator+(const Jet<T, N>& f) {
// see if it causes a performance increase.
// Unary -
-template<typename T, int N> inline
-Jet<T, N> operator-(const Jet<T, N>&f) {
+template <typename T, int N>
+inline Jet<T, N> operator-(const Jet<T, N>& f) {
return Jet<T, N>(-f.a, -f.v);
}
// Binary +
-template<typename T, int N> inline
-Jet<T, N> operator+(const Jet<T, N>& f,
- const Jet<T, N>& g) {
+template <typename T, int N>
+inline Jet<T, N> operator+(const Jet<T, N>& f, const Jet<T, N>& g) {
return Jet<T, N>(f.a + g.a, f.v + g.v);
}
// Binary + with a scalar: x + s
-template<typename T, int N> inline
-Jet<T, N> operator+(const Jet<T, N>& f, T s) {
+template <typename T, int N>
+inline Jet<T, N> operator+(const Jet<T, N>& f, T s) {
return Jet<T, N>(f.a + s, f.v);
}
// Binary + with a scalar: s + x
-template<typename T, int N> inline
-Jet<T, N> operator+(T s, const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> operator+(T s, const Jet<T, N>& f) {
return Jet<T, N>(f.a + s, f.v);
}
// Binary -
-template<typename T, int N> inline
-Jet<T, N> operator-(const Jet<T, N>& f,
- const Jet<T, N>& g) {
+template <typename T, int N>
+inline Jet<T, N> operator-(const Jet<T, N>& f, const Jet<T, N>& g) {
return Jet<T, N>(f.a - g.a, f.v - g.v);
}
// Binary - with a scalar: x - s
-template<typename T, int N> inline
-Jet<T, N> operator-(const Jet<T, N>& f, T s) {
+template <typename T, int N>
+inline Jet<T, N> operator-(const Jet<T, N>& f, T s) {
return Jet<T, N>(f.a - s, f.v);
}
// Binary - with a scalar: s - x
-template<typename T, int N> inline
-Jet<T, N> operator-(T s, const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> operator-(T s, const Jet<T, N>& f) {
return Jet<T, N>(s - f.a, -f.v);
}
// Binary *
-template<typename T, int N> inline
-Jet<T, N> operator*(const Jet<T, N>& f,
- const Jet<T, N>& g) {
+template <typename T, int N>
+inline Jet<T, N> operator*(const Jet<T, N>& f, const Jet<T, N>& g) {
return Jet<T, N>(f.a * g.a, f.a * g.v + f.v * g.a);
}
// Binary * with a scalar: x * s
-template<typename T, int N> inline
-Jet<T, N> operator*(const Jet<T, N>& f, T s) {
+template <typename T, int N>
+inline Jet<T, N> operator*(const Jet<T, N>& f, T s) {
return Jet<T, N>(f.a * s, f.v * s);
}
// Binary * with a scalar: s * x
-template<typename T, int N> inline
-Jet<T, N> operator*(T s, const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> operator*(T s, const Jet<T, N>& f) {
return Jet<T, N>(f.a * s, f.v * s);
}
// Binary /
-template<typename T, int N> inline
-Jet<T, N> operator/(const Jet<T, N>& f,
- const Jet<T, N>& g) {
+template <typename T, int N>
+inline Jet<T, N> operator/(const Jet<T, N>& f, const Jet<T, N>& g) {
// This uses:
//
// a + u (a + u)(b - v) (a + u)(b - v)
@@ -332,43 +335,43 @@ Jet<T, N> operator/(const Jet<T, N>& f,
// which holds because v*v = 0.
const T g_a_inverse = T(1.0) / g.a;
const T f_a_by_g_a = f.a * g_a_inverse;
- return Jet<T, N>(f.a * g_a_inverse, (f.v - f_a_by_g_a * g.v) * g_a_inverse);
+ return Jet<T, N>(f_a_by_g_a, (f.v - f_a_by_g_a * g.v) * g_a_inverse);
}
// Binary / with a scalar: s / x
-template<typename T, int N> inline
-Jet<T, N> operator/(T s, const Jet<T, N>& g) {
+template <typename T, int N>
+inline Jet<T, N> operator/(T s, const Jet<T, N>& g) {
const T minus_s_g_a_inverse2 = -s / (g.a * g.a);
return Jet<T, N>(s / g.a, g.v * minus_s_g_a_inverse2);
}
// Binary / with a scalar: x / s
-template<typename T, int N> inline
-Jet<T, N> operator/(const Jet<T, N>& f, T s) {
- const T s_inverse = 1.0 / s;
+template <typename T, int N>
+inline Jet<T, N> operator/(const Jet<T, N>& f, T s) {
+ const T s_inverse = T(1.0) / s;
return Jet<T, N>(f.a * s_inverse, f.v * s_inverse);
}
// Binary comparison operators for both scalars and jets.
-#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \
-template<typename T, int N> inline \
-bool operator op(const Jet<T, N>& f, const Jet<T, N>& g) { \
- return f.a op g.a; \
-} \
-template<typename T, int N> inline \
-bool operator op(const T& s, const Jet<T, N>& g) { \
- return s op g.a; \
-} \
-template<typename T, int N> inline \
-bool operator op(const Jet<T, N>& f, const T& s) { \
- return f.a op s; \
-}
-CERES_DEFINE_JET_COMPARISON_OPERATOR( < ) // NOLINT
-CERES_DEFINE_JET_COMPARISON_OPERATOR( <= ) // NOLINT
-CERES_DEFINE_JET_COMPARISON_OPERATOR( > ) // NOLINT
-CERES_DEFINE_JET_COMPARISON_OPERATOR( >= ) // NOLINT
-CERES_DEFINE_JET_COMPARISON_OPERATOR( == ) // NOLINT
-CERES_DEFINE_JET_COMPARISON_OPERATOR( != ) // NOLINT
+#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \
+ template <typename T, int N> \
+ inline bool operator op(const Jet<T, N>& f, const Jet<T, N>& g) { \
+ return f.a op g.a; \
+ } \
+ template <typename T, int N> \
+ inline bool operator op(const T& s, const Jet<T, N>& g) { \
+ return s op g.a; \
+ } \
+ template <typename T, int N> \
+ inline bool operator op(const Jet<T, N>& f, const T& s) { \
+ return f.a op s; \
+ }
+CERES_DEFINE_JET_COMPARISON_OPERATOR(<) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR(<=) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR(>) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR(>=) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR(==) // NOLINT
+CERES_DEFINE_JET_COMPARISON_OPERATOR(!=) // NOLINT
#undef CERES_DEFINE_JET_COMPARISON_OPERATOR
// Pull some functions from namespace std.
@@ -376,112 +379,128 @@ CERES_DEFINE_JET_COMPARISON_OPERATOR( != ) // NOLINT
// This is necessary because we want to use the same name (e.g. 'sqrt') for
// double-valued and Jet-valued functions, but we are not allowed to put
// Jet-valued functions inside namespace std.
-//
-// TODO(keir): Switch to "using".
-inline double abs (double x) { return std::abs(x); }
-inline double log (double x) { return std::log(x); }
-inline double exp (double x) { return std::exp(x); }
-inline double sqrt (double x) { return std::sqrt(x); }
-inline double cos (double x) { return std::cos(x); }
-inline double acos (double x) { return std::acos(x); }
-inline double sin (double x) { return std::sin(x); }
-inline double asin (double x) { return std::asin(x); }
-inline double tan (double x) { return std::tan(x); }
-inline double atan (double x) { return std::atan(x); }
-inline double sinh (double x) { return std::sinh(x); }
-inline double cosh (double x) { return std::cosh(x); }
-inline double tanh (double x) { return std::tanh(x); }
-inline double floor (double x) { return std::floor(x); }
-inline double ceil (double x) { return std::ceil(x); }
-inline double pow (double x, double y) { return std::pow(x, y); }
-inline double atan2(double y, double x) { return std::atan2(y, x); }
+using std::abs;
+using std::acos;
+using std::asin;
+using std::atan;
+using std::atan2;
+using std::cbrt;
+using std::ceil;
+using std::cos;
+using std::cosh;
+using std::exp;
+using std::exp2;
+using std::floor;
+using std::fmax;
+using std::fmin;
+using std::hypot;
+using std::isfinite;
+using std::isinf;
+using std::isnan;
+using std::isnormal;
+using std::log;
+using std::log2;
+using std::pow;
+using std::sin;
+using std::sinh;
+using std::sqrt;
+using std::tan;
+using std::tanh;
+
+// Legacy names from pre-C++11 days.
+// clang-format off
+inline bool IsFinite(double x) { return std::isfinite(x); }
+inline bool IsInfinite(double x) { return std::isinf(x); }
+inline bool IsNaN(double x) { return std::isnan(x); }
+inline bool IsNormal(double x) { return std::isnormal(x); }
+// clang-format on
// In general, f(a + h) ~= f(a) + f'(a) h, via the chain rule.
// abs(x + h) ~= x + h or -(x + h)
-template <typename T, int N> inline
-Jet<T, N> abs(const Jet<T, N>& f) {
- return f.a < T(0.0) ? -f : f;
+template <typename T, int N>
+inline Jet<T, N> abs(const Jet<T, N>& f) {
+ return (f.a < T(0.0) ? -f : f);
}
// log(a + h) ~= log(a) + h / a
-template <typename T, int N> inline
-Jet<T, N> log(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> log(const Jet<T, N>& f) {
const T a_inverse = T(1.0) / f.a;
return Jet<T, N>(log(f.a), f.v * a_inverse);
}
// exp(a + h) ~= exp(a) + exp(a) h
-template <typename T, int N> inline
-Jet<T, N> exp(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> exp(const Jet<T, N>& f) {
const T tmp = exp(f.a);
return Jet<T, N>(tmp, tmp * f.v);
}
// sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a))
-template <typename T, int N> inline
-Jet<T, N> sqrt(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> sqrt(const Jet<T, N>& f) {
const T tmp = sqrt(f.a);
const T two_a_inverse = T(1.0) / (T(2.0) * tmp);
return Jet<T, N>(tmp, f.v * two_a_inverse);
}
// cos(a + h) ~= cos(a) - sin(a) h
-template <typename T, int N> inline
-Jet<T, N> cos(const Jet<T, N>& f) {
- return Jet<T, N>(cos(f.a), - sin(f.a) * f.v);
+template <typename T, int N>
+inline Jet<T, N> cos(const Jet<T, N>& f) {
+ return Jet<T, N>(cos(f.a), -sin(f.a) * f.v);
}
// acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h
-template <typename T, int N> inline
-Jet<T, N> acos(const Jet<T, N>& f) {
- const T tmp = - T(1.0) / sqrt(T(1.0) - f.a * f.a);
+template <typename T, int N>
+inline Jet<T, N> acos(const Jet<T, N>& f) {
+ const T tmp = -T(1.0) / sqrt(T(1.0) - f.a * f.a);
return Jet<T, N>(acos(f.a), tmp * f.v);
}
// sin(a + h) ~= sin(a) + cos(a) h
-template <typename T, int N> inline
-Jet<T, N> sin(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> sin(const Jet<T, N>& f) {
return Jet<T, N>(sin(f.a), cos(f.a) * f.v);
}
// asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h
-template <typename T, int N> inline
-Jet<T, N> asin(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> asin(const Jet<T, N>& f) {
const T tmp = T(1.0) / sqrt(T(1.0) - f.a * f.a);
return Jet<T, N>(asin(f.a), tmp * f.v);
}
// tan(a + h) ~= tan(a) + (1 + tan(a)^2) h
-template <typename T, int N> inline
-Jet<T, N> tan(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> tan(const Jet<T, N>& f) {
const T tan_a = tan(f.a);
const T tmp = T(1.0) + tan_a * tan_a;
return Jet<T, N>(tan_a, tmp * f.v);
}
// atan(a + h) ~= atan(a) + 1 / (1 + a^2) h
-template <typename T, int N> inline
-Jet<T, N> atan(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> atan(const Jet<T, N>& f) {
const T tmp = T(1.0) / (T(1.0) + f.a * f.a);
return Jet<T, N>(atan(f.a), tmp * f.v);
}
// sinh(a + h) ~= sinh(a) + cosh(a) h
-template <typename T, int N> inline
-Jet<T, N> sinh(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> sinh(const Jet<T, N>& f) {
return Jet<T, N>(sinh(f.a), cosh(f.a) * f.v);
}
// cosh(a + h) ~= cosh(a) + sinh(a) h
-template <typename T, int N> inline
-Jet<T, N> cosh(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> cosh(const Jet<T, N>& f) {
return Jet<T, N>(cosh(f.a), sinh(f.a) * f.v);
}
// tanh(a + h) ~= tanh(a) + (1 - tanh(a)^2) h
-template <typename T, int N> inline
-Jet<T, N> tanh(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> tanh(const Jet<T, N>& f) {
const T tanh_a = tanh(f.a);
const T tmp = T(1.0) - tanh_a * tanh_a;
return Jet<T, N>(tanh_a, tmp * f.v);
@@ -491,8 +510,8 @@ Jet<T, N> tanh(const Jet<T, N>& f) {
// result in a zero derivative which provides no information to the solver.
//
// floor(a + h) ~= floor(a) + 0
-template <typename T, int N> inline
-Jet<T, N> floor(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> floor(const Jet<T, N>& f) {
return Jet<T, N>(floor(f.a));
}
@@ -500,11 +519,60 @@ Jet<T, N> floor(const Jet<T, N>& f) {
// result in a zero derivative which provides no information to the solver.
//
// ceil(a + h) ~= ceil(a) + 0
-template <typename T, int N> inline
-Jet<T, N> ceil(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> ceil(const Jet<T, N>& f) {
return Jet<T, N>(ceil(f.a));
}
+// Some new additions to C++11:
+
+// cbrt(a + h) ~= cbrt(a) + h / (3 a ^ (2/3))
+template <typename T, int N>
+inline Jet<T, N> cbrt(const Jet<T, N>& f) {
+ const T derivative = T(1.0) / (T(3.0) * cbrt(f.a * f.a));
+ return Jet<T, N>(cbrt(f.a), f.v * derivative);
+}
+
+// exp2(x + h) = 2^(x+h) ~= 2^x + h*2^x*log(2)
+template <typename T, int N>
+inline Jet<T, N> exp2(const Jet<T, N>& f) {
+ const T tmp = exp2(f.a);
+ const T derivative = tmp * log(T(2));
+ return Jet<T, N>(tmp, f.v * derivative);
+}
+
+// log2(x + h) ~= log2(x) + h / (x * log(2))
+template <typename T, int N>
+inline Jet<T, N> log2(const Jet<T, N>& f) {
+ const T derivative = T(1.0) / (f.a * log(T(2)));
+ return Jet<T, N>(log2(f.a), f.v * derivative);
+}
+
+// Like sqrt(x^2 + y^2),
+// but acts to prevent underflow/overflow for small/large x/y.
+// Note that the function is non-smooth at x=y=0,
+// so the derivative is undefined there.
+template <typename T, int N>
+inline Jet<T, N> hypot(const Jet<T, N>& x, const Jet<T, N>& y) {
+ // d/da sqrt(a) = 0.5 / sqrt(a)
+ // d/dx x^2 + y^2 = 2x
+ // So by the chain rule:
+ // d/dx sqrt(x^2 + y^2) = 0.5 / sqrt(x^2 + y^2) * 2x = x / sqrt(x^2 + y^2)
+ // d/dy sqrt(x^2 + y^2) = y / sqrt(x^2 + y^2)
+ const T tmp = hypot(x.a, y.a);
+ return Jet<T, N>(tmp, x.a / tmp * x.v + y.a / tmp * y.v);
+}
+
+template <typename T, int N>
+inline Jet<T, N> fmax(const Jet<T, N>& x, const Jet<T, N>& y) {
+ return x < y ? y : x;
+}
+
+template <typename T, int N>
+inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) {
+ return y < x ? y : x;
+}
+
// Bessel functions of the first kind with integer order equal to 0, 1, n.
//
// Microsoft has deprecated the j[0,1,n]() POSIX Bessel functions in favour of
@@ -512,21 +580,21 @@ Jet<T, N> ceil(const Jet<T, N>& f) {
// function errors in client code (the specific warning is suppressed when
// Ceres itself is built).
inline double BesselJ0(double x) {
-#if defined(_MSC_VER) && defined(_j0)
+#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS)
return _j0(x);
#else
return j0(x);
#endif
}
inline double BesselJ1(double x) {
-#if defined(_MSC_VER) && defined(_j1)
+#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS)
return _j1(x);
#else
return j1(x);
#endif
}
inline double BesselJn(int n, double x) {
-#if defined(_MSC_VER) && defined(_jn)
+#if defined(CERES_MSVC_USE_UNDERSCORE_PREFIXED_BESSEL_FUNCTIONS)
return _jn(n, x);
#else
return jn(n, x);
@@ -541,32 +609,32 @@ inline double BesselJn(int n, double x) {
// See formula http://dlmf.nist.gov/10.6#E3
// j0(a + h) ~= j0(a) - j1(a) h
-template <typename T, int N> inline
-Jet<T, N> BesselJ0(const Jet<T, N>& f) {
- return Jet<T, N>(BesselJ0(f.a),
- -BesselJ1(f.a) * f.v);
+template <typename T, int N>
+inline Jet<T, N> BesselJ0(const Jet<T, N>& f) {
+ return Jet<T, N>(BesselJ0(f.a), -BesselJ1(f.a) * f.v);
}
// See formula http://dlmf.nist.gov/10.6#E1
// j1(a + h) ~= j1(a) + 0.5 ( j0(a) - j2(a) ) h
-template <typename T, int N> inline
-Jet<T, N> BesselJ1(const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> BesselJ1(const Jet<T, N>& f) {
return Jet<T, N>(BesselJ1(f.a),
T(0.5) * (BesselJ0(f.a) - BesselJn(2, f.a)) * f.v);
}
// See formula http://dlmf.nist.gov/10.6#E1
// j_n(a + h) ~= j_n(a) + 0.5 ( j_{n-1}(a) - j_{n+1}(a) ) h
-template <typename T, int N> inline
-Jet<T, N> BesselJn(int n, const Jet<T, N>& f) {
- return Jet<T, N>(BesselJn(n, f.a),
- T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v);
+template <typename T, int N>
+inline Jet<T, N> BesselJn(int n, const Jet<T, N>& f) {
+ return Jet<T, N>(
+ BesselJn(n, f.a),
+ T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v);
}
// Jet Classification. It is not clear what the appropriate semantics are for
-// these classifications. This picks that IsFinite and isnormal are "all"
-// operations, i.e. all elements of the jet must be finite for the jet itself
-// to be finite (or normal). For IsNaN and IsInfinite, the answer is less
+// these classifications. This picks that std::isfinite and std::isnormal are
+// "all" operations, i.e. all elements of the jet must be finite for the jet
+// itself to be finite (or normal). For IsNaN and IsInfinite, the answer is less
// clear. This takes a "any" approach for IsNaN and IsInfinite such that if any
// part of a jet is nan or inf, then the entire jet is nan or inf. This leads
// to strange situations like a jet can be both IsInfinite and IsNaN, but in
@@ -574,81 +642,88 @@ Jet<T, N> BesselJn(int n, const Jet<T, N>& f) {
// derivatives are sane.
// The jet is finite if all parts of the jet are finite.
-template <typename T, int N> inline
-bool IsFinite(const Jet<T, N>& f) {
- if (!IsFinite(f.a)) {
- return false;
- }
+template <typename T, int N>
+inline bool isfinite(const Jet<T, N>& f) {
+ // Branchless implementation. This is more efficient for the false-case and
+ // works with the codegen system.
+ auto result = isfinite(f.a);
for (int i = 0; i < N; ++i) {
- if (!IsFinite(f.v[i])) {
- return false;
- }
+ result = result & isfinite(f.v[i]);
}
- return true;
+ return result;
}
-// The jet is infinite if any part of the jet is infinite.
-template <typename T, int N> inline
-bool IsInfinite(const Jet<T, N>& f) {
- if (IsInfinite(f.a)) {
- return true;
- }
- for (int i = 0; i < N; i++) {
- if (IsInfinite(f.v[i])) {
- return true;
- }
+// The jet is infinite if any part of the Jet is infinite.
+template <typename T, int N>
+inline bool isinf(const Jet<T, N>& f) {
+ auto result = isinf(f.a);
+ for (int i = 0; i < N; ++i) {
+ result = result | isinf(f.v[i]);
}
- return false;
+ return result;
}
// The jet is NaN if any part of the jet is NaN.
-template <typename T, int N> inline
-bool IsNaN(const Jet<T, N>& f) {
- if (IsNaN(f.a)) {
- return true;
- }
+template <typename T, int N>
+inline bool isnan(const Jet<T, N>& f) {
+ auto result = isnan(f.a);
for (int i = 0; i < N; ++i) {
- if (IsNaN(f.v[i])) {
- return true;
- }
+ result = result | isnan(f.v[i]);
}
- return false;
+ return result;
}
// The jet is normal if all parts of the jet are normal.
-template <typename T, int N> inline
-bool IsNormal(const Jet<T, N>& f) {
- if (!IsNormal(f.a)) {
- return false;
- }
+template <typename T, int N>
+inline bool isnormal(const Jet<T, N>& f) {
+ auto result = isnormal(f.a);
for (int i = 0; i < N; ++i) {
- if (!IsNormal(f.v[i])) {
- return false;
- }
+ result = result & isnormal(f.v[i]);
}
- return true;
+ return result;
+}
+
+// Legacy functions from the pre-C++11 days.
+template <typename T, int N>
+inline bool IsFinite(const Jet<T, N>& f) {
+ return isfinite(f);
+}
+
+template <typename T, int N>
+inline bool IsNaN(const Jet<T, N>& f) {
+ return isnan(f);
+}
+
+template <typename T, int N>
+inline bool IsNormal(const Jet<T, N>& f) {
+ return isnormal(f);
+}
+
+// The jet is infinite if any part of the jet is infinite.
+template <typename T, int N>
+inline bool IsInfinite(const Jet<T, N>& f) {
+ return isinf(f);
}
// atan2(b + db, a + da) ~= atan2(b, a) + (- b da + a db) / (a^2 + b^2)
//
// In words: the rate of change of theta is 1/r times the rate of
// change of (x, y) in the positive angular direction.
-template <typename T, int N> inline
-Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) {
+template <typename T, int N>
+inline Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) {
// Note order of arguments:
//
// f = a + da
// g = b + db
T const tmp = T(1.0) / (f.a * f.a + g.a * g.a);
- return Jet<T, N>(atan2(g.a, f.a), tmp * (- g.a * f.v + f.a * g.v));
+ return Jet<T, N>(atan2(g.a, f.a), tmp * (-g.a * f.v + f.a * g.v));
}
-
// pow -- base is a differentiable function, exponent is a constant.
// (a+da)^p ~= a^p + p*a^(p-1) da
-template <typename T, int N> inline
-Jet<T, N> pow(const Jet<T, N>& f, double g) {
+template <typename T, int N>
+inline Jet<T, N> pow(const Jet<T, N>& f, double g) {
T const tmp = g * pow(f.a, g - T(1.0));
return Jet<T, N>(pow(f.a, g), tmp * f.v);
}
@@ -664,26 +739,30 @@ Jet<T, N> pow(const Jet<T, N>& f, double g) {
// 3. For f < 0 and integer g we have: (f)^(g + dg) ~= f^g but if dg
// != 0, the derivatives are not defined and we return NaN.
-template <typename T, int N> inline
-Jet<T, N> pow(double f, const Jet<T, N>& g) {
- if (f == 0 && g.a > 0) {
+template <typename T, int N>
+inline Jet<T, N> pow(T f, const Jet<T, N>& g) {
+ Jet<T, N> result;
+
+ if (f == T(0) && g.a > T(0)) {
// Handle case 2.
- return Jet<T, N>(T(0.0));
- }
- if (f < 0 && g.a == floor(g.a)) {
- // Handle case 3.
- Jet<T, N> ret(pow(f, g.a));
- for (int i = 0; i < N; i++) {
- if (g.v[i] != T(0.0)) {
- // Return a NaN when g.v != 0.
- ret.v[i] = std::numeric_limits<T>::quiet_NaN();
+ result = Jet<T, N>(T(0.0));
+ } else {
+ if (f < 0 && g.a == floor(g.a)) { // Handle case 3.
+ result = Jet<T, N>(pow(f, g.a));
+ for (int i = 0; i < N; i++) {
+ if (g.v[i] != T(0.0)) {
+ // Return a NaN when g.v != 0.
+ result.v[i] = std::numeric_limits<T>::quiet_NaN();
+ }
}
+ } else {
+ // Handle case 1.
+ T const tmp = pow(f, g.a);
+ result = Jet<T, N>(tmp, log(f) * tmp * g.v);
}
- return ret;
}
- // Handle case 1.
- T const tmp = pow(f, g.a);
- return Jet<T, N>(tmp, log(f) * tmp * g.v);
+
+ return result;
}
// pow -- both base and exponent are differentiable functions. This has a
@@ -722,73 +801,48 @@ Jet<T, N> pow(double f, const Jet<T, N>& g) {
//
// 9. For f < 0, g noninteger: The value and derivatives of f^g are not finite.
-template <typename T, int N> inline
-Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
- if (f.a == 0 && g.a >= 1) {
+template <typename T, int N>
+inline Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) {
+ Jet<T, N> result;
+
+ if (f.a == T(0) && g.a >= T(1)) {
// Handle cases 2 and 3.
- if (g.a > 1) {
- return Jet<T, N>(T(0.0));
+ if (g.a > T(1)) {
+ result = Jet<T, N>(T(0.0));
+ } else {
+ result = f;
}
- return f;
- }
- if (f.a < 0 && g.a == floor(g.a)) {
- // Handle cases 7 and 8.
- T const tmp = g.a * pow(f.a, g.a - T(1.0));
- Jet<T, N> ret(pow(f.a, g.a), tmp * f.v);
- for (int i = 0; i < N; i++) {
- if (g.v[i] != T(0.0)) {
- // Return a NaN when g.v != 0.
- ret.v[i] = std::numeric_limits<T>::quiet_NaN();
+
+ } else {
+ if (f.a < T(0) && g.a == floor(g.a)) {
+ // Handle cases 7 and 8.
+ T const tmp = g.a * pow(f.a, g.a - T(1.0));
+ result = Jet<T, N>(pow(f.a, g.a), tmp * f.v);
+ for (int i = 0; i < N; i++) {
+ if (g.v[i] != T(0.0)) {
+ // Return a NaN when g.v != 0.
+ result.v[i] = T(std::numeric_limits<double>::quiet_NaN());
+ }
}
+ } else {
+ // Handle the remaining cases. For cases 4,5,6,9 we allow the log()
+ // function to generate -HUGE_VAL or NaN, since those cases result in a
+ // nonfinite derivative.
+ T const tmp1 = pow(f.a, g.a);
+ T const tmp2 = g.a * pow(f.a, g.a - T(1.0));
+ T const tmp3 = tmp1 * log(f.a);
+ result = Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v);
}
- return ret;
}
- // Handle the remaining cases. For cases 4,5,6,9 we allow the log() function
- // to generate -HUGE_VAL or NaN, since those cases result in a nonfinite
- // derivative.
- T const tmp1 = pow(f.a, g.a);
- T const tmp2 = g.a * pow(f.a, g.a - T(1.0));
- T const tmp3 = tmp1 * log(f.a);
- return Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v);
-}
-
-// Define the helper functions Eigen needs to embed Jet types.
-//
-// NOTE(keir): machine_epsilon() and precision() are missing, because they don't
-// work with nested template types (e.g. where the scalar is itself templated).
-// Among other things, this means that decompositions of Jet's does not work,
-// for example
-//
-// Matrix<Jet<T, N> ... > A, x, b;
-// ...
-// A.solve(b, &x)
-//
-// does not work and will fail with a strange compiler error.
-//
-// TODO(keir): This is an Eigen 2.0 limitation that is lifted in 3.0. When we
-// switch to 3.0, also add the rest of the specialization functionality.
-template<typename T, int N> inline const Jet<T, N>& ei_conj(const Jet<T, N>& x) { return x; } // NOLINT
-template<typename T, int N> inline const Jet<T, N>& ei_real(const Jet<T, N>& x) { return x; } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_imag(const Jet<T, N>& ) { return Jet<T, N>(0.0); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_abs (const Jet<T, N>& x) { return fabs(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_abs2(const Jet<T, N>& x) { return x * x; } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_sqrt(const Jet<T, N>& x) { return sqrt(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_exp (const Jet<T, N>& x) { return exp(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_log (const Jet<T, N>& x) { return log(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_sin (const Jet<T, N>& x) { return sin(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_cos (const Jet<T, N>& x) { return cos(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_tan (const Jet<T, N>& x) { return tan(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_atan(const Jet<T, N>& x) { return atan(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_sinh(const Jet<T, N>& x) { return sinh(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_cosh(const Jet<T, N>& x) { return cosh(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_tanh(const Jet<T, N>& x) { return tanh(x); } // NOLINT
-template<typename T, int N> inline Jet<T, N> ei_pow (const Jet<T, N>& x, Jet<T, N> y) { return pow(x, y); } // NOLINT
+
+ return result;
+}
// Note: This has to be in the ceres namespace for argument dependent lookup to
// function correctly. Otherwise statements like CHECK_LE(x, 2.0) fail with
// strange compile errors.
template <typename T, int N>
-inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
+inline std::ostream& operator<<(std::ostream& s, const Jet<T, N>& z) {
s << "[" << z.a << " ; ";
for (int i = 0; i < N; ++i) {
s << z.v[i];
@@ -799,15 +853,78 @@ inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
s << "]";
return s;
}
-
} // namespace ceres
+namespace std {
+template <typename T, int N>
+struct numeric_limits<ceres::Jet<T, N>> {
+ static constexpr bool is_specialized = true;
+ static constexpr bool is_signed = std::numeric_limits<T>::is_signed;
+ static constexpr bool is_integer = std::numeric_limits<T>::is_integer;
+ static constexpr bool is_exact = std::numeric_limits<T>::is_exact;
+ static constexpr bool has_infinity = std::numeric_limits<T>::has_infinity;
+ static constexpr bool has_quiet_NaN = std::numeric_limits<T>::has_quiet_NaN;
+ static constexpr bool has_signaling_NaN =
+ std::numeric_limits<T>::has_signaling_NaN;
+ static constexpr bool is_iec559 = std::numeric_limits<T>::is_iec559;
+ static constexpr bool is_bounded = std::numeric_limits<T>::is_bounded;
+ static constexpr bool is_modulo = std::numeric_limits<T>::is_modulo;
+
+ static constexpr std::float_denorm_style has_denorm =
+ std::numeric_limits<T>::has_denorm;
+ static constexpr std::float_round_style round_style =
+ std::numeric_limits<T>::round_style;
+
+ static constexpr int digits = std::numeric_limits<T>::digits;
+ static constexpr int digits10 = std::numeric_limits<T>::digits10;
+ static constexpr int max_digits10 = std::numeric_limits<T>::max_digits10;
+ static constexpr int radix = std::numeric_limits<T>::radix;
+ static constexpr int min_exponent = std::numeric_limits<T>::min_exponent;
+  static constexpr int min_exponent10 = std::numeric_limits<T>::min_exponent10;
+ static constexpr int max_exponent = std::numeric_limits<T>::max_exponent;
+ static constexpr int max_exponent10 = std::numeric_limits<T>::max_exponent10;
+ static constexpr bool traps = std::numeric_limits<T>::traps;
+ static constexpr bool tinyness_before =
+ std::numeric_limits<T>::tinyness_before;
+
+ static constexpr ceres::Jet<T, N> min() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::min());
+ }
+ static constexpr ceres::Jet<T, N> lowest() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::lowest());
+ }
+ static constexpr ceres::Jet<T, N> epsilon() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::epsilon());
+ }
+ static constexpr ceres::Jet<T, N> round_error() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::round_error());
+ }
+ static constexpr ceres::Jet<T, N> infinity() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::infinity());
+ }
+ static constexpr ceres::Jet<T, N> quiet_NaN() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::quiet_NaN());
+ }
+ static constexpr ceres::Jet<T, N> signaling_NaN() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::signaling_NaN());
+ }
+ static constexpr ceres::Jet<T, N> denorm_min() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::denorm_min());
+ }
+
+ static constexpr ceres::Jet<T, N> max() noexcept {
+ return ceres::Jet<T, N>(std::numeric_limits<T>::max());
+ }
+};
+
+} // namespace std
+
namespace Eigen {
// Creating a specialization of NumTraits enables placing Jet objects inside
// Eigen arrays, getting all the goodness of Eigen combined with autodiff.
-template<typename T, int N>
-struct NumTraits<ceres::Jet<T, N> > {
+template <typename T, int N>
+struct NumTraits<ceres::Jet<T, N>> {
typedef ceres::Jet<T, N> Real;
typedef ceres::Jet<T, N> NonInteger;
typedef ceres::Jet<T, N> Nested;
@@ -821,6 +938,8 @@ struct NumTraits<ceres::Jet<T, N> > {
return Real(std::numeric_limits<T>::epsilon());
}
+ static inline int digits10() { return NumTraits<T>::digits10(); }
+
enum {
IsComplex = 0,
IsInteger = 0,
@@ -833,7 +952,7 @@ struct NumTraits<ceres::Jet<T, N> > {
RequireInitialization = 1
};
- template<bool Vectorized>
+ template <bool Vectorized>
struct Div {
enum {
#if defined(EIGEN_VECTORIZE_AVX)
@@ -847,6 +966,24 @@ struct NumTraits<ceres::Jet<T, N> > {
Cost = 3
};
};
+
+ static inline Real highest() { return Real(std::numeric_limits<T>::max()); }
+ static inline Real lowest() { return Real(-std::numeric_limits<T>::max()); }
+};
+
+// Specifying the return type of binary operations between Jets and scalar types
+// allows you to perform matrix/array operations with Eigen matrices and arrays
+// such as addition, subtraction, multiplication, and division where one Eigen
+// matrix/array is of type Jet and the other is a scalar type. This improves
+// performance by using the optimized scalar-to-Jet binary operations but
+// is only available on Eigen versions >= 3.3
+template <typename BinaryOp, typename T, int N>
+struct ScalarBinaryOpTraits<ceres::Jet<T, N>, T, BinaryOp> {
+ typedef ceres::Jet<T, N> ReturnType;
+};
+template <typename BinaryOp, typename T, int N>
+struct ScalarBinaryOpTraits<T, ceres::Jet<T, N>, BinaryOp> {
+ typedef ceres::Jet<T, N> ReturnType;
};
} // namespace Eigen
diff --git a/extern/ceres/include/ceres/local_parameterization.h b/extern/ceres/include/ceres/local_parameterization.h
index 379fc684921..1576e829e73 100644
--- a/extern/ceres/include/ceres/local_parameterization.h
+++ b/extern/ceres/include/ceres/local_parameterization.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,10 +32,12 @@
#ifndef CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
#define CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
+#include <array>
+#include <memory>
#include <vector>
-#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
+
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/port.h"
namespace ceres {
@@ -61,7 +63,7 @@ namespace ceres {
// optimize over two dimensional vector delta in the tangent space at
// that point and then "move" to the point x + delta, where the move
// operation involves projecting back onto the sphere. Doing so
-// removes a redundent dimension from the optimization, making it
+// removes a redundant dimension from the optimization, making it
// numerically more robust and efficient.
//
// More generally we can define a function
@@ -154,17 +156,16 @@ class CERES_EXPORT IdentityParameterization : public LocalParameterization {
public:
explicit IdentityParameterization(int size);
virtual ~IdentityParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual bool MultiplyByJacobian(const double* x,
- const int num_cols,
- const double* global_matrix,
- double* local_matrix) const;
- virtual int GlobalSize() const { return size_; }
- virtual int LocalSize() const { return size_; }
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ bool MultiplyByJacobian(const double* x,
+ const int num_cols,
+ const double* global_matrix,
+ double* local_matrix) const override;
+ int GlobalSize() const override { return size_; }
+ int LocalSize() const override { return size_; }
private:
const int size_;
@@ -176,19 +177,18 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
explicit SubsetParameterization(int size,
const std::vector<int>& constant_parameters);
virtual ~SubsetParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual bool MultiplyByJacobian(const double* x,
- const int num_cols,
- const double* global_matrix,
- double* local_matrix) const;
- virtual int GlobalSize() const {
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ bool MultiplyByJacobian(const double* x,
+ const int num_cols,
+ const double* global_matrix,
+ double* local_matrix) const override;
+ int GlobalSize() const override {
return static_cast<int>(constancy_mask_.size());
}
- virtual int LocalSize() const { return local_size_; }
+ int LocalSize() const override { return local_size_; }
private:
const int local_size_;
@@ -202,13 +202,12 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization {
class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
public:
virtual ~QuaternionParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual int GlobalSize() const { return 4; }
- virtual int LocalSize() const { return 3; }
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ int GlobalSize() const override { return 4; }
+ int LocalSize() const override { return 3; }
};
// Implements the quaternion local parameterization for Eigen's representation
@@ -222,16 +221,16 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization {
//
// Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x
// with * being the quaternion multiplication operator.
-class EigenQuaternionParameterization : public ceres::LocalParameterization {
+class CERES_EXPORT EigenQuaternionParameterization
+ : public ceres::LocalParameterization {
public:
virtual ~EigenQuaternionParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual int GlobalSize() const { return 4; }
- virtual int LocalSize() const { return 3; }
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ int GlobalSize() const override { return 4; }
+ int LocalSize() const override { return 3; }
};
// This provides a parameterization for homogeneous vectors which are commonly
@@ -247,32 +246,55 @@ class EigenQuaternionParameterization : public ceres::LocalParameterization {
// remain on the sphere. We assume that the last element of x is the scalar
// component. The size of the homogeneous vector is required to be greater than
// 1.
-class CERES_EXPORT HomogeneousVectorParameterization :
- public LocalParameterization {
+class CERES_EXPORT HomogeneousVectorParameterization
+ : public LocalParameterization {
public:
explicit HomogeneousVectorParameterization(int size);
virtual ~HomogeneousVectorParameterization() {}
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual int GlobalSize() const { return size_; }
- virtual int LocalSize() const { return size_ - 1; }
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ int GlobalSize() const override { return size_; }
+ int LocalSize() const override { return size_ - 1; }
private:
const int size_;
};
+// This provides a parameterization for lines, where the line is
+// over-parameterized by an origin point and a direction vector. So the
+// parameter vector size needs to be two times the ambient space dimension,
+// where the first half is interpreted as the origin point and the second half
+// as the direction.
+//
+// The plus operator for the line direction is the same as for the
+// HomogeneousVectorParameterization. The update of the origin point is
+// perpendicular to the line direction before the update.
+//
+// This local parameterization is a special case of the affine Grassmannian
+// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold))
+// for the case Graff_1(R^n).
+template <int AmbientSpaceDimension>
+class LineParameterization : public LocalParameterization {
+ public:
+ static_assert(AmbientSpaceDimension >= 2,
+ "The ambient space must be at least 2");
+
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ int GlobalSize() const override { return 2 * AmbientSpaceDimension; }
+ int LocalSize() const override { return 2 * (AmbientSpaceDimension - 1); }
+};
+
// Construct a local parameterization by taking the Cartesian product
// of a number of other local parameterizations. This is useful, when
// a parameter block is the cartesian product of two or more
// manifolds. For example the parameters of a camera consist of a
// rotation and a translation, i.e., SO(3) x R^3.
//
-// Currently this class supports taking the cartesian product of up to
-// four local parameterizations.
-//
// Example usage:
//
// ProductParameterization product_param(new QuaterionionParameterization(),
@@ -282,35 +304,49 @@ class CERES_EXPORT HomogeneousVectorParameterization :
// rotation is represented using a quaternion.
class CERES_EXPORT ProductParameterization : public LocalParameterization {
public:
+ ProductParameterization(const ProductParameterization&) = delete;
+ ProductParameterization& operator=(const ProductParameterization&) = delete;
//
- // NOTE: All the constructors take ownership of the input local
+ // NOTE: The constructor takes ownership of the input local
// parameterizations.
//
- ProductParameterization(LocalParameterization* local_param1,
- LocalParameterization* local_param2);
+ template <typename... LocalParams>
+ ProductParameterization(LocalParams*... local_params)
+ : local_params_(sizeof...(LocalParams)),
+ local_size_{0},
+ global_size_{0},
+ buffer_size_{0} {
+ constexpr int kNumLocalParams = sizeof...(LocalParams);
+ static_assert(kNumLocalParams >= 2,
+ "At least two local parameterizations must be specified.");
- ProductParameterization(LocalParameterization* local_param1,
- LocalParameterization* local_param2,
- LocalParameterization* local_param3);
+ using LocalParameterizationPtr = std::unique_ptr<LocalParameterization>;
- ProductParameterization(LocalParameterization* local_param1,
- LocalParameterization* local_param2,
- LocalParameterization* local_param3,
- LocalParameterization* local_param4);
+ // Wrap all raw pointers into std::unique_ptr for exception safety.
+ std::array<LocalParameterizationPtr, kNumLocalParams> local_params_array{
+ LocalParameterizationPtr(local_params)...};
- virtual ~ProductParameterization();
- virtual bool Plus(const double* x,
- const double* delta,
- double* x_plus_delta) const;
- virtual bool ComputeJacobian(const double* x,
- double* jacobian) const;
- virtual int GlobalSize() const { return global_size_; }
- virtual int LocalSize() const { return local_size_; }
+ // Initialize internal state.
+ for (int i = 0; i < kNumLocalParams; ++i) {
+ LocalParameterizationPtr& param = local_params_[i];
+ param = std::move(local_params_array[i]);
- private:
- void Init();
+ buffer_size_ =
+ std::max(buffer_size_, param->LocalSize() * param->GlobalSize());
+ global_size_ += param->GlobalSize();
+ local_size_ += param->LocalSize();
+ }
+ }
+
+ bool Plus(const double* x,
+ const double* delta,
+ double* x_plus_delta) const override;
+ bool ComputeJacobian(const double* x, double* jacobian) const override;
+ int GlobalSize() const override { return global_size_; }
+ int LocalSize() const override { return local_size_; }
- std::vector<LocalParameterization*> local_params_;
+ private:
+ std::vector<std::unique_ptr<LocalParameterization>> local_params_;
int local_size_;
int global_size_;
int buffer_size_;
@@ -319,5 +355,7 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization {
} // namespace ceres
#include "ceres/internal/reenable_warnings.h"
+#include "ceres/internal/line_parameterization.h"
#endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_
+
diff --git a/extern/ceres/include/ceres/loss_function.h b/extern/ceres/include/ceres/loss_function.h
index 0512c135143..7aabf7dfce1 100644
--- a/extern/ceres/include/ceres/loss_function.h
+++ b/extern/ceres/include/ceres/loss_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -57,7 +57,7 @@
// anything special (i.e. if we used a basic quadratic loss), the
// residual for the erroneous measurement will result in extreme error
// due to the quadratic nature of squared loss. This results in the
-// entire solution getting pulled away from the optimimum to reduce
+// entire solution getting pulled away from the optimum to reduce
// the large error that would otherwise be attributed to the wrong
// measurement.
//
@@ -75,11 +75,11 @@
#ifndef CERES_PUBLIC_LOSS_FUNCTION_H_
#define CERES_PUBLIC_LOSS_FUNCTION_H_
-#include "glog/logging.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/types.h"
+#include <memory>
+
#include "ceres/internal/disable_warnings.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
@@ -119,7 +119,6 @@ class CERES_EXPORT LossFunction {
// Note: in the region of interest (i.e. s < 3) we have:
// TrivialLoss >= HuberLoss >= SoftLOneLoss >= CauchyLoss
-
// This corresponds to no robustification.
//
// rho(s) = s
@@ -131,7 +130,7 @@ class CERES_EXPORT LossFunction {
// thing.
class CERES_EXPORT TrivialLoss : public LossFunction {
public:
- virtual void Evaluate(double, double*) const;
+ void Evaluate(double, double*) const override;
};
// Scaling
@@ -174,8 +173,8 @@ class CERES_EXPORT TrivialLoss : public LossFunction {
// http://en.wikipedia.org/wiki/Huber_Loss_Function
class CERES_EXPORT HuberLoss : public LossFunction {
public:
- explicit HuberLoss(double a) : a_(a), b_(a * a) { }
- virtual void Evaluate(double, double*) const;
+ explicit HuberLoss(double a) : a_(a), b_(a * a) {}
+ void Evaluate(double, double*) const override;
private:
const double a_;
@@ -187,11 +186,11 @@ class CERES_EXPORT HuberLoss : public LossFunction {
//
// rho(s) = 2 (sqrt(1 + s) - 1).
//
-// At s = 0: rho = [0, 1, -1/2].
+// At s = 0: rho = [0, 1, -1 / (2 * a^2)].
class CERES_EXPORT SoftLOneLoss : public LossFunction {
public:
- explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
- virtual void Evaluate(double, double*) const;
+ explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {}
+ void Evaluate(double, double*) const override;
private:
// b = a^2.
@@ -204,11 +203,11 @@ class CERES_EXPORT SoftLOneLoss : public LossFunction {
//
// rho(s) = log(1 + s).
//
-// At s = 0: rho = [0, 1, -1].
+// At s = 0: rho = [0, 1, -1 / a^2].
class CERES_EXPORT CauchyLoss : public LossFunction {
public:
- explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
- virtual void Evaluate(double, double*) const;
+ explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {}
+ void Evaluate(double, double*) const override;
private:
// b = a^2.
@@ -228,8 +227,8 @@ class CERES_EXPORT CauchyLoss : public LossFunction {
// At s = 0: rho = [0, 1, 0].
class CERES_EXPORT ArctanLoss : public LossFunction {
public:
- explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
- virtual void Evaluate(double, double*) const;
+ explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {}
+ void Evaluate(double, double*) const override;
private:
const double a_;
@@ -268,7 +267,7 @@ class CERES_EXPORT ArctanLoss : public LossFunction {
class CERES_EXPORT TolerantLoss : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
- virtual void Evaluate(double, double*) const;
+ void Evaluate(double, double*) const override;
private:
const double a_, b_, c_;
@@ -277,16 +276,17 @@ class CERES_EXPORT TolerantLoss : public LossFunction {
// This is the Tukey biweight loss function which aggressively
// attempts to suppress large errors.
//
-// The term is computed as:
+// The term is computed as follows where the equations are scaled by a
+// factor of 2 because the cost function is given by 1/2 rho(s):
//
-// rho(s) = a^2 / 6 * (1 - (1 - s / a^2)^3 ) for s <= a^2,
-// rho(s) = a^2 / 6 for s > a^2.
+// rho(s) = a^2 / 3 * (1 - (1 - s / a^2)^3 ) for s <= a^2,
+// rho(s) = a^2 / 3 for s > a^2.
//
-// At s = 0: rho = [0, 0.5, -1 / a^2]
+// At s = 0: rho = [0, 1, -2 / a^2]
class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
public:
- explicit TukeyLoss(double a) : a_squared_(a * a) { }
- virtual void Evaluate(double, double*) const;
+ explicit TukeyLoss(double a) : a_squared_(a * a) {}
+ void Evaluate(double, double*) const override;
private:
const double a_squared_;
@@ -297,13 +297,15 @@ class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
// The loss functions must not be NULL.
class CERES_EXPORT ComposedLoss : public LossFunction {
public:
- explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
- const LossFunction* g, Ownership ownership_g);
+ explicit ComposedLoss(const LossFunction* f,
+ Ownership ownership_f,
+ const LossFunction* g,
+ Ownership ownership_g);
virtual ~ComposedLoss();
- virtual void Evaluate(double, double*) const;
+ void Evaluate(double, double*) const override;
private:
- internal::scoped_ptr<const LossFunction> f_, g_;
+ std::unique_ptr<const LossFunction> f_, g_;
const Ownership ownership_f_, ownership_g_;
};
@@ -329,21 +331,22 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
// ownership parameter.
- ScaledLoss(const LossFunction* rho, double a, Ownership ownership) :
- rho_(rho), a_(a), ownership_(ownership) { }
+ ScaledLoss(const LossFunction* rho, double a, Ownership ownership)
+ : rho_(rho), a_(a), ownership_(ownership) {}
+ ScaledLoss(const ScaledLoss&) = delete;
+ void operator=(const ScaledLoss&) = delete;
virtual ~ScaledLoss() {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
}
- virtual void Evaluate(double, double*) const;
+ void Evaluate(double, double*) const override;
private:
- internal::scoped_ptr<const LossFunction> rho_;
+ std::unique_ptr<const LossFunction> rho_;
const double a_;
const Ownership ownership_;
- CERES_DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
};
// Sometimes after the optimization problem has been constructed, we
@@ -387,8 +390,10 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
class CERES_EXPORT LossFunctionWrapper : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
- : rho_(rho), ownership_(ownership) {
- }
+ : rho_(rho), ownership_(ownership) {}
+
+ LossFunctionWrapper(const LossFunctionWrapper&) = delete;
+ void operator=(const LossFunctionWrapper&) = delete;
virtual ~LossFunctionWrapper() {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
@@ -396,13 +401,12 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
}
}
- virtual void Evaluate(double sq_norm, double out[3]) const {
+ void Evaluate(double sq_norm, double out[3]) const override {
if (rho_.get() == NULL) {
out[0] = sq_norm;
out[1] = 1.0;
out[2] = 0.0;
- }
- else {
+ } else {
rho_->Evaluate(sq_norm, out);
}
}
@@ -416,9 +420,8 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
}
private:
- internal::scoped_ptr<const LossFunction> rho_;
+ std::unique_ptr<const LossFunction> rho_;
Ownership ownership_;
- CERES_DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/normal_prior.h b/extern/ceres/include/ceres/normal_prior.h
index cd98b4c846b..14ab379f4af 100644
--- a/extern/ceres/include/ceres/normal_prior.h
+++ b/extern/ceres/include/ceres/normal_prior.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,8 +35,8 @@
#define CERES_PUBLIC_NORMAL_PRIOR_H_
#include "ceres/cost_function.h"
-#include "ceres/internal/eigen.h"
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/eigen.h"
namespace ceres {
@@ -57,15 +57,15 @@ namespace ceres {
// which would be the case if the covariance matrix S is rank
// deficient.
-class CERES_EXPORT NormalPrior: public CostFunction {
+class CERES_EXPORT NormalPrior : public CostFunction {
public:
// Check that the number of rows in the vector b are the same as the
// number of columns in the matrix A, crash otherwise.
NormalPrior(const Matrix& A, const Vector& b);
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override;
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const;
private:
Matrix A_;
Vector b_;
diff --git a/extern/ceres/include/ceres/numeric_diff_cost_function.h b/extern/ceres/include/ceres/numeric_diff_cost_function.h
index 5dfaeab6241..c69f262f572 100644
--- a/extern/ceres/include/ceres/numeric_diff_cost_function.h
+++ b/extern/ceres/include/ceres/numeric_diff_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -52,16 +52,16 @@
// The actual cost added to the total problem is e^2, or (k - x'k)^2; however,
// the squaring is implicitly done by the optimization framework.
//
-// To write an numerically-differentiable cost function for the above model, first
-// define the object
+// To write a numerically-differentiable cost function for the above model,
+// first define the object
//
// class MyScalarCostFunctor {
-// MyScalarCostFunctor(double k): k_(k) {}
+// explicit MyScalarCostFunctor(double k): k_(k) {}
//
// bool operator()(const double* const x,
// const double* const y,
// double* residuals) const {
-// residuals[0] = k_ - x[0] * y[0] + x[1] * y[1];
+// residuals[0] = k_ - x[0] * y[0] - x[1] * y[1];
// return true;
// }
//
@@ -98,6 +98,8 @@
// NumericDiffCostFunction also supports cost functions with a
// runtime-determined number of residuals. For example:
//
+// clang-format off
+//
// CostFunction* cost_function
// = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>(
// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^
@@ -109,10 +111,8 @@
// Indicate dynamic number of residuals --------------------+ | |
// Dimension of x ------------------------------------------------+ |
// Dimension of y ---------------------------------------------------+
+// clang-format on
//
-// The framework can currently accommodate cost functions of up to 10
-// independent variables, and there is no limit on the dimensionality
-// of each of them.
//
// The central difference method is considerably more accurate at the cost of
// twice as many function evaluations than forward difference. Consider using
@@ -161,10 +161,13 @@
#ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
#define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_
+#include <array>
+#include <memory>
+
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/numeric_diff.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/internal/parameter_dims.h"
#include "ceres/numeric_diff_options.h"
#include "ceres/sized_cost_function.h"
#include "ceres/types.h"
@@ -175,34 +178,17 @@ namespace ceres {
template <typename CostFunctor,
NumericDiffMethodType method = CENTRAL,
int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC
- int N0 = 0, // Number of parameters in block 0.
- int N1 = 0, // Number of parameters in block 1.
- int N2 = 0, // Number of parameters in block 2.
- int N3 = 0, // Number of parameters in block 3.
- int N4 = 0, // Number of parameters in block 4.
- int N5 = 0, // Number of parameters in block 5.
- int N6 = 0, // Number of parameters in block 6.
- int N7 = 0, // Number of parameters in block 7.
- int N8 = 0, // Number of parameters in block 8.
- int N9 = 0> // Number of parameters in block 9.
-class NumericDiffCostFunction
- : public SizedCostFunction<kNumResiduals,
- N0, N1, N2, N3, N4,
- N5, N6, N7, N8, N9> {
+ int... Ns> // Parameters dimensions for each block.
+class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> {
public:
NumericDiffCostFunction(
CostFunctor* functor,
Ownership ownership = TAKE_OWNERSHIP,
int num_residuals = kNumResiduals,
const NumericDiffOptions& options = NumericDiffOptions())
- : functor_(functor),
- ownership_(ownership),
- options_(options) {
+ : functor_(functor), ownership_(ownership), options_(options) {
if (kNumResiduals == DYNAMIC) {
- SizedCostFunction<kNumResiduals,
- N0, N1, N2, N3, N4,
- N5, N6, N7, N8, N9>
- ::set_num_residuals(num_residuals);
+ SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals);
}
}
@@ -212,24 +198,21 @@ class NumericDiffCostFunction
}
}
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const override {
using internal::FixedArray;
using internal::NumericDiff;
- const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9;
- const int kNumParameterBlocks =
- (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) +
- (N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0);
+ using ParameterDims =
+ typename SizedCostFunction<kNumResiduals, Ns...>::ParameterDims;
+
+ constexpr int kNumParameters = ParameterDims::kNumParameters;
+ constexpr int kNumParameterBlocks = ParameterDims::kNumParameterBlocks;
// Get the function value (residuals) at the the point to evaluate.
- if (!internal::EvaluateImpl<CostFunctor,
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(
- functor_.get(),
- parameters,
- residuals,
- functor_.get())) {
+ if (!internal::VariadicEvaluate<ParameterDims>(
+ *functor_, parameters, residuals)) {
return false;
}
@@ -239,77 +222,29 @@ class NumericDiffCostFunction
// Create a copy of the parameters which will get mutated.
FixedArray<double> parameters_copy(kNumParameters);
- FixedArray<double*> parameters_reference_copy(kNumParameterBlocks);
-
- parameters_reference_copy[0] = parameters_copy.get();
- if (N1) parameters_reference_copy[1] = parameters_reference_copy[0] + N0;
- if (N2) parameters_reference_copy[2] = parameters_reference_copy[1] + N1;
- if (N3) parameters_reference_copy[3] = parameters_reference_copy[2] + N2;
- if (N4) parameters_reference_copy[4] = parameters_reference_copy[3] + N3;
- if (N5) parameters_reference_copy[5] = parameters_reference_copy[4] + N4;
- if (N6) parameters_reference_copy[6] = parameters_reference_copy[5] + N5;
- if (N7) parameters_reference_copy[7] = parameters_reference_copy[6] + N6;
- if (N8) parameters_reference_copy[8] = parameters_reference_copy[7] + N7;
- if (N9) parameters_reference_copy[9] = parameters_reference_copy[8] + N8;
+ std::array<double*, kNumParameterBlocks> parameters_reference_copy =
+ ParameterDims::GetUnpackedParameters(parameters_copy.data());
-#define CERES_COPY_PARAMETER_BLOCK(block) \
- if (N ## block) memcpy(parameters_reference_copy[block], \
- parameters[block], \
- sizeof(double) * N ## block); // NOLINT
-
- CERES_COPY_PARAMETER_BLOCK(0);
- CERES_COPY_PARAMETER_BLOCK(1);
- CERES_COPY_PARAMETER_BLOCK(2);
- CERES_COPY_PARAMETER_BLOCK(3);
- CERES_COPY_PARAMETER_BLOCK(4);
- CERES_COPY_PARAMETER_BLOCK(5);
- CERES_COPY_PARAMETER_BLOCK(6);
- CERES_COPY_PARAMETER_BLOCK(7);
- CERES_COPY_PARAMETER_BLOCK(8);
- CERES_COPY_PARAMETER_BLOCK(9);
-
-#undef CERES_COPY_PARAMETER_BLOCK
-
-#define CERES_EVALUATE_JACOBIAN_FOR_BLOCK(block) \
- if (N ## block && jacobians[block] != NULL) { \
- if (!NumericDiff<CostFunctor, \
- method, \
- kNumResiduals, \
- N0, N1, N2, N3, N4, N5, N6, N7, N8, N9, \
- block, \
- N ## block >::EvaluateJacobianForParameterBlock( \
- functor_.get(), \
- residuals, \
- options_, \
- SizedCostFunction<kNumResiduals, \
- N0, N1, N2, N3, N4, \
- N5, N6, N7, N8, N9>::num_residuals(), \
- block, \
- N ## block, \
- parameters_reference_copy.get(), \
- jacobians[block])) { \
- return false; \
- } \
+ for (int block = 0; block < kNumParameterBlocks; ++block) {
+ memcpy(parameters_reference_copy[block],
+ parameters[block],
+ sizeof(double) * ParameterDims::GetDim(block));
}
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(0);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(1);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(2);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(3);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(4);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(5);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(6);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(7);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(8);
- CERES_EVALUATE_JACOBIAN_FOR_BLOCK(9);
-
-#undef CERES_EVALUATE_JACOBIAN_FOR_BLOCK
+ internal::EvaluateJacobianForParameterBlocks<ParameterDims>::
+ template Apply<method, kNumResiduals>(
+ functor_.get(),
+ residuals,
+ options_,
+ SizedCostFunction<kNumResiduals, Ns...>::num_residuals(),
+ parameters_reference_copy.data(),
+ jacobians);
return true;
}
private:
- internal::scoped_ptr<CostFunctor> functor_;
+ std::unique_ptr<CostFunctor> functor_;
Ownership ownership_;
NumericDiffOptions options_;
};
diff --git a/extern/ceres/include/ceres/numeric_diff_options.h b/extern/ceres/include/ceres/numeric_diff_options.h
index 119c8a86596..64919ed5ab1 100644
--- a/extern/ceres/include/ceres/numeric_diff_options.h
+++ b/extern/ceres/include/ceres/numeric_diff_options.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,23 +32,17 @@
#ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
#define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_
+#include "ceres/internal/port.h"
+
namespace ceres {
// Options pertaining to numeric differentiation (e.g., convergence criteria,
// step sizes).
struct CERES_EXPORT NumericDiffOptions {
- NumericDiffOptions() {
- relative_step_size = 1e-6;
- ridders_relative_initial_step_size = 1e-2;
- max_num_ridders_extrapolations = 10;
- ridders_epsilon = 1e-12;
- ridders_step_shrink_factor = 2.0;
- }
-
// Numeric differentiation step size (multiplied by parameter block's
// order of magnitude). If parameters are close to zero, the step size
// is set to sqrt(machine_epsilon).
- double relative_step_size;
+ double relative_step_size = 1e-6;
// Initial step size for Ridders adaptive numeric differentiation (multiplied
// by parameter block's order of magnitude).
@@ -59,19 +53,19 @@ struct CERES_EXPORT NumericDiffOptions {
// Note: For Ridders' method to converge, the step size should be initialized
// to a value that is large enough to produce a significant change in the
// function. As the derivative is estimated, the step size decreases.
- double ridders_relative_initial_step_size;
+ double ridders_relative_initial_step_size = 1e-2;
// Maximal number of adaptive extrapolations (sampling) in Ridders' method.
- int max_num_ridders_extrapolations;
+ int max_num_ridders_extrapolations = 10;
// Convergence criterion on extrapolation error for Ridders adaptive
// differentiation. The available error estimation methods are defined in
// NumericDiffErrorType and set in the "ridders_error_method" field.
- double ridders_epsilon;
+ double ridders_epsilon = 1e-12;
// The factor in which to shrink the step size with each extrapolation in
// Ridders' method.
- double ridders_step_shrink_factor;
+ double ridders_step_shrink_factor = 2.0;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/ordered_groups.h b/extern/ceres/include/ceres/ordered_groups.h
index aa1bd3a7da1..954663c97e6 100644
--- a/extern/ceres/include/ceres/ordered_groups.h
+++ b/extern/ceres/include/ceres/ordered_groups.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,7 +33,9 @@
#include <map>
#include <set>
+#include <unordered_map>
#include <vector>
+
#include "ceres/internal/port.h"
#include "glog/logging.h"
@@ -63,8 +65,7 @@ class OrderedGroups {
return false;
}
- typename std::map<T, int>::const_iterator it =
- element_to_group_.find(element);
+ auto it = element_to_group_.find(element);
if (it != element_to_group_.end()) {
if (it->second == group) {
// Element is already in the right group, nothing to do.
@@ -126,17 +127,14 @@ class OrderedGroups {
return;
}
- typename std::map<int, std::set<T> >::reverse_iterator it =
- group_to_elements_.rbegin();
- std::map<int, std::set<T> > new_group_to_elements;
+ auto it = group_to_elements_.rbegin();
+ std::map<int, std::set<T>> new_group_to_elements;
new_group_to_elements[it->first] = it->second;
int new_group_id = it->first + 1;
for (++it; it != group_to_elements_.rend(); ++it) {
- for (typename std::set<T>::const_iterator element_it = it->second.begin();
- element_it != it->second.end();
- ++element_it) {
- element_to_group_[*element_it] = new_group_id;
+ for (const auto& element : it->second) {
+ element_to_group_[element] = new_group_id;
}
new_group_to_elements[new_group_id] = it->second;
new_group_id++;
@@ -148,8 +146,7 @@ class OrderedGroups {
// Return the group id for the element. If the element is not a
// member of any group, return -1.
int GroupId(const T element) const {
- typename std::map<T, int>::const_iterator it =
- element_to_group_.find(element);
+ auto it = element_to_group_.find(element);
if (it == element_to_group_.end()) {
return -1;
}
@@ -157,27 +154,21 @@ class OrderedGroups {
}
bool IsMember(const T element) const {
- typename std::map<T, int>::const_iterator it =
- element_to_group_.find(element);
+ auto it = element_to_group_.find(element);
return (it != element_to_group_.end());
}
// This function always succeeds, i.e., implicitly there exists a
// group for every integer.
int GroupSize(const int group) const {
- typename std::map<int, std::set<T> >::const_iterator it =
- group_to_elements_.find(group);
- return (it == group_to_elements_.end()) ? 0 : it->second.size();
+ auto it = group_to_elements_.find(group);
+ return (it == group_to_elements_.end()) ? 0 : it->second.size();
}
- int NumElements() const {
- return element_to_group_.size();
- }
+ int NumElements() const { return element_to_group_.size(); }
// Number of groups with one or more elements.
- int NumGroups() const {
- return group_to_elements_.size();
- }
+ int NumGroups() const { return group_to_elements_.size(); }
// The first group with one or more elements. Calling this when
// there are no groups with non-zero elements will result in a
@@ -187,17 +178,15 @@ class OrderedGroups {
return group_to_elements_.begin()->first;
}
- const std::map<int, std::set<T> >& group_to_elements() const {
+ const std::map<int, std::set<T>>& group_to_elements() const {
return group_to_elements_;
}
- const std::map<T, int>& element_to_group() const {
- return element_to_group_;
- }
+ const std::map<T, int>& element_to_group() const { return element_to_group_; }
private:
- std::map<int, std::set<T> > group_to_elements_;
- std::map<T, int> element_to_group_;
+ std::map<int, std::set<T>> group_to_elements_;
+ std::unordered_map<T, int> element_to_group_;
};
// Typedef for the most commonly used version of OrderedGroups.
diff --git a/extern/ceres/include/ceres/problem.h b/extern/ceres/include/ceres/problem.h
index 27ed4ef15da..88f99663f65 100644
--- a/extern/ceres/include/ceres/problem.h
+++ b/extern/ceres/include/ceres/problem.h
@@ -34,22 +34,23 @@
#ifndef CERES_PUBLIC_PROBLEM_H_
#define CERES_PUBLIC_PROBLEM_H_
+#include <array>
#include <cstddef>
#include <map>
+#include <memory>
#include <set>
#include <vector>
-#include "glog/logging.h"
-#include "ceres/internal/macros.h"
+#include "ceres/context.h"
+#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
-#include "ceres/internal/disable_warnings.h"
-
+#include "glog/logging.h"
namespace ceres {
class CostFunction;
+class EvaluationCallback;
class LossFunction;
class LocalParameterization;
class Solver;
@@ -114,20 +115,13 @@ typedef internal::ResidualBlock* ResidualBlockId;
//
// Problem problem;
//
-// problem.AddResidualBlock(new MyUnaryCostFunction(...), x1);
-// problem.AddResidualBlock(new MyBinaryCostFunction(...), x2, x3);
+// problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1);
+// problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3);
//
// Please see cost_function.h for details of the CostFunction object.
class CERES_EXPORT Problem {
public:
struct CERES_EXPORT Options {
- Options()
- : cost_function_ownership(TAKE_OWNERSHIP),
- loss_function_ownership(TAKE_OWNERSHIP),
- local_parameterization_ownership(TAKE_OWNERSHIP),
- enable_fast_removal(false),
- disable_all_safety_checks(false) {}
-
// These flags control whether the Problem object owns the cost
// functions, loss functions, and parameterizations passed into
// the Problem. If set to TAKE_OWNERSHIP, then the problem object
@@ -135,25 +129,25 @@ class CERES_EXPORT Problem {
// destruction. The destructor is careful to delete the pointers
// only once, since sharing cost/loss/parameterizations is
// allowed.
- Ownership cost_function_ownership;
- Ownership loss_function_ownership;
- Ownership local_parameterization_ownership;
+ Ownership cost_function_ownership = TAKE_OWNERSHIP;
+ Ownership loss_function_ownership = TAKE_OWNERSHIP;
+ Ownership local_parameterization_ownership = TAKE_OWNERSHIP;
// If true, trades memory for faster RemoveResidualBlock() and
// RemoveParameterBlock() operations.
//
// By default, RemoveParameterBlock() and RemoveResidualBlock() take time
// proportional to the size of the entire problem. If you only ever remove
- // parameters or residuals from the problem occassionally, this might be
+ // parameters or residuals from the problem occasionally, this might be
// acceptable. However, if you have memory to spare, enable this option to
// make RemoveParameterBlock() take time proportional to the number of
// residual blocks that depend on it, and RemoveResidualBlock() take (on
// average) constant time.
//
- // The increase in memory usage is twofold: an additonal hash set per
+ // The increase in memory usage is twofold: an additional hash set per
// parameter block containing all the residuals that depend on the parameter
// block; and a hash set in the problem containing all residuals.
- bool enable_fast_removal;
+ bool enable_fast_removal = false;
// By default, Ceres performs a variety of safety checks when constructing
// the problem. There is a small but measurable performance penalty to
@@ -164,22 +158,51 @@ class CERES_EXPORT Problem {
//
// WARNING: Do not set this to true, unless you are absolutely sure of what
// you are doing.
- bool disable_all_safety_checks;
+ bool disable_all_safety_checks = false;
+
+ // A Ceres global context to use for solving this problem. This may help to
+ // reduce computation time as Ceres can reuse objects that are expensive to create.
+ // The context object can be nullptr, in which case Ceres may create one.
+ //
+ // Ceres does NOT take ownership of the pointer.
+ Context* context = nullptr;
+
+ // Using this callback interface, Ceres can notify you when it is
+ // about to evaluate the residuals or jacobians. With the
+ // callback, you can share computation between residual blocks by
+ // doing the shared computation in
+ // EvaluationCallback::PrepareForEvaluation() before Ceres calls
+ // CostFunction::Evaluate(). It also enables caching results
+ // between a pure residual evaluation and a residual & jacobian
+ // evaluation.
+ //
+ // Problem DOES NOT take ownership of the callback.
+ //
+ // NOTE: Evaluation callbacks are incompatible with inner
+ // iterations. So calling Solve with
+ // Solver::Options::use_inner_iterations = true on a Problem with
+ // a non-null evaluation callback is an error.
+ EvaluationCallback* evaluation_callback = nullptr;
};
// The default constructor is equivalent to the
// invocation Problem(Problem::Options()).
Problem();
explicit Problem(const Options& options);
+ Problem(Problem&&);
+ Problem& operator=(Problem&&);
+
+ Problem(const Problem&) = delete;
+ Problem& operator=(const Problem&) = delete;
~Problem();
// Add a residual block to the overall cost function. The cost
- // function carries with it information about the sizes of the
+ // function carries with it information about the sizes of the
// parameter blocks it expects. The function checks that these match
// the sizes of the parameter blocks listed in parameter_blocks. The
// program aborts if a mismatch is detected. loss_function can be
- // NULL, in which case the cost of the term is just the squared norm
+ // nullptr, in which case the cost of the term is just the squared norm
// of the residuals.
//
// The user has the option of explicitly adding the parameter blocks
@@ -208,59 +231,35 @@ class CERES_EXPORT Problem {
//
// Problem problem;
//
- // problem.AddResidualBlock(new MyUnaryCostFunction(...), NULL, x1);
- // problem.AddResidualBlock(new MyBinaryCostFunction(...), NULL, x2, x1);
+ // problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1);
+ // problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1);
//
+ // Add a residual block by listing the parameter block pointers
+ // directly instead of wrapping them in a container.
+ template <typename... Ts>
+ ResidualBlockId AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* x0,
+ Ts*... xs) {
+ const std::array<double*, sizeof...(Ts) + 1> parameter_blocks{{x0, xs...}};
+ return AddResidualBlock(cost_function,
+ loss_function,
+ parameter_blocks.data(),
+ static_cast<int>(parameter_blocks.size()));
+ }
+
+ // Add a residual block by providing a vector of parameter blocks.
ResidualBlockId AddResidualBlock(
CostFunction* cost_function,
LossFunction* loss_function,
const std::vector<double*>& parameter_blocks);
- // Convenience methods for adding residuals with a small number of
- // parameters. This is the common case. Instead of specifying the
- // parameter block arguments as a vector, list them as pointers.
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1);
+ // Add a residual block by providing a pointer to the parameter block array
+ // and the number of parameter blocks.
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
- double* x0, double* x1, double* x2);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8,
- double* x9);
+ double* const* const parameter_blocks,
+ int num_parameter_blocks);
// Add a parameter block with appropriate size to the problem.
// Repeated calls with the same arguments are ignored. Repeated
@@ -290,7 +289,7 @@ class CERES_EXPORT Problem {
// ordering, rendering the jacobian or residuals returned from the solver
// uninterpretable. If you depend on the evaluated jacobian, do not use
// remove! This may change in a future release.
- void RemoveParameterBlock(double* values);
+ void RemoveParameterBlock(const double* values);
// Remove a residual block from the problem. Any parameters that the residual
// block depends on are not removed. The cost and loss functions for the
@@ -304,32 +303,43 @@ class CERES_EXPORT Problem {
void RemoveResidualBlock(ResidualBlockId residual_block);
// Hold the indicated parameter block constant during optimization.
- void SetParameterBlockConstant(double* values);
+ void SetParameterBlockConstant(const double* values);
// Allow the indicated parameter block to vary during optimization.
void SetParameterBlockVariable(double* values);
- // Returns true if a parameter block is set constant, and false otherwise.
- bool IsParameterBlockConstant(double* values) const;
+ // Returns true if a parameter block is set constant, and false
+ // otherwise. A parameter block may be set constant in two ways:
+ // either by calling SetParameterBlockConstant or by associating a
+ // LocalParameterization with a zero dimensional tangent space with
+ // it.
+ bool IsParameterBlockConstant(const double* values) const;
// Set the local parameterization for one of the parameter blocks.
// The local_parameterization is owned by the Problem by default. It
// is acceptable to set the same parameterization for multiple
// parameters; the destructor is careful to delete local
- // parameterizations only once. The local parameterization can only
- // be set once per parameter, and cannot be changed once set.
+ // parameterizations only once. Calling SetParameterization with
+ // nullptr will clear any previously set parameterization.
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
// Get the local parameterization object associated with this
// parameter block. If there is no parameterization object
- // associated then NULL is returned.
- const LocalParameterization* GetParameterization(double* values) const;
+ // associated then nullptr is returned.
+ const LocalParameterization* GetParameterization(const double* values) const;
- // Set the lower/upper bound for the parameter with position "index".
+ // Set the lower/upper bound for the parameter at position "index".
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
+ // Get the lower/upper bound for the parameter at position
+ // "index". If the parameter is not bounded by the user, then its
+ // lower bound is -std::numeric_limits<double>::max() and upper
+ // bound is std::numeric_limits<double>::max().
+ double GetParameterLowerBound(const double* values, int index) const;
+ double GetParameterUpperBound(const double* values, int index) const;
+
// Number of parameter blocks in the problem. Always equals
// parameter_blocks().size() and parameter_block_sizes().size().
int NumParameterBlocks() const;
@@ -376,7 +386,7 @@ class CERES_EXPORT Problem {
const CostFunction* GetCostFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
- // Get the LossFunction for the given residual block. Returns NULL
+ // Get the LossFunction for the given residual block. Returns nullptr
// if no loss function is associated with this residual block.
const LossFunction* GetLossFunctionForResidualBlock(
const ResidualBlockId residual_block) const;
@@ -393,11 +403,6 @@ class CERES_EXPORT Problem {
// Options struct to control Problem::Evaluate.
struct EvaluateOptions {
- EvaluateOptions()
- : apply_loss_function(true),
- num_threads(1) {
- }
-
// The set of parameter blocks for which evaluation should be
// performed. This vector determines the order that parameter
// blocks occur in the gradient vector and in the columns of the
@@ -430,12 +435,12 @@ class CERES_EXPORT Problem {
// function. This is of use for example if the user wishes to
// analyse the solution quality by studying the distribution of
// residuals before and after the solve.
- bool apply_loss_function;
+ bool apply_loss_function = true;
- int num_threads;
+ int num_threads = 1;
};
- // Evaluate Problem. Any of the output pointers can be NULL. Which
+ // Evaluate Problem. Any of the output pointers can be nullptr. Which
// residual blocks and parameter blocks are used is controlled by
// the EvaluateOptions struct above.
//
@@ -445,16 +450,16 @@ class CERES_EXPORT Problem {
//
// Problem problem;
// double x = 1;
- // problem.AddResidualBlock(new MyCostFunction, NULL, &x);
+ // problem.AddResidualBlock(new MyCostFunction, nullptr, &x);
//
// double cost = 0.0;
- // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
+ // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
//
// The cost is evaluated at x = 1. If you wish to evaluate the
// problem at x = 2, then
//
- // x = 2;
- // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL);
+ // x = 2;
+ // problem.Evaluate(Problem::EvaluateOptions(), &cost, nullptr, nullptr, nullptr);
//
// is the way to do so.
//
@@ -468,17 +473,63 @@ class CERES_EXPORT Problem {
// Note 3: This function cannot be called while the problem is being
// solved, for example it cannot be called from an IterationCallback
// at the end of an iteration during a solve.
+ //
+ // Note 4: If an EvaluationCallback is associated with the problem,
+ // then its PrepareForEvaluation method will be called everytime
+ // this method is called with new_point = true.
bool Evaluate(const EvaluateOptions& options,
double* cost,
std::vector<double>* residuals,
std::vector<double>* gradient,
CRSMatrix* jacobian);
+ // Evaluates the residual block, storing the scalar cost in *cost,
+ // the residual components in *residuals, and the jacobians between
+ // the parameters and residuals in jacobians[i], in row-major order.
+ //
+ // If residuals is nullptr, the residuals are not computed.
+ //
+ // If jacobians is nullptr, no Jacobians are computed. If
+ // jacobians[i] is nullptr, then the Jacobian for that parameter
+ // block is not computed.
+ //
+ // It is not okay to request the Jacobian w.r.t a parameter block
+ // that is constant.
+ //
+ // The return value indicates the success or failure. Even if the
+ // function returns false, the caller should expect the output
+ // memory locations to have been modified.
+ //
+ // The returned cost and jacobians have had robustification and
+ // local parameterizations applied already; for example, the
+ // jacobian for a 4-dimensional quaternion parameter using the
+ // "QuaternionParameterization" is num_residuals by 3 instead of
+ // num_residuals by 4.
+ //
+ // apply_loss_function as the name implies allows the user to switch
+ // the application of the loss function on and off.
+ //
+ // WARNING: If an EvaluationCallback is associated with the problem
+ // then it is the user's responsibility to call it before calling
+ // this method.
+ //
+ // This is because, if the user calls this method multiple times, we
+ // cannot tell if the underlying parameter blocks have changed
+ // between calls or not. So if EvaluateResidualBlock was responsible
+ // for calling the EvaluationCallback, it will have to do it
+ // everytime it is called. Which makes the common case where the
+ // parameter blocks do not change, inefficient. So we leave it to
+ // the user to call the EvaluationCallback as needed.
+ bool EvaluateResidualBlock(ResidualBlockId residual_block_id,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const;
+
private:
friend class Solver;
friend class Covariance;
- internal::scoped_ptr<internal::ProblemImpl> problem_impl_;
- CERES_DISALLOW_COPY_AND_ASSIGN(Problem);
+ std::unique_ptr<internal::ProblemImpl> impl_;
};
} // namespace ceres
diff --git a/extern/ceres/include/ceres/rotation.h b/extern/ceres/include/ceres/rotation.h
index b6a06f772c4..7d5c8ef1fb2 100644
--- a/extern/ceres/include/ceres/rotation.h
+++ b/extern/ceres/include/ceres/rotation.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -49,6 +49,8 @@
#include <cmath>
#include <limits>
+#include "glog/logging.h"
+
namespace ceres {
// Trivial wrapper to index linear arrays as matrices, given a fixed
@@ -82,47 +84,44 @@ MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer);
// and quaternion is a 4-tuple that will contain the resulting quaternion.
// The implementation may be used with auto-differentiation up to the first
// derivative, higher derivatives may have unexpected results near the origin.
-template<typename T>
+template <typename T>
void AngleAxisToQuaternion(const T* angle_axis, T* quaternion);
// Convert a quaternion to the equivalent combined axis-angle representation.
// The value quaternion must be a unit quaternion - it is not normalized first,
// and angle_axis will be filled with a value whose norm is the angle of
// rotation in radians, and whose direction is the axis of rotation.
-// The implemention may be used with auto-differentiation up to the first
+// The implementation may be used with auto-differentiation up to the first
// derivative, higher derivatives may have unexpected results near the origin.
-template<typename T>
+template <typename T>
void QuaternionToAngleAxis(const T* quaternion, T* angle_axis);
// Conversions between 3x3 rotation matrix (in column major order) and
-// quaternion rotation representations. Templated for use with
+// quaternion rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToQuaternion(const T* R, T* quaternion);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
- const MatrixAdapter<const T, row_stride, col_stride>& R,
- T* quaternion);
+ const MatrixAdapter<const T, row_stride, col_stride>& R, T* quaternion);
// Conversions between 3x3 rotation matrix (in column major order) and
-// axis-angle rotation representations. Templated for use with
+// axis-angle rotation representations. Templated for use with
// autodifferentiation.
template <typename T>
void RotationMatrixToAngleAxis(const T* R, T* angle_axis);
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
- const MatrixAdapter<const T, row_stride, col_stride>& R,
- T* angle_axis);
+ const MatrixAdapter<const T, row_stride, col_stride>& R, T* angle_axis);
template <typename T>
void AngleAxisToRotationMatrix(const T* angle_axis, T* R);
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
- const T* angle_axis,
- const MatrixAdapter<T, row_stride, col_stride>& R);
+ const T* angle_axis, const MatrixAdapter<T, row_stride, col_stride>& R);
// Conversions between 3x3 rotation matrix (in row major order) and
// Euler angle (in degrees) rotation representations.
@@ -135,8 +134,7 @@ void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R);
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
- const T* euler,
- const MatrixAdapter<T, row_stride, col_stride>& R);
+ const T* euler, const MatrixAdapter<T, row_stride, col_stride>& R);
// Convert a 4-vector to a 3x3 scaled rotation matrix.
//
@@ -157,25 +155,23 @@ void EulerAnglesToRotationMatrix(
// such that det(Q) = 1 and Q*Q' = I
//
// WARNING: The rotation matrix is ROW MAJOR
-template <typename T> inline
-void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
+template <typename T>
+inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]);
-template <typename T, int row_stride, int col_stride> inline
-void QuaternionToScaledRotation(
- const T q[4],
- const MatrixAdapter<T, row_stride, col_stride>& R);
+template <typename T, int row_stride, int col_stride>
+inline void QuaternionToScaledRotation(
+ const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R);
// Same as above except that the rotation matrix is normalized by the
// Frobenius norm, so that R * R' = I (and det(R) = 1).
//
// WARNING: The rotation matrix is ROW MAJOR
-template <typename T> inline
-void QuaternionToRotation(const T q[4], T R[3 * 3]);
+template <typename T>
+inline void QuaternionToRotation(const T q[4], T R[3 * 3]);
-template <typename T, int row_stride, int col_stride> inline
-void QuaternionToRotation(
- const T q[4],
- const MatrixAdapter<T, row_stride, col_stride>& R);
+template <typename T, int row_stride, int col_stride>
+inline void QuaternionToRotation(
+ const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R);
// Rotates a point pt by a quaternion q:
//
@@ -185,37 +181,54 @@ void QuaternionToRotation(
// write the transform as (something)*pt + pt, as is clear from the
// formula below. If you pass in a quaternion with |q|^2 = 2 then you
// WILL NOT get back 2 times the result you get for a unit quaternion.
-template <typename T> inline
-void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
+//
+// Inplace rotation is not supported. pt and result must point to different
+// memory locations, otherwise the result will be undefined.
+template <typename T>
+inline void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
// With this function you do not need to assume that q has unit norm.
// It does assume that the norm is non-zero.
-template <typename T> inline
-void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
+//
+// Inplace rotation is not supported. pt and result must point to different
+// memory locations, otherwise the result will be undefined.
+template <typename T>
+inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]);
// zw = z * w, where * is the Quaternion product between 4 vectors.
-template<typename T> inline
-void QuaternionProduct(const T z[4], const T w[4], T zw[4]);
+//
+// Inplace quaternion product is not supported. The resulting quaternion zw must
+// not share the memory with the input quaternion z and w, otherwise the result
+// will be undefined.
+template <typename T>
+inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]);
// xy = x cross y;
-template<typename T> inline
-void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]);
+//
+// Inplace cross product is not supported. The resulting vector x_cross_y must
+// not share the memory with the input vectors x and y, otherwise the result
+// will be undefined.
+template <typename T>
+inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]);
-template<typename T> inline
-T DotProduct(const T x[3], const T y[3]);
+template <typename T>
+inline T DotProduct(const T x[3], const T y[3]);
// y = R(angle_axis) * x;
-template<typename T> inline
-void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
+//
+// Inplace rotation is not supported. pt and result must point to different
+// memory locations, otherwise the result will be undefined.
+template <typename T>
+inline void AngleAxisRotatePoint(const T angle_axis[3],
+ const T pt[3],
+ T result[3]);
// --- IMPLEMENTATION
-template<typename T, int row_stride, int col_stride>
+template <typename T, int row_stride, int col_stride>
struct MatrixAdapter {
T* pointer_;
- explicit MatrixAdapter(T* pointer)
- : pointer_(pointer)
- {}
+ explicit MatrixAdapter(T* pointer) : pointer_(pointer) {}
T& operator()(int r, int c) const {
return pointer_[r * row_stride + c * col_stride];
@@ -232,7 +245,7 @@ MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer) {
return MatrixAdapter<T, 3, 1>(pointer);
}
-template<typename T>
+template <typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
const T& a0 = angle_axis[0];
const T& a1 = angle_axis[1];
@@ -261,7 +274,7 @@ inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
}
}
-template<typename T>
+template <typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
const T& q1 = quaternion[1];
const T& q2 = quaternion[2];
@@ -288,9 +301,8 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
// = atan(-sin(theta), -cos(theta))
//
const T two_theta =
- T(2.0) * ((cos_theta < 0.0)
- ? atan2(-sin_theta, -cos_theta)
- : atan2(sin_theta, cos_theta));
+ T(2.0) * ((cos_theta < T(0.0)) ? atan2(-sin_theta, -cos_theta)
+ : atan2(sin_theta, cos_theta));
const T k = two_theta / sin_theta;
angle_axis[0] = q1 * k;
angle_axis[1] = q2 * k;
@@ -316,8 +328,7 @@ void RotationMatrixToQuaternion(const T* R, T* angle_axis) {
// Ken Shoemake, 1987 SIGGRAPH course notes
template <typename T, int row_stride, int col_stride>
void RotationMatrixToQuaternion(
- const MatrixAdapter<const T, row_stride, col_stride>& R,
- T* quaternion) {
+ const MatrixAdapter<const T, row_stride, col_stride>& R, T* quaternion) {
const T trace = R(0, 0) + R(1, 1) + R(2, 2);
if (trace >= 0.0) {
T t = sqrt(trace + T(1.0));
@@ -359,8 +370,7 @@ inline void RotationMatrixToAngleAxis(const T* R, T* angle_axis) {
template <typename T, int row_stride, int col_stride>
void RotationMatrixToAngleAxis(
- const MatrixAdapter<const T, row_stride, col_stride>& R,
- T* angle_axis) {
+ const MatrixAdapter<const T, row_stride, col_stride>& R, T* angle_axis) {
T quaternion[4];
RotationMatrixToQuaternion(R, quaternion);
QuaternionToAngleAxis(quaternion, angle_axis);
@@ -374,8 +384,7 @@ inline void AngleAxisToRotationMatrix(const T* angle_axis, T* R) {
template <typename T, int row_stride, int col_stride>
void AngleAxisToRotationMatrix(
- const T* angle_axis,
- const MatrixAdapter<T, row_stride, col_stride>& R) {
+ const T* angle_axis, const MatrixAdapter<T, row_stride, col_stride>& R) {
static const T kOne = T(1.0);
const T theta2 = DotProduct(angle_axis, angle_axis);
if (theta2 > T(std::numeric_limits<double>::epsilon())) {
@@ -390,6 +399,7 @@ void AngleAxisToRotationMatrix(
const T costheta = cos(theta);
const T sintheta = sin(theta);
+ // clang-format off
R(0, 0) = costheta + wx*wx*(kOne - costheta);
R(1, 0) = wz*sintheta + wx*wy*(kOne - costheta);
R(2, 0) = -wy*sintheta + wx*wz*(kOne - costheta);
@@ -399,15 +409,16 @@ void AngleAxisToRotationMatrix(
R(0, 2) = wy*sintheta + wx*wz*(kOne - costheta);
R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta);
R(2, 2) = costheta + wz*wz*(kOne - costheta);
+ // clang-format on
} else {
// Near zero, we switch to using the first order Taylor expansion.
- R(0, 0) = kOne;
- R(1, 0) = angle_axis[2];
+ R(0, 0) = kOne;
+ R(1, 0) = angle_axis[2];
R(2, 0) = -angle_axis[1];
R(0, 1) = -angle_axis[2];
- R(1, 1) = kOne;
- R(2, 1) = angle_axis[0];
- R(0, 2) = angle_axis[1];
+ R(1, 1) = kOne;
+ R(2, 1) = angle_axis[0];
+ R(0, 2) = angle_axis[1];
R(1, 2) = -angle_axis[0];
R(2, 2) = kOne;
}
@@ -422,8 +433,7 @@ inline void EulerAnglesToRotationMatrix(const T* euler,
template <typename T, int row_stride, int col_stride>
void EulerAnglesToRotationMatrix(
- const T* euler,
- const MatrixAdapter<T, row_stride, col_stride>& R) {
+ const T* euler, const MatrixAdapter<T, row_stride, col_stride>& R) {
const double kPi = 3.14159265358979323846;
const T degrees_to_radians(kPi / 180.0);
@@ -438,28 +448,27 @@ void EulerAnglesToRotationMatrix(
const T c3 = cos(pitch);
const T s3 = sin(pitch);
- R(0, 0) = c1*c2;
- R(0, 1) = -s1*c3 + c1*s2*s3;
- R(0, 2) = s1*s3 + c1*s2*c3;
+ R(0, 0) = c1 * c2;
+ R(0, 1) = -s1 * c3 + c1 * s2 * s3;
+ R(0, 2) = s1 * s3 + c1 * s2 * c3;
- R(1, 0) = s1*c2;
- R(1, 1) = c1*c3 + s1*s2*s3;
- R(1, 2) = -c1*s3 + s1*s2*c3;
+ R(1, 0) = s1 * c2;
+ R(1, 1) = c1 * c3 + s1 * s2 * s3;
+ R(1, 2) = -c1 * s3 + s1 * s2 * c3;
R(2, 0) = -s2;
- R(2, 1) = c2*s3;
- R(2, 2) = c2*c3;
+ R(2, 1) = c2 * s3;
+ R(2, 2) = c2 * c3;
}
-template <typename T> inline
-void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
+template <typename T>
+inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) {
QuaternionToScaledRotation(q, RowMajorAdapter3x3(R));
}
-template <typename T, int row_stride, int col_stride> inline
-void QuaternionToScaledRotation(
- const T q[4],
- const MatrixAdapter<T, row_stride, col_stride>& R) {
+template <typename T, int row_stride, int col_stride>
+inline void QuaternionToScaledRotation(
+ const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R) {
// Make convenient names for elements of q.
T a = q[0];
T b = q[1];
@@ -478,22 +487,24 @@ void QuaternionToScaledRotation(
T cd = c * d;
T dd = d * d;
- R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd); // NOLINT
- R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab); // NOLINT
- R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd; // NOLINT
+ // clang-format off
+ R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd);
+ R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab);
+ R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd;
+ // clang-format on
}
-template <typename T> inline
-void QuaternionToRotation(const T q[4], T R[3 * 3]) {
+template <typename T>
+inline void QuaternionToRotation(const T q[4], T R[3 * 3]) {
QuaternionToRotation(q, RowMajorAdapter3x3(R));
}
-template <typename T, int row_stride, int col_stride> inline
-void QuaternionToRotation(const T q[4],
- const MatrixAdapter<T, row_stride, col_stride>& R) {
+template <typename T, int row_stride, int col_stride>
+inline void QuaternionToRotation(
+ const T q[4], const MatrixAdapter<T, row_stride, col_stride>& R) {
QuaternionToScaledRotation(q, R);
- T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3];
+ T normalizer = q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3];
normalizer = T(1) / normalizer;
for (int i = 0; i < 3; ++i) {
@@ -503,8 +514,13 @@ void QuaternionToRotation(const T q[4],
}
}
-template <typename T> inline
-void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
+template <typename T>
+inline void UnitQuaternionRotatePoint(const T q[4],
+ const T pt[3],
+ T result[3]) {
+ DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
+
+ // clang-format off
const T t2 = q[0] * q[1];
const T t3 = q[0] * q[2];
const T t4 = q[0] * q[3];
@@ -517,50 +533,63 @@ void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]; // NOLINT
result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]; // NOLINT
result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT
+ // clang-format on
}
-template <typename T> inline
-void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
+template <typename T>
+inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) {
+ DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
+
// 'scale' is 1 / norm(q).
- const T scale = T(1) / sqrt(q[0] * q[0] +
- q[1] * q[1] +
- q[2] * q[2] +
- q[3] * q[3]);
+ const T scale =
+ T(1) / sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);
// Make unit-norm version of q.
const T unit[4] = {
- scale * q[0],
- scale * q[1],
- scale * q[2],
- scale * q[3],
+ scale * q[0],
+ scale * q[1],
+ scale * q[2],
+ scale * q[3],
};
UnitQuaternionRotatePoint(unit, pt, result);
}
-template<typename T> inline
-void QuaternionProduct(const T z[4], const T w[4], T zw[4]) {
+template <typename T>
+inline void QuaternionProduct(const T z[4], const T w[4], T zw[4]) {
+ DCHECK_NE(z, zw) << "Inplace quaternion product is not supported.";
+ DCHECK_NE(w, zw) << "Inplace quaternion product is not supported.";
+
+ // clang-format off
zw[0] = z[0] * w[0] - z[1] * w[1] - z[2] * w[2] - z[3] * w[3];
zw[1] = z[0] * w[1] + z[1] * w[0] + z[2] * w[3] - z[3] * w[2];
zw[2] = z[0] * w[2] - z[1] * w[3] + z[2] * w[0] + z[3] * w[1];
zw[3] = z[0] * w[3] + z[1] * w[2] - z[2] * w[1] + z[3] * w[0];
+ // clang-format on
}
// xy = x cross y;
-template<typename T> inline
-void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) {
+template <typename T>
+inline void CrossProduct(const T x[3], const T y[3], T x_cross_y[3]) {
+ DCHECK_NE(x, x_cross_y) << "Inplace cross product is not supported.";
+ DCHECK_NE(y, x_cross_y) << "Inplace cross product is not supported.";
+
x_cross_y[0] = x[1] * y[2] - x[2] * y[1];
x_cross_y[1] = x[2] * y[0] - x[0] * y[2];
x_cross_y[2] = x[0] * y[1] - x[1] * y[0];
}
-template<typename T> inline
-T DotProduct(const T x[3], const T y[3]) {
+template <typename T>
+inline T DotProduct(const T x[3], const T y[3]) {
return (x[0] * y[0] + x[1] * y[1] + x[2] * y[2]);
}
-template<typename T> inline
-void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
+template <typename T>
+inline void AngleAxisRotatePoint(const T angle_axis[3],
+ const T pt[3],
+ T result[3]) {
+ DCHECK_NE(pt, result) << "Inplace rotation is not supported.";
+
const T theta2 = DotProduct(angle_axis, angle_axis);
if (theta2 > T(std::numeric_limits<double>::epsilon())) {
// Away from zero, use the rodriguez formula
@@ -576,17 +605,17 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
const T theta = sqrt(theta2);
const T costheta = cos(theta);
const T sintheta = sin(theta);
- const T theta_inverse = 1.0 / theta;
+ const T theta_inverse = T(1.0) / theta;
- const T w[3] = { angle_axis[0] * theta_inverse,
- angle_axis[1] * theta_inverse,
- angle_axis[2] * theta_inverse };
+ const T w[3] = {angle_axis[0] * theta_inverse,
+ angle_axis[1] * theta_inverse,
+ angle_axis[2] * theta_inverse};
// Explicitly inlined evaluation of the cross product for
// performance reasons.
- const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1],
- w[2] * pt[0] - w[0] * pt[2],
- w[0] * pt[1] - w[1] * pt[0] };
+ const T w_cross_pt[3] = {w[1] * pt[2] - w[2] * pt[1],
+ w[2] * pt[0] - w[0] * pt[2],
+ w[0] * pt[1] - w[1] * pt[0]};
const T tmp =
(w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta);
@@ -611,9 +640,9 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
//
// Explicitly inlined evaluation of the cross product for
// performance reasons.
- const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
- angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
- angle_axis[0] * pt[1] - angle_axis[1] * pt[0] };
+ const T w_cross_pt[3] = {angle_axis[1] * pt[2] - angle_axis[2] * pt[1],
+ angle_axis[2] * pt[0] - angle_axis[0] * pt[2],
+ angle_axis[0] * pt[1] - angle_axis[1] * pt[0]};
result[0] = pt[0] + w_cross_pt[0];
result[1] = pt[1] + w_cross_pt[1];
diff --git a/extern/ceres/include/ceres/sized_cost_function.h b/extern/ceres/include/ceres/sized_cost_function.h
index b10421e81be..8e92f1b796c 100644
--- a/extern/ceres/include/ceres/sized_cost_function.h
+++ b/extern/ceres/include/ceres/sized_cost_function.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -38,55 +38,30 @@
#ifndef CERES_PUBLIC_SIZED_COST_FUNCTION_H_
#define CERES_PUBLIC_SIZED_COST_FUNCTION_H_
-#include "ceres/types.h"
#include "ceres/cost_function.h"
+#include "ceres/types.h"
#include "glog/logging.h"
+#include "internal/parameter_dims.h"
namespace ceres {
-template<int kNumResiduals,
- int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0,
- int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0>
+template <int kNumResiduals, int... Ns>
class SizedCostFunction : public CostFunction {
public:
- SizedCostFunction() {
- CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC)
- << "Cost functions must have at least one residual block.";
+ static_assert(kNumResiduals > 0 || kNumResiduals == DYNAMIC,
+ "Cost functions must have at least one residual block.");
+ static_assert(internal::StaticParameterDims<Ns...>::kIsValid,
+ "Invalid parameter block dimension detected. Each parameter "
+ "block dimension must be bigger than zero.");
- // This block breaks the 80 column rule to keep it somewhat readable.
- CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) ||
- ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || // NOLINT
- ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) // NOLINT
- << "Zero block cannot precede a non-zero block. Block sizes are "
- << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", "
- << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", "
- << N8 << ", " << N9;
+ using ParameterDims = internal::StaticParameterDims<Ns...>;
+ SizedCostFunction() {
set_num_residuals(kNumResiduals);
-
-#define CERES_ADD_PARAMETER_BLOCK(N) \
- if (N) mutable_parameter_block_sizes()->push_back(N);
- CERES_ADD_PARAMETER_BLOCK(N0);
- CERES_ADD_PARAMETER_BLOCK(N1);
- CERES_ADD_PARAMETER_BLOCK(N2);
- CERES_ADD_PARAMETER_BLOCK(N3);
- CERES_ADD_PARAMETER_BLOCK(N4);
- CERES_ADD_PARAMETER_BLOCK(N5);
- CERES_ADD_PARAMETER_BLOCK(N6);
- CERES_ADD_PARAMETER_BLOCK(N7);
- CERES_ADD_PARAMETER_BLOCK(N8);
- CERES_ADD_PARAMETER_BLOCK(N9);
-#undef CERES_ADD_PARAMETER_BLOCK
+ *mutable_parameter_block_sizes() = std::vector<int32_t>{Ns...};
}
- virtual ~SizedCostFunction() { }
+ virtual ~SizedCostFunction() {}
// Subclasses must implement Evaluate().
};
diff --git a/extern/ceres/include/ceres/solver.h b/extern/ceres/include/ceres/solver.h
index 0d77d242dfe..62631744fe2 100644
--- a/extern/ceres/include/ceres/solver.h
+++ b/extern/ceres/include/ceres/solver.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,20 +32,21 @@
#define CERES_PUBLIC_SOLVER_H_
#include <cmath>
+#include <memory>
#include <string>
+#include <unordered_set>
#include <vector>
+
#include "ceres/crs_matrix.h"
-#include "ceres/internal/macros.h"
+#include "ceres/internal/disable_warnings.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
+#include "ceres/problem.h"
#include "ceres/types.h"
-#include "ceres/internal/disable_warnings.h"
namespace ceres {
-class Problem;
-
// Interface for non-linear least squares solvers.
class CERES_EXPORT Solver {
public:
@@ -57,87 +58,6 @@ class CERES_EXPORT Solver {
//
// The constants are defined inside types.h
struct CERES_EXPORT Options {
- // Default constructor that sets up a generic sparse problem.
- Options() {
- minimizer_type = TRUST_REGION;
- line_search_direction_type = LBFGS;
- line_search_type = WOLFE;
- nonlinear_conjugate_gradient_type = FLETCHER_REEVES;
- max_lbfgs_rank = 20;
- use_approximate_eigenvalue_bfgs_scaling = false;
- line_search_interpolation_type = CUBIC;
- min_line_search_step_size = 1e-9;
- line_search_sufficient_function_decrease = 1e-4;
- max_line_search_step_contraction = 1e-3;
- min_line_search_step_contraction = 0.6;
- max_num_line_search_step_size_iterations = 20;
- max_num_line_search_direction_restarts = 5;
- line_search_sufficient_curvature_decrease = 0.9;
- max_line_search_step_expansion = 10.0;
- trust_region_strategy_type = LEVENBERG_MARQUARDT;
- dogleg_type = TRADITIONAL_DOGLEG;
- use_nonmonotonic_steps = false;
- max_consecutive_nonmonotonic_steps = 5;
- max_num_iterations = 50;
- max_solver_time_in_seconds = 1e9;
- num_threads = 1;
- initial_trust_region_radius = 1e4;
- max_trust_region_radius = 1e16;
- min_trust_region_radius = 1e-32;
- min_relative_decrease = 1e-3;
- min_lm_diagonal = 1e-6;
- max_lm_diagonal = 1e32;
- max_num_consecutive_invalid_steps = 5;
- function_tolerance = 1e-6;
- gradient_tolerance = 1e-10;
- parameter_tolerance = 1e-8;
-
-#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) && !defined(CERES_ENABLE_LGPL_CODE) // NOLINT
- linear_solver_type = DENSE_QR;
-#else
- linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-#endif
-
- preconditioner_type = JACOBI;
- visibility_clustering_type = CANONICAL_VIEWS;
- dense_linear_algebra_library_type = EIGEN;
-
- // Choose a default sparse linear algebra library in the order:
- //
- // SUITE_SPARSE > CX_SPARSE > EIGEN_SPARSE > NO_SPARSE
- sparse_linear_algebra_library_type = NO_SPARSE;
-#if !defined(CERES_NO_SUITESPARSE)
- sparse_linear_algebra_library_type = SUITE_SPARSE;
-#else
- #if !defined(CERES_NO_CXSPARSE)
- sparse_linear_algebra_library_type = CX_SPARSE;
- #else
- #if defined(CERES_USE_EIGEN_SPARSE)
- sparse_linear_algebra_library_type = EIGEN_SPARSE;
- #endif
- #endif
-#endif
-
- num_linear_solver_threads = 1;
- use_explicit_schur_complement = false;
- use_postordering = false;
- dynamic_sparsity = false;
- min_linear_solver_iterations = 0;
- max_linear_solver_iterations = 500;
- eta = 1e-1;
- jacobi_scaling = true;
- use_inner_iterations = false;
- inner_iteration_tolerance = 1e-3;
- logging_type = PER_MINIMIZER_ITERATION;
- minimizer_progress_to_stdout = false;
- trust_region_problem_dump_directory = "/tmp";
- trust_region_problem_dump_format_type = TEXTFILE;
- check_gradients = false;
- gradient_check_relative_precision = 1e-8;
- gradient_check_numeric_derivative_relative_step_size = 1e-6;
- update_state_every_iteration = false;
- }
-
// Returns true if the options struct has a valid
// configuration. Returns false otherwise, and fills in *error
// with a message describing the problem.
@@ -157,7 +77,7 @@ class CERES_EXPORT Solver {
// exactly or inexactly.
//
// 2. The trust region approach approximates the objective
- // function using using a model function (often a quadratic) over
+ // function using a model function (often a quadratic) over
// a subset of the search space known as the trust region. If the
// model function succeeds in minimizing the true objective
// function the trust region is expanded; conversely, otherwise it
@@ -168,11 +88,12 @@ class CERES_EXPORT Solver {
// trust region methods first choose a step size (the size of the
// trust region) and then a step direction while line search methods
// first choose a step direction and then a step size.
- MinimizerType minimizer_type;
+ MinimizerType minimizer_type = TRUST_REGION;
- LineSearchDirectionType line_search_direction_type;
- LineSearchType line_search_type;
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+ LineSearchDirectionType line_search_direction_type = LBFGS;
+ LineSearchType line_search_type = WOLFE;
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
+ FLETCHER_REEVES;
// The LBFGS hessian approximation is a low rank approximation to
// the inverse of the Hessian matrix. The rank of the
@@ -197,8 +118,8 @@ class CERES_EXPORT Solver {
// method, please see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with
- // Limited Storage". Mathematics of Computation 35 (151): 773–782.
- int max_lbfgs_rank;
+ // Limited Storage". Mathematics of Computation 35 (151): 773-782.
+ int max_lbfgs_rank = 20;
// As part of the (L)BFGS update step (BFGS) / right-multiply step (L-BFGS),
// the initial inverse Hessian approximation is taken to be the Identity.
@@ -220,18 +141,18 @@ class CERES_EXPORT Solver {
// Oren S.S., Self-scaling variable metric (SSVM) algorithms
// Part II: Implementation and experiments, Management Science,
// 20(5), 863-874, 1974.
- bool use_approximate_eigenvalue_bfgs_scaling;
+ bool use_approximate_eigenvalue_bfgs_scaling = false;
// Degree of the polynomial used to approximate the objective
// function. Valid values are BISECTION, QUADRATIC and CUBIC.
//
// BISECTION corresponds to pure backtracking search with no
// interpolation.
- LineSearchInterpolationType line_search_interpolation_type;
+ LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If during the line search, the step_size falls below this
// value, it is truncated to zero.
- double min_line_search_step_size;
+ double min_line_search_step_size = 1e-9;
// Line search parameters.
@@ -245,7 +166,7 @@ class CERES_EXPORT Solver {
//
// f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
//
- double line_search_sufficient_function_decrease;
+ double line_search_sufficient_function_decrease = 1e-4;
// In each iteration of the line search,
//
@@ -255,7 +176,7 @@ class CERES_EXPORT Solver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double max_line_search_step_contraction;
+ double max_line_search_step_contraction = 1e-3;
// In each iteration of the line search,
//
@@ -265,19 +186,25 @@ class CERES_EXPORT Solver {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double min_line_search_step_contraction;
+ double min_line_search_step_contraction = 0.6;
- // Maximum number of trial step size iterations during each line search,
- // if a step size satisfying the search conditions cannot be found within
- // this number of trials, the line search will terminate.
- int max_num_line_search_step_size_iterations;
+ // Maximum number of trial step size iterations during each line
+ // search, if a step size satisfying the search conditions cannot
+ // be found within this number of trials, the line search will
+ // terminate.
+
+ // The minimum allowed value is 0 for trust region minimizer and 1
+ // otherwise. If 0 is specified for the trust region minimizer,
+ // then line search will not be used when solving constrained
+ // optimization problems.
+ int max_num_line_search_step_size_iterations = 20;
// Maximum number of restarts of the line search direction algorithm before
// terminating the optimization. Restarts of the line search direction
// algorithm occur when the current algorithm fails to produce a new descent
// direction. This typically indicates a numerical failure, or a breakdown
// in the validity of the approximations used.
- int max_num_line_search_direction_restarts;
+ int max_num_line_search_direction_restarts = 5;
// The strong Wolfe conditions consist of the Armijo sufficient
// decrease condition, and an additional requirement that the
@@ -290,7 +217,7 @@ class CERES_EXPORT Solver {
//
// Where f() is the line search objective and f'() is the derivative
// of f w.r.t step_size (d f / d step_size).
- double line_search_sufficient_curvature_decrease;
+ double line_search_sufficient_curvature_decrease = 0.9;
// During the bracketing phase of the Wolfe search, the step size is
// increased until either a point satisfying the Wolfe conditions is
@@ -301,12 +228,12 @@ class CERES_EXPORT Solver {
// new_step_size <= max_step_expansion * step_size.
//
// By definition for expansion, max_step_expansion > 1.0.
- double max_line_search_step_expansion;
+ double max_line_search_step_expansion = 10.0;
- TrustRegionStrategyType trust_region_strategy_type;
+ TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
// Type of dogleg strategy to use.
- DoglegType dogleg_type;
+ DoglegType dogleg_type = TRADITIONAL_DOGLEG;
// The classical trust region methods are descent methods, in that
// they only accept a point if it strictly reduces the value of
@@ -317,7 +244,7 @@ class CERES_EXPORT Solver {
// in the value of the objective function.
//
// This is because allowing for non-decreasing objective function
- // values in a princpled manner allows the algorithm to "jump over
+ // values in a principled manner allows the algorithm to "jump over
// boulders" as the method is not restricted to move into narrow
// valleys while preserving its convergence properties.
//
@@ -333,30 +260,30 @@ class CERES_EXPORT Solver {
// than the minimum value encountered over the course of the
// optimization, the final parameters returned to the user are the
// ones corresponding to the minimum cost over all iterations.
- bool use_nonmonotonic_steps;
- int max_consecutive_nonmonotonic_steps;
+ bool use_nonmonotonic_steps = false;
+ int max_consecutive_nonmonotonic_steps = 5;
// Maximum number of iterations for the minimizer to run for.
- int max_num_iterations;
+ int max_num_iterations = 50;
// Maximum time for which the minimizer should run for.
- double max_solver_time_in_seconds;
+ double max_solver_time_in_seconds = 1e9;
// Number of threads used by Ceres for evaluating the cost and
// jacobians.
- int num_threads;
+ int num_threads = 1;
// Trust region minimizer settings.
- double initial_trust_region_radius;
- double max_trust_region_radius;
+ double initial_trust_region_radius = 1e4;
+ double max_trust_region_radius = 1e16;
// Minimizer terminates when the trust region radius becomes
// smaller than this value.
- double min_trust_region_radius;
+ double min_trust_region_radius = 1e-32;
// Lower bound for the relative decrease before a step is
// accepted.
- double min_relative_decrease;
+ double min_relative_decrease = 1e-3;
// For the Levenberg-Marquadt algorithm, the scaled diagonal of
// the normal equations J'J is used to control the size of the
@@ -365,46 +292,75 @@ class CERES_EXPORT Solver {
// fail. max_lm_diagonal and min_lm_diagonal, clamp the values of
// diag(J'J) from above and below. In the normal course of
// operation, the user should not have to modify these parameters.
- double min_lm_diagonal;
- double max_lm_diagonal;
+ double min_lm_diagonal = 1e-6;
+ double max_lm_diagonal = 1e32;
// Sometimes due to numerical conditioning problems or linear
// solver flakiness, the trust region strategy may return a
// numerically invalid step that can be fixed by reducing the
// trust region size. So the TrustRegionMinimizer allows for a few
// successive invalid steps before it declares NUMERICAL_FAILURE.
- int max_num_consecutive_invalid_steps;
+ int max_num_consecutive_invalid_steps = 5;
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
//
- double function_tolerance;
+ double function_tolerance = 1e-6;
// Minimizer terminates when
//
// max_i |x - Project(Plus(x, -g(x))| < gradient_tolerance
//
// This value should typically be 1e-4 * function_tolerance.
- double gradient_tolerance;
+ double gradient_tolerance = 1e-10;
// Minimizer terminates when
//
// |step|_2 <= parameter_tolerance * ( |x|_2 + parameter_tolerance)
//
- double parameter_tolerance;
+ double parameter_tolerance = 1e-8;
// Linear least squares solver options -------------------------------------
- LinearSolverType linear_solver_type;
+ LinearSolverType linear_solver_type =
+#if defined(CERES_NO_SPARSE)
+ DENSE_QR;
+#else
+ SPARSE_NORMAL_CHOLESKY;
+#endif
// Type of preconditioner to use with the iterative linear solvers.
- PreconditionerType preconditioner_type;
+ PreconditionerType preconditioner_type = JACOBI;
// Type of clustering algorithm to use for visibility based
// preconditioning. This option is used only when the
// preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
- VisibilityClusteringType visibility_clustering_type;
+ VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
+
+ // Subset preconditioner is a preconditioner for problems with
+ // general sparsity. Given a subset of residual blocks of a
+ // problem, it uses the corresponding subset of the rows of the
+ // Jacobian to construct a preconditioner.
+ //
+ // Suppose the Jacobian J has been horizontally partitioned as
+ //
+ // J = [P]
+ // [Q]
+ //
+ // Where, Q is the set of rows corresponding to the residual
+ // blocks in residual_blocks_for_subset_preconditioner.
+ //
+ // The preconditioner is the inverse of the matrix Q'Q.
+ //
+ // Obviously, the efficacy of the preconditioner depends on how
+ // well the matrix Q approximates J'J, or how well the chosen
+ // residual blocks approximate the non-linear least squares
+ // problem.
+ //
+ // If Solver::Options::preconditioner_type == SUBSET, then
+ // residual_blocks_for_subset_preconditioner must be non-empty.
+ std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner;
// Ceres supports using multiple dense linear algebra libraries
// for dense matrix factorizations. Currently EIGEN and LAPACK are
@@ -413,22 +369,28 @@ class CERES_EXPORT Solver {
// available.
//
// This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and
- // DENSE_SCHUR solvers. For small to moderate sized probem EIGEN
+ // DENSE_SCHUR solvers. For small to moderate sized problem EIGEN
// is a fine choice but for large problems, an optimized LAPACK +
// BLAS implementation can make a substantial difference in
// performance.
- DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
+ DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
// Ceres supports using multiple sparse linear algebra libraries
// for sparse matrix ordering and factorizations. Currently,
// SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
// whether they are linked into Ceres at build time.
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
-
- // Number of threads used by Ceres to solve the Newton
- // step. Currently only the SPARSE_SCHUR solver is capable of
- // using this setting.
- int num_linear_solver_threads;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+#if !defined(CERES_NO_SUITESPARSE)
+ SUITE_SPARSE;
+#elif defined(CERES_USE_EIGEN_SPARSE)
+ EIGEN_SPARSE;
+#elif !defined(CERES_NO_CXSPARSE)
+ CX_SPARSE;
+#elif !defined(CERES_NO_ACCELERATE_SPARSE)
+ ACCELERATE_SPARSE;
+#else
+ NO_SPARSE;
+#endif
// The order in which variables are eliminated in a linear solver
// can have a significant of impact on the efficiency and accuracy
@@ -456,7 +418,7 @@ class CERES_EXPORT Solver {
//
// Given such an ordering, Ceres ensures that the parameter blocks in
// the lowest numbered group are eliminated first, and then the
- // parmeter blocks in the next lowest numbered group and so on. Within
+ // parameter blocks in the next lowest numbered group and so on. Within
// each group, Ceres is free to order the parameter blocks as it
// chooses.
//
@@ -496,13 +458,13 @@ class CERES_EXPORT Solver {
// the parameter blocks into two groups, one for the points and one
// for the cameras, where the group containing the points has an id
// smaller than the group containing cameras.
- shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
+ std::shared_ptr<ParameterBlockOrdering> linear_solver_ordering;
// Use an explicitly computed Schur complement matrix with
// ITERATIVE_SCHUR.
//
// By default this option is disabled and ITERATIVE_SCHUR
- // evaluates evaluates matrix-vector products between the Schur
+ // evaluates matrix-vector products between the Schur
// complement and a vector implicitly by exploiting the algebraic
// expression for the Schur complement.
//
@@ -519,7 +481,7 @@ class CERES_EXPORT Solver {
//
// NOTE: This option can only be used with the SCHUR_JACOBI
// preconditioner.
- bool use_explicit_schur_complement;
+ bool use_explicit_schur_complement = false;
// Sparse Cholesky factorization algorithms use a fill-reducing
// ordering to permute the columns of the Jacobian matrix. There
@@ -540,7 +502,7 @@ class CERES_EXPORT Solver {
// reordering algorithm which has slightly better runtime
// performance at the expense of an extra copy of the Jacobian
// matrix. Setting use_postordering to true enables this tradeoff.
- bool use_postordering;
+ bool use_postordering = false;
// Some non-linear least squares problems are symbolically dense but
// numerically sparse. i.e. at any given state only a small number
@@ -554,8 +516,32 @@ class CERES_EXPORT Solver {
// then it is probably best to keep this false, otherwise it will
// likely lead to worse performance.
- // This settings affects the SPARSE_NORMAL_CHOLESKY solver.
- bool dynamic_sparsity;
+ // This settings only affects the SPARSE_NORMAL_CHOLESKY solver.
+ bool dynamic_sparsity = false;
+
+ // TODO(sameeragarwal): Further expand the documentation for the
+ // following two options.
+
+ // NOTE1: EXPERIMENTAL FEATURE, UNDER DEVELOPMENT, USE AT YOUR OWN RISK.
+ //
+ // If use_mixed_precision_solves is true, the Gauss-Newton matrix
+ // is computed in double precision, but its factorization is
+ // computed in single precision. This can result in significant
+ // time and memory savings at the cost of some accuracy in the
+ // Gauss-Newton step. Iterative refinement is used to recover some
+ // of this accuracy back.
+ //
+ // If use_mixed_precision_solves is true, we recommend setting
+ // max_num_refinement_iterations to 2-3.
+ //
+ // NOTE2: The following two options are currently only applicable
+ // if sparse_linear_algebra_library_type is EIGEN_SPARSE and
+ // linear_solver_type is SPARSE_NORMAL_CHOLESKY, or SPARSE_SCHUR.
+ bool use_mixed_precision_solves = false;
+
+ // Number steps of the iterative refinement process to run when
+ // computing the Gauss-Newton step.
+ int max_num_refinement_iterations = 0;
// Some non-linear least squares problems have additional
// structure in the way the parameter blocks interact that it is
@@ -583,7 +569,7 @@ class CERES_EXPORT Solver {
// known as Wiberg's algorithm.
//
// Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
- // Problems, SIAM Reviews, 22(3), 1980) present an analyis of
+ // Problems, SIAM Reviews, 22(3), 1980) present an analysis of
// various algorithms for solving separable non-linear least
// squares problems and refer to "Variable Projection" as
// Algorithm I in their paper.
@@ -615,7 +601,7 @@ class CERES_EXPORT Solver {
// displays better convergence behaviour per iteration. Setting
// Solver::Options::num_threads to the maximum number possible is
// highly recommended.
- bool use_inner_iterations;
+ bool use_inner_iterations = false;
// If inner_iterations is true, then the user has two choices.
//
@@ -627,7 +613,7 @@ class CERES_EXPORT Solver {
// the lower numbered groups are optimized before the higher
// number groups. Each group must be an independent set. Not
// all parameter blocks need to be present in the ordering.
- shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
+ std::shared_ptr<ParameterBlockOrdering> inner_iteration_ordering;
// Generally speaking, inner iterations make significant progress
// in the early stages of the solve and then their contribution
@@ -638,17 +624,17 @@ class CERES_EXPORT Solver {
// inner iterations drops below inner_iteration_tolerance, the use
// of inner iterations in subsequent trust region minimizer
// iterations is disabled.
- double inner_iteration_tolerance;
+ double inner_iteration_tolerance = 1e-3;
// Minimum number of iterations for which the linear solver should
// run, even if the convergence criterion is satisfied.
- int min_linear_solver_iterations;
+ int min_linear_solver_iterations = 0;
// Maximum number of iterations for which the linear solver should
// run. If the solver does not converge in less than
// max_linear_solver_iterations, then it returns MAX_ITERATIONS,
// as its termination type.
- int max_linear_solver_iterations;
+ int max_linear_solver_iterations = 500;
// Forcing sequence parameter. The truncated Newton solver uses
// this number to control the relative accuracy with which the
@@ -658,21 +644,21 @@ class CERES_EXPORT Solver {
// it to terminate the iterations when
//
// (Q_i - Q_{i-1})/Q_i < eta/i
- double eta;
+ double eta = 1e-1;
// Normalize the jacobian using Jacobi scaling before calling
// the linear least squares solver.
- bool jacobi_scaling;
+ bool jacobi_scaling = true;
// Logging options ---------------------------------------------------------
- LoggingType logging_type;
+ LoggingType logging_type = PER_MINIMIZER_ITERATION;
// By default the Minimizer progress is logged to VLOG(1), which
// is sent to STDERR depending on the vlog level. If this flag is
// set to true, and logging_type is not SILENT, the logging output
// is sent to STDOUT.
- bool minimizer_progress_to_stdout;
+ bool minimizer_progress_to_stdout = false;
// List of iterations at which the minimizer should dump the trust
// region problem. Useful for testing and benchmarking. If empty
@@ -683,8 +669,8 @@ class CERES_EXPORT Solver {
// non-empty if trust_region_minimizer_iterations_to_dump is
// non-empty and trust_region_problem_dump_format_type is not
// CONSOLE.
- std::string trust_region_problem_dump_directory;
- DumpFormatType trust_region_problem_dump_format_type;
+ std::string trust_region_problem_dump_directory = "/tmp";
+ DumpFormatType trust_region_problem_dump_format_type = TEXTFILE;
// Finite differences options ----------------------------------------------
@@ -694,12 +680,12 @@ class CERES_EXPORT Solver {
// etc), then also computing it using finite differences. The
// results are compared, and if they differ substantially, details
// are printed to the log.
- bool check_gradients;
+ bool check_gradients = false;
// Relative precision to check for in the gradient checker. If the
// relative difference between an element in a jacobian exceeds
// this number, then the jacobian for that cost term is dumped.
- double gradient_check_relative_precision;
+ double gradient_check_relative_precision = 1e-8;
// WARNING: This option only applies to the to the numeric
// differentiation used for checking the user provided derivatives
@@ -723,7 +709,7 @@ class CERES_EXPORT Solver {
//
// The finite differencing is done along each dimension. The
// reason to use a relative (rather than absolute) step size is
- // that this way, numeric differentation works for functions where
+ // that this way, numeric differentiation works for functions where
// the arguments are typically large (e.g. 1e9) and when the
// values are small (e.g. 1e-5). It is possible to construct
// "torture cases" which break this finite difference heuristic,
@@ -733,14 +719,21 @@ class CERES_EXPORT Solver {
// theory a good choice is sqrt(eps) * x, which for doubles means
// about 1e-8 * x. However, I have found this number too
// optimistic. This number should be exposed for users to change.
- double gradient_check_numeric_derivative_relative_step_size;
-
- // If true, the user's parameter blocks are updated at the end of
- // every Minimizer iteration, otherwise they are updated when the
- // Minimizer terminates. This is useful if, for example, the user
- // wishes to visualize the state of the optimization every
- // iteration.
- bool update_state_every_iteration;
+ double gradient_check_numeric_derivative_relative_step_size = 1e-6;
+
+ // If update_state_every_iteration is true, then Ceres Solver will
+ // guarantee that at the end of every iteration and before any
+ // user provided IterationCallback is called, the parameter blocks
+ // are updated to the current best solution found by the
+ // solver. Thus the IterationCallback can inspect the values of
+ // the parameter blocks for purposes of computation, visualization
+ // or termination.
+
+ // If update_state_every_iteration is false then there is no such
+ // guarantee, and user provided IterationCallbacks should not
+ // expect to look at the parameter blocks and interpret their
+ // values.
+ bool update_state_every_iteration = false;
// Callbacks that are executed at the end of each iteration of the
// Minimizer. An iteration may terminate midway, either due to
@@ -749,20 +742,18 @@ class CERES_EXPORT Solver {
// executed.
// Callbacks are executed in the order that they are specified in
- // this vector. By default, parameter blocks are updated only at
- // the end of the optimization, i.e when the Minimizer
- // terminates. This behaviour is controlled by
- // update_state_every_variable. If the user wishes to have access
- // to the update parameter blocks when his/her callbacks are
- // executed, then set update_state_every_iteration to true.
+ // this vector. By default, parameter blocks are updated only at the
+ // end of the optimization, i.e when the Minimizer terminates. This
+ // behaviour is controlled by update_state_every_iteration. If the
+ // user wishes to have access to the updated parameter blocks when
+ // his/her callbacks are executed, then set
+ // update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
std::vector<IterationCallback*> callbacks;
};
struct CERES_EXPORT Summary {
- Summary();
-
// A brief one line description of the state of the solver after
// termination.
std::string BriefReport() const;
@@ -774,25 +765,25 @@ class CERES_EXPORT Solver {
bool IsSolutionUsable() const;
// Minimizer summary -------------------------------------------------
- MinimizerType minimizer_type;
+ MinimizerType minimizer_type = TRUST_REGION;
- TerminationType termination_type;
+ TerminationType termination_type = FAILURE;
// Reason why the solver terminated.
- std::string message;
+ std::string message = "ceres::Solve was not called.";
// Cost of the problem (value of the objective function) before
// the optimization.
- double initial_cost;
+ double initial_cost = -1.0;
// Cost of the problem (value of the objective function) after the
// optimization.
- double final_cost;
+ double final_cost = -1.0;
// The part of the total cost that comes from residual blocks that
// were held fixed by the preprocessor because all the parameter
// blocks that they depend on were fixed.
- double fixed_cost;
+ double fixed_cost = -1.0;
// IterationSummary for each minimizer iteration in order.
std::vector<IterationSummary> iterations;
@@ -801,22 +792,22 @@ class CERES_EXPORT Solver {
// accepted. Unless use_non_monotonic_steps is true this is also
// the number of steps in which the objective function value/cost
// went down.
- int num_successful_steps;
+ int num_successful_steps = -1;
// Number of minimizer iterations in which the step was rejected
// either because it did not reduce the cost enough or the step
// was not numerically valid.
- int num_unsuccessful_steps;
+ int num_unsuccessful_steps = -1;
// Number of times inner iterations were performed.
- int num_inner_iteration_steps;
+ int num_inner_iteration_steps = -1;
// Total number of iterations inside the line search algorithm
// across all invocations. We call these iterations "steps" to
// distinguish them from the outer iterations of the line search
// and trust region minimizer algorithms which call the line
// search algorithm as a subroutine.
- int num_line_search_steps;
+ int num_line_search_steps = -1;
// All times reported below are wall times.
@@ -824,31 +815,42 @@ class CERES_EXPORT Solver {
// occurs, Ceres performs a number of preprocessing steps. These
// include error checks, memory allocations, and reorderings. This
// time is accounted for as preprocessing time.
- double preprocessor_time_in_seconds;
+ double preprocessor_time_in_seconds = -1.0;
// Time spent in the TrustRegionMinimizer.
- double minimizer_time_in_seconds;
+ double minimizer_time_in_seconds = -1.0;
// After the Minimizer is finished, some time is spent in
// re-evaluating residuals etc. This time is accounted for in the
// postprocessor time.
- double postprocessor_time_in_seconds;
+ double postprocessor_time_in_seconds = -1.0;
// Some total of all time spent inside Ceres when Solve is called.
- double total_time_in_seconds;
+ double total_time_in_seconds = -1.0;
// Time (in seconds) spent in the linear solver computing the
// trust region step.
- double linear_solver_time_in_seconds;
+ double linear_solver_time_in_seconds = -1.0;
+
+ // Number of times the Newton step was computed by solving a
+ // linear system. This does not include linear solves used by
+ // inner iterations.
+ int num_linear_solves = -1;
// Time (in seconds) spent evaluating the residual vector.
- double residual_evaluation_time_in_seconds;
+ double residual_evaluation_time_in_seconds = 1.0;
+
+ // Number of residual only evaluations.
+ int num_residual_evaluations = -1;
// Time (in seconds) spent evaluating the jacobian matrix.
- double jacobian_evaluation_time_in_seconds;
+ double jacobian_evaluation_time_in_seconds = -1.0;
+
+ // Number of Jacobian (and residual) evaluations.
+ int num_jacobian_evaluations = -1;
// Time (in seconds) spent doing inner iterations.
- double inner_iteration_time_in_seconds;
+ double inner_iteration_time_in_seconds = -1.0;
// Cumulative timing information for line searches performed as part of the
// solve. Note that in addition to the case when the Line Search minimizer
@@ -857,89 +859,89 @@ class CERES_EXPORT Solver {
// Time (in seconds) spent evaluating the univariate cost function as part
// of a line search.
- double line_search_cost_evaluation_time_in_seconds;
+ double line_search_cost_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent evaluating the gradient of the univariate cost
// function as part of a line search.
- double line_search_gradient_evaluation_time_in_seconds;
+ double line_search_gradient_evaluation_time_in_seconds = -1.0;
// Time (in seconds) spent minimizing the interpolating polynomial
// to compute the next candidate step size as part of a line search.
- double line_search_polynomial_minimization_time_in_seconds;
+ double line_search_polynomial_minimization_time_in_seconds = -1.0;
// Total time (in seconds) spent performing line searches.
- double line_search_total_time_in_seconds;
+ double line_search_total_time_in_seconds = -1.0;
// Number of parameter blocks in the problem.
- int num_parameter_blocks;
+ int num_parameter_blocks = -1;
- // Number of parameters in the probem.
- int num_parameters;
+ // Number of parameters in the problem.
+ int num_parameters = -1;
// Dimension of the tangent space of the problem (or the number of
// columns in the Jacobian for the problem). This is different
// from num_parameters if a parameter block is associated with a
// LocalParameterization
- int num_effective_parameters;
+ int num_effective_parameters = -1;
// Number of residual blocks in the problem.
- int num_residual_blocks;
+ int num_residual_blocks = -1;
// Number of residuals in the problem.
- int num_residuals;
+ int num_residuals = -1;
// Number of parameter blocks in the problem after the inactive
// and constant parameter blocks have been removed. A parameter
// block is inactive if no residual block refers to it.
- int num_parameter_blocks_reduced;
+ int num_parameter_blocks_reduced = -1;
// Number of parameters in the reduced problem.
- int num_parameters_reduced;
+ int num_parameters_reduced = -1;
// Dimension of the tangent space of the reduced problem (or the
// number of columns in the Jacobian for the reduced
// problem). This is different from num_parameters_reduced if a
// parameter block in the reduced problem is associated with a
// LocalParameterization.
- int num_effective_parameters_reduced;
+ int num_effective_parameters_reduced = -1;
// Number of residual blocks in the reduced problem.
- int num_residual_blocks_reduced;
+ int num_residual_blocks_reduced = -1;
// Number of residuals in the reduced problem.
- int num_residuals_reduced;
+ int num_residuals_reduced = -1;
// Is the reduced problem bounds constrained.
- bool is_constrained;
+ bool is_constrained = false;
// Number of threads specified by the user for Jacobian and
// residual evaluation.
- int num_threads_given;
+ int num_threads_given = -1;
// Number of threads actually used by the solver for Jacobian and
// residual evaluation. This number is not equal to
// num_threads_given if OpenMP is not available.
- int num_threads_used;
-
- // Number of threads specified by the user for solving the trust
- // region problem.
- int num_linear_solver_threads_given;
-
- // Number of threads actually used by the solver for solving the
- // trust region problem. This number is not equal to
- // num_threads_given if OpenMP is not available.
- int num_linear_solver_threads_used;
+ int num_threads_used = -1;
// Type of the linear solver requested by the user.
- LinearSolverType linear_solver_type_given;
-
+ LinearSolverType linear_solver_type_given =
+#if defined(CERES_NO_SPARSE)
+ DENSE_QR;
+#else
+ SPARSE_NORMAL_CHOLESKY;
+#endif
// Type of the linear solver actually used. This may be different
// from linear_solver_type_given if Ceres determines that the
// problem structure is not compatible with the linear solver
// requested or if the linear solver requested by the user is not
// available, e.g. The user requested SPARSE_NORMAL_CHOLESKY but
// no sparse linear algebra library was available.
- LinearSolverType linear_solver_type_used;
+ LinearSolverType linear_solver_type_used =
+#if defined(CERES_NO_SPARSE)
+ DENSE_QR;
+#else
+ SPARSE_NORMAL_CHOLESKY;
+#endif
// Size of the elimination groups given by the user as hints to
// the linear solver.
@@ -953,15 +955,29 @@ class CERES_EXPORT Solver {
// parameter blocks.
std::vector<int> linear_solver_ordering_used;
+ // For Schur type linear solvers, this string describes the
+ // template specialization which was detected in the problem and
+ // should be used.
+ std::string schur_structure_given;
+
+ // This is the Schur template specialization that was actually
+ // instantiated and used. The reason this will be different from
+ // schur_structure_given is because the corresponding template
+ // specialization does not exist.
+ //
+ // Template specializations can be added to ceres by editing
+ // internal/ceres/generate_template_specializations.py
+ std::string schur_structure_used;
+
// True if the user asked for inner iterations to be used as part
// of the optimization.
- bool inner_iterations_given;
+ bool inner_iterations_given = false;
// True if the user asked for inner iterations to be used as part
// of the optimization and the problem structure was such that
// they were actually performed. e.g., in a problem with just one
// parameter block, inner iterations are not performed.
- bool inner_iterations_used;
+ bool inner_iterations_used = false;
// Size of the parameter groups given by the user for performing
// inner iterations.
@@ -976,57 +992,59 @@ class CERES_EXPORT Solver {
std::vector<int> inner_iteration_ordering_used;
// Type of the preconditioner requested by the user.
- PreconditionerType preconditioner_type_given;
+ PreconditionerType preconditioner_type_given = IDENTITY;
// Type of the preconditioner actually used. This may be different
// from linear_solver_type_given if Ceres determines that the
// problem structure is not compatible with the linear solver
// requested or if the linear solver requested by the user is not
// available.
- PreconditionerType preconditioner_type_used;
+ PreconditionerType preconditioner_type_used = IDENTITY;
// Type of clustering algorithm used for visibility based
// preconditioning. Only meaningful when the preconditioner_type
// is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL.
- VisibilityClusteringType visibility_clustering_type;
+ VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
// Type of trust region strategy.
- TrustRegionStrategyType trust_region_strategy_type;
+ TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
// Type of dogleg strategy used for solving the trust region
// problem.
- DoglegType dogleg_type;
+ DoglegType dogleg_type = TRADITIONAL_DOGLEG;
// Type of the dense linear algebra library used.
- DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
+ DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
// Type of the sparse linear algebra library used.
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+ NO_SPARSE;
// Type of line search direction used.
- LineSearchDirectionType line_search_direction_type;
+ LineSearchDirectionType line_search_direction_type = LBFGS;
// Type of the line search algorithm used.
- LineSearchType line_search_type;
+ LineSearchType line_search_type = WOLFE;
// When performing line search, the degree of the polynomial used
// to approximate the objective function.
- LineSearchInterpolationType line_search_interpolation_type;
+ LineSearchInterpolationType line_search_interpolation_type = CUBIC;
// If the line search direction is NONLINEAR_CONJUGATE_GRADIENT,
// then this indicates the particular variant of non-linear
// conjugate gradient used.
- NonlinearConjugateGradientType nonlinear_conjugate_gradient_type;
+ NonlinearConjugateGradientType nonlinear_conjugate_gradient_type =
+ FLETCHER_REEVES;
// If the type of the line search direction is LBFGS, then this
// indicates the rank of the Hessian approximation.
- int max_lbfgs_rank;
+ int max_lbfgs_rank = -1;
};
// Once a least squares problem has been built, this function takes
// the problem and optimizes it based on the values of the options
// parameters. Upon return, a detailed summary of the work performed
- // by the preprocessor, the non-linear minmizer and the linear
+ // by the preprocessor, the non-linear minimizer and the linear
// solver are reported in the summary object.
virtual void Solve(const Options& options,
Problem* problem,
@@ -1035,8 +1053,8 @@ class CERES_EXPORT Solver {
// Helper function which avoids going through the interface.
CERES_EXPORT void Solve(const Solver::Options& options,
- Problem* problem,
- Solver::Summary* summary);
+ Problem* problem,
+ Solver::Summary* summary);
} // namespace ceres
diff --git a/extern/ceres/include/ceres/tiny_solver.h b/extern/ceres/include/ceres/tiny_solver.h
new file mode 100644
index 00000000000..47db5824dc5
--- /dev/null
+++ b/extern/ceres/include/ceres/tiny_solver.h
@@ -0,0 +1,368 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+//
+// WARNING WARNING WARNING
+// WARNING WARNING WARNING Tiny solver is experimental and will change.
+// WARNING WARNING WARNING
+//
+// A tiny least squares solver using Levenberg-Marquardt, intended for solving
+// small dense problems with low latency and low overhead. The implementation
+// takes care to do all allocation up front, so that no memory is allocated
+// during solving. This is especially useful when solving many similar problems;
+// for example, inverse pixel distortion for every pixel on a grid.
+//
+// Note: This code has no dependencies beyond Eigen, including on other parts of
+// Ceres, so it is possible to take this file alone and put it in another
+// project without the rest of Ceres.
+//
+// Algorithm based off of:
+//
+// [1] K. Madsen, H. Nielsen, O. Tingleff.
+// Methods for Non-linear Least Squares Problems.
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+
+#ifndef CERES_PUBLIC_TINY_SOLVER_H_
+#define CERES_PUBLIC_TINY_SOLVER_H_
+
+#include <cassert>
+#include <cmath>
+
+#include "Eigen/Dense"
+
+namespace ceres {
+
+// To use tiny solver, create a class or struct that allows computing the cost
+// function (described below). This is similar to a ceres::CostFunction, but is
+// different to enable statically allocating all memory for the solver
+// (specifically, enum sizes). Key parts are the Scalar typedef, the enums to
+// describe problem sizes (needed to remove all heap allocations), and the
+// operator() overload to evaluate the cost and (optionally) jacobians.
+//
+// struct TinySolverCostFunctionTraits {
+// typedef double Scalar;
+// enum {
+// NUM_RESIDUALS = <int> OR Eigen::Dynamic,
+// NUM_PARAMETERS = <int> OR Eigen::Dynamic,
+// };
+// bool operator()(const double* parameters,
+// double* residuals,
+// double* jacobian) const;
+//
+// int NumResiduals() const; -- Needed if NUM_RESIDUALS == Eigen::Dynamic.
+// int NumParameters() const; -- Needed if NUM_PARAMETERS == Eigen::Dynamic.
+// };
+//
+// For operator(), the size of the objects is:
+//
+// double* parameters -- NUM_PARAMETERS or NumParameters()
+// double* residuals -- NUM_RESIDUALS or NumResiduals()
+// double* jacobian -- NUM_RESIDUALS * NUM_PARAMETERS in column-major format
+// (Eigen's default); or NULL if no jacobian requested.
+//
+// An example (fully statically sized):
+//
+// struct MyCostFunctionExample {
+// typedef double Scalar;
+// enum {
+// NUM_RESIDUALS = 2,
+// NUM_PARAMETERS = 3,
+// };
+// bool operator()(const double* parameters,
+// double* residuals,
+// double* jacobian) const {
+// residuals[0] = x + 2*y + 4*z;
+// residuals[1] = y * z;
+// if (jacobian) {
+// jacobian[0 * 2 + 0] = 1; // First column (x).
+// jacobian[0 * 2 + 1] = 0;
+//
+// jacobian[1 * 2 + 0] = 2; // Second column (y).
+// jacobian[1 * 2 + 1] = z;
+//
+// jacobian[2 * 2 + 0] = 4; // Third column (z).
+// jacobian[2 * 2 + 1] = y;
+// }
+// return true;
+// }
+// };
+//
+// The solver supports either statically or dynamically sized cost
+// functions. If the number of residuals is dynamic then the Function
+// must define:
+//
+// int NumResiduals() const;
+//
+// If the number of parameters is dynamic then the Function must
+// define:
+//
+// int NumParameters() const;
+//
+template <typename Function,
+ typename LinearSolver =
+ Eigen::LDLT<Eigen::Matrix<typename Function::Scalar,
+ Function::NUM_PARAMETERS,
+ Function::NUM_PARAMETERS>>>
+class TinySolver {
+ public:
+ // This class needs to have an Eigen aligned operator new as it contains
+ // fixed-size Eigen types.
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ enum {
+ NUM_RESIDUALS = Function::NUM_RESIDUALS,
+ NUM_PARAMETERS = Function::NUM_PARAMETERS
+ };
+ typedef typename Function::Scalar Scalar;
+ typedef typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1> Parameters;
+
+ enum Status {
+ GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
+ RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / (||x|| + eps)
+ COST_TOO_SMALL, // eps > ||f(x)||^2 / 2
+ HIT_MAX_ITERATIONS,
+
+ // TODO(sameeragarwal): Deal with numerical failures.
+ };
+
+ struct Options {
+ Scalar gradient_tolerance = 1e-10; // eps > max(J'*f(x))
+    Scalar parameter_tolerance = 1e-8;  // eps > ||dx|| / (||x|| + eps)
+    Scalar cost_threshold =             // eps > ||f(x)||^2 / 2
+ std::numeric_limits<Scalar>::epsilon();
+ Scalar initial_trust_region_radius = 1e4;
+ int max_num_iterations = 50;
+ };
+
+ struct Summary {
+ Scalar initial_cost = -1; // 1/2 ||f(x)||^2
+ Scalar final_cost = -1; // 1/2 ||f(x)||^2
+ Scalar gradient_max_norm = -1; // max(J'f(x))
+ int iterations = -1;
+ Status status = HIT_MAX_ITERATIONS;
+ };
+
+ bool Update(const Function& function, const Parameters& x) {
+ if (!function(x.data(), error_.data(), jacobian_.data())) {
+ return false;
+ }
+
+ error_ = -error_;
+
+ // On the first iteration, compute a diagonal (Jacobi) scaling
+ // matrix, which we store as a vector.
+ if (summary.iterations == 0) {
+ // jacobi_scaling = 1 / (1 + diagonal(J'J))
+ //
+ // 1 is added to the denominator to regularize small diagonal
+ // entries.
+ jacobi_scaling_ = 1.0 / (1.0 + jacobian_.colwise().norm().array());
+ }
+
+ // This explicitly computes the normal equations, which is numerically
+ // unstable. Nevertheless, it is often good enough and is fast.
+ //
+ // TODO(sameeragarwal): Refactor this to allow for DenseQR
+ // factorization.
+ jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal();
+ jtj_ = jacobian_.transpose() * jacobian_;
+ g_ = jacobian_.transpose() * error_;
+ summary.gradient_max_norm = g_.array().abs().maxCoeff();
+ cost_ = error_.squaredNorm() / 2;
+ return true;
+ }
+
+ const Summary& Solve(const Function& function, Parameters* x_and_min) {
+ Initialize<NUM_RESIDUALS, NUM_PARAMETERS>(function);
+ assert(x_and_min);
+ Parameters& x = *x_and_min;
+ summary = Summary();
+ summary.iterations = 0;
+
+ // TODO(sameeragarwal): Deal with failure here.
+ Update(function, x);
+ summary.initial_cost = cost_;
+ summary.final_cost = cost_;
+
+ if (summary.gradient_max_norm < options.gradient_tolerance) {
+ summary.status = GRADIENT_TOO_SMALL;
+ return summary;
+ }
+
+ if (cost_ < options.cost_threshold) {
+ summary.status = COST_TOO_SMALL;
+ return summary;
+ }
+
+ Scalar u = 1.0 / options.initial_trust_region_radius;
+ Scalar v = 2;
+
+ for (summary.iterations = 1;
+ summary.iterations < options.max_num_iterations;
+ summary.iterations++) {
+ jtj_regularized_ = jtj_;
+ const Scalar min_diagonal = 1e-6;
+ const Scalar max_diagonal = 1e32;
+ for (int i = 0; i < lm_diagonal_.rows(); ++i) {
+ lm_diagonal_[i] = std::sqrt(
+ u * std::min(std::max(jtj_(i, i), min_diagonal), max_diagonal));
+ jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i];
+ }
+
+ // TODO(sameeragarwal): Check for failure and deal with it.
+ linear_solver_.compute(jtj_regularized_);
+ lm_step_ = linear_solver_.solve(g_);
+ dx_ = jacobi_scaling_.asDiagonal() * lm_step_;
+
+ // Adding parameter_tolerance to x.norm() ensures that this
+ // works if x is near zero.
+ const Scalar parameter_tolerance =
+ options.parameter_tolerance *
+ (x.norm() + options.parameter_tolerance);
+ if (dx_.norm() < parameter_tolerance) {
+ summary.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+ }
+ x_new_ = x + dx_;
+
+ // TODO(keir): Add proper handling of errors from user eval of cost
+ // functions.
+ function(&x_new_[0], &f_x_new_[0], NULL);
+
+ const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm());
+
+ // TODO(sameeragarwal): Better more numerically stable evaluation.
+ const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_);
+
+ // rho is the ratio of the actual reduction in error to the reduction
+ // in error that would be obtained if the problem was linear. See [1]
+ // for details.
+ Scalar rho(cost_change / model_cost_change);
+ if (rho > 0) {
+ // Accept the Levenberg-Marquardt step because the linear
+ // model fits well.
+ x = x_new_;
+
+ // TODO(sameeragarwal): Deal with failure.
+ Update(function, x);
+ if (summary.gradient_max_norm < options.gradient_tolerance) {
+ summary.status = GRADIENT_TOO_SMALL;
+ break;
+ }
+
+ if (cost_ < options.cost_threshold) {
+ summary.status = COST_TOO_SMALL;
+ break;
+ }
+
+ Scalar tmp = Scalar(2 * rho - 1);
+ u = u * std::max(1 / 3., 1 - tmp * tmp * tmp);
+ v = 2;
+ continue;
+ }
+
+ // Reject the update because either the normal equations failed to solve
+ // or the local linear model was not good (rho < 0). Instead, increase u
+ // to move closer to gradient descent.
+ u *= v;
+ v *= 2;
+ }
+
+ summary.final_cost = cost_;
+ return summary;
+ }
+
+ Options options;
+ Summary summary;
+
+ private:
+ // Preallocate everything, including temporary storage needed for solving the
+ // linear system. This allows reusing the intermediate storage across solves.
+ LinearSolver linear_solver_;
+ Scalar cost_;
+ Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_;
+ Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> error_, f_x_new_;
+ Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_;
+ Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_;
+
+ // The following definitions are needed for template metaprogramming.
+ template <bool Condition, typename T>
+ struct enable_if;
+
+ template <typename T>
+ struct enable_if<true, T> {
+ typedef T type;
+ };
+
+ // The number of parameters and residuals are dynamically sized.
+ template <int R, int P>
+ typename enable_if<(R == Eigen::Dynamic && P == Eigen::Dynamic), void>::type
+ Initialize(const Function& function) {
+ Initialize(function.NumResiduals(), function.NumParameters());
+ }
+
+ // The number of parameters is dynamically sized and the number of
+ // residuals is statically sized.
+ template <int R, int P>
+ typename enable_if<(R == Eigen::Dynamic && P != Eigen::Dynamic), void>::type
+ Initialize(const Function& function) {
+ Initialize(function.NumResiduals(), P);
+ }
+
+ // The number of parameters is statically sized and the number of
+ // residuals is dynamically sized.
+ template <int R, int P>
+ typename enable_if<(R != Eigen::Dynamic && P == Eigen::Dynamic), void>::type
+ Initialize(const Function& function) {
+ Initialize(R, function.NumParameters());
+ }
+
+ // The number of parameters and residuals are statically sized.
+ template <int R, int P>
+ typename enable_if<(R != Eigen::Dynamic && P != Eigen::Dynamic), void>::type
+ Initialize(const Function& /* function */) {}
+
+ void Initialize(int num_residuals, int num_parameters) {
+ dx_.resize(num_parameters);
+ x_new_.resize(num_parameters);
+ g_.resize(num_parameters);
+ jacobi_scaling_.resize(num_parameters);
+ lm_diagonal_.resize(num_parameters);
+ lm_step_.resize(num_parameters);
+ error_.resize(num_residuals);
+ f_x_new_.resize(num_residuals);
+ jacobian_.resize(num_residuals, num_parameters);
+ jtj_.resize(num_parameters, num_parameters);
+ jtj_regularized_.resize(num_parameters, num_parameters);
+ }
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_TINY_SOLVER_H_
diff --git a/extern/ceres/include/ceres/tiny_solver_autodiff_function.h b/extern/ceres/include/ceres/tiny_solver_autodiff_function.h
new file mode 100644
index 00000000000..b782f549cc1
--- /dev/null
+++ b/extern/ceres/include/ceres/tiny_solver_autodiff_function.h
@@ -0,0 +1,206 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+//
+// WARNING WARNING WARNING
+// WARNING WARNING WARNING Tiny solver is experimental and will change.
+// WARNING WARNING WARNING
+
+#ifndef CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_
+#define CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_
+
+#include <memory>
+#include <type_traits>
+
+#include "Eigen/Core"
+#include "ceres/jet.h"
+#include "ceres/types.h" // For kImpossibleValue.
+
+namespace ceres {
+
+// An adapter around autodiff-style CostFunctors to enable easier use of
+// TinySolver. See the example below showing how to use it:
+//
+// // Example for cost functor with static residual size.
+// // Same as an autodiff cost functor, but taking only 1 parameter.
+// struct MyFunctor {
+// template<typename T>
+// bool operator()(const T* const parameters, T* residuals) const {
+// const T& x = parameters[0];
+// const T& y = parameters[1];
+// const T& z = parameters[2];
+// residuals[0] = x + 2.*y + 4.*z;
+// residuals[1] = y * z;
+// return true;
+// }
+// };
+//
+// typedef TinySolverAutoDiffFunction<MyFunctor, 2, 3>
+// AutoDiffFunction;
+//
+// MyFunctor my_functor;
+// AutoDiffFunction f(my_functor);
+//
+// Vec3 x = ...;
+// TinySolver<AutoDiffFunction> solver;
+// solver.Solve(f, &x);
+//
+// // Example for cost functor with dynamic residual size.
+// // NumResiduals() supplies dynamic size of residuals.
+// // Same functionality as in tiny_solver.h but with autodiff.
+// struct MyFunctorWithDynamicResiduals {
+// int NumResiduals() const {
+// return 2;
+// }
+//
+// template<typename T>
+// bool operator()(const T* const parameters, T* residuals) const {
+// const T& x = parameters[0];
+// const T& y = parameters[1];
+// const T& z = parameters[2];
+// residuals[0] = x + static_cast<T>(2.)*y + static_cast<T>(4.)*z;
+// residuals[1] = y * z;
+// return true;
+// }
+// };
+//
+// typedef TinySolverAutoDiffFunction<MyFunctorWithDynamicResiduals,
+// Eigen::Dynamic,
+// 3>
+// AutoDiffFunctionWithDynamicResiduals;
+//
+// MyFunctorWithDynamicResiduals my_functor_dyn;
+// AutoDiffFunctionWithDynamicResiduals f(my_functor_dyn);
+//
+// Vec3 x = ...;
+// TinySolver<AutoDiffFunctionWithDynamicResiduals> solver;
+// solver.Solve(f, &x);
+//
+// WARNING: The cost function adapter is not thread safe.
+template <typename CostFunctor,
+ int kNumResiduals,
+ int kNumParameters,
+ typename T = double>
+class TinySolverAutoDiffFunction {
+ public:
+ // This class needs to have an Eigen aligned operator new as it contains
+ // as a member a Jet type, which itself has a fixed-size Eigen type as member.
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ TinySolverAutoDiffFunction(const CostFunctor& cost_functor)
+ : cost_functor_(cost_functor) {
+ Initialize<kNumResiduals>(cost_functor);
+ }
+
+ typedef T Scalar;
+ enum {
+ NUM_PARAMETERS = kNumParameters,
+ NUM_RESIDUALS = kNumResiduals,
+ };
+
+ // This is similar to AutoDifferentiate(), but since there is only one
+ // parameter block it is easier to inline to avoid overhead.
+ bool operator()(const T* parameters, T* residuals, T* jacobian) const {
+ if (jacobian == NULL) {
+ // No jacobian requested, so just directly call the cost function with
+ // doubles, skipping jets and derivatives.
+ return cost_functor_(parameters, residuals);
+ }
+ // Initialize the input jets with passed parameters.
+ for (int i = 0; i < kNumParameters; ++i) {
+ jet_parameters_[i].a = parameters[i]; // Scalar part.
+ jet_parameters_[i].v.setZero(); // Derivative part.
+ jet_parameters_[i].v[i] = T(1.0);
+ }
+
+ // Initialize the output jets such that we can detect user errors.
+ for (int i = 0; i < num_residuals_; ++i) {
+ jet_residuals_[i].a = kImpossibleValue;
+ jet_residuals_[i].v.setConstant(kImpossibleValue);
+ }
+
+ // Execute the cost function, but with jets to find the derivative.
+ if (!cost_functor_(jet_parameters_, jet_residuals_.data())) {
+ return false;
+ }
+
+ // Copy the jacobian out of the derivative part of the residual jets.
+ Eigen::Map<Eigen::Matrix<T, kNumResiduals, kNumParameters>> jacobian_matrix(
+ jacobian, num_residuals_, kNumParameters);
+ for (int r = 0; r < num_residuals_; ++r) {
+ residuals[r] = jet_residuals_[r].a;
+ // Note that while this looks like a fast vectorized write, in practice it
+ // unfortunately thrashes the cache since the writes to the column-major
+ // jacobian are strided (e.g. rows are non-contiguous).
+ jacobian_matrix.row(r) = jet_residuals_[r].v;
+ }
+ return true;
+ }
+
+ int NumResiduals() const {
+ return num_residuals_; // Set by Initialize.
+ }
+
+ private:
+ const CostFunctor& cost_functor_;
+
+ // The number of residuals at runtime.
+  // This will be overridden if NUM_RESIDUALS == Eigen::Dynamic.
+ int num_residuals_ = kNumResiduals;
+
+ // To evaluate the cost function with jets, temporary storage is needed. These
+ // are the buffers that are used during evaluation; parameters for the input,
+ // and jet_residuals_ are where the final cost and derivatives end up.
+ //
+ // Since this buffer is used for evaluation, the adapter is not thread safe.
+ using JetType = Jet<T, kNumParameters>;
+ mutable JetType jet_parameters_[kNumParameters];
+ // Eigen::Matrix serves as static or dynamic container.
+ mutable Eigen::Matrix<JetType, kNumResiduals, 1> jet_residuals_;
+
+ // The number of residuals is dynamically sized and the number of
+ // parameters is statically sized.
+ template <int R>
+ typename std::enable_if<(R == Eigen::Dynamic), void>::type Initialize(
+ const CostFunctor& function) {
+ jet_residuals_.resize(function.NumResiduals());
+ num_residuals_ = function.NumResiduals();
+ }
+
+ // The number of parameters and residuals are statically sized.
+ template <int R>
+ typename std::enable_if<(R != Eigen::Dynamic), void>::type Initialize(
+ const CostFunctor& /* function */) {
+ num_residuals_ = kNumResiduals;
+ }
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_TINY_SOLVER_AUTODIFF_FUNCTION_H_
diff --git a/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h b/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h
new file mode 100644
index 00000000000..18ccb398f90
--- /dev/null
+++ b/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h
@@ -0,0 +1,142 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2019 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_
+#define CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_
+
+#include "Eigen/Core"
+#include "ceres/cost_function.h"
+#include "glog/logging.h"
+
+namespace ceres {
+
+// An adapter class that lets users of TinySolver use
+// ceres::CostFunction objects that have exactly one parameter block.
+//
+// The adapter allows for the number of residuals and the size of the
+// parameter block to be specified at compile or run-time.
+//
+// WARNING: This object is not thread-safe.
+//
+// Example usage:
+//
+// CostFunction* cost_function = ...
+//
+// Number of residuals and parameter block size known at compile time:
+//
+// TinySolverCostFunctionAdapter<kNumResiduals, kNumParameters>
+// cost_function_adapter(*cost_function);
+//
+// Number of residuals known at compile time and the parameter block
+// size not known at compile time.
+//
+// TinySolverCostFunctionAdapter<kNumResiduals, Eigen::Dynamic>
+// cost_function_adapter(*cost_function);
+//
+// Number of residuals not known at compile time and the parameter
+// block size known at compile time.
+//
+// TinySolverCostFunctionAdapter<Eigen::Dynamic, kParameterBlockSize>
+// cost_function_adapter(*cost_function);
+//
+// Number of residuals not known at compile time and the parameter
+// block size not known at compile time.
+//
+// TinySolverCostFunctionAdapter cost_function_adapter(*cost_function);
+//
+template <int kNumResiduals = Eigen::Dynamic,
+ int kNumParameters = Eigen::Dynamic>
+class TinySolverCostFunctionAdapter {
+ public:
+ typedef double Scalar;
+ enum ComponentSizeType {
+ NUM_PARAMETERS = kNumParameters,
+ NUM_RESIDUALS = kNumResiduals
+ };
+
+ // This struct needs to have an Eigen aligned operator new as it contains
+ // fixed-size Eigen types.
+ EIGEN_MAKE_ALIGNED_OPERATOR_NEW
+
+ TinySolverCostFunctionAdapter(const CostFunction& cost_function)
+ : cost_function_(cost_function) {
+ CHECK_EQ(cost_function_.parameter_block_sizes().size(), 1)
+ << "Only CostFunctions with exactly one parameter blocks are allowed.";
+
+ const int parameter_block_size = cost_function_.parameter_block_sizes()[0];
+ if (NUM_PARAMETERS == Eigen::Dynamic || NUM_RESIDUALS == Eigen::Dynamic) {
+ if (NUM_RESIDUALS != Eigen::Dynamic) {
+ CHECK_EQ(cost_function_.num_residuals(), NUM_RESIDUALS);
+ }
+ if (NUM_PARAMETERS != Eigen::Dynamic) {
+ CHECK_EQ(parameter_block_size, NUM_PARAMETERS);
+ }
+
+ row_major_jacobian_.resize(cost_function_.num_residuals(),
+ parameter_block_size);
+ }
+ }
+
+ bool operator()(const double* parameters,
+ double* residuals,
+ double* jacobian) const {
+ if (!jacobian) {
+ return cost_function_.Evaluate(&parameters, residuals, NULL);
+ }
+
+ double* jacobians[1] = {row_major_jacobian_.data()};
+ if (!cost_function_.Evaluate(&parameters, residuals, jacobians)) {
+ return false;
+ }
+
+ // The Function object used by TinySolver takes its Jacobian in a
+ // column-major layout, and the CostFunction objects use row-major
+ // Jacobian matrices. So the following bit of code does the
+ // conversion from row-major Jacobians to column-major Jacobians.
+ Eigen::Map<Eigen::Matrix<double, NUM_RESIDUALS, NUM_PARAMETERS>>
+ col_major_jacobian(jacobian, NumResiduals(), NumParameters());
+ col_major_jacobian = row_major_jacobian_;
+ return true;
+ }
+
+ int NumResiduals() const { return cost_function_.num_residuals(); }
+ int NumParameters() const {
+ return cost_function_.parameter_block_sizes()[0];
+ }
+
+ private:
+ const CostFunction& cost_function_;
+ mutable Eigen::Matrix<double, NUM_RESIDUALS, NUM_PARAMETERS, Eigen::RowMajor>
+ row_major_jacobian_;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_TINY_SOLVER_COST_FUNCTION_ADAPTER_H_
diff --git a/extern/ceres/include/ceres/types.h b/extern/ceres/include/ceres/types.h
index 2ea41803629..3a19b7333b2 100644
--- a/extern/ceres/include/ceres/types.h
+++ b/extern/ceres/include/ceres/types.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -39,15 +39,11 @@
#include <string>
-#include "ceres/internal/port.h"
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/port.h"
namespace ceres {
-// Basic integer types. These typedefs are in the Ceres namespace to avoid
-// conflicts with other packages having similar typedefs.
-typedef int int32;
-
// Argument type used in interfaces that can optionally take ownership
// of a passed in argument. If TAKE_OWNERSHIP is passed, the called
// object takes ownership of the pointer argument, and will call
@@ -116,10 +112,29 @@ enum PreconditionerType {
// the scene to determine the sparsity structure of the
// preconditioner. This is done using a clustering algorithm. The
// available visibility clustering algorithms are described below.
- //
- // Note: Requires SuiteSparse.
CLUSTER_JACOBI,
- CLUSTER_TRIDIAGONAL
+ CLUSTER_TRIDIAGONAL,
+
+  // Subset preconditioner is a general purpose preconditioner for
+  // linear least squares problems. Given a set of residual blocks,
+ // it uses the corresponding subset of the rows of the Jacobian to
+ // construct a preconditioner.
+ //
+ // Suppose the Jacobian J has been horizontally partitioned as
+ //
+ // J = [P]
+ // [Q]
+ //
+ // Where, Q is the set of rows corresponding to the residual
+ // blocks in residual_blocks_for_subset_preconditioner.
+ //
+ // The preconditioner is the inverse of the matrix Q'Q.
+ //
+ // Obviously, the efficacy of the preconditioner depends on how
+ // well the matrix Q approximates J'J, or how well the chosen
+ // residual blocks approximate the non-linear least squares
+ // problem.
+ SUBSET,
};
enum VisibilityClusteringType {
@@ -150,7 +165,7 @@ enum SparseLinearAlgebraLibraryType {
// minimum degree ordering.
SUITE_SPARSE,
- // A lightweight replacment for SuiteSparse, which does not require
+ // A lightweight replacement for SuiteSparse, which does not require
// a LAPACK/BLAS implementation. Consequently, its performance is
// also a bit lower than SuiteSparse.
CX_SPARSE,
@@ -159,6 +174,9 @@ enum SparseLinearAlgebraLibraryType {
// the Simplicial LDLT routines.
EIGEN_SPARSE,
+ // Apple's Accelerate framework sparse linear algebra routines.
+ ACCELERATE_SPARSE,
+
// No sparse linear solver should be used. This does not necessarily
// imply that Ceres was built without any sparse library, although that
// is the likely use case, merely that one should not be used.
@@ -202,7 +220,7 @@ enum LineSearchDirectionType {
// symmetric matrix but only N conditions are specified by the Secant
// equation. The requirement that the Hessian approximation be positive
// definite imposes another N additional constraints, but that still leaves
- // remaining degrees-of-freedom. (L)BFGS methods uniquely deteremine the
+ // remaining degrees-of-freedom. (L)BFGS methods uniquely determine the
// approximate Hessian by imposing the additional constraints that the
// approximation at the next iteration must be the 'closest' to the current
// approximation (the nature of how this proximity is measured is actually
@@ -222,26 +240,26 @@ enum LineSearchDirectionType {
// For more details on BFGS see:
//
// Broyden, C.G., "The Convergence of a Class of Double-rank Minimization
- // Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 76–90, 1970.
+ // Algorithms,"; J. Inst. Maths. Applics., Vol. 6, pp 76-90, 1970.
//
// Fletcher, R., "A New Approach to Variable Metric Algorithms,"
- // Computer Journal, Vol. 13, pp 317–322, 1970.
+ // Computer Journal, Vol. 13, pp 317-322, 1970.
//
// Goldfarb, D., "A Family of Variable Metric Updates Derived by Variational
- // Means," Mathematics of Computing, Vol. 24, pp 23–26, 1970.
+ // Means," Mathematics of Computing, Vol. 24, pp 23-26, 1970.
//
// Shanno, D.F., "Conditioning of Quasi-Newton Methods for Function
- // Minimization," Mathematics of Computing, Vol. 24, pp 647–656, 1970.
+ // Minimization," Mathematics of Computing, Vol. 24, pp 647-656, 1970.
//
// For more details on L-BFGS see:
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
- // Storage". Mathematics of Computation 35 (151): 773–782.
+ // Storage". Mathematics of Computation 35 (151): 773-782.
//
// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
// "Representations of Quasi-Newton Matrices and their use in
// Limited Memory Methods". Mathematical Programming 63 (4):
- // 129–156.
+ // 129-156.
//
// A general reference for both methods:
//
@@ -250,7 +268,7 @@ enum LineSearchDirectionType {
BFGS,
};
-// Nonliner conjugate gradient methods are a generalization of the
+// Nonlinear conjugate gradient methods are a generalization of the
// method of Conjugate Gradients for linear systems. The
// generalization can be carried out in a number of different ways
// leading to number of different rules for computing the search
@@ -420,10 +438,16 @@ enum LineSearchInterpolationType {
enum CovarianceAlgorithmType {
DENSE_SVD,
- SUITE_SPARSE_QR,
- EIGEN_SPARSE_QR
+ SPARSE_QR,
};
+// It is a near impossibility that user code generates this exact
+// value in normal operation, thus we will use it to fill arrays
+// before passing them to user code. If on return an element of the
+// array still contains this value, we will assume that the user code
+// did not write to that memory location.
+const double kImpossibleValue = 1e302;
+
CERES_EXPORT const char* LinearSolverTypeToString(
LinearSolverType type);
CERES_EXPORT bool StringToLinearSolverType(std::string value,
@@ -493,6 +517,13 @@ CERES_EXPORT bool StringToNumericDiffMethodType(
std::string value,
NumericDiffMethodType* type);
+CERES_EXPORT const char* LoggingTypeToString(LoggingType type);
+CERES_EXPORT bool StringtoLoggingType(std::string value, LoggingType* type);
+
+CERES_EXPORT const char* DumpFormatTypeToString(DumpFormatType type);
+CERES_EXPORT bool StringtoDumpFormatType(std::string value, DumpFormatType* type);
+CERES_EXPORT bool StringtoDumpFormatType(std::string value, LoggingType* type);
+
CERES_EXPORT const char* TerminationTypeToString(TerminationType type);
CERES_EXPORT bool IsSchurType(LinearSolverType type);
diff --git a/extern/ceres/include/ceres/version.h b/extern/ceres/include/ceres/version.h
index 2f1cc297a38..50aa2124e75 100644
--- a/extern/ceres/include/ceres/version.h
+++ b/extern/ceres/include/ceres/version.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -31,8 +31,8 @@
#ifndef CERES_PUBLIC_VERSION_H_
#define CERES_PUBLIC_VERSION_H_
-#define CERES_VERSION_MAJOR 1
-#define CERES_VERSION_MINOR 12
+#define CERES_VERSION_MAJOR 2
+#define CERES_VERSION_MINOR 0
#define CERES_VERSION_REVISION 0
// Classic CPP stringification; the extra level of indirection allows the
diff --git a/extern/ceres/internal/ceres/accelerate_sparse.cc b/extern/ceres/internal/ceres/accelerate_sparse.cc
new file mode 100644
index 00000000000..eb04e7113d7
--- /dev/null
+++ b/extern/ceres/internal/ceres/accelerate_sparse.cc
@@ -0,0 +1,289 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+#include "ceres/accelerate_sparse.h"
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+#define CASESTR(x) case x: return #x
+
+namespace ceres {
+namespace internal {
+
+namespace {
+const char* SparseStatusToString(SparseStatus_t status) {
+ switch (status) {
+ CASESTR(SparseStatusOK);
+ CASESTR(SparseFactorizationFailed);
+ CASESTR(SparseMatrixIsSingular);
+ CASESTR(SparseInternalError);
+ CASESTR(SparseParameterError);
+ CASESTR(SparseStatusReleased);
+ default:
+ return "UNKNOWN";
+ }
+}
+} // namespace.
+
+// Resizes workspace as required to contain at least required_size bytes
+// aligned to kAccelerateRequiredAlignment and returns a pointer to the
+// aligned start.
+void* ResizeForAccelerateAlignment(const size_t required_size,
+ std::vector<uint8_t> *workspace) {
+ // As per the Accelerate documentation, all workspace memory passed to the
+ // sparse solver functions must be 16-byte aligned.
+ constexpr int kAccelerateRequiredAlignment = 16;
+ // Although malloc() on macOS should always be 16-byte aligned, it is unclear
+ // if this holds for new(), or on other Apple OSs (phoneOS, watchOS etc).
+ // As such we assume it is not and use std::align() to create a (potentially
+ // offset) 16-byte aligned sub-buffer of the specified size within workspace.
+ workspace->resize(required_size + kAccelerateRequiredAlignment);
+ size_t size_from_aligned_start = workspace->size();
+ void* aligned_solve_workspace_start =
+ reinterpret_cast<void*>(workspace->data());
+ aligned_solve_workspace_start =
+ std::align(kAccelerateRequiredAlignment,
+ required_size,
+ aligned_solve_workspace_start,
+ size_from_aligned_start);
+ CHECK(aligned_solve_workspace_start != nullptr)
+ << "required_size: " << required_size
+ << ", workspace size: " << workspace->size();
+ return aligned_solve_workspace_start;
+}
+
+template<typename Scalar>
+void AccelerateSparse<Scalar>::Solve(NumericFactorization* numeric_factor,
+ DenseVector* rhs_and_solution) {
+ // From SparseSolve() documentation in Solve.h
+ const int required_size =
+ numeric_factor->solveWorkspaceRequiredStatic +
+ numeric_factor->solveWorkspaceRequiredPerRHS;
+ SparseSolve(*numeric_factor, *rhs_and_solution,
+ ResizeForAccelerateAlignment(required_size, &solve_workspace_));
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::ASSparseMatrix
+AccelerateSparse<Scalar>::CreateSparseMatrixTransposeView(
+ CompressedRowSparseMatrix* A) {
+ // Accelerate uses CSC as its sparse storage format whereas Ceres uses CSR.
+ // As this method returns the transpose view we can flip rows/cols to map
+ // from CSR to CSC^T.
+ //
+ // Accelerate's columnStarts is a long*, not an int*. These types might be
+ // different (e.g. ARM on iOS) so always make a copy.
+ column_starts_.resize(A->num_rows() +1); // +1 for final column length.
+ std::copy_n(A->rows(), column_starts_.size(), &column_starts_[0]);
+
+ ASSparseMatrix At;
+ At.structure.rowCount = A->num_cols();
+ At.structure.columnCount = A->num_rows();
+ At.structure.columnStarts = &column_starts_[0];
+ At.structure.rowIndices = A->mutable_cols();
+ At.structure.attributes.transpose = false;
+ At.structure.attributes.triangle = SparseUpperTriangle;
+ At.structure.attributes.kind = SparseSymmetric;
+ At.structure.attributes._reserved = 0;
+ At.structure.attributes._allocatedBySparse = 0;
+ At.structure.blockSize = 1;
+ if (std::is_same<Scalar, double>::value) {
+ At.data = reinterpret_cast<Scalar*>(A->mutable_values());
+ } else {
+ values_ =
+ ConstVectorRef(A->values(), A->num_nonzeros()).template cast<Scalar>();
+ At.data = values_.data();
+ }
+ return At;
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::SymbolicFactorization
+AccelerateSparse<Scalar>::AnalyzeCholesky(ASSparseMatrix* A) {
+ return SparseFactor(SparseFactorizationCholesky, A->structure);
+}
+
+template<typename Scalar>
+typename AccelerateSparse<Scalar>::NumericFactorization
+AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
+ SymbolicFactorization* symbolic_factor) {
+ return SparseFactor(*symbolic_factor, *A);
+}
+
+template<typename Scalar>
+void AccelerateSparse<Scalar>::Cholesky(ASSparseMatrix* A,
+ NumericFactorization* numeric_factor) {
+ // From SparseRefactor() documentation in Solve.h
+ const int required_size = std::is_same<Scalar, double>::value
+ ? numeric_factor->symbolicFactorization.workspaceSize_Double
+ : numeric_factor->symbolicFactorization.workspaceSize_Float;
+ return SparseRefactor(*A, numeric_factor,
+ ResizeForAccelerateAlignment(required_size,
+ &factorization_workspace_));
+}
+
+// Instantiate only for the specific template types required/supported s/t the
+// definition can be in the .cc file.
+template class AccelerateSparse<double>;
+template class AccelerateSparse<float>;
+
+template<typename Scalar>
+std::unique_ptr<SparseCholesky>
+AppleAccelerateCholesky<Scalar>::Create(OrderingType ordering_type) {
+ return std::unique_ptr<SparseCholesky>(
+ new AppleAccelerateCholesky<Scalar>(ordering_type));
+}
+
+template<typename Scalar>
+AppleAccelerateCholesky<Scalar>::AppleAccelerateCholesky(
+ const OrderingType ordering_type)
+ : ordering_type_(ordering_type) {}
+
+template<typename Scalar>
+AppleAccelerateCholesky<Scalar>::~AppleAccelerateCholesky() {
+ FreeSymbolicFactorization();
+ FreeNumericFactorization();
+}
+
+template<typename Scalar>
+CompressedRowSparseMatrix::StorageType
+AppleAccelerateCholesky<Scalar>::StorageType() const {
+ return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+}
+
+template<typename Scalar>
+LinearSolverTerminationType
+AppleAccelerateCholesky<Scalar>::Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) {
+ CHECK_EQ(lhs->storage_type(), StorageType());
+ if (lhs == NULL) {
+ *message = "Failure: Input lhs is NULL.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+ typename SparseTypesTrait<Scalar>::SparseMatrix as_lhs =
+ as_.CreateSparseMatrixTransposeView(lhs);
+
+ if (!symbolic_factor_) {
+ symbolic_factor_.reset(
+ new typename SparseTypesTrait<Scalar>::SymbolicFactorization(
+ as_.AnalyzeCholesky(&as_lhs)));
+ if (symbolic_factor_->status != SparseStatusOK) {
+ *message = StringPrintf(
+ "Apple Accelerate Failure : Symbolic factorisation failed: %s",
+ SparseStatusToString(symbolic_factor_->status));
+ FreeSymbolicFactorization();
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+ }
+
+ if (!numeric_factor_) {
+ numeric_factor_.reset(
+ new typename SparseTypesTrait<Scalar>::NumericFactorization(
+ as_.Cholesky(&as_lhs, symbolic_factor_.get())));
+ } else {
+ // Recycle memory from previous numeric factorization.
+ as_.Cholesky(&as_lhs, numeric_factor_.get());
+ }
+ if (numeric_factor_->status != SparseStatusOK) {
+ *message = StringPrintf(
+ "Apple Accelerate Failure : Numeric factorisation failed: %s",
+ SparseStatusToString(numeric_factor_->status));
+ FreeNumericFactorization();
+ return LINEAR_SOLVER_FAILURE;
+ }
+
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+template<typename Scalar>
+LinearSolverTerminationType
+AppleAccelerateCholesky<Scalar>::Solve(const double* rhs,
+ double* solution,
+ std::string* message) {
+ CHECK_EQ(numeric_factor_->status, SparseStatusOK)
+ << "Solve called without a call to Factorize first ("
+ << SparseStatusToString(numeric_factor_->status) << ").";
+ const int num_cols = numeric_factor_->symbolicFactorization.columnCount;
+
+ typename SparseTypesTrait<Scalar>::DenseVector as_rhs_and_solution;
+ as_rhs_and_solution.count = num_cols;
+ if (std::is_same<Scalar, double>::value) {
+ as_rhs_and_solution.data = reinterpret_cast<Scalar*>(solution);
+ std::copy_n(rhs, num_cols, solution);
+ } else {
+ scalar_rhs_and_solution_ =
+ ConstVectorRef(rhs, num_cols).template cast<Scalar>();
+ as_rhs_and_solution.data = scalar_rhs_and_solution_.data();
+ }
+ as_.Solve(numeric_factor_.get(), &as_rhs_and_solution);
+ if (!std::is_same<Scalar, double>::value) {
+ VectorRef(solution, num_cols) =
+ scalar_rhs_and_solution_.template cast<double>();
+ }
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+template<typename Scalar>
+void AppleAccelerateCholesky<Scalar>::FreeSymbolicFactorization() {
+ if (symbolic_factor_) {
+ SparseCleanup(*symbolic_factor_);
+ symbolic_factor_.reset();
+ }
+}
+
+template<typename Scalar>
+void AppleAccelerateCholesky<Scalar>::FreeNumericFactorization() {
+ if (numeric_factor_) {
+ SparseCleanup(*numeric_factor_);
+ numeric_factor_.reset();
+ }
+}
+
+// Instantiate only for the specific template types required/supported s/t the
+// definition can be in the .cc file.
+template class AppleAccelerateCholesky<double>;
+template class AppleAccelerateCholesky<float>;
+
+}
+}
+
+#endif // CERES_NO_ACCELERATE_SPARSE
diff --git a/extern/ceres/internal/ceres/accelerate_sparse.h b/extern/ceres/internal/ceres/accelerate_sparse.h
new file mode 100644
index 00000000000..43b4ea5fd70
--- /dev/null
+++ b/extern/ceres/internal/ceres/accelerate_sparse.h
@@ -0,0 +1,147 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+#ifndef CERES_INTERNAL_ACCELERATE_SPARSE_H_
+#define CERES_INTERNAL_ACCELERATE_SPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_ACCELERATE_SPARSE
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "Accelerate.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+template<typename Scalar>
+struct SparseTypesTrait {
+};
+
+template<>
+struct SparseTypesTrait<double> {
+ typedef DenseVector_Double DenseVector;
+ typedef SparseMatrix_Double SparseMatrix;
+ typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
+ typedef SparseOpaqueFactorization_Double NumericFactorization;
+};
+
+template<>
+struct SparseTypesTrait<float> {
+ typedef DenseVector_Float DenseVector;
+ typedef SparseMatrix_Float SparseMatrix;
+ typedef SparseOpaqueSymbolicFactorization SymbolicFactorization;
+ typedef SparseOpaqueFactorization_Float NumericFactorization;
+};
+
+template<typename Scalar>
+class AccelerateSparse {
+ public:
+ using DenseVector = typename SparseTypesTrait<Scalar>::DenseVector;
+ // Use ASSparseMatrix to avoid collision with ceres::internal::SparseMatrix.
+ using ASSparseMatrix = typename SparseTypesTrait<Scalar>::SparseMatrix;
+ using SymbolicFactorization = typename SparseTypesTrait<Scalar>::SymbolicFactorization;
+ using NumericFactorization = typename SparseTypesTrait<Scalar>::NumericFactorization;
+
+ // Solves a linear system given its symbolic (reference counted within
+ // NumericFactorization) and numeric factorization.
+ void Solve(NumericFactorization* numeric_factor,
+ DenseVector* rhs_and_solution);
+
+ // Note: Accelerate's API passes/returns its objects by value, but as the
+ // objects contain pointers to the underlying data these copies are
+ // all shallow (in some cases Accelerate also reference counts the
+ // objects internally).
+ ASSparseMatrix CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+ // Computes a symbolic factorisation of A that can be used in Solve().
+ SymbolicFactorization AnalyzeCholesky(ASSparseMatrix* A);
+ // Compute the numeric Cholesky factorization of A, given its
+ // symbolic factorization.
+ NumericFactorization Cholesky(ASSparseMatrix* A,
+ SymbolicFactorization* symbolic_factor);
+ // Reuse the NumericFactorization from a previous matrix with the same
+ // symbolic factorization to represent a new numeric factorization.
+ void Cholesky(ASSparseMatrix* A, NumericFactorization* numeric_factor);
+
+ private:
+ std::vector<long> column_starts_;
+ std::vector<uint8_t> solve_workspace_;
+ std::vector<uint8_t> factorization_workspace_;
+ // Storage for the values of A if Scalar != double (necessitating a copy).
+ Eigen::Matrix<Scalar, Eigen::Dynamic, 1> values_;
+};
+
+// An implementation of SparseCholesky interface using Apple's Accelerate
+// framework.
+template<typename Scalar>
+class AppleAccelerateCholesky : public SparseCholesky {
+ public:
+ // Factory
+ static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
+
+ // SparseCholesky interface.
+ virtual ~AppleAccelerateCholesky();
+ CompressedRowSparseMatrix::StorageType StorageType() const;
+ LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) final;
+ LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) final ;
+
+ private:
+ AppleAccelerateCholesky(const OrderingType ordering_type);
+ void FreeSymbolicFactorization();
+ void FreeNumericFactorization();
+
+ const OrderingType ordering_type_;
+ AccelerateSparse<Scalar> as_;
+ std::unique_ptr<typename AccelerateSparse<Scalar>::SymbolicFactorization>
+ symbolic_factor_;
+ std::unique_ptr<typename AccelerateSparse<Scalar>::NumericFactorization>
+ numeric_factor_;
+ // Copy of rhs/solution if Scalar != double (necessitating a copy).
+ Eigen::Matrix<Scalar, Eigen::Dynamic, 1> scalar_rhs_and_solution_;
+};
+
+}
+}
+
+#endif // CERES_NO_ACCELERATE_SPARSE
+
+#endif // CERES_INTERNAL_ACCELERATE_SPARSE_H_
diff --git a/extern/ceres/internal/ceres/array_utils.cc b/extern/ceres/internal/ceres/array_utils.cc
index 7be3c78ce24..32459e6dcd9 100644
--- a/extern/ceres/internal/ceres/array_utils.cc
+++ b/extern/ceres/internal/ceres/array_utils.cc
@@ -35,25 +35,17 @@
#include <cstddef>
#include <string>
#include <vector>
-#include "ceres/fpclassify.h"
#include "ceres/stringprintf.h"
-
+#include "ceres/types.h"
namespace ceres {
namespace internal {
using std::string;
-// It is a near impossibility that user code generates this exact
-// value in normal operation, thus we will use it to fill arrays
-// before passing them to user code. If on return an element of the
-// array still contains this value, we will assume that the user code
-// did not write to that memory location.
-const double kImpossibleValue = 1e302;
-
bool IsArrayValid(const int size, const double* x) {
if (x != NULL) {
for (int i = 0; i < size; ++i) {
- if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
+ if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return false;
}
}
@@ -67,7 +59,7 @@ int FindInvalidValue(const int size, const double* x) {
}
for (int i = 0; i < size; ++i) {
- if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
+ if (!std::isfinite(x[i]) || (x[i] == kImpossibleValue)) {
return i;
}
}
diff --git a/extern/ceres/internal/ceres/array_utils.h b/extern/ceres/internal/ceres/array_utils.h
index 2d2ffca8809..1d55733b7b8 100644
--- a/extern/ceres/internal/ceres/array_utils.h
+++ b/extern/ceres/internal/ceres/array_utils.h
@@ -66,11 +66,9 @@ int FindInvalidValue(const int size, const double* x);
// array pointer is NULL, it is treated as an array of zeros.
void AppendArrayToString(const int size, const double* x, std::string* result);
-extern const double kImpossibleValue;
-
// This routine takes an array of integer values, sorts and uniques
// them and then maps each value in the array to its position in the
-// sorted+uniqued array. By doing this, if there are are k unique
+// sorted+uniqued array. By doing this, if there are k unique
// values in the array, each value is replaced by an integer in the
// range [0, k-1], while preserving their relative order.
//
diff --git a/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc b/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
index 22d4b351c51..772c7af2ba5 100644
--- a/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
+++ b/extern/ceres/internal/ceres/block_jacobi_preconditioner.cc
@@ -34,7 +34,6 @@
#include "ceres/block_structure.h"
#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/casts.h"
-#include "ceres/integral_types.h"
#include "ceres/internal/eigen.h"
namespace ceres {
diff --git a/extern/ceres/internal/ceres/block_jacobi_preconditioner.h b/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
index 14007295823..856b506e073 100644
--- a/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
+++ b/extern/ceres/internal/ceres/block_jacobi_preconditioner.h
@@ -31,9 +31,8 @@
#ifndef CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
#define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_
-#include <vector>
+#include <memory>
#include "ceres/block_random_access_diagonal_matrix.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/preconditioner.h"
namespace ceres {
@@ -56,18 +55,21 @@ class BlockJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
public:
// A must remain valid while the BlockJacobiPreconditioner is.
explicit BlockJacobiPreconditioner(const BlockSparseMatrix& A);
+ BlockJacobiPreconditioner(const BlockJacobiPreconditioner&) = delete;
+ void operator=(const BlockJacobiPreconditioner&) = delete;
+
virtual ~BlockJacobiPreconditioner();
// Preconditioner interface
- virtual void RightMultiply(const double* x, double* y) const;
- virtual int num_rows() const { return m_->num_rows(); }
- virtual int num_cols() const { return m_->num_rows(); }
-
+ void RightMultiply(const double* x, double* y) const final;
+ int num_rows() const final { return m_->num_rows(); }
+ int num_cols() const final { return m_->num_rows(); }
const BlockRandomAccessDiagonalMatrix& matrix() const { return *m_; }
+
private:
- virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+ bool UpdateImpl(const BlockSparseMatrix& A, const double* D) final;
- scoped_ptr<BlockRandomAccessDiagonalMatrix> m_;
+ std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/block_jacobian_writer.cc b/extern/ceres/internal/ceres/block_jacobian_writer.cc
index 7a3fee4fbdf..6998bd66e61 100644
--- a/extern/ceres/internal/ceres/block_jacobian_writer.cc
+++ b/extern/ceres/internal/ceres/block_jacobian_writer.cc
@@ -37,7 +37,6 @@
#include "ceres/residual_block.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -206,7 +205,7 @@ SparseMatrix* BlockJacobianWriter::CreateJacobian() const {
}
BlockSparseMatrix* jacobian = new BlockSparseMatrix(bs);
- CHECK_NOTNULL(jacobian);
+ CHECK(jacobian != nullptr);
return jacobian;
}
diff --git a/extern/ceres/internal/ceres/block_jacobian_writer.h b/extern/ceres/internal/ceres/block_jacobian_writer.h
index 8e6f45130a4..c94a0d3f909 100644
--- a/extern/ceres/internal/ceres/block_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/block_jacobian_writer.h
@@ -49,6 +49,7 @@ class BlockEvaluatePreparer;
class Program;
class SparseMatrix;
+// TODO(sameeragarwal): This class needs documentation.
class BlockJacobianWriter {
public:
BlockJacobianWriter(const Evaluator::Options& options,
diff --git a/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc b/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
index 61748ef6f7f..f567aa5816b 100644
--- a/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_dense_matrix.cc
@@ -32,7 +32,6 @@
#include <vector>
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "glog/logging.h"
namespace ceres {
diff --git a/extern/ceres/internal/ceres/block_random_access_dense_matrix.h b/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
index 89689082561..8c5e2527ec1 100644
--- a/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_dense_matrix.h
@@ -33,11 +33,10 @@
#include "ceres/block_random_access_matrix.h"
+#include <memory>
#include <vector>
-#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -57,27 +56,29 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
// blocks is a vector of block sizes. The resulting matrix has
// blocks.size() * blocks.size() cells.
explicit BlockRandomAccessDenseMatrix(const std::vector<int>& blocks);
+ BlockRandomAccessDenseMatrix(const BlockRandomAccessDenseMatrix&) = delete;
+ void operator=(const BlockRandomAccessDenseMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDenseMatrix();
// BlockRandomAccessMatrix interface.
- virtual CellInfo* GetCell(int row_block_id,
- int col_block_id,
- int* row,
- int* col,
- int* row_stride,
- int* col_stride);
+ CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
- virtual void SetZero();
+ void SetZero() final;
// Since the matrix is square with the same row and column block
// structure, num_rows() = num_cols().
- virtual int num_rows() const { return num_rows_; }
- virtual int num_cols() const { return num_rows_; }
+ int num_rows() const final { return num_rows_; }
+ int num_cols() const final { return num_rows_; }
// The underlying matrix storing the cells.
const double* values() const { return values_.get(); }
@@ -86,10 +87,8 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
private:
int num_rows_;
std::vector<int> block_layout_;
- scoped_array<double> values_;
- scoped_array<CellInfo> cell_infos_;
-
- CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
+ std::unique_ptr<double[]> values_;
+ std::unique_ptr<CellInfo[]> cell_infos_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
index 052690d18be..526d173e4b0 100644
--- a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.cc
@@ -34,9 +34,9 @@
#include <set>
#include <utility>
#include <vector>
+
#include "Eigen/Dense"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/stl_util.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@@ -137,8 +137,8 @@ void BlockRandomAccessDiagonalMatrix::Invert() {
void BlockRandomAccessDiagonalMatrix::RightMultiply(const double* x,
double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
const double* values = tsm_->values();
for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];
diff --git a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
index 07ffc9d4a0d..3bda7d19074 100644
--- a/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_diagonal_matrix.h
@@ -31,17 +31,14 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_
+#include <memory>
#include <set>
-#include <vector>
#include <utility>
-#include "ceres/mutex.h"
+#include <vector>
+
#include "ceres/block_random_access_matrix.h"
-#include "ceres/collections_port.h"
-#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/integral_types.h"
-#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {
@@ -53,22 +50,24 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
public:
// blocks is an array of block sizes.
explicit BlockRandomAccessDiagonalMatrix(const std::vector<int>& blocks);
+ BlockRandomAccessDiagonalMatrix(const BlockRandomAccessDiagonalMatrix&) = delete;
+ void operator=(const BlockRandomAccessDiagonalMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessDiagonalMatrix();
// BlockRandomAccessMatrix Interface.
- virtual CellInfo* GetCell(int row_block_id,
- int col_block_id,
- int* row,
- int* col,
- int* row_stride,
- int* col_stride);
+ CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
- virtual void SetZero();
+ void SetZero() final;
// Invert the matrix assuming that each block is positive definite.
void Invert();
@@ -77,8 +76,8 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
void RightMultiply(const double* x, double* y) const;
// Since the matrix is square, num_rows() == num_cols().
- virtual int num_rows() const { return tsm_->num_rows(); }
- virtual int num_cols() const { return tsm_->num_cols(); }
+ int num_rows() const final { return tsm_->num_rows(); }
+ int num_cols() const final { return tsm_->num_cols(); }
const TripletSparseMatrix* matrix() const { return tsm_.get(); }
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
@@ -89,10 +88,9 @@ class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix {
std::vector<CellInfo*> layout_;
// The underlying matrix object which actually stores the cells.
- scoped_ptr<TripletSparseMatrix> tsm_;
+ std::unique_ptr<TripletSparseMatrix> tsm_;
friend class BlockRandomAccessDiagonalMatrixTest;
- CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDiagonalMatrix);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/block_random_access_matrix.h b/extern/ceres/internal/ceres/block_random_access_matrix.h
index 34c8bf5cd4d..6fcf0dc8a7c 100644
--- a/extern/ceres/internal/ceres/block_random_access_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_matrix.h
@@ -33,7 +33,7 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_MATRIX_H_
-#include "ceres/mutex.h"
+#include <mutex>
namespace ceres {
namespace internal {
@@ -77,23 +77,18 @@ namespace internal {
//
// if (cell != NULL) {
// MatrixRef m(cell->values, row_stride, col_stride);
-// CeresMutexLock l(&cell->m);
+// std::lock_guard<std::mutex> l(&cell->m);
// m.block(row, col, row_block_size, col_block_size) = ...
// }
// Structure to carry a pointer to the array containing a cell and the
-// Mutex guarding it.
+// mutex guarding it.
struct CellInfo {
- CellInfo()
- : values(NULL) {
- }
-
- explicit CellInfo(double* ptr)
- : values(ptr) {
- }
+ CellInfo() : values(nullptr) {}
+ explicit CellInfo(double* values) : values(values) {}
double* values;
- Mutex m;
+ std::mutex m;
};
class BlockRandomAccessMatrix {
diff --git a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
index 5432ec1064a..9c164546635 100644
--- a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.cc
@@ -31,12 +31,12 @@
#include "ceres/block_random_access_sparse_matrix.h"
#include <algorithm>
+#include <memory>
#include <set>
#include <utility>
#include <vector>
+
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/mutex.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -51,7 +51,7 @@ using std::vector;
BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
const vector<int>& blocks,
- const set<pair<int, int> >& block_pairs)
+ const set<pair<int, int>>& block_pairs)
: kMaxRowBlocks(10 * 1000 * 1000),
blocks_(blocks) {
CHECK_LT(blocks.size(), kMaxRowBlocks);
@@ -69,11 +69,9 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
// object for looking into the values array of the
// TripletSparseMatrix.
int num_nonzeros = 0;
- for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
- it != block_pairs.end();
- ++it) {
- const int row_block_size = blocks_[it->first];
- const int col_block_size = blocks_[it->second];
+ for (const auto& block_pair : block_pairs) {
+ const int row_block_size = blocks_[block_pair.first];
+ const int col_block_size = blocks_[block_pair.second];
num_nonzeros += row_block_size * col_block_size;
}
@@ -88,24 +86,19 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
double* values = tsm_->mutable_values();
int pos = 0;
- for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
- it != block_pairs.end();
- ++it) {
- const int row_block_size = blocks_[it->first];
- const int col_block_size = blocks_[it->second];
- cell_values_.push_back(make_pair(make_pair(it->first, it->second),
- values + pos));
- layout_[IntPairToLong(it->first, it->second)] =
+ for (const auto& block_pair : block_pairs) {
+ const int row_block_size = blocks_[block_pair.first];
+ const int col_block_size = blocks_[block_pair.second];
+ cell_values_.push_back(make_pair(block_pair, values + pos));
+ layout_[IntPairToLong(block_pair.first, block_pair.second)] =
new CellInfo(values + pos);
pos += row_block_size * col_block_size;
}
// Fill the sparsity pattern of the underlying matrix.
- for (set<pair<int, int> >::const_iterator it = block_pairs.begin();
- it != block_pairs.end();
- ++it) {
- const int row_block_id = it->first;
- const int col_block_id = it->second;
+ for (const auto& block_pair : block_pairs) {
+ const int row_block_id = block_pair.first;
+ const int col_block_id = block_pair.second;
const int row_block_size = blocks_[row_block_id];
const int col_block_size = blocks_[col_block_id];
int pos =
@@ -125,10 +118,8 @@ BlockRandomAccessSparseMatrix::BlockRandomAccessSparseMatrix(
// Assume that the user does not hold any locks on any cell blocks
// when they are calling SetZero.
BlockRandomAccessSparseMatrix::~BlockRandomAccessSparseMatrix() {
- for (LayoutType::iterator it = layout_.begin();
- it != layout_.end();
- ++it) {
- delete it->second;
+ for (const auto& entry : layout_) {
+ delete entry.second;
}
}
@@ -163,19 +154,17 @@ void BlockRandomAccessSparseMatrix::SetZero() {
void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
double* y) const {
- vector< pair<pair<int, int>, double*> >::const_iterator it =
- cell_values_.begin();
- for (; it != cell_values_.end(); ++it) {
- const int row = it->first.first;
+ for (const auto& cell_position_and_data : cell_values_) {
+ const int row = cell_position_and_data.first.first;
const int row_block_size = blocks_[row];
const int row_block_pos = block_positions_[row];
- const int col = it->first.second;
+ const int col = cell_position_and_data.first.second;
const int col_block_size = blocks_[col];
const int col_block_pos = block_positions_[col];
MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- it->second, row_block_size, col_block_size,
+ cell_position_and_data.second, row_block_size, col_block_size,
x + col_block_pos,
y + row_block_pos);
@@ -185,7 +174,7 @@ void BlockRandomAccessSparseMatrix::SymmetricRightMultiply(const double* x,
// triangular multiply also.
if (row != col) {
MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- it->second, row_block_size, col_block_size,
+ cell_position_and_data.second, row_block_size, col_block_size,
x + row_block_pos,
y + col_block_pos);
}
diff --git a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
index 2b3c7fdabae..d542a3d64e3 100644
--- a/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/block_random_access_sparse_matrix.h
@@ -31,17 +31,16 @@
#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_SPARSE_MATRIX_H_
+#include <cstdint>
+#include <memory>
#include <set>
-#include <vector>
+#include <unordered_map>
#include <utility>
-#include "ceres/mutex.h"
+#include <vector>
+
#include "ceres/block_random_access_matrix.h"
-#include "ceres/collections_port.h"
#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/integral_types.h"
-#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "ceres/small_blas.h"
@@ -59,23 +58,25 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// of this matrix.
BlockRandomAccessSparseMatrix(
const std::vector<int>& blocks,
- const std::set<std::pair<int, int> >& block_pairs);
+ const std::set<std::pair<int, int>>& block_pairs);
+ BlockRandomAccessSparseMatrix(const BlockRandomAccessSparseMatrix&) = delete;
+ void operator=(const BlockRandomAccessSparseMatrix&) = delete;
// The destructor is not thread safe. It assumes that no one is
// modifying any cells when the matrix is being destroyed.
virtual ~BlockRandomAccessSparseMatrix();
// BlockRandomAccessMatrix Interface.
- virtual CellInfo* GetCell(int row_block_id,
- int col_block_id,
- int* row,
- int* col,
- int* row_stride,
- int* col_stride);
+ CellInfo* GetCell(int row_block_id,
+ int col_block_id,
+ int* row,
+ int* col,
+ int* row_stride,
+ int* col_stride) final;
// This is not a thread safe method, it assumes that no cell is
// locked.
- virtual void SetZero();
+ void SetZero() final;
// Assume that the matrix is symmetric and only one half of the
// matrix is stored.
@@ -84,24 +85,24 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
void SymmetricRightMultiply(const double* x, double* y) const;
// Since the matrix is square, num_rows() == num_cols().
- virtual int num_rows() const { return tsm_->num_rows(); }
- virtual int num_cols() const { return tsm_->num_cols(); }
+ int num_rows() const final { return tsm_->num_rows(); }
+ int num_cols() const final { return tsm_->num_cols(); }
// Access to the underlying matrix object.
const TripletSparseMatrix* matrix() const { return tsm_.get(); }
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
private:
- int64 IntPairToLong(int row, int col) const {
+ int64_t IntPairToLong(int row, int col) const {
return row * kMaxRowBlocks + col;
}
- void LongToIntPair(int64 index, int* row, int* col) const {
+ void LongToIntPair(int64_t index, int* row, int* col) const {
*row = index / kMaxRowBlocks;
*col = index % kMaxRowBlocks;
}
- const int64 kMaxRowBlocks;
+ const int64_t kMaxRowBlocks;
// row/column block sizes.
const std::vector<int> blocks_;
@@ -109,18 +110,17 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// A mapping from <row_block_id, col_block_id> to the position in
// the values array of tsm_ where the block is stored.
- typedef HashMap<long int, CellInfo* > LayoutType;
+ typedef std::unordered_map<long int, CellInfo* > LayoutType;
LayoutType layout_;
// In order traversal of contents of the matrix. This allows us to
// implement a matrix-vector which is 20% faster than using the
// iterator in the Layout object instead.
- std::vector<std::pair<std::pair<int, int>, double*> > cell_values_;
+ std::vector<std::pair<std::pair<int, int>, double*>> cell_values_;
// The underlying matrix object which actually stores the cells.
- scoped_ptr<TripletSparseMatrix> tsm_;
+ std::unique_ptr<TripletSparseMatrix> tsm_;
friend class BlockRandomAccessSparseMatrixTest;
- CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/block_sparse_matrix.cc b/extern/ceres/internal/ceres/block_sparse_matrix.cc
index 68d0780156c..8f50f3561e2 100644
--- a/extern/ceres/internal/ceres/block_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/block_sparse_matrix.cc
@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
+#include "ceres/random.h"
#include "ceres/small_blas.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -51,9 +52,8 @@ BlockSparseMatrix::BlockSparseMatrix(
: num_rows_(0),
num_cols_(0),
num_nonzeros_(0),
- values_(NULL),
block_structure_(block_structure) {
- CHECK_NOTNULL(block_structure_.get());
+ CHECK(block_structure_ != nullptr);
// Count the number of columns in the matrix.
for (int i = 0; i < block_structure_->cols.size(); ++i) {
@@ -80,7 +80,8 @@ BlockSparseMatrix::BlockSparseMatrix(
VLOG(2) << "Allocating values array with "
<< num_nonzeros_ * sizeof(double) << " bytes."; // NOLINT
values_.reset(new double[num_nonzeros_]);
- CHECK_NOTNULL(values_.get());
+ max_num_nonzeros_ = num_nonzeros_;
+ CHECK(values_ != nullptr);
}
void BlockSparseMatrix::SetZero() {
@@ -88,8 +89,8 @@ void BlockSparseMatrix::SetZero() {
}
void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_pos = block_structure_->rows[i].block.position;
@@ -108,8 +109,8 @@ void BlockSparseMatrix::RightMultiply(const double* x, double* y) const {
}
void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_pos = block_structure_->rows[i].block.position;
@@ -128,7 +129,7 @@ void BlockSparseMatrix::LeftMultiply(const double* x, double* y) const {
}
void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
- CHECK_NOTNULL(x);
+ CHECK(x != nullptr);
VectorRef(x, num_cols_).setZero();
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
@@ -145,7 +146,7 @@ void BlockSparseMatrix::SquaredColumnNorm(double* x) const {
}
void BlockSparseMatrix::ScaleColumns(const double* scale) {
- CHECK_NOTNULL(scale);
+ CHECK(scale != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
int row_block_size = block_structure_->rows[i].block.size;
@@ -162,7 +163,7 @@ void BlockSparseMatrix::ScaleColumns(const double* scale) {
}
void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
- CHECK_NOTNULL(dense_matrix);
+ CHECK(dense_matrix != nullptr);
dense_matrix->resize(num_rows_, num_cols_);
dense_matrix->setZero();
@@ -185,7 +186,7 @@ void BlockSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
void BlockSparseMatrix::ToTripletSparseMatrix(
TripletSparseMatrix* matrix) const {
- CHECK_NOTNULL(matrix);
+ CHECK(matrix != nullptr);
matrix->Reserve(num_nonzeros_);
matrix->Resize(num_rows_, num_cols_);
@@ -220,7 +221,7 @@ const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
}
void BlockSparseMatrix::ToTextFile(FILE* file) const {
- CHECK_NOTNULL(file);
+ CHECK(file != nullptr);
for (int i = 0; i < block_structure_->rows.size(); ++i) {
const int row_block_pos = block_structure_->rows[i].block.position;
const int row_block_size = block_structure_->rows[i].block.size;
@@ -242,5 +243,162 @@ void BlockSparseMatrix::ToTextFile(FILE* file) const {
}
}
+BlockSparseMatrix* BlockSparseMatrix::CreateDiagonalMatrix(
+ const double* diagonal, const std::vector<Block>& column_blocks) {
+ // Create the block structure for the diagonal matrix.
+ CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+ bs->cols = column_blocks;
+ int position = 0;
+ bs->rows.resize(column_blocks.size(), CompressedRow(1));
+ for (int i = 0; i < column_blocks.size(); ++i) {
+ CompressedRow& row = bs->rows[i];
+ row.block = column_blocks[i];
+ Cell& cell = row.cells[0];
+ cell.block_id = i;
+ cell.position = position;
+ position += row.block.size * row.block.size;
+ }
+
+ // Create the BlockSparseMatrix with the given block structure.
+ BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+ matrix->SetZero();
+
+ // Fill the values array of the block sparse matrix.
+ double* values = matrix->mutable_values();
+ for (int i = 0; i < column_blocks.size(); ++i) {
+ const int size = column_blocks[i].size;
+ for (int j = 0; j < size; ++j) {
+ // (j + 1) * size is compact way of accessing the (j,j) entry.
+ values[j * (size + 1)] = diagonal[j];
+ }
+ diagonal += size;
+ values += size * size;
+ }
+
+ return matrix;
+}
+
+void BlockSparseMatrix::AppendRows(const BlockSparseMatrix& m) {
+ CHECK_EQ(m.num_cols(), num_cols());
+ const CompressedRowBlockStructure* m_bs = m.block_structure();
+ CHECK_EQ(m_bs->cols.size(), block_structure_->cols.size());
+
+ const int old_num_nonzeros = num_nonzeros_;
+ const int old_num_row_blocks = block_structure_->rows.size();
+ block_structure_->rows.resize(old_num_row_blocks + m_bs->rows.size());
+
+ for (int i = 0; i < m_bs->rows.size(); ++i) {
+ const CompressedRow& m_row = m_bs->rows[i];
+ CompressedRow& row = block_structure_->rows[old_num_row_blocks + i];
+ row.block.size = m_row.block.size;
+ row.block.position = num_rows_;
+ num_rows_ += m_row.block.size;
+ row.cells.resize(m_row.cells.size());
+ for (int c = 0; c < m_row.cells.size(); ++c) {
+ const int block_id = m_row.cells[c].block_id;
+ row.cells[c].block_id = block_id;
+ row.cells[c].position = num_nonzeros_;
+ num_nonzeros_ += m_row.block.size * m_bs->cols[block_id].size;
+ }
+ }
+
+ if (num_nonzeros_ > max_num_nonzeros_) {
+ double* new_values = new double[num_nonzeros_];
+ std::copy(values_.get(), values_.get() + old_num_nonzeros, new_values);
+ values_.reset(new_values);
+ max_num_nonzeros_ = num_nonzeros_;
+ }
+
+ std::copy(m.values(),
+ m.values() + m.num_nonzeros(),
+ values_.get() + old_num_nonzeros);
+}
+
+void BlockSparseMatrix::DeleteRowBlocks(const int delta_row_blocks) {
+ const int num_row_blocks = block_structure_->rows.size();
+ int delta_num_nonzeros = 0;
+ int delta_num_rows = 0;
+ const std::vector<Block>& column_blocks = block_structure_->cols;
+ for (int i = 0; i < delta_row_blocks; ++i) {
+ const CompressedRow& row = block_structure_->rows[num_row_blocks - i - 1];
+ delta_num_rows += row.block.size;
+ for (int c = 0; c < row.cells.size(); ++c) {
+ const Cell& cell = row.cells[c];
+ delta_num_nonzeros += row.block.size * column_blocks[cell.block_id].size;
+ }
+ }
+ num_nonzeros_ -= delta_num_nonzeros;
+ num_rows_ -= delta_num_rows;
+ block_structure_->rows.resize(num_row_blocks - delta_row_blocks);
+}
+
+BlockSparseMatrix* BlockSparseMatrix::CreateRandomMatrix(
+ const BlockSparseMatrix::RandomMatrixOptions& options) {
+ CHECK_GT(options.num_row_blocks, 0);
+ CHECK_GT(options.min_row_block_size, 0);
+ CHECK_GT(options.max_row_block_size, 0);
+ CHECK_LE(options.min_row_block_size, options.max_row_block_size);
+ CHECK_GT(options.block_density, 0.0);
+ CHECK_LE(options.block_density, 1.0);
+
+ CompressedRowBlockStructure* bs = new CompressedRowBlockStructure();
+ if (options.col_blocks.empty()) {
+ CHECK_GT(options.num_col_blocks, 0);
+ CHECK_GT(options.min_col_block_size, 0);
+ CHECK_GT(options.max_col_block_size, 0);
+ CHECK_LE(options.min_col_block_size, options.max_col_block_size);
+
+ // Generate the col block structure.
+ int col_block_position = 0;
+ for (int i = 0; i < options.num_col_blocks; ++i) {
+ // Generate a random integer in [min_col_block_size, max_col_block_size]
+ const int delta_block_size =
+ Uniform(options.max_col_block_size - options.min_col_block_size);
+ const int col_block_size = options.min_col_block_size + delta_block_size;
+ bs->cols.push_back(Block(col_block_size, col_block_position));
+ col_block_position += col_block_size;
+ }
+ } else {
+ bs->cols = options.col_blocks;
+ }
+
+ bool matrix_has_blocks = false;
+ while (!matrix_has_blocks) {
+ VLOG(1) << "Clearing";
+ bs->rows.clear();
+ int row_block_position = 0;
+ int value_position = 0;
+ for (int r = 0; r < options.num_row_blocks; ++r) {
+
+ const int delta_block_size =
+ Uniform(options.max_row_block_size - options.min_row_block_size);
+ const int row_block_size = options.min_row_block_size + delta_block_size;
+ bs->rows.push_back(CompressedRow());
+ CompressedRow& row = bs->rows.back();
+ row.block.size = row_block_size;
+ row.block.position = row_block_position;
+ row_block_position += row_block_size;
+ for (int c = 0; c < bs->cols.size(); ++c) {
+ if (RandDouble() > options.block_density) continue;
+
+ row.cells.push_back(Cell());
+ Cell& cell = row.cells.back();
+ cell.block_id = c;
+ cell.position = value_position;
+ value_position += row_block_size * bs->cols[c].size;
+ matrix_has_blocks = true;
+ }
+ }
+ }
+
+ BlockSparseMatrix* matrix = new BlockSparseMatrix(bs);
+ double* values = matrix->mutable_values();
+ for (int i = 0; i < matrix->num_nonzeros(); ++i) {
+ values[i] = RandNormal();
+ }
+
+ return matrix;
+}
+
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/block_sparse_matrix.h b/extern/ceres/internal/ceres/block_sparse_matrix.h
index 2f9afb738f8..d0c255de612 100644
--- a/extern/ceres/internal/ceres/block_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/block_sparse_matrix.h
@@ -34,11 +34,10 @@
#ifndef CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
#define CERES_INTERNAL_BLOCK_SPARSE_MATRIX_H_
+#include <memory>
#include "ceres/block_structure.h"
#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -64,34 +63,99 @@ class BlockSparseMatrix : public SparseMatrix {
explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
BlockSparseMatrix();
+ BlockSparseMatrix(const BlockSparseMatrix&) = delete;
+ void operator=(const BlockSparseMatrix&) = delete;
+
virtual ~BlockSparseMatrix();
// Implementation of SparseMatrix interface.
- virtual void SetZero();
- virtual void RightMultiply(const double* x, double* y) const;
- virtual void LeftMultiply(const double* x, double* y) const;
- virtual void SquaredColumnNorm(double* x) const;
- virtual void ScaleColumns(const double* scale);
- virtual void ToDenseMatrix(Matrix* dense_matrix) const;
- virtual void ToTextFile(FILE* file) const;
-
- virtual int num_rows() const { return num_rows_; }
- virtual int num_cols() const { return num_cols_; }
- virtual int num_nonzeros() const { return num_nonzeros_; }
- virtual const double* values() const { return values_.get(); }
- virtual double* mutable_values() { return values_.get(); }
+ void SetZero() final;
+ void RightMultiply(const double* x, double* y) const final;
+ void LeftMultiply(const double* x, double* y) const final;
+ void SquaredColumnNorm(double* x) const final;
+ void ScaleColumns(const double* scale) final;
+ void ToDenseMatrix(Matrix* dense_matrix) const final;
+ void ToTextFile(FILE* file) const final;
+
+ int num_rows() const final { return num_rows_; }
+ int num_cols() const final { return num_cols_; }
+ int num_nonzeros() const final { return num_nonzeros_; }
+ const double* values() const final { return values_.get(); }
+ double* mutable_values() final { return values_.get(); }
void ToTripletSparseMatrix(TripletSparseMatrix* matrix) const;
const CompressedRowBlockStructure* block_structure() const;
+ // Append the contents of m to the bottom of this matrix. m must
+ // have the same column blocks structure as this matrix.
+ void AppendRows(const BlockSparseMatrix& m);
+
+ // Delete the bottom delta_rows_blocks.
+ void DeleteRowBlocks(int delta_row_blocks);
+
+ static BlockSparseMatrix* CreateDiagonalMatrix(
+ const double* diagonal,
+ const std::vector<Block>& column_blocks);
+
+ struct RandomMatrixOptions {
+ int num_row_blocks = 0;
+ int min_row_block_size = 0;
+ int max_row_block_size = 0;
+ int num_col_blocks = 0;
+ int min_col_block_size = 0;
+ int max_col_block_size = 0;
+
+ // 0 < block_density <= 1 is the probability of a block being
+ // present in the matrix. A given random matrix will not have
+ // precisely this density.
+ double block_density = 0.0;
+
+ // If col_blocks is non-empty, then the generated random matrix
+ // has this block structure and the column related options in this
+ // struct are ignored.
+ std::vector<Block> col_blocks;
+ };
+
+ // Create a random BlockSparseMatrix whose entries are normally
+ // distributed and whose structure is determined by
+ // RandomMatrixOptions.
+ //
+ // Caller owns the result.
+ static BlockSparseMatrix* CreateRandomMatrix(
+ const RandomMatrixOptions& options);
+
private:
int num_rows_;
int num_cols_;
- int max_num_nonzeros_;
int num_nonzeros_;
- scoped_array<double> values_;
- scoped_ptr<CompressedRowBlockStructure> block_structure_;
- CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
+ int max_num_nonzeros_;
+ std::unique_ptr<double[]> values_;
+ std::unique_ptr<CompressedRowBlockStructure> block_structure_;
+};
+
+// A number of algorithms like the SchurEliminator do not need
+// access to the full BlockSparseMatrix interface. They only
+// need read only access to the values array and the block structure.
+//
+// BlockSparseDataMatrix a struct that carries these two bits of
+// information
+class BlockSparseMatrixData {
+ public:
+ BlockSparseMatrixData(const BlockSparseMatrix& m)
+ : block_structure_(m.block_structure()), values_(m.values()){};
+
+ BlockSparseMatrixData(const CompressedRowBlockStructure* block_structure,
+ const double* values)
+ : block_structure_(block_structure), values_(values) {}
+
+ const CompressedRowBlockStructure* block_structure() const {
+ return block_structure_;
+ }
+ const double* values() const { return values_; }
+
+ private:
+ const CompressedRowBlockStructure* block_structure_;
+ const double* values_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/block_structure.h b/extern/ceres/internal/ceres/block_structure.h
index 6e7003addb6..b5218c0c2cc 100644
--- a/extern/ceres/internal/ceres/block_structure.h
+++ b/extern/ceres/internal/ceres/block_structure.h
@@ -38,14 +38,14 @@
#ifndef CERES_INTERNAL_BLOCK_STRUCTURE_H_
#define CERES_INTERNAL_BLOCK_STRUCTURE_H_
+#include <cstdint>
#include <vector>
#include "ceres/internal/port.h"
-#include "ceres/types.h"
namespace ceres {
namespace internal {
-typedef int32 BlockSize;
+typedef int32_t BlockSize;
struct Block {
Block() : size(-1), position(-1) {}
@@ -70,6 +70,11 @@ struct Cell {
bool CellLessThan(const Cell& lhs, const Cell& rhs);
struct CompressedList {
+ CompressedList() {}
+
+ // Construct a CompressedList with the cells containing num_cells
+ // entries.
+ CompressedList(int num_cells) : cells(num_cells) {}
Block block;
std::vector<Cell> cells;
};
diff --git a/extern/ceres/internal/ceres/c_api.cc b/extern/ceres/internal/ceres/c_api.cc
index ada8f3e0013..2244909131a 100644
--- a/extern/ceres/internal/ceres/c_api.cc
+++ b/extern/ceres/internal/ceres/c_api.cc
@@ -80,9 +80,9 @@ class CallbackCostFunction : public ceres::CostFunction {
virtual ~CallbackCostFunction() {}
- virtual bool Evaluate(double const* const* parameters,
+ bool Evaluate(double const* const* parameters,
double* residuals,
- double** jacobians) const {
+ double** jacobians) const final {
return (*cost_function_)(user_data_,
const_cast<double**>(parameters),
residuals,
@@ -101,7 +101,7 @@ class CallbackLossFunction : public ceres::LossFunction {
explicit CallbackLossFunction(ceres_loss_function_t loss_function,
void* user_data)
: loss_function_(loss_function), user_data_(user_data) {}
- virtual void Evaluate(double sq_norm, double* rho) const {
+ void Evaluate(double sq_norm, double* rho) const final {
(*loss_function_)(user_data_, sq_norm, rho);
}
diff --git a/extern/ceres/internal/ceres/callbacks.cc b/extern/ceres/internal/ceres/callbacks.cc
index 50a0ec19924..84576e40e7d 100644
--- a/extern/ceres/internal/ceres/callbacks.cc
+++ b/extern/ceres/internal/ceres/callbacks.cc
@@ -47,9 +47,29 @@ StateUpdatingCallback::~StateUpdatingCallback() {}
CallbackReturnType StateUpdatingCallback::operator()(
const IterationSummary& summary) {
+ program_->StateVectorToParameterBlocks(parameters_);
+ program_->CopyParameterBlockStateToUserState();
+ return SOLVER_CONTINUE;
+}
+
+GradientProblemSolverStateUpdatingCallback::
+ GradientProblemSolverStateUpdatingCallback(
+ int num_parameters,
+ const double* internal_parameters,
+ double* user_parameters)
+ : num_parameters_(num_parameters),
+ internal_parameters_(internal_parameters),
+ user_parameters_(user_parameters) {}
+
+GradientProblemSolverStateUpdatingCallback::
+ ~GradientProblemSolverStateUpdatingCallback() {}
+
+CallbackReturnType GradientProblemSolverStateUpdatingCallback::operator()(
+ const IterationSummary& summary) {
if (summary.step_is_successful) {
- program_->StateVectorToParameterBlocks(parameters_);
- program_->CopyParameterBlockStateToUserState();
+ std::copy(internal_parameters_,
+ internal_parameters_ + num_parameters_,
+ user_parameters_);
}
return SOLVER_CONTINUE;
}
diff --git a/extern/ceres/internal/ceres/callbacks.h b/extern/ceres/internal/ceres/callbacks.h
index 33c66df5c11..d68bf7f6301 100644
--- a/extern/ceres/internal/ceres/callbacks.h
+++ b/extern/ceres/internal/ceres/callbacks.h
@@ -46,19 +46,34 @@ class StateUpdatingCallback : public IterationCallback {
public:
StateUpdatingCallback(Program* program, double* parameters);
virtual ~StateUpdatingCallback();
- virtual CallbackReturnType operator()(const IterationSummary& summary);
+ CallbackReturnType operator()(const IterationSummary& summary) final;
private:
Program* program_;
double* parameters_;
};
+// Callback for updating the externally visible state of the
+// parameters vector for GradientProblemSolver.
+class GradientProblemSolverStateUpdatingCallback : public IterationCallback {
+ public:
+ GradientProblemSolverStateUpdatingCallback(int num_parameters,
+ const double* internal_parameters,
+ double* user_parameters);
+ virtual ~GradientProblemSolverStateUpdatingCallback();
+ CallbackReturnType operator()(const IterationSummary& summary) final;
+ private:
+ int num_parameters_;
+ const double* internal_parameters_;
+ double* user_parameters_;
+};
+
// Callback for logging the state of the minimizer to STDERR or
// STDOUT depending on the user's preferences and logging level.
class LoggingCallback : public IterationCallback {
public:
LoggingCallback(MinimizerType minimizer_type, bool log_to_stdout);
virtual ~LoggingCallback();
- virtual CallbackReturnType operator()(const IterationSummary& summary);
+ CallbackReturnType operator()(const IterationSummary& summary) final;
private:
const MinimizerType minimizer_type;
diff --git a/extern/ceres/internal/ceres/canonical_views_clustering.cc b/extern/ceres/internal/ceres/canonical_views_clustering.cc
new file mode 100644
index 00000000000..e927e1f91be
--- /dev/null
+++ b/extern/ceres/internal/ceres/canonical_views_clustering.cc
@@ -0,0 +1,232 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: David Gallup (dgallup@google.com)
+// Sameer Agarwal (sameeragarwal@google.com)
+
+#include "ceres/canonical_views_clustering.h"
+
+#include <unordered_set>
+#include <unordered_map>
+
+#include "ceres/graph.h"
+#include "ceres/map_util.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+typedef std::unordered_map<int, int> IntMap;
+typedef std::unordered_set<int> IntSet;
+
+class CanonicalViewsClustering {
+ public:
+ CanonicalViewsClustering() {}
+
+ // Compute the canonical views clustering of the vertices of the
+ // graph. centers will contain the vertices that are the identified
+ // as the canonical views/cluster centers, and membership is a map
+ // from vertices to cluster_ids. The i^th cluster center corresponds
+ // to the i^th cluster. It is possible depending on the
+ // configuration of the clustering algorithm that some of the
+ // vertices may not be assigned to any cluster. In this case they
+ // are assigned to a cluster with id = kInvalidClusterId.
+ void ComputeClustering(const CanonicalViewsClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ vector<int>* centers,
+ IntMap* membership);
+
+ private:
+ void FindValidViews(IntSet* valid_views) const;
+ double ComputeClusteringQualityDifference(const int candidate,
+ const vector<int>& centers) const;
+ void UpdateCanonicalViewAssignments(const int canonical_view);
+ void ComputeClusterMembership(const vector<int>& centers,
+ IntMap* membership) const;
+
+ CanonicalViewsClusteringOptions options_;
+ const WeightedGraph<int>* graph_;
+ // Maps a view to its representative canonical view (its cluster
+ // center).
+ IntMap view_to_canonical_view_;
+ // Maps a view to its similarity to its current cluster center.
+ std::unordered_map<int, double> view_to_canonical_view_similarity_;
+};
+
+void ComputeCanonicalViewsClustering(
+ const CanonicalViewsClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ vector<int>* centers,
+ IntMap* membership) {
+ time_t start_time = time(NULL);
+ CanonicalViewsClustering cv;
+ cv.ComputeClustering(options, graph, centers, membership);
+ VLOG(2) << "Canonical views clustering time (secs): "
+ << time(NULL) - start_time;
+}
+
+// Implementation of CanonicalViewsClustering
+void CanonicalViewsClustering::ComputeClustering(
+ const CanonicalViewsClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ vector<int>* centers,
+ IntMap* membership) {
+ options_ = options;
+ CHECK(centers != nullptr);
+ CHECK(membership != nullptr);
+ centers->clear();
+ membership->clear();
+ graph_ = &graph;
+
+ IntSet valid_views;
+ FindValidViews(&valid_views);
+ while (valid_views.size() > 0) {
+ // Find the next best canonical view.
+ double best_difference = -std::numeric_limits<double>::max();
+ int best_view = 0;
+
+ // TODO(sameeragarwal): Make this loop multi-threaded.
+ for (const auto& view : valid_views) {
+ const double difference =
+ ComputeClusteringQualityDifference(view, *centers);
+ if (difference > best_difference) {
+ best_difference = difference;
+ best_view = view;
+ }
+ }
+
+ CHECK_GT(best_difference, -std::numeric_limits<double>::max());
+
+ // Add canonical view if quality improves, or if minimum is not
+ // yet met, otherwise break.
+ if ((best_difference <= 0) &&
+ (centers->size() >= options_.min_views)) {
+ break;
+ }
+
+ centers->push_back(best_view);
+ valid_views.erase(best_view);
+ UpdateCanonicalViewAssignments(best_view);
+ }
+
+ ComputeClusterMembership(*centers, membership);
+}
+
+// Return the set of vertices of the graph which have valid vertex
+// weights.
+void CanonicalViewsClustering::FindValidViews(
+ IntSet* valid_views) const {
+ const IntSet& views = graph_->vertices();
+ for (const auto& view : views) {
+ if (graph_->VertexWeight(view) != WeightedGraph<int>::InvalidWeight()) {
+ valid_views->insert(view);
+ }
+ }
+}
+
+// Computes the difference in the quality score if 'candidate' were
+// added to the set of canonical views.
+double CanonicalViewsClustering::ComputeClusteringQualityDifference(
+ const int candidate,
+ const vector<int>& centers) const {
+ // View score.
+ double difference =
+ options_.view_score_weight * graph_->VertexWeight(candidate);
+
+ // Compute how much the quality score changes if the candidate view
+ // was added to the list of canonical views and its nearest
+ // neighbors became members of its cluster.
+ const IntSet& neighbors = graph_->Neighbors(candidate);
+ for (const auto& neighbor : neighbors) {
+ const double old_similarity =
+ FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
+ const double new_similarity = graph_->EdgeWeight(neighbor, candidate);
+ if (new_similarity > old_similarity) {
+ difference += new_similarity - old_similarity;
+ }
+ }
+
+ // Number of views penalty.
+ difference -= options_.size_penalty_weight;
+
+ // Orthogonality.
+ for (int i = 0; i < centers.size(); ++i) {
+ difference -= options_.similarity_penalty_weight *
+ graph_->EdgeWeight(centers[i], candidate);
+ }
+
+ return difference;
+}
+
+// Reassign views if they're more similar to the new canonical view.
+void CanonicalViewsClustering::UpdateCanonicalViewAssignments(
+ const int canonical_view) {
+ const IntSet& neighbors = graph_->Neighbors(canonical_view);
+ for (const auto& neighbor : neighbors) {
+ const double old_similarity =
+ FindWithDefault(view_to_canonical_view_similarity_, neighbor, 0.0);
+ const double new_similarity =
+ graph_->EdgeWeight(neighbor, canonical_view);
+ if (new_similarity > old_similarity) {
+ view_to_canonical_view_[neighbor] = canonical_view;
+ view_to_canonical_view_similarity_[neighbor] = new_similarity;
+ }
+ }
+}
+
+// Assign a cluster id to each view.
+void CanonicalViewsClustering::ComputeClusterMembership(
+ const vector<int>& centers,
+ IntMap* membership) const {
+ CHECK(membership != nullptr);
+ membership->clear();
+
+ // The i^th cluster has cluster id i.
+ IntMap center_to_cluster_id;
+ for (int i = 0; i < centers.size(); ++i) {
+ center_to_cluster_id[centers[i]] = i;
+ }
+
+ static constexpr int kInvalidClusterId = -1;
+
+ const IntSet& views = graph_->vertices();
+ for (const auto& view : views) {
+ auto it = view_to_canonical_view_.find(view);
+ int cluster_id = kInvalidClusterId;
+ if (it != view_to_canonical_view_.end()) {
+ cluster_id = FindOrDie(center_to_cluster_id, it->second);
+ }
+
+ InsertOrDie(membership, view, cluster_id);
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/canonical_views_clustering.h b/extern/ceres/internal/ceres/canonical_views_clustering.h
new file mode 100644
index 00000000000..630adfed717
--- /dev/null
+++ b/extern/ceres/internal/ceres/canonical_views_clustering.h
@@ -0,0 +1,124 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// An implementation of the Canonical Views clustering algorithm from
+// "Scene Summarization for Online Image Collections", Ian Simon, Noah
+// Snavely, Steven M. Seitz, ICCV 2007.
+//
+// More details can be found at
+// http://grail.cs.washington.edu/projects/canonview/
+//
+// Ceres uses this algorithm to perform view clustering for
+// constructing visibility based preconditioners.
+
+#ifndef CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+#define CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
+
+#include <unordered_map>
+#include <vector>
+
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct CanonicalViewsClusteringOptions;
+
+// Compute a partitioning of the vertices of the graph using the
+// canonical views clustering algorithm.
+//
+// In the following we will use the terms vertices and views
+// interchangeably. Given a weighted Graph G(V,E), the canonical views
+// of G are the set of vertices that best "summarize" the content
+// of the graph. If w_ij i s the weight connecting the vertex i to
+// vertex j, and C is the set of canonical views. Then the objective
+// of the canonical views algorithm is
+//
+// E[C] = sum_[i in V] max_[j in C] w_ij
+// - size_penalty_weight * |C|
+// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+//
+// alpha is the size penalty that penalizes large number of canonical
+// views.
+//
+// beta is the similarity penalty that penalizes canonical views that
+// are too similar to other canonical views.
+//
+// Thus the canonical views algorithm tries to find a canonical view
+// for each vertex in the graph which best explains it, while trying
+// to minimize the number of canonical views and the overlap between
+// them.
+//
+// We further augment the above objective function by allowing for per
+// vertex weights, higher weights indicating a higher preference for
+// being chosen as a canonical view. Thus if w_i is the vertex weight
+// for vertex i, the objective function is then
+//
+// E[C] = sum_[i in V] max_[j in C] w_ij
+// - size_penalty_weight * |C|
+// - similarity_penalty_weight * sum_[i in C, j in C, j > i] w_ij
+// + view_score_weight * sum_[i in C] w_i
+//
+// centers will contain the vertices that are the identified
+// as the canonical views/cluster centers, and membership is a map
+// from vertices to cluster_ids. The i^th cluster center corresponds
+// to the i^th cluster.
+//
+// It is possible depending on the configuration of the clustering
+// algorithm that some of the vertices may not be assigned to any
+// cluster. In this case they are assigned to a cluster with id = -1;
+void ComputeCanonicalViewsClustering(
+ const CanonicalViewsClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ std::vector<int>* centers,
+ std::unordered_map<int, int>* membership);
+
+struct CanonicalViewsClusteringOptions {
+ // The minimum number of canonical views to compute.
+ int min_views = 3;
+
+ // Penalty weight for the number of canonical views. A higher
+ // number will result in fewer canonical views.
+ double size_penalty_weight = 5.75;
+
+ // Penalty weight for the diversity (orthogonality) of the
+ // canonical views. A higher number will encourage less similar
+ // canonical views.
+ double similarity_penalty_weight = 100;
+
+ // Weight for per-view scores. Lower weight places less
+ // confidence in the view scores.
+ double view_score_weight = 0.0;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CANONICAL_VIEWS_CLUSTERING_H_
diff --git a/extern/ceres/internal/ceres/cgnr_linear_operator.h b/extern/ceres/internal/ceres/cgnr_linear_operator.h
index 44c07cabd01..8e8febcc934 100644
--- a/extern/ceres/internal/ceres/cgnr_linear_operator.h
+++ b/extern/ceres/internal/ceres/cgnr_linear_operator.h
@@ -32,8 +32,8 @@
#define CERES_INTERNAL_CGNR_LINEAR_OPERATOR_H_
#include <algorithm>
+#include <memory>
#include "ceres/linear_operator.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/internal/eigen.h"
namespace ceres {
@@ -84,7 +84,7 @@ class CgnrLinearOperator : public LinearOperator {
}
virtual ~CgnrLinearOperator() {}
- virtual void RightMultiply(const double* x, double* y) const {
+ void RightMultiply(const double* x, double* y) const final {
std::fill(z_.get(), z_.get() + A_.num_rows(), 0.0);
// z = Ax
@@ -101,17 +101,17 @@ class CgnrLinearOperator : public LinearOperator {
}
}
- virtual void LeftMultiply(const double* x, double* y) const {
+ void LeftMultiply(const double* x, double* y) const final {
RightMultiply(x, y);
}
- virtual int num_rows() const { return A_.num_cols(); }
- virtual int num_cols() const { return A_.num_cols(); }
+ int num_rows() const final { return A_.num_cols(); }
+ int num_cols() const final { return A_.num_cols(); }
private:
const LinearOperator& A_;
const double* D_;
- scoped_array<double> z_;
+ std::unique_ptr<double[]> z_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/cgnr_solver.cc b/extern/ceres/internal/ceres/cgnr_solver.cc
index 61fae758d5b..9dba1cfb4a8 100644
--- a/extern/ceres/internal/ceres/cgnr_solver.cc
+++ b/extern/ceres/internal/ceres/cgnr_solver.cc
@@ -35,6 +35,7 @@
#include "ceres/conjugate_gradients_solver.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_solver.h"
+#include "ceres/subset_preconditioner.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
@@ -42,14 +43,19 @@ namespace ceres {
namespace internal {
CgnrSolver::CgnrSolver(const LinearSolver::Options& options)
- : options_(options),
- preconditioner_(NULL) {
+ : options_(options) {
if (options_.preconditioner_type != JACOBI &&
- options_.preconditioner_type != IDENTITY) {
- LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners.";
+ options_.preconditioner_type != IDENTITY &&
+ options_.preconditioner_type != SUBSET) {
+ LOG(FATAL)
+ << "Preconditioner = "
+ << PreconditionerTypeToString(options_.preconditioner_type) << ". "
+ << "Congratulations, you found a bug in Ceres. Please report it.";
}
}
+CgnrSolver::~CgnrSolver() {}
+
LinearSolver::Summary CgnrSolver::SolveImpl(
BlockSparseMatrix* A,
const double* b,
@@ -62,16 +68,31 @@ LinearSolver::Summary CgnrSolver::SolveImpl(
z.setZero();
A->LeftMultiply(b, z.data());
- // Precondition if necessary.
- LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
- if (options_.preconditioner_type == JACOBI) {
- if (preconditioner_.get() == NULL) {
+ if (!preconditioner_) {
+ if (options_.preconditioner_type == JACOBI) {
preconditioner_.reset(new BlockJacobiPreconditioner(*A));
+ } else if (options_.preconditioner_type == SUBSET) {
+ Preconditioner::Options preconditioner_options;
+ preconditioner_options.type = SUBSET;
+ preconditioner_options.subset_preconditioner_start_row_block =
+ options_.subset_preconditioner_start_row_block;
+ preconditioner_options.sparse_linear_algebra_library_type =
+ options_.sparse_linear_algebra_library_type;
+ preconditioner_options.use_postordering = options_.use_postordering;
+ preconditioner_options.num_threads = options_.num_threads;
+ preconditioner_options.context = options_.context;
+ preconditioner_.reset(
+ new SubsetPreconditioner(preconditioner_options, *A));
}
+ }
+
+ if (preconditioner_) {
preconditioner_->Update(*A, per_solve_options.D);
- cg_per_solve_options.preconditioner = preconditioner_.get();
}
+ LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options;
+ cg_per_solve_options.preconditioner = preconditioner_.get();
+
// Solve (AtA + DtD)x = z (= Atb).
VectorRef(x, A->num_cols()).setZero();
CgnrLinearOperator lhs(*A, per_solve_options.D);
diff --git a/extern/ceres/internal/ceres/cgnr_solver.h b/extern/ceres/internal/ceres/cgnr_solver.h
index f7a15736925..52927333daa 100644
--- a/extern/ceres/internal/ceres/cgnr_solver.h
+++ b/extern/ceres/internal/ceres/cgnr_solver.h
@@ -31,7 +31,7 @@
#ifndef CERES_INTERNAL_CGNR_SOLVER_H_
#define CERES_INTERNAL_CGNR_SOLVER_H_
-#include "ceres/internal/scoped_ptr.h"
+#include <memory>
#include "ceres/linear_solver.h"
namespace ceres {
@@ -51,16 +51,19 @@ class BlockJacobiPreconditioner;
class CgnrSolver : public BlockSparseMatrixSolver {
public:
explicit CgnrSolver(const LinearSolver::Options& options);
- virtual Summary SolveImpl(
+ CgnrSolver(const CgnrSolver&) = delete;
+ void operator=(const CgnrSolver&) = delete;
+ virtual ~CgnrSolver();
+
+ Summary SolveImpl(
BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double* x);
+ double* x) final;
private:
const LinearSolver::Options options_;
- scoped_ptr<Preconditioner> preconditioner_;
- CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
+ std::unique_ptr<Preconditioner> preconditioner_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/collections_port.h b/extern/ceres/internal/ceres/collections_port.h
deleted file mode 100644
index e699a661b8b..00000000000
--- a/extern/ceres/internal/ceres/collections_port.h
+++ /dev/null
@@ -1,196 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: keir@google.com (Keir Mierle)
-//
-// Portable HashMap and HashSet, and a specialized overload for hashing pairs.
-
-#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
-#define CERES_INTERNAL_COLLECTIONS_PORT_H_
-
-#include "ceres/internal/port.h"
-
-#if defined(CERES_NO_UNORDERED_MAP)
-# include <map>
-# include <set>
-#endif
-
-#if defined(CERES_TR1_UNORDERED_MAP)
-# include <tr1/unordered_map>
-# include <tr1/unordered_set>
-# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
-# define CERES_HASH_NAMESPACE_END } }
-#endif
-
-#if defined(CERES_STD_UNORDERED_MAP)
-# include <unordered_map>
-# include <unordered_set>
-# define CERES_HASH_NAMESPACE_START namespace std {
-# define CERES_HASH_NAMESPACE_END }
-#endif
-
-#if defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
-# include <unordered_map>
-# include <unordered_set>
-# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 {
-# define CERES_HASH_NAMESPACE_END } }
-#endif
-
-#if !defined(CERES_NO_UNORDERED_MAP) && !defined(CERES_TR1_UNORDERED_MAP) && \
- !defined(CERES_STD_UNORDERED_MAP) && !defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE) // NOLINT
-# error One of: CERES_NO_UNORDERED_MAP, CERES_TR1_UNORDERED_MAP,\
- CERES_STD_UNORDERED_MAP, CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE must be defined! // NOLINT
-#endif
-
-#include <utility>
-#include "ceres/integral_types.h"
-
-// Some systems don't have access to unordered_map/unordered_set. In
-// that case, substitute the hash map/set with normal map/set. The
-// price to pay is slower speed for some operations.
-#if defined(CERES_NO_UNORDERED_MAP)
-
-namespace ceres {
-namespace internal {
-
-template<typename K, typename V>
-struct HashMap : map<K, V> {};
-
-template<typename K>
-struct HashSet : set<K> {};
-
-} // namespace internal
-} // namespace ceres
-
-#else
-
-namespace ceres {
-namespace internal {
-
-#if defined(CERES_TR1_UNORDERED_MAP) || \
- defined(CERES_STD_UNORDERED_MAP_IN_TR1_NAMESPACE)
-template<typename K, typename V>
-struct HashMap : std::tr1::unordered_map<K, V> {};
-template<typename K>
-struct HashSet : std::tr1::unordered_set<K> {};
-#endif
-
-#if defined(CERES_STD_UNORDERED_MAP)
-template<typename K, typename V>
-struct HashMap : std::unordered_map<K, V> {};
-template<typename K>
-struct HashSet : std::unordered_set<K> {};
-#endif
-
-#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
-#define GG_LONGLONG(x) x##I64
-#define GG_ULONGLONG(x) x##UI64
-#else
-#define GG_LONGLONG(x) x##LL
-#define GG_ULONGLONG(x) x##ULL
-#endif
-
-// The hash function is due to Bob Jenkins (see
-// http://burtleburtle.net/bob/hash/index.html). Each mix takes 36 instructions,
-// in 18 cycles if you're lucky. On x86 architectures, this requires 45
-// instructions in 27 cycles, if you're lucky.
-//
-// 32bit version
-inline void hash_mix(uint32& a, uint32& b, uint32& c) {
- a -= b; a -= c; a ^= (c>>13);
- b -= c; b -= a; b ^= (a<<8);
- c -= a; c -= b; c ^= (b>>13);
- a -= b; a -= c; a ^= (c>>12);
- b -= c; b -= a; b ^= (a<<16);
- c -= a; c -= b; c ^= (b>>5);
- a -= b; a -= c; a ^= (c>>3);
- b -= c; b -= a; b ^= (a<<10);
- c -= a; c -= b; c ^= (b>>15);
-}
-
-// 64bit version
-inline void hash_mix(uint64& a, uint64& b, uint64& c) {
- a -= b; a -= c; a ^= (c>>43);
- b -= c; b -= a; b ^= (a<<9);
- c -= a; c -= b; c ^= (b>>8);
- a -= b; a -= c; a ^= (c>>38);
- b -= c; b -= a; b ^= (a<<23);
- c -= a; c -= b; c ^= (b>>5);
- a -= b; a -= c; a ^= (c>>35);
- b -= c; b -= a; b ^= (a<<49);
- c -= a; c -= b; c ^= (b>>11);
-}
-
-inline uint32 Hash32NumWithSeed(uint32 num, uint32 c) {
- // The golden ratio; an arbitrary value.
- uint32 b = 0x9e3779b9UL;
- hash_mix(num, b, c);
- return c;
-}
-
-inline uint64 Hash64NumWithSeed(uint64 num, uint64 c) {
- // More of the golden ratio.
- uint64 b = GG_ULONGLONG(0xe08c1d668b756f82);
- hash_mix(num, b, c);
- return c;
-}
-
-} // namespace internal
-} // namespace ceres
-
-// Since on some platforms this is a doubly-nested namespace (std::tr1) and
-// others it is not, the entire namespace line must be in a macro.
-CERES_HASH_NAMESPACE_START
-
-// The outrageously annoying specializations below are for portability reasons.
-// In short, it's not possible to have two overloads of hash<pair<T1, T2>
-
-// Hasher for STL pairs. Requires hashers for both members to be defined.
-template<typename T>
-struct hash<pair<T, T> > {
- size_t operator()(const pair<T, T>& p) const {
- size_t h1 = hash<T>()(p.first);
- size_t h2 = hash<T>()(p.second);
- // The decision below is at compile time
- return (sizeof(h1) <= sizeof(ceres::internal::uint32)) ?
- ceres::internal::Hash32NumWithSeed(h1, h2) :
- ceres::internal::Hash64NumWithSeed(h1, h2);
- }
- // Less than operator for MSVC.
- bool operator()(const pair<T, T>& a,
- const pair<T, T>& b) const {
- return a < b;
- }
- static const size_t bucket_size = 4; // These are required by MSVC
- static const size_t min_buckets = 8; // 4 and 8 are defaults.
-};
-
-CERES_HASH_NAMESPACE_END
-
-#endif // CERES_NO_UNORDERED_MAP
-#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_
diff --git a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
index ebb2a62c544..3f6672f858c 100644
--- a/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
+++ b/extern/ceres/internal/ceres/compressed_col_sparse_matrix_utils.cc
@@ -47,8 +47,10 @@ void CompressedColumnScalarMatrixToBlockMatrix(
const vector<int>& col_blocks,
vector<int>* block_rows,
vector<int>* block_cols) {
- CHECK_NOTNULL(block_rows)->clear();
- CHECK_NOTNULL(block_cols)->clear();
+ CHECK(block_rows != nullptr);
+ CHECK(block_cols != nullptr);
+ block_rows->clear();
+ block_cols->clear();
const int num_row_blocks = row_blocks.size();
const int num_col_blocks = col_blocks.size();
diff --git a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
index 40977b74c67..1fc0116815c 100644
--- a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.cc
@@ -30,6 +30,7 @@
#include "ceres/compressed_row_jacobian_writer.h"
+#include <iterator>
#include <utility>
#include <vector>
@@ -70,7 +71,7 @@ void CompressedRowJacobianWriter::PopulateJacobianRowAndColumnBlockVectors(
void CompressedRowJacobianWriter::GetOrderedParameterBlocks(
const Program* program,
int residual_id,
- vector<pair<int, int> >* evaluated_jacobian_blocks) {
+ vector<pair<int, int>>* evaluated_jacobian_blocks) {
const ResidualBlock* residual_block =
program->residual_blocks()[residual_id];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
@@ -121,6 +122,7 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
// seems to be the only way to construct it without doing a memory copy.
int* rows = jacobian->mutable_rows();
int* cols = jacobian->mutable_cols();
+
int row_pos = 0;
rows[0] = 0;
for (int i = 0; i < residual_blocks.size(); ++i) {
@@ -206,7 +208,7 @@ void CompressedRowJacobianWriter::Write(int residual_id,
program_->residual_blocks()[residual_id];
const int num_residuals = residual_block->NumResiduals();
- vector<pair<int, int> > evaluated_jacobian_blocks;
+ vector<pair<int, int>> evaluated_jacobian_blocks;
GetOrderedParameterBlocks(program_, residual_id, &evaluated_jacobian_blocks);
// Where in the current row does the jacobian for a parameter block begin.
diff --git a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
index 1cd01235ccf..9fb414e2519 100644
--- a/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
+++ b/extern/ceres/internal/ceres/compressed_row_jacobian_writer.h
@@ -83,7 +83,7 @@ class CompressedRowJacobianWriter {
static void GetOrderedParameterBlocks(
const Program* program,
int residual_id,
- std::vector<std::pair<int, int> >* evaluated_jacobian_blocks);
+ std::vector<std::pair<int, int>>* evaluated_jacobian_blocks);
// JacobianWriter interface.
diff --git a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
index 91d18bbd604..e56de16bf92 100644
--- a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
+#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -54,9 +55,7 @@ namespace {
//
// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
- RowColLessThan(const int* rows, const int* cols)
- : rows(rows), cols(cols) {
- }
+ RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}
bool operator()(const int x, const int y) const {
if (rows[x] == rows[y]) {
@@ -69,6 +68,91 @@ struct RowColLessThan {
const int* cols;
};
+void TransposeForCompressedRowSparseStructure(const int num_rows,
+ const int num_cols,
+ const int num_nonzeros,
+ const int* rows,
+ const int* cols,
+ const double* values,
+ int* transpose_rows,
+ int* transpose_cols,
+ double* transpose_values) {
+ // Explicitly zero out transpose_rows.
+ std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);
+
+ // Count the number of entries in each column of the original matrix
+ // and assign to transpose_rows[col + 1].
+ for (int idx = 0; idx < num_nonzeros; ++idx) {
+ ++transpose_rows[cols[idx] + 1];
+ }
+
+ // Compute the starting position for each row in the transpose by
+ // computing the cumulative sum of the entries of transpose_rows.
+ for (int i = 1; i < num_cols + 1; ++i) {
+ transpose_rows[i] += transpose_rows[i - 1];
+ }
+
+ // Populate transpose_cols and (optionally) transpose_values by
+ // walking the entries of the source matrices. For each entry that
+ // is added, the value of transpose_row is incremented allowing us
+ // to keep track of where the next entry for that row should go.
+ //
+ // As a result transpose_row is shifted to the left by one entry.
+ for (int r = 0; r < num_rows; ++r) {
+ for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
+ const int c = cols[idx];
+ const int transpose_idx = transpose_rows[c]++;
+ transpose_cols[transpose_idx] = r;
+ if (values != NULL && transpose_values != NULL) {
+ transpose_values[transpose_idx] = values[idx];
+ }
+ }
+ }
+
+ // This loop undoes the left shift to transpose_rows introduced by
+ // the previous loop.
+ for (int i = num_cols - 1; i > 0; --i) {
+ transpose_rows[i] = transpose_rows[i - 1];
+ }
+ transpose_rows[0] = 0;
+}
+
+void AddRandomBlock(const int num_rows,
+ const int num_cols,
+ const int row_block_begin,
+ const int col_block_begin,
+ std::vector<int>* rows,
+ std::vector<int>* cols,
+ std::vector<double>* values) {
+ for (int r = 0; r < num_rows; ++r) {
+ for (int c = 0; c < num_cols; ++c) {
+ rows->push_back(row_block_begin + r);
+ cols->push_back(col_block_begin + c);
+ values->push_back(RandNormal());
+ }
+ }
+}
+
+void AddSymmetricRandomBlock(const int num_rows,
+ const int row_block_begin,
+ std::vector<int>* rows,
+ std::vector<int>* cols,
+ std::vector<double>* values) {
+ for (int r = 0; r < num_rows; ++r) {
+ for (int c = r; c < num_rows; ++c) {
+ const double v = RandNormal();
+ rows->push_back(row_block_begin + r);
+ cols->push_back(row_block_begin + c);
+ values->push_back(v);
+ if (r != c) {
+ rows->push_back(row_block_begin + c);
+ cols->push_back(row_block_begin + r);
+ values->push_back(v);
+ }
+ }
+ }
+}
+
} // namespace
// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
@@ -77,69 +161,96 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
int max_num_nonzeros) {
num_rows_ = num_rows;
num_cols_ = num_cols;
+ storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1, 0);
cols_.resize(max_num_nonzeros, 0);
values_.resize(max_num_nonzeros, 0.0);
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size() << ". Allocating "
+ << (num_rows_ + 1) * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(double); // NOLINT
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ const TripletSparseMatrix& input) {
+ return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
+}
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
- cols_.size() * sizeof(int) + // NOLINT
- cols_.size() * sizeof(double); // NOLINT
+CompressedRowSparseMatrix*
+CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
+ const TripletSparseMatrix& input) {
+ return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
}
-CompressedRowSparseMatrix::CompressedRowSparseMatrix(
- const TripletSparseMatrix& m) {
- num_rows_ = m.num_rows();
- num_cols_ = m.num_cols();
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ const TripletSparseMatrix& input, bool transpose) {
+ int num_rows = input.num_rows();
+ int num_cols = input.num_cols();
+ const int* rows = input.rows();
+ const int* cols = input.cols();
+ const double* values = input.values();
- rows_.resize(num_rows_ + 1, 0);
- cols_.resize(m.num_nonzeros(), 0);
- values_.resize(m.max_num_nonzeros(), 0.0);
+ if (transpose) {
+ std::swap(num_rows, num_cols);
+ std::swap(rows, cols);
+ }
- // index is the list of indices into the TripletSparseMatrix m.
- vector<int> index(m.num_nonzeros(), 0);
- for (int i = 0; i < m.num_nonzeros(); ++i) {
+ // index is the list of indices into the TripletSparseMatrix input.
+ vector<int> index(input.num_nonzeros(), 0);
+ for (int i = 0; i < input.num_nonzeros(); ++i) {
index[i] = i;
}
// Sort index such that the entries of m are ordered by row and ties
// are broken by column.
- sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
+ std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));
+
+ VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
+ << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
+ << ((num_rows + 1) * sizeof(int) + // NOLINT
+ input.num_nonzeros() * sizeof(int) + // NOLINT
+ input.num_nonzeros() * sizeof(double)); // NOLINT
+
+ CompressedRowSparseMatrix* output =
+ new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating "
- << ((num_rows_ + 1) * sizeof(int) + // NOLINT
- cols_.size() * sizeof(int) + // NOLINT
- cols_.size() * sizeof(double)); // NOLINT
+ if (num_rows == 0) {
+ // No data to copy.
+ return output;
+ }
// Copy the contents of the cols and values array in the order given
// by index and count the number of entries in each row.
- for (int i = 0; i < m.num_nonzeros(); ++i) {
+ int* output_rows = output->mutable_rows();
+ int* output_cols = output->mutable_cols();
+ double* output_values = output->mutable_values();
+
+ output_rows[0] = 0;
+ for (int i = 0; i < index.size(); ++i) {
const int idx = index[i];
- ++rows_[m.rows()[idx] + 1];
- cols_[i] = m.cols()[idx];
- values_[i] = m.values()[idx];
+ ++output_rows[rows[idx] + 1];
+ output_cols[i] = cols[idx];
+ output_values[i] = values[idx];
}
// Find the cumulative sum of the row counts.
- for (int i = 1; i < num_rows_ + 1; ++i) {
- rows_[i] += rows_[i - 1];
+ for (int i = 1; i < num_rows + 1; ++i) {
+ output_rows[i] += output_rows[i - 1];
}
- CHECK_EQ(num_nonzeros(), m.num_nonzeros());
+ CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
+ return output;
}
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
int num_rows) {
- CHECK_NOTNULL(diagonal);
+ CHECK(diagonal != nullptr);
num_rows_ = num_rows;
num_cols_ = num_rows;
+ storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1);
cols_.resize(num_rows);
values_.resize(num_rows);
@@ -154,47 +265,150 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
CHECK_EQ(num_nonzeros(), num_rows);
}
-CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
-}
+CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}
void CompressedRowSparseMatrix::SetZero() {
std::fill(values_.begin(), values_.end(), 0);
}
+// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
+// block-aware for higher performance.
void CompressedRowSparseMatrix::RightMultiply(const double* x,
double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
+
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ }
+ }
+ } else if (storage_type_ == UPPER_TRIANGULAR) {
+ // Because of their block structure, we will have entries that lie
+ // above (below) the diagonal for lower (upper) triangular matrices,
+ // so the loops below need to account for this.
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+
+ // For upper triangular matrices r <= c, so skip entries with r
+ // > c.
+ while (idx < idx_end && r > cols_[idx]) {
+ ++idx;
+ }
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- y[r] += values_[idx] * x[cols_[idx]];
+ for (; idx < idx_end; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ // Since we are only iterating over the upper triangular part
+ // of the matrix, add contributions for the strictly lower
+ // triangular part.
+ if (r != c) {
+ y[c] += v * x[r];
+ }
+ }
+ }
+ } else if (storage_type_ == LOWER_TRIANGULAR) {
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+ // For lower triangular matrices, we only iterate till we are r >=
+ // c.
+ for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ // Since we are only iterating over the lower triangular part
+ // of the matrix, add contributions for the strictly upper
+ // triangular part.
+ if (r != c) {
+ y[c] += v * x[r];
+ }
+ }
}
+ } else {
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- y[cols_[idx]] += values_[idx] * x[r];
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ y[cols_[idx]] += values_[idx] * x[r];
+ }
}
+ } else {
+ // Since the matrix is symmetric, LeftMultiply = RightMultiply.
+ RightMultiply(x, y);
}
}
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
- CHECK_NOTNULL(x);
+ CHECK(x != nullptr);
std::fill(x, x + num_cols_, 0.0);
- for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
- x[cols_[idx]] += values_[idx] * values_[idx];
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+ x[cols_[idx]] += values_[idx] * values_[idx];
+ }
+ } else if (storage_type_ == UPPER_TRIANGULAR) {
+ // Because of their block structure, we will have entries that lie
+ // above (below) the diagonal for lower (upper) triangular
+ // matrices, so the loops below need to account for this.
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+
+ // For upper triangular matrices r <= c, so skip entries with r
+ // > c.
+ while (idx < idx_end && r > cols_[idx]) {
+ ++idx;
+ }
+
+ for (; idx < idx_end; ++idx) {
+ const int c = cols_[idx];
+ const double v2 = values_[idx] * values_[idx];
+ x[c] += v2;
+ // Since we are only iterating over the upper triangular part
+ // of the matrix, add contributions for the strictly lower
+ // triangular part.
+ if (r != c) {
+ x[r] += v2;
+ }
+ }
+ }
+ } else if (storage_type_ == LOWER_TRIANGULAR) {
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+ // For lower triangular matrices, we only iterate till we are r >=
+ // c.
+ for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+ const int c = cols_[idx];
+ const double v2 = values_[idx] * values_[idx];
+ x[c] += v2;
+ // Since we are only iterating over the lower triangular part
+ // of the matrix, add contributions for the strictly upper
+ // triangular part.
+ if (r != c) {
+ x[r] += v2;
+ }
+ }
+ }
+ } else {
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
-
void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
- CHECK_NOTNULL(scale);
+ CHECK(scale != nullptr);
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
values_[idx] *= scale[cols_[idx]];
@@ -202,7 +416,7 @@ void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
}
void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
- CHECK_NOTNULL(dense_matrix);
+ CHECK(dense_matrix != nullptr);
dense_matrix->resize(num_rows_, num_cols_);
dense_matrix->setZero();
@@ -216,10 +430,17 @@ void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
CHECK_GE(delta_rows, 0);
CHECK_LE(delta_rows, num_rows_);
+ CHECK_EQ(storage_type_, UNSYMMETRIC);
num_rows_ -= delta_rows;
rows_.resize(num_rows_ + 1);
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return;
+ }
+
// Walk the list of row blocks until we reach the new number of rows
// and the drop the rest of the row blocks.
int num_row_blocks = 0;
@@ -233,9 +454,11 @@ void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
}
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
+ CHECK_EQ(storage_type_, UNSYMMETRIC);
CHECK_EQ(m.num_cols(), num_cols_);
- CHECK(row_blocks_.size() == 0 || m.row_blocks().size() !=0)
+ CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
+ (!row_blocks_.empty() && !m.row_blocks().empty()))
<< "Cannot append a matrix with row blocks to one without and vice versa."
<< "This matrix has : " << row_blocks_.size() << " row blocks."
<< "The matrix being appended has: " << m.row_blocks().size()
@@ -254,9 +477,8 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
DCHECK_LT(num_nonzeros(), cols_.size());
if (m.num_nonzeros() > 0) {
std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
- std::copy(m.values(),
- m.values() + m.num_nonzeros(),
- &values_[num_nonzeros()]);
+ std::copy(
+ m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
}
rows_.resize(num_rows_ + m.num_rows() + 1);
@@ -270,20 +492,22 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
}
num_rows_ += m.num_rows();
- row_blocks_.insert(row_blocks_.end(),
- m.row_blocks().begin(),
- m.row_blocks().end());
+
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return;
+ }
+
+ row_blocks_.insert(
+ row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}
void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
- CHECK_NOTNULL(file);
+ CHECK(file != nullptr);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- fprintf(file,
- "% 10d % 10d %17f\n",
- r,
- cols_[idx],
- values_[idx]);
+ fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
}
}
}
@@ -308,29 +532,8 @@ void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
values_.resize(num_nonzeros);
}
-void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
- double* solution) const {
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1] - 1; ++idx) {
- solution[r] -= values_[idx] * solution[cols_[idx]];
- }
- solution[r] /= values_[rows_[r + 1] - 1];
- }
-}
-
-void CompressedRowSparseMatrix::SolveLowerTriangularTransposeInPlace(
- double* solution) const {
- for (int r = num_rows_ - 1; r >= 0; --r) {
- solution[r] /= values_[rows_[r + 1] - 1];
- for (int idx = rows_[r + 1] - 2; idx >= rows_[r]; --idx) {
- solution[cols_[idx]] -= values_[idx] * solution[r];
- }
- }
-}
-
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
- const double* diagonal,
- const vector<int>& blocks) {
+ const double* diagonal, const vector<int>& blocks) {
int num_rows = 0;
int num_nonzeros = 0;
for (int i = 0; i < blocks.size(); ++i) {
@@ -373,189 +576,152 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
CompressedRowSparseMatrix* transpose =
new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
- int* transpose_rows = transpose->mutable_rows();
- int* transpose_cols = transpose->mutable_cols();
- double* transpose_values = transpose->mutable_values();
-
- for (int idx = 0; idx < num_nonzeros(); ++idx) {
- ++transpose_rows[cols_[idx] + 1];
- }
-
- for (int i = 1; i < transpose->num_rows() + 1; ++i) {
- transpose_rows[i] += transpose_rows[i - 1];
- }
-
- for (int r = 0; r < num_rows(); ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- const int c = cols_[idx];
- const int transpose_idx = transpose_rows[c]++;
- transpose_cols[transpose_idx] = r;
- transpose_values[transpose_idx] = values_[idx];
- }
- }
-
- for (int i = transpose->num_rows() - 1; i > 0 ; --i) {
- transpose_rows[i] = transpose_rows[i - 1];
+ switch (storage_type_) {
+ case UNSYMMETRIC:
+ transpose->set_storage_type(UNSYMMETRIC);
+ break;
+ case LOWER_TRIANGULAR:
+ transpose->set_storage_type(UPPER_TRIANGULAR);
+ break;
+ case UPPER_TRIANGULAR:
+ transpose->set_storage_type(LOWER_TRIANGULAR);
+ break;
+ default:
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
+ };
+
+ TransposeForCompressedRowSparseStructure(num_rows(),
+ num_cols(),
+ num_nonzeros(),
+ rows(),
+ cols(),
+ values(),
+ transpose->mutable_rows(),
+ transpose->mutable_cols(),
+ transpose->mutable_values());
+
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return transpose;
}
- transpose_rows[0] = 0;
*(transpose->mutable_row_blocks()) = col_blocks_;
*(transpose->mutable_col_blocks()) = row_blocks_;
-
return transpose;
}
-namespace {
-// A ProductTerm is a term in the outer product of a matrix with
-// itself.
-struct ProductTerm {
- ProductTerm(const int row, const int col, const int index)
- : row(row), col(col), index(index) {
- }
-
- bool operator<(const ProductTerm& right) const {
- if (row == right.row) {
- if (col == right.col) {
- return index < right.index;
- }
- return col < right.col;
- }
- return row < right.row;
- }
-
- int row;
- int col;
- int index;
-};
-
-CompressedRowSparseMatrix*
-CompressAndFillProgram(const int num_rows,
- const int num_cols,
- const vector<ProductTerm>& product,
- vector<int>* program) {
- CHECK_GT(product.size(), 0);
-
- // Count the number of unique product term, which in turn is the
- // number of non-zeros in the outer product.
- int num_nonzeros = 1;
- for (int i = 1; i < product.size(); ++i) {
- if (product[i].row != product[i - 1].row ||
- product[i].col != product[i - 1].col) {
- ++num_nonzeros;
- }
- }
-
- CompressedRowSparseMatrix* matrix =
- new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);
-
- int* crsm_rows = matrix->mutable_rows();
- std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
- int* crsm_cols = matrix->mutable_cols();
- std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);
-
- CHECK_NOTNULL(program)->clear();
- program->resize(product.size());
-
- // Iterate over the sorted product terms. This means each row is
- // filled one at a time, and we are able to assign a position in the
- // values array to each term.
- //
- // If terms repeat, i.e., they contribute to the same entry in the
- // result matrix), then they do not affect the sparsity structure of
- // the result matrix.
- int nnz = 0;
- crsm_cols[0] = product[0].col;
- crsm_rows[product[0].row + 1]++;
- (*program)[product[0].index] = nnz;
- for (int i = 1; i < product.size(); ++i) {
- const ProductTerm& previous = product[i - 1];
- const ProductTerm& current = product[i];
-
- // Sparsity structure is updated only if the term is not a repeat.
- if (previous.row != current.row || previous.col != current.col) {
- crsm_cols[++nnz] = current.col;
- crsm_rows[current.row + 1]++;
- }
-
- // All terms get assigned the position in the values array where
- // their value is accumulated.
- (*program)[current.index] = nnz;
- }
-
- for (int i = 1; i < num_rows + 1; ++i) {
- crsm_rows[i] += crsm_rows[i - 1];
- }
-
- return matrix;
-}
-
-} // namespace
-
-CompressedRowSparseMatrix*
-CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- const CompressedRowSparseMatrix& m,
- vector<int>* program) {
- CHECK_NOTNULL(program)->clear();
- CHECK_GT(m.num_nonzeros(), 0)
- << "Congratulations, "
- << "you found a bug in Ceres. Please report it.";
-
- vector<ProductTerm> product;
- const vector<int>& row_blocks = m.row_blocks();
- int row_block_begin = 0;
- // Iterate over row blocks
- for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
- const int row_block_end = row_block_begin + row_blocks[row_block];
- // Compute the outer product terms for just one row per row block.
- const int r = row_block_begin;
- // Compute the lower triangular part of the product.
- for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
- for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
- product.push_back(ProductTerm(m.cols()[idx1],
- m.cols()[idx2],
- product.size()));
- }
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
+ CompressedRowSparseMatrix::RandomMatrixOptions options) {
+ CHECK_GT(options.num_row_blocks, 0);
+ CHECK_GT(options.min_row_block_size, 0);
+ CHECK_GT(options.max_row_block_size, 0);
+ CHECK_LE(options.min_row_block_size, options.max_row_block_size);
+
+ if (options.storage_type == UNSYMMETRIC) {
+ CHECK_GT(options.num_col_blocks, 0);
+ CHECK_GT(options.min_col_block_size, 0);
+ CHECK_GT(options.max_col_block_size, 0);
+ CHECK_LE(options.min_col_block_size, options.max_col_block_size);
+ } else {
+ // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
+ options.num_col_blocks = options.num_row_blocks;
+ options.min_col_block_size = options.min_row_block_size;
+ options.max_col_block_size = options.max_row_block_size;
+ }
+
+ CHECK_GT(options.block_density, 0.0);
+ CHECK_LE(options.block_density, 1.0);
+
+ vector<int> row_blocks;
+ vector<int> col_blocks;
+
+ // Generate the row block structure.
+ for (int i = 0; i < options.num_row_blocks; ++i) {
+ // Generate a random integer in [min_row_block_size, max_row_block_size]
+ const int delta_block_size =
+ Uniform(options.max_row_block_size - options.min_row_block_size);
+ row_blocks.push_back(options.min_row_block_size + delta_block_size);
+ }
+
+ if (options.storage_type == UNSYMMETRIC) {
+ // Generate the col block structure.
+ for (int i = 0; i < options.num_col_blocks; ++i) {
+ // Generate a random integer in [min_col_block_size, max_col_block_size]
+ const int delta_block_size =
+ Uniform(options.max_col_block_size - options.min_col_block_size);
+ col_blocks.push_back(options.min_col_block_size + delta_block_size);
}
- row_block_begin = row_block_end;
- }
- CHECK_EQ(row_block_begin, m.num_rows());
- sort(product.begin(), product.end());
- return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
-}
+ } else {
+ // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR);
+ col_blocks = row_blocks;
+ }
+
+ vector<int> tsm_rows;
+ vector<int> tsm_cols;
+ vector<double> tsm_values;
+
+ // For ease of construction, we are going to generate the
+ // CompressedRowSparseMatrix by generating it as a
+ // TripletSparseMatrix and then converting it to a
+ // CompressedRowSparseMatrix.
+
+ // It is possible that the random matrix is empty which is likely
+ // not what the user wants, so do the matrix generation till we have
+ // at least one non-zero entry.
+ while (tsm_values.empty()) {
+ tsm_rows.clear();
+ tsm_cols.clear();
+ tsm_values.clear();
+
+ int row_block_begin = 0;
+ for (int r = 0; r < options.num_row_blocks; ++r) {
+ int col_block_begin = 0;
+ for (int c = 0; c < options.num_col_blocks; ++c) {
+ if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
+ ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
+ col_block_begin += col_blocks[c];
+ continue;
+ }
-void CompressedRowSparseMatrix::ComputeOuterProduct(
- const CompressedRowSparseMatrix& m,
- const vector<int>& program,
- CompressedRowSparseMatrix* result) {
- result->SetZero();
- double* values = result->mutable_values();
- const vector<int>& row_blocks = m.row_blocks();
-
- int cursor = 0;
- int row_block_begin = 0;
- const double* m_values = m.values();
- const int* m_rows = m.rows();
- // Iterate over row blocks.
- for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
- const int row_block_end = row_block_begin + row_blocks[row_block];
- const int saved_cursor = cursor;
- for (int r = row_block_begin; r < row_block_end; ++r) {
- // Reuse the program segment for each row in this row block.
- cursor = saved_cursor;
- const int row_begin = m_rows[r];
- const int row_end = m_rows[r + 1];
- for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
- const double v1 = m_values[idx1];
- for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
- values[program[cursor]] += v1 * m_values[idx2];
+ // Randomly determine if this block is present or not.
+ if (RandDouble() <= options.block_density) {
+ // If the matrix is symmetric, then we take care to generate
+ // symmetric diagonal blocks.
+ if (options.storage_type == UNSYMMETRIC || r != c) {
+ AddRandomBlock(row_blocks[r],
+ col_blocks[c],
+ row_block_begin,
+ col_block_begin,
+ &tsm_rows,
+ &tsm_cols,
+ &tsm_values);
+ } else {
+ AddSymmetricRandomBlock(row_blocks[r],
+ row_block_begin,
+ &tsm_rows,
+ &tsm_cols,
+ &tsm_values);
+ }
}
+ col_block_begin += col_blocks[c];
}
+ row_block_begin += row_blocks[r];
}
- row_block_begin = row_block_end;
}
- CHECK_EQ(row_block_begin, m.num_rows());
- CHECK_EQ(cursor, program.size());
+ const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+ const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+ const bool kDoNotTranspose = false;
+ CompressedRowSparseMatrix* matrix =
+ CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ TripletSparseMatrix(
+ num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
+ kDoNotTranspose);
+ (*matrix->mutable_row_blocks()) = row_blocks;
+ (*matrix->mutable_col_blocks()) = col_blocks;
+ matrix->set_storage_type(options.storage_type);
+ return matrix;
}
} // namespace internal
diff --git a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
index 987339d09a1..758b40bbc8a 100644
--- a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.h
@@ -32,7 +32,6 @@
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#include <vector>
-#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -48,13 +47,35 @@ class TripletSparseMatrix;
class CompressedRowSparseMatrix : public SparseMatrix {
public:
- // Build a matrix with the same content as the TripletSparseMatrix
- // m. TripletSparseMatrix objects are easier to construct
- // incrementally, so we use them to initialize SparseMatrix
- // objects.
+ enum StorageType {
+ UNSYMMETRIC,
+ // Matrix is assumed to be symmetric but only the lower triangular
+ // part of the matrix is stored.
+ LOWER_TRIANGULAR,
+ // Matrix is assumed to be symmetric but only the upper triangular
+ // part of the matrix is stored.
+ UPPER_TRIANGULAR
+ };
+
+ // Create a matrix with the same content as the TripletSparseMatrix
+ // input. We assume that input does not have any repeated
+ // entries.
//
- // We assume that m does not have any repeated entries.
- explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
+ // The storage type of the matrix is set to UNSYMMETRIC.
+ //
+ // Caller owns the result.
+ static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+ const TripletSparseMatrix& input);
+
+ // Create a matrix with the same content as the TripletSparseMatrix
+ // input transposed. We assume that input does not have any repeated
+ // entries.
+ //
+ // The storage type of the matrix is set to UNSYMMETRIC.
+ //
+ // Caller owns the result.
+ static CompressedRowSparseMatrix* FromTripletSparseMatrixTransposed(
+ const TripletSparseMatrix& input);
// Use this constructor only if you know what you are doing. This
// creates a "blank" matrix with the appropriate amount of memory
@@ -67,30 +88,30 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// manually, instead of going via the indirect route of first
// constructing a TripletSparseMatrix, which leads to more than
// double the peak memory usage.
- CompressedRowSparseMatrix(int num_rows,
- int num_cols,
- int max_num_nonzeros);
+ //
+ // The storage type is set to UNSYMMETRIC.
+ CompressedRowSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
// Build a square sparse diagonal matrix with num_rows rows and
// columns. The diagonal m(i,i) = diagonal(i);
+ //
+ // The storage type is set to UNSYMMETRIC
CompressedRowSparseMatrix(const double* diagonal, int num_rows);
- virtual ~CompressedRowSparseMatrix();
-
// SparseMatrix interface.
- virtual void SetZero();
- virtual void RightMultiply(const double* x, double* y) const;
- virtual void LeftMultiply(const double* x, double* y) const;
- virtual void SquaredColumnNorm(double* x) const;
- virtual void ScaleColumns(const double* scale);
-
- virtual void ToDenseMatrix(Matrix* dense_matrix) const;
- virtual void ToTextFile(FILE* file) const;
- virtual int num_rows() const { return num_rows_; }
- virtual int num_cols() const { return num_cols_; }
- virtual int num_nonzeros() const { return rows_[num_rows_]; }
- virtual const double* values() const { return &values_[0]; }
- virtual double* mutable_values() { return &values_[0]; }
+ virtual ~CompressedRowSparseMatrix();
+ void SetZero() final;
+ void RightMultiply(const double* x, double* y) const final;
+ void LeftMultiply(const double* x, double* y) const final;
+ void SquaredColumnNorm(double* x) const final;
+ void ScaleColumns(const double* scale) final;
+ void ToDenseMatrix(Matrix* dense_matrix) const final;
+ void ToTextFile(FILE* file) const final;
+ int num_rows() const final { return num_rows_; }
+ int num_cols() const final { return num_cols_; }
+ int num_nonzeros() const final { return rows_[num_rows_]; }
+ const double* values() const final { return &values_[0]; }
+ double* mutable_values() final { return &values_[0]; }
// Delete the bottom delta_rows.
// num_rows -= delta_rows
@@ -102,6 +123,15 @@ class CompressedRowSparseMatrix : public SparseMatrix {
void ToCRSMatrix(CRSMatrix* matrix) const;
+ CompressedRowSparseMatrix* Transpose() const;
+
+ // Destructive array resizing method.
+ void SetMaxNumNonZeros(int num_nonzeros);
+
+ // Non-destructive array resizing method.
+ void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
+ void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
+
// Low level access methods that expose the structure of the matrix.
const int* cols() const { return &cols_[0]; }
int* mutable_cols() { return &cols_[0]; }
@@ -109,60 +139,79 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const int* rows() const { return &rows_[0]; }
int* mutable_rows() { return &rows_[0]; }
+ const StorageType storage_type() const { return storage_type_; }
+ void set_storage_type(const StorageType storage_type) {
+ storage_type_ = storage_type;
+ }
+
const std::vector<int>& row_blocks() const { return row_blocks_; }
std::vector<int>* mutable_row_blocks() { return &row_blocks_; }
const std::vector<int>& col_blocks() const { return col_blocks_; }
std::vector<int>* mutable_col_blocks() { return &col_blocks_; }
- // Destructive array resizing method.
- void SetMaxNumNonZeros(int num_nonzeros);
-
- // Non-destructive array resizing method.
- void set_num_rows(const int num_rows) { num_rows_ = num_rows; }
- void set_num_cols(const int num_cols) { num_cols_ = num_cols; }
-
- void SolveLowerTriangularInPlace(double* solution) const;
- void SolveLowerTriangularTransposeInPlace(double* solution) const;
-
- CompressedRowSparseMatrix* Transpose() const;
-
+ // Create a block diagonal CompressedRowSparseMatrix with the given
+ // block structure. The individual blocks are assumed to be laid out
+ // contiguously in the diagonal array, one block at a time.
+ //
+ // Caller owns the result.
static CompressedRowSparseMatrix* CreateBlockDiagonalMatrix(
- const double* diagonal,
- const std::vector<int>& blocks);
+ const double* diagonal, const std::vector<int>& blocks);
- // Compute the sparsity structure of the product m.transpose() * m
- // and create a CompressedRowSparseMatrix corresponding to it.
+ // Options struct to control the generation of random block sparse
+ // matrices in compressed row sparse format.
+ //
+ // The random matrix generation proceeds as follows.
//
- // Also compute a "program" vector, which for every term in the
- // outer product points to the entry in the values array of the
- // result matrix where it should be accumulated.
+ // First the row and column block structure is determined by
+ // generating random row and column block sizes that lie within the
+ // given bounds.
//
- // This program is used by the ComputeOuterProduct function below to
- // compute the outer product.
+ // Then we walk the block structure of the resulting matrix, and with
+ // probability block_density detemine whether they are structurally
+ // zero or not. If the answer is no, then we generate entries for the
+ // block which are distributed normally.
+ struct RandomMatrixOptions {
+ // Type of matrix to create.
+ //
+ // If storage_type is UPPER_TRIANGULAR (LOWER_TRIANGULAR), then
+ // create a square symmetric matrix with just the upper triangular
+ // (lower triangular) part. In this case, num_col_blocks,
+ // min_col_block_size and max_col_block_size will be ignored and
+ // assumed to be equal to the corresponding row settings.
+ StorageType storage_type = UNSYMMETRIC;
+
+ int num_row_blocks = 0;
+ int min_row_block_size = 0;
+ int max_row_block_size = 0;
+ int num_col_blocks = 0;
+ int min_col_block_size = 0;
+ int max_col_block_size = 0;
+
+ // 0 < block_density <= 1 is the probability of a block being
+ // present in the matrix. A given random matrix will not have
+ // precisely this density.
+ double block_density = 0.0;
+ };
+
+ // Create a random CompressedRowSparseMatrix whose entries are
+ // normally distributed and whose structure is determined by
+ // RandomMatrixOptions.
//
- // Since the entries of the program are the same for rows with the
- // same sparsity structure, the program only stores the result for
- // one row per row block. The ComputeOuterProduct function reuses
- // this information for each row in the row block.
- static CompressedRowSparseMatrix* CreateOuterProductMatrixAndProgram(
- const CompressedRowSparseMatrix& m,
- std::vector<int>* program);
-
- // Compute the values array for the expression m.transpose() * m,
- // where the matrix used to store the result and a program have been
- // created using the CreateOuterProductMatrixAndProgram function
- // above.
- static void ComputeOuterProduct(const CompressedRowSparseMatrix& m,
- const std::vector<int>& program,
- CompressedRowSparseMatrix* result);
+ // Caller owns the result.
+ static CompressedRowSparseMatrix* CreateRandomMatrix(
+ RandomMatrixOptions options);
private:
+ static CompressedRowSparseMatrix* FromTripletSparseMatrix(
+ const TripletSparseMatrix& input, bool transpose);
+
int num_rows_;
int num_cols_;
std::vector<int> rows_;
std::vector<int> cols_;
std::vector<double> values_;
+ StorageType storage_type_;
// If the matrix has an underlying block structure, then it can also
  // carry with it row and column block sizes. This is auxiliary and
@@ -171,8 +220,6 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// any way.
std::vector<int> row_blocks_;
std::vector<int> col_blocks_;
-
- CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/concurrent_queue.h b/extern/ceres/internal/ceres/concurrent_queue.h
new file mode 100644
index 00000000000..52e2903022b
--- /dev/null
+++ b/extern/ceres/internal/ceres/concurrent_queue.h
@@ -0,0 +1,159 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_CONCURRENT_QUEUE_H_
+#define CERES_INTERNAL_CONCURRENT_QUEUE_H_
+
+#include <condition_variable>
+#include <mutex>
+#include <queue>
+#include <thread>
+
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread-safe multi-producer, multi-consumer queue for queueing items that
+// are typically handled asynchronously by multiple threads. The ConcurrentQueue
+// has two states which only affect the Wait call:
+//
+// (1) Waiters have been enabled (enabled by default or calling
+// EnableWaiters). The call to Wait will block until an item is available.
+// Push and pop will operate as expected.
+//
+// (2) StopWaiters has been called. All threads blocked in a Wait() call will
+// be woken up and pop any available items from the queue. All future Wait
+// requests will either return an element from the queue or return
+// immediately if no element is present. Push and pop will operate as
+// expected.
+//
+// A common use case is using the concurrent queue as an interface for
+// scheduling tasks for a set of thread workers:
+//
+// ConcurrentQueue<Task> task_queue;
+//
+// [Worker threads]:
+// Task task;
+// while(task_queue.Wait(&task)) {
+// ...
+// }
+//
+// [Producers]:
+// task_queue.Push(...);
+// ..
+// task_queue.Push(...);
+// ...
+// // Signal worker threads to stop blocking on Wait and terminate.
+// task_queue.StopWaiters();
+//
+template <typename T>
+class ConcurrentQueue {
+ public:
+ // Defaults the queue to blocking on Wait calls.
+ ConcurrentQueue() : wait_(true) {}
+
+ // Atomically push an element onto the queue. If a thread was waiting for an
+ // element, wake it up.
+ void Push(const T& value) {
+ std::lock_guard<std::mutex> lock(mutex_);
+ queue_.push(value);
+ work_pending_condition_.notify_one();
+ }
+
+ // Atomically pop an element from the queue. If an element is present, return
+ // true. If the queue was empty, return false.
+ bool Pop(T* value) {
+ CHECK(value != nullptr);
+
+ std::lock_guard<std::mutex> lock(mutex_);
+ return PopUnlocked(value);
+ }
+
+ // Atomically pop an element from the queue. Blocks until one is available or
+ // StopWaiters is called. Returns true if an element was successfully popped
+ // from the queue, otherwise returns false.
+ bool Wait(T* value) {
+ CHECK(value != nullptr);
+
+ std::unique_lock<std::mutex> lock(mutex_);
+ work_pending_condition_.wait(lock,
+ [&]() { return !(wait_ && queue_.empty()); });
+
+ return PopUnlocked(value);
+ }
+
+ // Unblock all threads waiting to pop a value from the queue, and they will
+ // exit Wait() without getting a value. All future Wait requests will return
+ // immediately if no element is present until EnableWaiters is called.
+ void StopWaiters() {
+ std::lock_guard<std::mutex> lock(mutex_);
+ wait_ = false;
+ work_pending_condition_.notify_all();
+ }
+
+ // Enable threads to block on Wait calls.
+ void EnableWaiters() {
+ std::lock_guard<std::mutex> lock(mutex_);
+ wait_ = true;
+ }
+
+ private:
+ // Pops an element from the queue. If an element is present, return
+ // true. If the queue was empty, return false. Not thread-safe. Must acquire
+ // the lock before calling.
+ bool PopUnlocked(T* value) {
+ if (queue_.empty()) {
+ return false;
+ }
+
+ *value = queue_.front();
+ queue_.pop();
+
+ return true;
+ }
+
+  // The mutex controls read and write access to the queue_ and wait_
+ // variables. It is also used to block the calling thread until an element is
+ // available to pop from the queue.
+ std::mutex mutex_;
+ std::condition_variable work_pending_condition_;
+
+ std::queue<T> queue_;
+ // If true, signals that callers of Wait will block waiting to pop an
+ // element off the queue.
+ bool wait_;
+};
+
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CONCURRENT_QUEUE_H_
diff --git a/extern/ceres/internal/ceres/conditioned_cost_function.cc b/extern/ceres/internal/ceres/conditioned_cost_function.cc
index 08899e3d246..d933ad7c462 100644
--- a/extern/ceres/internal/ceres/conditioned_cost_function.cc
+++ b/extern/ceres/internal/ceres/conditioned_cost_function.cc
@@ -68,7 +68,7 @@ ConditionedCostFunction::ConditionedCostFunction(
ConditionedCostFunction::~ConditionedCostFunction() {
if (ownership_ == TAKE_OWNERSHIP) {
- STLDeleteElements(&conditioners_);
+ STLDeleteUniqueContainerPointers(conditioners_.begin(), conditioners_.end());
} else {
wrapped_cost_function_.release();
}
diff --git a/extern/ceres/internal/ceres/conjugate_gradients_solver.cc b/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
index 3702276a2fb..c6f85c15ea0 100644
--- a/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
+++ b/extern/ceres/internal/ceres/conjugate_gradients_solver.cc
@@ -41,7 +41,6 @@
#include <cmath>
#include <cstddef>
-#include "ceres/fpclassify.h"
#include "ceres/internal/eigen.h"
#include "ceres/linear_operator.h"
#include "ceres/stringprintf.h"
@@ -53,7 +52,7 @@ namespace internal {
namespace {
bool IsZeroOrInfinity(double x) {
- return ((x == 0.0) || (IsInfinite(x)));
+ return ((x == 0.0) || std::isinf(x));
}
} // namespace
@@ -68,9 +67,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
- CHECK_NOTNULL(A);
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(b);
+ CHECK(A != nullptr);
+ CHECK(x != nullptr);
+ CHECK(b != nullptr);
CHECK_EQ(A->num_rows(), A->num_cols());
LinearSolver::Summary summary;
@@ -148,7 +147,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
q.setZero();
A->RightMultiply(p.data(), q.data());
const double pq = p.dot(q);
- if ((pq <= 0) || IsInfinite(pq)) {
+ if ((pq <= 0) || std::isinf(pq)) {
summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE;
summary.message = StringPrintf(
"Matrix is indefinite, no more progress can be made. "
@@ -158,7 +157,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
}
const double alpha = rho / pq;
- if (IsInfinite(alpha)) {
+ if (std::isinf(alpha)) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message =
StringPrintf("Numerical failure. alpha = rho / pq = %e, "
diff --git a/extern/ceres/internal/ceres/conjugate_gradients_solver.h b/extern/ceres/internal/ceres/conjugate_gradients_solver.h
index a1e18334414..d89383e6359 100644
--- a/extern/ceres/internal/ceres/conjugate_gradients_solver.h
+++ b/extern/ceres/internal/ceres/conjugate_gradients_solver.h
@@ -35,7 +35,6 @@
#define CERES_INTERNAL_CONJUGATE_GRADIENTS_SOLVER_H_
#include "ceres/linear_solver.h"
-#include "ceres/internal/macros.h"
namespace ceres {
namespace internal {
@@ -58,14 +57,13 @@ class LinearOperator;
class ConjugateGradientsSolver : public LinearSolver {
public:
explicit ConjugateGradientsSolver(const LinearSolver::Options& options);
- virtual Summary Solve(LinearOperator* A,
- const double* b,
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* x);
+ Summary Solve(LinearOperator* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) final;
private:
const LinearSolver::Options options_;
- CERES_DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/context.cc b/extern/ceres/internal/ceres/context.cc
new file mode 100644
index 00000000000..e2232013b4b
--- /dev/null
+++ b/extern/ceres/internal/ceres/context.cc
@@ -0,0 +1,41 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#include "ceres/context.h"
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+
+Context* Context::Create() {
+ return new internal::ContextImpl();
+}
+
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/context_impl.cc b/extern/ceres/internal/ceres/context_impl.cc
new file mode 100644
index 00000000000..622f33a9dc0
--- /dev/null
+++ b/extern/ceres/internal/ceres/context_impl.cc
@@ -0,0 +1,43 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+namespace internal {
+
+void ContextImpl::EnsureMinimumThreads(int num_threads) {
+#ifdef CERES_USE_CXX_THREADS
+ thread_pool.Resize(num_threads);
+#endif // CERES_USE_CXX_THREADS
+
+}
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/context_impl.h b/extern/ceres/internal/ceres/context_impl.h
new file mode 100644
index 00000000000..5c03ad71bab
--- /dev/null
+++ b/extern/ceres/internal/ceres/context_impl.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_CONTEXT_IMPL_H_
+#define CERES_INTERNAL_CONTEXT_IMPL_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include "ceres/context.h"
+
+#ifdef CERES_USE_CXX_THREADS
+#include "ceres/thread_pool.h"
+#endif // CERES_USE_CXX_THREADS
+
+namespace ceres {
+namespace internal {
+
+class ContextImpl : public Context {
+ public:
+ ContextImpl() {}
+ ContextImpl(const ContextImpl&) = delete;
+ void operator=(const ContextImpl&) = delete;
+
+ virtual ~ContextImpl() {}
+
+  // When compiled with C++ threading support, resize the thread pool to have
+  // min(num_threads, num_hardware_threads) threads, where num_hardware_threads
+  // is defined by the hardware. Otherwise this call is a no-op.
+ void EnsureMinimumThreads(int num_threads);
+
+#ifdef CERES_USE_CXX_THREADS
+ ThreadPool thread_pool;
+#endif // CERES_USE_CXX_THREADS
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_CONTEXT_IMPL_H_
diff --git a/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc b/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
index c6b42cf1516..c5d56f30bc3 100644
--- a/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
+++ b/extern/ceres/internal/ceres/coordinate_descent_minimizer.cc
@@ -30,16 +30,16 @@
#include "ceres/coordinate_descent_minimizer.h"
-#ifdef CERES_USE_OPENMP
-#include <omp.h>
-#endif
-
+#include <algorithm>
#include <iterator>
+#include <memory>
#include <numeric>
#include <vector>
+
#include "ceres/evaluator.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
+#include "ceres/parallel_for.h"
#include "ceres/parameter_block.h"
#include "ceres/parameter_block_ordering.h"
#include "ceres/problem_impl.h"
@@ -59,6 +59,11 @@ using std::set;
using std::string;
using std::vector;
+CoordinateDescentMinimizer::CoordinateDescentMinimizer(ContextImpl* context)
+ : context_(context) {
+ CHECK(context_ != nullptr);
+}
+
CoordinateDescentMinimizer::~CoordinateDescentMinimizer() {
}
@@ -74,19 +79,16 @@ bool CoordinateDescentMinimizer::Init(
// Serialize the OrderedGroups into a vector of parameter block
// offsets for parallel access.
map<ParameterBlock*, int> parameter_block_index;
- map<int, set<double*> > group_to_elements = ordering.group_to_elements();
- for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
- it != group_to_elements.end();
- ++it) {
- for (set<double*>::const_iterator ptr_it = it->second.begin();
- ptr_it != it->second.end();
- ++ptr_it) {
- parameter_blocks_.push_back(parameter_map.find(*ptr_it)->second);
+ map<int, set<double*>> group_to_elements = ordering.group_to_elements();
+ for (const auto& g_t_e : group_to_elements) {
+ const auto& elements = g_t_e.second;
+ for (double* parameter_block: elements) {
+ parameter_blocks_.push_back(parameter_map.find(parameter_block)->second);
parameter_block_index[parameter_blocks_.back()] =
parameter_blocks_.size() - 1;
}
independent_set_offsets_.push_back(
- independent_set_offsets_.back() + it->second.size());
+ independent_set_offsets_.back() + elements.size());
}
// The ordering does not have to contain all parameter blocks, so
@@ -109,8 +111,7 @@ bool CoordinateDescentMinimizer::Init(
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[j];
- const map<ParameterBlock*, int>::const_iterator it =
- parameter_block_index.find(parameter_block);
+ const auto it = parameter_block_index.find(parameter_block);
if (it != parameter_block_index.end()) {
residual_blocks_[it->second].push_back(residual_block);
}
@@ -120,6 +121,7 @@ bool CoordinateDescentMinimizer::Init(
evaluator_options_.linear_solver_type = DENSE_QR;
evaluator_options_.num_eliminate_blocks = 0;
evaluator_options_.num_threads = 1;
+ evaluator_options_.context = context_;
return true;
}
@@ -135,11 +137,12 @@ void CoordinateDescentMinimizer::Minimize(
parameter_block->SetConstant();
}
- scoped_array<LinearSolver*> linear_solvers(
+ std::unique_ptr<LinearSolver*[]> linear_solvers(
new LinearSolver*[options.num_threads]);
LinearSolver::Options linear_solver_options;
linear_solver_options.type = DENSE_QR;
+ linear_solver_options.context = context_;
for (int i = 0; i < options.num_threads; ++i) {
linear_solvers[i] = LinearSolver::Create(linear_solver_options);
@@ -148,13 +151,11 @@ void CoordinateDescentMinimizer::Minimize(
for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) {
const int num_problems =
independent_set_offsets_[i + 1] - independent_set_offsets_[i];
- // No point paying the price for an OpemMP call if the set is of
- // size zero.
+ // Avoid parallelization overhead call if the set is empty.
if (num_problems == 0) {
continue;
}
-#ifdef CERES_USE_OPENMP
const int num_inner_iteration_threads =
min(options.num_threads, num_problems);
evaluator_options_.num_threads =
@@ -162,47 +163,43 @@ void CoordinateDescentMinimizer::Minimize(
// The parameter blocks in each independent set can be optimized
// in parallel, since they do not co-occur in any residual block.
-#pragma omp parallel for num_threads(num_inner_iteration_threads)
-#endif
- for (int j = independent_set_offsets_[i];
- j < independent_set_offsets_[i + 1];
- ++j) {
-#ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-#else
- int thread_id = 0;
-#endif
-
- ParameterBlock* parameter_block = parameter_blocks_[j];
- const int old_index = parameter_block->index();
- const int old_delta_offset = parameter_block->delta_offset();
- parameter_block->SetVarying();
- parameter_block->set_index(0);
- parameter_block->set_delta_offset(0);
-
- Program inner_program;
- inner_program.mutable_parameter_blocks()->push_back(parameter_block);
- *inner_program.mutable_residual_blocks() = residual_blocks_[j];
-
- // TODO(sameeragarwal): Better error handling. Right now we
- // assume that this is not going to lead to problems of any
- // sort. Basically we should be checking for numerical failure
- // of some sort.
- //
- // On the other hand, if the optimization is a failure, that in
- // some ways is fine, since it won't change the parameters and
- // we are fine.
- Solver::Summary inner_summary;
- Solve(&inner_program,
- linear_solvers[thread_id],
- parameters + parameter_block->state_offset(),
- &inner_summary);
-
- parameter_block->set_index(old_index);
- parameter_block->set_delta_offset(old_delta_offset);
- parameter_block->SetState(parameters + parameter_block->state_offset());
- parameter_block->SetConstant();
- }
+ ParallelFor(
+ context_,
+ independent_set_offsets_[i],
+ independent_set_offsets_[i + 1],
+ num_inner_iteration_threads,
+ [&](int thread_id, int j) {
+ ParameterBlock* parameter_block = parameter_blocks_[j];
+ const int old_index = parameter_block->index();
+ const int old_delta_offset = parameter_block->delta_offset();
+ parameter_block->SetVarying();
+ parameter_block->set_index(0);
+ parameter_block->set_delta_offset(0);
+
+ Program inner_program;
+ inner_program.mutable_parameter_blocks()->push_back(parameter_block);
+ *inner_program.mutable_residual_blocks() = residual_blocks_[j];
+
+ // TODO(sameeragarwal): Better error handling. Right now we
+ // assume that this is not going to lead to problems of any
+ // sort. Basically we should be checking for numerical failure
+ // of some sort.
+ //
+ // On the other hand, if the optimization is a failure, that in
+ // some ways is fine, since it won't change the parameters and
+ // we are fine.
+ Solver::Summary inner_summary;
+ Solve(&inner_program,
+ linear_solvers[thread_id],
+ parameters + parameter_block->state_offset(),
+ &inner_summary);
+
+ parameter_block->set_index(old_index);
+ parameter_block->set_delta_offset(old_delta_offset);
+ parameter_block->SetState(parameters +
+ parameter_block->state_offset());
+ parameter_block->SetConstant();
+ });
}
for (int i = 0; i < parameter_blocks_.size(); ++i) {
@@ -227,14 +224,17 @@ void CoordinateDescentMinimizer::Solve(Program* program,
Minimizer::Options minimizer_options;
minimizer_options.evaluator.reset(
- CHECK_NOTNULL(Evaluator::Create(evaluator_options_, program, &error)));
+ Evaluator::Create(evaluator_options_, program, &error));
+ CHECK(minimizer_options.evaluator != nullptr);
minimizer_options.jacobian.reset(
- CHECK_NOTNULL(minimizer_options.evaluator->CreateJacobian()));
+ minimizer_options.evaluator->CreateJacobian());
+ CHECK(minimizer_options.jacobian != nullptr);
TrustRegionStrategy::Options trs_options;
trs_options.linear_solver = linear_solver;
minimizer_options.trust_region_strategy.reset(
- CHECK_NOTNULL(TrustRegionStrategy::Create(trs_options)));
+ TrustRegionStrategy::Create(trs_options));
+ CHECK(minimizer_options.trust_region_strategy != nullptr);
minimizer_options.is_silent = true;
TrustRegionMinimizer minimizer;
@@ -245,17 +245,16 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
const Program& program,
const ParameterBlockOrdering& ordering,
string* message) {
- const map<int, set<double*> >& group_to_elements =
+ const map<int, set<double*>>& group_to_elements =
ordering.group_to_elements();
// Verify that each group is an independent set
- map<int, set<double*> >::const_iterator it = group_to_elements.begin();
- for (; it != group_to_elements.end(); ++it) {
- if (!program.IsParameterBlockSetIndependent(it->second)) {
+ for (const auto& g_t_e : group_to_elements) {
+ if (!program.IsParameterBlockSetIndependent(g_t_e.second)) {
*message =
StringPrintf("The user-provided "
"parameter_blocks_for_inner_iterations does not "
- "form an independent set. Group Id: %d", it->first);
+ "form an independent set. Group Id: %d", g_t_e.first);
return false;
}
}
@@ -268,7 +267,7 @@ bool CoordinateDescentMinimizer::IsOrderingValid(
// points.
ParameterBlockOrdering* CoordinateDescentMinimizer::CreateOrdering(
const Program& program) {
- scoped_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
+ std::unique_ptr<ParameterBlockOrdering> ordering(new ParameterBlockOrdering);
ComputeRecursiveIndependentSetOrdering(program, ordering.get());
ordering->Reverse();
return ordering.release();
diff --git a/extern/ceres/internal/ceres/coordinate_descent_minimizer.h b/extern/ceres/internal/ceres/coordinate_descent_minimizer.h
index 25ea04ce622..7d17d53eb0f 100644
--- a/extern/ceres/internal/ceres/coordinate_descent_minimizer.h
+++ b/extern/ceres/internal/ceres/coordinate_descent_minimizer.h
@@ -34,6 +34,7 @@
#include <string>
#include <vector>
+#include "ceres/context_impl.h"
#include "ceres/evaluator.h"
#include "ceres/minimizer.h"
#include "ceres/problem_impl.h"
@@ -57,6 +58,8 @@ class LinearSolver;
// program are constant.
class CoordinateDescentMinimizer : public Minimizer {
public:
+ explicit CoordinateDescentMinimizer(ContextImpl* context);
+
bool Init(const Program& program,
const ProblemImpl::ParameterMap& parameter_map,
const ParameterBlockOrdering& ordering,
@@ -64,9 +67,10 @@ class CoordinateDescentMinimizer : public Minimizer {
// Minimizer interface.
virtual ~CoordinateDescentMinimizer();
- virtual void Minimize(const Minimizer::Options& options,
- double* parameters,
- Solver::Summary* summary);
+
+ void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) final;
// Verify that each group in the ordering forms an independent set.
static bool IsOrderingValid(const Program& program,
@@ -86,7 +90,7 @@ class CoordinateDescentMinimizer : public Minimizer {
Solver::Summary* summary);
std::vector<ParameterBlock*> parameter_blocks_;
- std::vector<std::vector<ResidualBlock*> > residual_blocks_;
+ std::vector<std::vector<ResidualBlock*>> residual_blocks_;
// The optimization is performed in rounds. In each round all the
// parameter blocks that form one independent set are optimized in
// parallel. This array, marks the boundaries of the independent
@@ -94,6 +98,8 @@ class CoordinateDescentMinimizer : public Minimizer {
std::vector<int> independent_set_offsets_;
Evaluator::Options evaluator_options_;
+
+ ContextImpl* context_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/corrector.cc b/extern/ceres/internal/ceres/corrector.cc
index 720182868c1..4ac0dc3cd86 100644
--- a/extern/ceres/internal/ceres/corrector.cc
+++ b/extern/ceres/internal/ceres/corrector.cc
@@ -43,7 +43,7 @@ Corrector::Corrector(const double sq_norm, const double rho[3]) {
sqrt_rho1_ = sqrt(rho[1]);
// If sq_norm = 0.0, the correction becomes trivial, the residual
- // and the jacobian are scaled by the squareroot of the derivative
+ // and the jacobian are scaled by the square root of the derivative
// of rho. Handling this case explicitly avoids the divide by zero
// error that would occur below.
//
@@ -59,7 +59,7 @@ Corrector::Corrector(const double sq_norm, const double rho[3]) {
  // correction which re-weights the gradient of the function by the
// square root of the derivative of rho, and the Gauss-Newton
// Hessian gets both the scaling and the rank-1 curvature
- // correction. Normaly, alpha is upper bounded by one, but with this
+ // correction. Normally, alpha is upper bounded by one, but with this
// change, alpha is bounded above by zero.
//
// Empirically we have observed that the full Triggs correction and
diff --git a/extern/ceres/internal/ceres/corrector.h b/extern/ceres/internal/ceres/corrector.h
index 315f012ab1d..a5b03dda803 100644
--- a/extern/ceres/internal/ceres/corrector.h
+++ b/extern/ceres/internal/ceres/corrector.h
@@ -43,7 +43,7 @@ namespace internal {
// radial robust loss.
//
// The key idea here is to look at the expressions for the robustified
-// gauss newton approximation and then take its squareroot to get the
+// gauss newton approximation and then take its square root to get the
// corresponding corrections to the residual and jacobian. For the
// full expressions see Eq. 10 and 11 in BANS by Triggs et al.
class Corrector {
diff --git a/extern/ceres/internal/ceres/covariance.cc b/extern/ceres/internal/ceres/covariance.cc
index cb280a36847..8256078409a 100644
--- a/extern/ceres/internal/ceres/covariance.cc
+++ b/extern/ceres/internal/ceres/covariance.cc
@@ -50,15 +50,15 @@ Covariance::~Covariance() {
}
bool Covariance::Compute(
- const vector<pair<const double*, const double*> >& covariance_blocks,
+ const vector<pair<const double*, const double*>>& covariance_blocks,
Problem* problem) {
- return impl_->Compute(covariance_blocks, problem->problem_impl_.get());
+ return impl_->Compute(covariance_blocks, problem->impl_.get());
}
bool Covariance::Compute(
const vector<const double*>& parameter_blocks,
Problem* problem) {
- return impl_->Compute(parameter_blocks, problem->problem_impl_.get());
+ return impl_->Compute(parameter_blocks, problem->impl_.get());
}
bool Covariance::GetCovarianceBlock(const double* parameter_block1,
@@ -82,7 +82,7 @@ bool Covariance::GetCovarianceBlockInTangentSpace(
bool Covariance::GetCovarianceMatrix(
const vector<const double*>& parameter_blocks,
- double* covariance_matrix) {
+ double* covariance_matrix) const {
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
true, // ambient
covariance_matrix);
@@ -90,7 +90,7 @@ bool Covariance::GetCovarianceMatrix(
bool Covariance::GetCovarianceMatrixInTangentSpace(
const std::vector<const double *>& parameter_blocks,
- double *covariance_matrix) {
+ double *covariance_matrix) const {
return impl_->GetCovarianceMatrixInTangentOrAmbientSpace(parameter_blocks,
false, // tangent
covariance_matrix);
diff --git a/extern/ceres/internal/ceres/covariance_impl.cc b/extern/ceres/internal/ceres/covariance_impl.cc
index d698f88fa9b..6c26412d854 100644
--- a/extern/ceres/internal/ceres/covariance_impl.cc
+++ b/extern/ceres/internal/ceres/covariance_impl.cc
@@ -30,14 +30,12 @@
#include "ceres/covariance_impl.h"
-#ifdef CERES_USE_OPENMP
-#include <omp.h>
-#endif
-
#include <algorithm>
#include <cstdlib>
+#include <memory>
#include <numeric>
#include <sstream>
+#include <unordered_set>
#include <utility>
#include <vector>
@@ -45,13 +43,14 @@
#include "Eigen/SparseQR"
#include "Eigen/SVD"
-#include "ceres/collections_port.h"
#include "ceres/compressed_col_sparse_matrix_utils.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/covariance.h"
#include "ceres/crs_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/map_util.h"
+#include "ceres/parallel_for.h"
+#include "ceres/parallel_utils.h"
#include "ceres/parameter_block.h"
#include "ceres/problem_impl.h"
#include "ceres/residual_block.h"
@@ -69,21 +68,22 @@ using std::sort;
using std::swap;
using std::vector;
-typedef vector<pair<const double*, const double*> > CovarianceBlocks;
+typedef vector<pair<const double*, const double*>> CovarianceBlocks;
CovarianceImpl::CovarianceImpl(const Covariance::Options& options)
: options_(options),
is_computed_(false),
is_valid_(false) {
-#ifndef CERES_USE_OPENMP
+#ifdef CERES_NO_THREADS
if (options_.num_threads > 1) {
LOG(WARNING)
- << "OpenMP support is not compiled into this binary; "
+ << "No threading support is compiled into this binary; "
<< "only options.num_threads = 1 is supported. Switching "
<< "to single threaded mode.";
options_.num_threads = 1;
}
#endif
+
evaluate_options_.num_threads = options_.num_threads;
evaluate_options_.apply_loss_function = options_.apply_loss_function;
}
@@ -97,7 +97,7 @@ template <typename T> void CheckForDuplicates(vector<T> blocks) {
std::adjacent_find(blocks.begin(), blocks.end());
if (it != blocks.end()) {
// In case there are duplicates, we search for their location.
- map<T, vector<int> > blocks_map;
+ map<T, vector<int>> blocks_map;
for (int i = 0; i < blocks.size(); ++i) {
blocks_map[blocks[i]].push_back(i);
}
@@ -122,7 +122,7 @@ template <typename T> void CheckForDuplicates(vector<T> blocks) {
bool CovarianceImpl::Compute(const CovarianceBlocks& covariance_blocks,
ProblemImpl* problem) {
- CheckForDuplicates<pair<const double*, const double*> >(covariance_blocks);
+ CheckForDuplicates<pair<const double*, const double*>>(covariance_blocks);
problem_ = problem;
parameter_block_to_row_index_.clear();
covariance_matrix_.reset(NULL);
@@ -333,55 +333,47 @@ bool CovarianceImpl::GetCovarianceMatrixInTangentOrAmbientSpace(
// Assemble the blocks in the covariance matrix.
MatrixRef covariance(covariance_matrix, covariance_size, covariance_size);
const int num_threads = options_.num_threads;
- scoped_array<double> workspace(
+ std::unique_ptr<double[]> workspace(
new double[num_threads * max_covariance_block_size *
max_covariance_block_size]);
bool success = true;
-// The collapse() directive is only supported in OpenMP 3.0 and higher. OpenMP
-// 3.0 was released in May 2008 (hence the version number).
-#if _OPENMP >= 200805
-# pragma omp parallel for num_threads(num_threads) schedule(dynamic) collapse(2)
-#else
-# pragma omp parallel for num_threads(num_threads) schedule(dynamic)
-#endif
- for (int i = 0; i < num_parameters; ++i) {
- for (int j = 0; j < num_parameters; ++j) {
- // The second loop can't start from j = i for compatibility with OpenMP
- // collapse command. The conditional serves as a workaround
- if (j >= i) {
+ // Technically the following code is a double nested loop where
+ // i = 1:n, j = i:n.
+ int iteration_count = (num_parameters * (num_parameters + 1)) / 2;
+ problem_->context()->EnsureMinimumThreads(num_threads);
+ ParallelFor(
+ problem_->context(),
+ 0,
+ iteration_count,
+ num_threads,
+ [&](int thread_id, int k) {
+ int i, j;
+ LinearIndexToUpperTriangularIndex(k, num_parameters, &i, &j);
+
int covariance_row_idx = cum_parameter_size[i];
int covariance_col_idx = cum_parameter_size[j];
int size_i = parameter_sizes[i];
int size_j = parameter_sizes[j];
-#ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-#else
- int thread_id = 0;
-#endif
double* covariance_block =
- workspace.get() +
- thread_id * max_covariance_block_size * max_covariance_block_size;
+ workspace.get() + thread_id * max_covariance_block_size *
+ max_covariance_block_size;
if (!GetCovarianceBlockInTangentOrAmbientSpace(
- parameters[i], parameters[j], lift_covariance_to_ambient_space,
- covariance_block)) {
+ parameters[i], parameters[j],
+ lift_covariance_to_ambient_space, covariance_block)) {
success = false;
}
- covariance.block(covariance_row_idx, covariance_col_idx,
- size_i, size_j) =
- MatrixRef(covariance_block, size_i, size_j);
+ covariance.block(covariance_row_idx, covariance_col_idx, size_i,
+ size_j) = MatrixRef(covariance_block, size_i, size_j);
if (i != j) {
covariance.block(covariance_col_idx, covariance_row_idx,
size_j, size_i) =
MatrixRef(covariance_block, size_i, size_j).transpose();
-
}
- }
- }
- }
+ });
return success;
}
@@ -397,7 +389,7 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
vector<double*> all_parameter_blocks;
problem->GetParameterBlocks(&all_parameter_blocks);
const ProblemImpl::ParameterMap& parameter_map = problem->parameter_map();
- HashSet<ParameterBlock*> parameter_blocks_in_use;
+ std::unordered_set<ParameterBlock*> parameter_blocks_in_use;
vector<ResidualBlock*> residual_blocks;
problem->GetResidualBlocks(&residual_blocks);
@@ -496,13 +488,10 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
// rows of the covariance matrix in order.
int i = 0; // index into covariance_blocks.
int cursor = 0; // index into the covariance matrix.
- for (map<const double*, int>::const_iterator it =
- parameter_block_to_row_index_.begin();
- it != parameter_block_to_row_index_.end();
- ++it) {
- const double* row_block = it->first;
+ for (const auto& entry : parameter_block_to_row_index_) {
+ const double* row_block = entry.first;
const int row_block_size = problem->ParameterBlockLocalSize(row_block);
- int row_begin = it->second;
+ int row_begin = entry.second;
// Iterate over the covariance blocks contained in this row block
// and count the number of columns in this row block.
@@ -538,24 +527,37 @@ bool CovarianceImpl::ComputeCovarianceSparsity(
}
bool CovarianceImpl::ComputeCovarianceValues() {
- switch (options_.algorithm_type) {
- case DENSE_SVD:
- return ComputeCovarianceValuesUsingDenseSVD();
- case SUITE_SPARSE_QR:
-#ifndef CERES_NO_SUITESPARSE
+ if (options_.algorithm_type == DENSE_SVD) {
+ return ComputeCovarianceValuesUsingDenseSVD();
+ }
+
+ if (options_.algorithm_type == SPARSE_QR) {
+ if (options_.sparse_linear_algebra_library_type == EIGEN_SPARSE) {
+ return ComputeCovarianceValuesUsingEigenSparseQR();
+ }
+
+ if (options_.sparse_linear_algebra_library_type == SUITE_SPARSE) {
+#if !defined(CERES_NO_SUITESPARSE)
return ComputeCovarianceValuesUsingSuiteSparseQR();
#else
- LOG(ERROR) << "SuiteSparse is required to use the "
- << "SUITE_SPARSE_QR algorithm.";
+ LOG(ERROR) << "SuiteSparse is required to use the SPARSE_QR algorithm "
+ << "with "
+ << "Covariance::Options::sparse_linear_algebra_library_type "
+ << "= SUITE_SPARSE.";
return false;
#endif
- case EIGEN_SPARSE_QR:
- return ComputeCovarianceValuesUsingEigenSparseQR();
- default:
- LOG(ERROR) << "Unsupported covariance estimation algorithm type: "
- << CovarianceAlgorithmTypeToString(options_.algorithm_type);
- return false;
+ }
+
+ LOG(ERROR) << "Unsupported "
+ << "Covariance::Options::sparse_linear_algebra_library_type "
+ << "= "
+ << SparseLinearAlgebraLibraryTypeToString(
+ options_.sparse_linear_algebra_library_type);
+ return false;
}
+
+ LOG(ERROR) << "Unsupported Covariance::Options::algorithm_type = "
+ << CovarianceAlgorithmTypeToString(options_.algorithm_type);
return false;
}
@@ -649,8 +651,12 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
&permutation,
&cc);
event_logger.AddEvent("Numeric Factorization");
- CHECK_NOTNULL(permutation);
- CHECK_NOTNULL(R);
+ if (R == nullptr) {
+ LOG(ERROR) << "Something is wrong. SuiteSparseQR returned R = nullptr.";
+ free(permutation);
+ cholmod_l_finish(&cc);
+ return false;
+ }
if (rank < cholmod_jacobian.ncol) {
LOG(ERROR) << "Jacobian matrix is rank deficient. "
@@ -663,8 +669,14 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
}
vector<int> inverse_permutation(num_cols);
- for (SuiteSparse_long i = 0; i < num_cols; ++i) {
- inverse_permutation[permutation[i]] = i;
+ if (permutation) {
+ for (SuiteSparse_long i = 0; i < num_cols; ++i) {
+ inverse_permutation[permutation[i]] = i;
+ }
+ } else {
+ for (SuiteSparse_long i = 0; i < num_cols; ++i) {
+ inverse_permutation[i] = i;
+ }
}
const int* rows = covariance_matrix_->rows();
@@ -681,35 +693,29 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSuiteSparseQR() {
// Since the covariance matrix is symmetric, the i^th row and column
// are equal.
const int num_threads = options_.num_threads;
- scoped_array<double> workspace(new double[num_threads * num_cols]);
-
-#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
- for (int r = 0; r < num_cols; ++r) {
- const int row_begin = rows[r];
- const int row_end = rows[r + 1];
- if (row_end == row_begin) {
- continue;
- }
-
-# ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-# else
- int thread_id = 0;
-# endif
-
- double* solution = workspace.get() + thread_id * num_cols;
- SolveRTRWithSparseRHS<SuiteSparse_long>(
- num_cols,
- static_cast<SuiteSparse_long*>(R->i),
- static_cast<SuiteSparse_long*>(R->p),
- static_cast<double*>(R->x),
- inverse_permutation[r],
- solution);
- for (int idx = row_begin; idx < row_end; ++idx) {
- const int c = cols[idx];
- values[idx] = solution[inverse_permutation[c]];
- }
- }
+ std::unique_ptr<double[]> workspace(new double[num_threads * num_cols]);
+
+ problem_->context()->EnsureMinimumThreads(num_threads);
+ ParallelFor(
+ problem_->context(),
+ 0,
+ num_cols,
+ num_threads,
+ [&](int thread_id, int r) {
+ const int row_begin = rows[r];
+ const int row_end = rows[r + 1];
+ if (row_end != row_begin) {
+ double* solution = workspace.get() + thread_id * num_cols;
+ SolveRTRWithSparseRHS<SuiteSparse_long>(
+ num_cols, static_cast<SuiteSparse_long*>(R->i),
+ static_cast<SuiteSparse_long*>(R->p), static_cast<double*>(R->x),
+ inverse_permutation[r], solution);
+ for (int idx = row_begin; idx < row_end; ++idx) {
+ const int c = cols[idx];
+ values[idx] = solution[inverse_permutation[c]];
+ }
+ }
+ });
free(permutation);
cholmod_l_free_sparse(&R, &cc);
@@ -746,8 +752,8 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() {
}
event_logger.AddEvent("ConvertToDenseMatrix");
- Eigen::JacobiSVD<Matrix> svd(dense_jacobian,
- Eigen::ComputeThinU | Eigen::ComputeThinV);
+ Eigen::BDCSVD<Matrix> svd(dense_jacobian,
+ Eigen::ComputeThinU | Eigen::ComputeThinV);
event_logger.AddEvent("SingularValueDecomposition");
@@ -838,7 +844,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
jacobian.rows.data(), jacobian.cols.data(), jacobian.values.data());
event_logger.AddEvent("ConvertToSparseMatrix");
- Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int> >
+ Eigen::SparseQR<EigenSparseMatrix, Eigen::COLAMDOrdering<int>>
qr_solver(sparse_jacobian);
event_logger.AddEvent("QRDecomposition");
@@ -873,38 +879,35 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingEigenSparseQR() {
// are equal.
const int num_cols = jacobian.num_cols;
const int num_threads = options_.num_threads;
- scoped_array<double> workspace(new double[num_threads * num_cols]);
-
-#pragma omp parallel for num_threads(num_threads) schedule(dynamic)
- for (int r = 0; r < num_cols; ++r) {
- const int row_begin = rows[r];
- const int row_end = rows[r + 1];
- if (row_end == row_begin) {
- continue;
- }
-
-# ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-# else
- int thread_id = 0;
-# endif
-
- double* solution = workspace.get() + thread_id * num_cols;
- SolveRTRWithSparseRHS<int>(
- num_cols,
- qr_solver.matrixR().innerIndexPtr(),
- qr_solver.matrixR().outerIndexPtr(),
- &qr_solver.matrixR().data().value(0),
- inverse_permutation.indices().coeff(r),
- solution);
-
- // Assign the values of the computed covariance using the
- // inverse permutation used in the QR factorization.
- for (int idx = row_begin; idx < row_end; ++idx) {
- const int c = cols[idx];
- values[idx] = solution[inverse_permutation.indices().coeff(c)];
- }
- }
+ std::unique_ptr<double[]> workspace(new double[num_threads * num_cols]);
+
+ problem_->context()->EnsureMinimumThreads(num_threads);
+ ParallelFor(
+ problem_->context(),
+ 0,
+ num_cols,
+ num_threads,
+ [&](int thread_id, int r) {
+ const int row_begin = rows[r];
+ const int row_end = rows[r + 1];
+ if (row_end != row_begin) {
+ double* solution = workspace.get() + thread_id * num_cols;
+ SolveRTRWithSparseRHS<int>(
+ num_cols,
+ qr_solver.matrixR().innerIndexPtr(),
+ qr_solver.matrixR().outerIndexPtr(),
+ &qr_solver.matrixR().data().value(0),
+ inverse_permutation.indices().coeff(r),
+ solution);
+
+ // Assign the values of the computed covariance using the
+ // inverse permutation used in the QR factorization.
+ for (int idx = row_begin; idx < row_end; ++idx) {
+ const int c = cols[idx];
+ values[idx] = solution[inverse_permutation.indices().coeff(c)];
+ }
+ }
+ });
event_logger.AddEvent("Inverse");
diff --git a/extern/ceres/internal/ceres/covariance_impl.h b/extern/ceres/internal/ceres/covariance_impl.h
index a3f0761f57c..065e43c60fc 100644
--- a/extern/ceres/internal/ceres/covariance_impl.h
+++ b/extern/ceres/internal/ceres/covariance_impl.h
@@ -32,11 +32,11 @@
#define CERES_INTERNAL_COVARIANCE_IMPL_H_
#include <map>
+#include <memory>
#include <set>
#include <utility>
#include <vector>
#include "ceres/covariance.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/problem_impl.h"
#include "ceres/suitesparse.h"
@@ -52,7 +52,7 @@ class CovarianceImpl {
bool Compute(
const std::vector<std::pair<const double*,
- const double*> >& covariance_blocks,
+ const double*>>& covariance_blocks,
ProblemImpl* problem);
bool Compute(
@@ -72,7 +72,7 @@ class CovarianceImpl {
bool ComputeCovarianceSparsity(
const std::vector<std::pair<const double*,
- const double*> >& covariance_blocks,
+ const double*>>& covariance_blocks,
ProblemImpl* problem);
bool ComputeCovarianceValues();
@@ -92,7 +92,7 @@ class CovarianceImpl {
bool is_valid_;
std::map<const double*, int> parameter_block_to_row_index_;
std::set<const double*> constant_parameter_blocks_;
- scoped_ptr<CompressedRowSparseMatrix> covariance_matrix_;
+ std::unique_ptr<CompressedRowSparseMatrix> covariance_matrix_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/cxsparse.cc b/extern/ceres/internal/ceres/cxsparse.cc
new file mode 100644
index 00000000000..5a028773206
--- /dev/null
+++ b/extern/ceres/internal/ceres/cxsparse.cc
@@ -0,0 +1,284 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "ceres/cxsparse.h"
+
+#include <string>
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::vector;
+
+CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) {}
+
+CXSparse::~CXSparse() {
+ if (scratch_size_ > 0) {
+ cs_di_free(scratch_);
+ }
+}
+
+csn* CXSparse::Cholesky(cs_di* A, cs_dis* symbolic_factor) {
+ return cs_di_chol(A, symbolic_factor);
+}
+
+void CXSparse::Solve(cs_dis* symbolic_factor, csn* numeric_factor, double* b) {
+ // Make sure we have enough scratch space available.
+ const int num_cols = numeric_factor->L->n;
+ if (scratch_size_ < num_cols) {
+ if (scratch_size_ > 0) {
+ cs_di_free(scratch_);
+ }
+ scratch_ =
+ reinterpret_cast<CS_ENTRY*>(cs_di_malloc(num_cols, sizeof(CS_ENTRY)));
+ scratch_size_ = num_cols;
+ }
+
+ // When the Cholesky factor succeeded, these methods are
+ // guaranteed to succeeded as well. In the comments below, "x"
+ // refers to the scratch space.
+ //
+ // Set x = P * b.
+ CHECK(cs_di_ipvec(symbolic_factor->pinv, b, scratch_, num_cols));
+ // Set x = L \ x.
+ CHECK(cs_di_lsolve(numeric_factor->L, scratch_));
+ // Set x = L' \ x.
+ CHECK(cs_di_ltsolve(numeric_factor->L, scratch_));
+ // Set b = P' * x.
+ CHECK(cs_di_pvec(symbolic_factor->pinv, scratch_, b, num_cols));
+}
+
+bool CXSparse::SolveCholesky(cs_di* lhs, double* rhs_and_solution) {
+ return cs_cholsol(1, lhs, rhs_and_solution);
+}
+
+cs_dis* CXSparse::AnalyzeCholesky(cs_di* A) {
+ // order = 1 for Cholesky factor.
+ return cs_schol(1, A);
+}
+
+cs_dis* CXSparse::AnalyzeCholeskyWithNaturalOrdering(cs_di* A) {
+ // order = 0 for Natural ordering.
+ return cs_schol(0, A);
+}
+
+cs_dis* CXSparse::BlockAnalyzeCholesky(cs_di* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks) {
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ vector<int> block_rows;
+ vector<int> block_cols;
+ CompressedColumnScalarMatrixToBlockMatrix(
+ A->i, A->p, row_blocks, col_blocks, &block_rows, &block_cols);
+ cs_di block_matrix;
+ block_matrix.m = num_row_blocks;
+ block_matrix.n = num_col_blocks;
+ block_matrix.nz = -1;
+ block_matrix.nzmax = block_rows.size();
+ block_matrix.p = &block_cols[0];
+ block_matrix.i = &block_rows[0];
+ block_matrix.x = NULL;
+
+ int* ordering = cs_amd(1, &block_matrix);
+ vector<int> block_ordering(num_row_blocks, -1);
+ std::copy(ordering, ordering + num_row_blocks, &block_ordering[0]);
+ cs_free(ordering);
+
+ vector<int> scalar_ordering;
+ BlockOrderingToScalarOrdering(row_blocks, block_ordering, &scalar_ordering);
+
+ cs_dis* symbolic_factor =
+ reinterpret_cast<cs_dis*>(cs_calloc(1, sizeof(cs_dis)));
+ symbolic_factor->pinv = cs_pinv(&scalar_ordering[0], A->n);
+ cs* permuted_A = cs_symperm(A, symbolic_factor->pinv, 0);
+
+ symbolic_factor->parent = cs_etree(permuted_A, 0);
+ int* postordering = cs_post(symbolic_factor->parent, A->n);
+ int* column_counts =
+ cs_counts(permuted_A, symbolic_factor->parent, postordering, 0);
+ cs_free(postordering);
+ cs_spfree(permuted_A);
+
+ symbolic_factor->cp = (int*)cs_malloc(A->n + 1, sizeof(int));
+ symbolic_factor->lnz = cs_cumsum(symbolic_factor->cp, column_counts, A->n);
+ symbolic_factor->unz = symbolic_factor->lnz;
+
+ cs_free(column_counts);
+
+ if (symbolic_factor->lnz < 0) {
+ cs_sfree(symbolic_factor);
+ symbolic_factor = NULL;
+ }
+
+ return symbolic_factor;
+}
+
+cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
+ cs_di At;
+ At.m = A->num_cols();
+ At.n = A->num_rows();
+ At.nz = -1;
+ At.nzmax = A->num_nonzeros();
+ At.p = A->mutable_rows();
+ At.i = A->mutable_cols();
+ At.x = A->mutable_values();
+ return At;
+}
+
+cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
+ cs_di_sparse tsm_wrapper;
+ tsm_wrapper.nzmax = tsm->num_nonzeros();
+ tsm_wrapper.nz = tsm->num_nonzeros();
+ tsm_wrapper.m = tsm->num_rows();
+ tsm_wrapper.n = tsm->num_cols();
+ tsm_wrapper.p = tsm->mutable_cols();
+ tsm_wrapper.i = tsm->mutable_rows();
+ tsm_wrapper.x = tsm->mutable_values();
+
+ return cs_compress(&tsm_wrapper);
+}
+
+void CXSparse::ApproximateMinimumDegreeOrdering(cs_di* A, int* ordering) {
+ int* cs_ordering = cs_amd(1, A);
+ std::copy(cs_ordering, cs_ordering + A->m, ordering);
+ cs_free(cs_ordering);
+}
+
+cs_di* CXSparse::TransposeMatrix(cs_di* A) { return cs_di_transpose(A, 1); }
+
+cs_di* CXSparse::MatrixMatrixMultiply(cs_di* A, cs_di* B) {
+ return cs_di_multiply(A, B);
+}
+
+void CXSparse::Free(cs_di* sparse_matrix) { cs_di_spfree(sparse_matrix); }
+
+void CXSparse::Free(cs_dis* symbolic_factor) { cs_di_sfree(symbolic_factor); }
+
+void CXSparse::Free(csn* numeric_factor) { cs_di_nfree(numeric_factor); }
+
+std::unique_ptr<SparseCholesky> CXSparseCholesky::Create(
+ const OrderingType ordering_type) {
+ return std::unique_ptr<SparseCholesky>(new CXSparseCholesky(ordering_type));
+}
+
+CompressedRowSparseMatrix::StorageType CXSparseCholesky::StorageType() const {
+ return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+}
+
+CXSparseCholesky::CXSparseCholesky(const OrderingType ordering_type)
+ : ordering_type_(ordering_type),
+ symbolic_factor_(NULL),
+ numeric_factor_(NULL) {}
+
+CXSparseCholesky::~CXSparseCholesky() {
+ FreeSymbolicFactorization();
+ FreeNumericFactorization();
+}
+
+LinearSolverTerminationType CXSparseCholesky::Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) {
+ CHECK_EQ(lhs->storage_type(), StorageType());
+ if (lhs == NULL) {
+ *message = "Failure: Input lhs is NULL.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ cs_di cs_lhs = cs_.CreateSparseMatrixTransposeView(lhs);
+
+ if (symbolic_factor_ == NULL) {
+ if (ordering_type_ == NATURAL) {
+ symbolic_factor_ = cs_.AnalyzeCholeskyWithNaturalOrdering(&cs_lhs);
+ } else {
+ if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
+ symbolic_factor_ = cs_.BlockAnalyzeCholesky(
+ &cs_lhs, lhs->col_blocks(), lhs->row_blocks());
+ } else {
+ symbolic_factor_ = cs_.AnalyzeCholesky(&cs_lhs);
+ }
+ }
+
+ if (symbolic_factor_ == NULL) {
+ *message = "CXSparse Failure : Symbolic factorization failed.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+ }
+
+ FreeNumericFactorization();
+ numeric_factor_ = cs_.Cholesky(&cs_lhs, symbolic_factor_);
+ if (numeric_factor_ == NULL) {
+ *message = "CXSparse Failure : Numeric factorization failed.";
+ return LINEAR_SOLVER_FAILURE;
+ }
+
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+LinearSolverTerminationType CXSparseCholesky::Solve(const double* rhs,
+ double* solution,
+ std::string* message) {
+ CHECK(numeric_factor_ != NULL)
+ << "Solve called without a call to Factorize first.";
+ const int num_cols = numeric_factor_->L->n;
+ memcpy(solution, rhs, num_cols * sizeof(*solution));
+ cs_.Solve(symbolic_factor_, numeric_factor_, solution);
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+void CXSparseCholesky::FreeSymbolicFactorization() {
+ if (symbolic_factor_ != NULL) {
+ cs_.Free(symbolic_factor_);
+ symbolic_factor_ = NULL;
+ }
+}
+
+void CXSparseCholesky::FreeNumericFactorization() {
+ if (numeric_factor_ != NULL) {
+ cs_.Free(numeric_factor_);
+ numeric_factor_ = NULL;
+ }
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_CXSPARSE
diff --git a/extern/ceres/internal/ceres/cxsparse.h b/extern/ceres/internal/ceres/cxsparse.h
index 26dd1927a78..dc4740ceaee 100644
--- a/extern/ceres/internal/ceres/cxsparse.h
+++ b/extern/ceres/internal/ceres/cxsparse.h
@@ -36,7 +36,12 @@
#ifndef CERES_NO_CXSPARSE
+#include <memory>
+#include <string>
#include <vector>
+
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
#include "cs.h"
namespace ceres {
@@ -47,21 +52,27 @@ class TripletSparseMatrix;
// This object provides access to solving linear systems using Cholesky
// factorization with a known symbolic factorization. This features does not
-// explicity exist in CXSparse. The methods in the class are nonstatic because
+// explicitly exist in CXSparse. The methods in the class are nonstatic because
// the class manages internal scratch space.
class CXSparse {
public:
CXSparse();
~CXSparse();
- // Solves a symmetric linear system A * x = b using Cholesky factorization.
- // A - The system matrix.
- // symbolic_factorization - The symbolic factorization of A. This is obtained
- // from AnalyzeCholesky.
- // b - The right hand size of the linear equation. This
- // array will also recieve the solution.
- // Returns false if Cholesky factorization of A fails.
- bool SolveCholesky(cs_di* A, cs_dis* symbolic_factorization, double* b);
+ // Solve the system lhs * solution = rhs in place by using an
+ // approximate minimum degree fill reducing ordering.
+ bool SolveCholesky(cs_di* lhs, double* rhs_and_solution);
+
+ // Solves a linear system given its symbolic and numeric factorization.
+ void Solve(cs_dis* symbolic_factor,
+ csn* numeric_factor,
+ double* rhs_and_solution);
+
+ // Compute the numeric Cholesky factorization of A, given its
+ // symbolic factorization.
+ //
+ // Caller owns the result.
+ csn* Cholesky(cs_di* A, cs_dis* symbolic_factor);
// Creates a sparse matrix from a compressed-column form. No memory is
// allocated or copied; the structure A is filled out with info from the
@@ -117,6 +128,7 @@ class CXSparse {
void Free(cs_di* sparse_matrix);
void Free(cs_dis* symbolic_factorization);
+ void Free(csn* numeric_factorization);
private:
// Cached scratch space
@@ -124,10 +136,37 @@ class CXSparse {
int scratch_size_;
};
+// An implementation of SparseCholesky interface using the CXSparse
+// library.
+class CXSparseCholesky : public SparseCholesky {
+ public:
+ // Factory
+ static std::unique_ptr<SparseCholesky> Create(OrderingType ordering_type);
+
+ // SparseCholesky interface.
+ virtual ~CXSparseCholesky();
+ CompressedRowSparseMatrix::StorageType StorageType() const final;
+ LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) final;
+ LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) final;
+
+ private:
+ CXSparseCholesky(const OrderingType ordering_type);
+ void FreeSymbolicFactorization();
+ void FreeNumericFactorization();
+
+ const OrderingType ordering_type_;
+ CXSparse cs_;
+ cs_dis* symbolic_factor_;
+ csn* numeric_factor_;
+};
+
} // namespace internal
} // namespace ceres
-#else // CERES_NO_CXSPARSE
+#else // CERES_NO_CXSPARSE
typedef void cs_dis;
diff --git a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
index b13cf3fc9f6..fe7d931a3fd 100644
--- a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
+++ b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.cc
@@ -36,7 +36,6 @@
#include "ceres/blas.h"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/lapack.h"
#include "ceres/linear_solver.h"
#include "ceres/types.h"
diff --git a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
index 11287ebf675..976718e8615 100644
--- a/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
+++ b/extern/ceres/internal/ceres/dense_normal_cholesky_solver.h
@@ -35,7 +35,6 @@
#define CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
#include "ceres/linear_solver.h"
-#include "ceres/internal/macros.h"
namespace ceres {
namespace internal {
@@ -79,11 +78,11 @@ class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
private:
- virtual LinearSolver::Summary SolveImpl(
+ LinearSolver::Summary SolveImpl(
DenseSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double* x);
+ double* x) final;
LinearSolver::Summary SolveUsingLAPACK(
DenseSparseMatrix* A,
@@ -98,7 +97,6 @@ class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
double* x);
const LinearSolver::Options options_;
- CERES_DISALLOW_COPY_AND_ASSIGN(DenseNormalCholeskySolver);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/dense_qr_solver.cc b/extern/ceres/internal/ceres/dense_qr_solver.cc
index e85fdfc0c68..161e9c67a00 100644
--- a/extern/ceres/internal/ceres/dense_qr_solver.cc
+++ b/extern/ceres/internal/ceres/dense_qr_solver.cc
@@ -30,12 +30,10 @@
#include "ceres/dense_qr_solver.h"
-
#include <cstddef>
#include "Eigen/Dense"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/lapack.h"
#include "ceres/linear_solver.h"
#include "ceres/types.h"
diff --git a/extern/ceres/internal/ceres/dense_qr_solver.h b/extern/ceres/internal/ceres/dense_qr_solver.h
index 1a6e0898c56..9ea959db68d 100644
--- a/extern/ceres/internal/ceres/dense_qr_solver.h
+++ b/extern/ceres/internal/ceres/dense_qr_solver.h
@@ -34,7 +34,6 @@
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/macros.h"
namespace ceres {
namespace internal {
@@ -84,11 +83,11 @@ class DenseQRSolver: public DenseSparseMatrixSolver {
explicit DenseQRSolver(const LinearSolver::Options& options);
private:
- virtual LinearSolver::Summary SolveImpl(
+ LinearSolver::Summary SolveImpl(
DenseSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double* x);
+ double* x) final;
LinearSolver::Summary SolveUsingEigen(
DenseSparseMatrix* A,
@@ -106,7 +105,6 @@ class DenseQRSolver: public DenseSparseMatrixSolver {
ColMajorMatrix lhs_;
Vector rhs_;
Vector work_;
- CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/dense_sparse_matrix.cc b/extern/ceres/internal/ceres/dense_sparse_matrix.cc
index 19db867d4aa..72e08360dd0 100644
--- a/extern/ceres/internal/ceres/dense_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/dense_sparse_matrix.cc
@@ -166,7 +166,7 @@ ColMajorMatrixRef DenseSparseMatrix::mutable_matrix() {
void DenseSparseMatrix::ToTextFile(FILE* file) const {
- CHECK_NOTNULL(file);
+ CHECK(file != nullptr);
const int active_rows =
(has_diagonal_reserved_ && !has_diagonal_appended_)
? (m_.rows() - m_.cols())
diff --git a/extern/ceres/internal/ceres/dense_sparse_matrix.h b/extern/ceres/internal/ceres/dense_sparse_matrix.h
index b011bfddee7..6d3d504ea36 100644
--- a/extern/ceres/internal/ceres/dense_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/dense_sparse_matrix.h
@@ -33,10 +33,8 @@
#ifndef CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
#define CERES_INTERNAL_DENSE_SPARSE_MATRIX_H_
-#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {
@@ -57,18 +55,18 @@ class DenseSparseMatrix : public SparseMatrix {
virtual ~DenseSparseMatrix() {}
// SparseMatrix interface.
- virtual void SetZero();
- virtual void RightMultiply(const double* x, double* y) const;
- virtual void LeftMultiply(const double* x, double* y) const;
- virtual void SquaredColumnNorm(double* x) const;
- virtual void ScaleColumns(const double* scale);
- virtual void ToDenseMatrix(Matrix* dense_matrix) const;
- virtual void ToTextFile(FILE* file) const;
- virtual int num_rows() const;
- virtual int num_cols() const;
- virtual int num_nonzeros() const;
- virtual const double* values() const { return m_.data(); }
- virtual double* mutable_values() { return m_.data(); }
+ void SetZero() final;
+ void RightMultiply(const double* x, double* y) const final;
+ void LeftMultiply(const double* x, double* y) const final;
+ void SquaredColumnNorm(double* x) const final;
+ void ScaleColumns(const double* scale) final;
+ void ToDenseMatrix(Matrix* dense_matrix) const final;
+ void ToTextFile(FILE* file) const final;
+ int num_rows() const final;
+ int num_cols() const final;
+ int num_nonzeros() const final;
+ const double* values() const final { return m_.data(); }
+ double* mutable_values() final { return m_.data(); }
ConstColMajorMatrixRef matrix() const;
ColMajorMatrixRef mutable_matrix();
diff --git a/extern/ceres/internal/ceres/dogleg_strategy.cc b/extern/ceres/internal/ceres/dogleg_strategy.cc
index 839e1816338..ecc6b882338 100644
--- a/extern/ceres/internal/ceres/dogleg_strategy.cc
+++ b/extern/ceres/internal/ceres/dogleg_strategy.cc
@@ -30,7 +30,9 @@
#include "ceres/dogleg_strategy.h"
+#include <algorithm>
#include <cmath>
+
#include "Eigen/Dense"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
@@ -64,7 +66,7 @@ DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
dogleg_step_norm_(0.0),
reuse_(false),
dogleg_type_(options.dogleg_type) {
- CHECK_NOTNULL(linear_solver_);
+ CHECK(linear_solver_ != nullptr);
CHECK_GT(min_diagonal_, 0.0);
CHECK_LE(min_diagonal_, max_diagonal_);
CHECK_GT(max_radius_, 0.0);
@@ -79,9 +81,9 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
SparseMatrix* jacobian,
const double* residuals,
double* step) {
- CHECK_NOTNULL(jacobian);
- CHECK_NOTNULL(residuals);
- CHECK_NOTNULL(step);
+ CHECK(jacobian != nullptr);
+ CHECK(residuals != nullptr);
+ CHECK(step != nullptr);
const int n = jacobian->num_cols();
if (reuse_) {
@@ -469,7 +471,7 @@ double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
// In the failure case, another step should be taken, such as the traditional
// dogleg step.
bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
- CHECK_NOTNULL(minimum);
+ CHECK(minimum != nullptr);
// Return (0, 0) in all error cases.
minimum->setZero();
diff --git a/extern/ceres/internal/ceres/dogleg_strategy.h b/extern/ceres/internal/ceres/dogleg_strategy.h
index 046b9d824c9..1150940efd3 100644
--- a/extern/ceres/internal/ceres/dogleg_strategy.h
+++ b/extern/ceres/internal/ceres/dogleg_strategy.h
@@ -58,15 +58,14 @@ class DoglegStrategy : public TrustRegionStrategy {
virtual ~DoglegStrategy() {}
// TrustRegionStrategy interface
- virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+ Summary ComputeStep(const PerSolveOptions& per_solve_options,
SparseMatrix* jacobian,
const double* residuals,
- double* step);
- virtual void StepAccepted(double step_quality);
- virtual void StepRejected(double step_quality);
- virtual void StepIsInvalid();
-
- virtual double Radius() const;
+ double* step) final;
+ void StepAccepted(double step_quality) final;
+ void StepRejected(double step_quality) final;
+ void StepIsInvalid();
+ double Radius() const final;
// These functions are predominantly for testing.
Vector gradient() const { return gradient_; }
@@ -103,7 +102,7 @@ class DoglegStrategy : public TrustRegionStrategy {
// mu is used to scale the diagonal matrix used to make the
// Gauss-Newton solve full rank. In each solve, the strategy starts
- // out with mu = min_mu, and tries values upto max_mu. If the user
+ // out with mu = min_mu, and tries values up to max_mu. If the user
// reports an invalid step, the value of mu_ is increased so that
// the next solve starts with a stronger regularization.
//
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.cc b/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
index fd5d89e350a..1749449043e 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_jacobian_writer.cc
@@ -28,9 +28,10 @@
//
// Author: richie.stebbing@gmail.com (Richard Stebbing)
-#include "ceres/compressed_row_jacobian_writer.h"
#include "ceres/dynamic_compressed_row_jacobian_writer.h"
+
#include "ceres/casts.h"
+#include "ceres/compressed_row_jacobian_writer.h"
#include "ceres/dynamic_compressed_row_sparse_matrix.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
@@ -48,43 +49,28 @@ DynamicCompressedRowJacobianWriter::CreateEvaluatePreparers(int num_threads) {
}
SparseMatrix* DynamicCompressedRowJacobianWriter::CreateJacobian() const {
- // Initialize `jacobian` with zero number of `max_num_nonzeros`.
- const int num_residuals = program_->NumResiduals();
- const int num_effective_parameters = program_->NumEffectiveParameters();
-
DynamicCompressedRowSparseMatrix* jacobian =
- new DynamicCompressedRowSparseMatrix(num_residuals,
- num_effective_parameters,
- 0);
-
- vector<int>* row_blocks = jacobian->mutable_row_blocks();
- for (int i = 0; i < jacobian->num_rows(); ++i) {
- row_blocks->push_back(1);
- }
-
- vector<int>* col_blocks = jacobian->mutable_col_blocks();
- for (int i = 0; i < jacobian->num_cols(); ++i) {
- col_blocks->push_back(1);
- }
-
+ new DynamicCompressedRowSparseMatrix(program_->NumResiduals(),
+ program_->NumEffectiveParameters(),
+ 0 /* max_num_nonzeros */);
return jacobian;
}
void DynamicCompressedRowJacobianWriter::Write(int residual_id,
int residual_offset,
- double **jacobians,
+ double** jacobians,
SparseMatrix* base_jacobian) {
DynamicCompressedRowSparseMatrix* jacobian =
- down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
+ down_cast<DynamicCompressedRowSparseMatrix*>(base_jacobian);
// Get the `residual_block` of interest.
const ResidualBlock* residual_block =
program_->residual_blocks()[residual_id];
const int num_residuals = residual_block->NumResiduals();
- vector<pair<int, int> > evaluated_jacobian_blocks;
+ vector<pair<int, int>> evaluated_jacobian_blocks;
CompressedRowJacobianWriter::GetOrderedParameterBlocks(
- program_, residual_id, &evaluated_jacobian_blocks);
+ program_, residual_id, &evaluated_jacobian_blocks);
// `residual_offset` is the residual row in the global jacobian.
// Empty the jacobian rows.
@@ -97,16 +83,17 @@ void DynamicCompressedRowJacobianWriter::Write(int residual_id,
const int parameter_block_jacobian_index =
evaluated_jacobian_blocks[i].second;
const int parameter_block_size = parameter_block->LocalSize();
+ const double* parameter_jacobian =
+ jacobians[parameter_block_jacobian_index];
// For each parameter block only insert its non-zero entries.
for (int r = 0; r < num_residuals; ++r) {
- for (int c = 0; c < parameter_block_size; ++c) {
- const double& v = jacobians[parameter_block_jacobian_index][
- r * parameter_block_size + c];
+ for (int c = 0; c < parameter_block_size; ++c, ++parameter_jacobian) {
+ const double v = *parameter_jacobian;
// Only insert non-zero entries.
if (v != 0.0) {
jacobian->InsertEntry(
- residual_offset + r, parameter_block->delta_offset() + c, v);
+ residual_offset + r, parameter_block->delta_offset() + c, v);
}
}
}
diff --git a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
index cab860bddbd..ad41da7b15a 100644
--- a/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/dynamic_compressed_row_sparse_matrix.h
@@ -91,8 +91,8 @@ class DynamicCompressedRowSparseMatrix : public CompressedRowSparseMatrix {
void Finalize(int num_additional_elements);
private:
- std::vector<std::vector<int> > dynamic_cols_;
- std::vector<std::vector<double> > dynamic_values_;
+ std::vector<std::vector<int>> dynamic_cols_;
+ std::vector<std::vector<double>> dynamic_values_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
new file mode 100644
index 00000000000..25d5417bca8
--- /dev/null
+++ b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.cc
@@ -0,0 +1,286 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
+
+#include <algorithm>
+#include <cstring>
+#include <ctime>
+#include <memory>
+#include <sstream>
+
+#include "Eigen/SparseCore"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cxsparse.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/suitesparse.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/types.h"
+#include "ceres/wall_time.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+#include "Eigen/SparseCholesky"
+#endif
+
+namespace ceres {
+namespace internal {
+
+DynamicSparseNormalCholeskySolver::DynamicSparseNormalCholeskySolver(
+ const LinearSolver::Options& options)
+ : options_(options) {}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImpl(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ const int num_cols = A->num_cols();
+ VectorRef(x, num_cols).setZero();
+ A->LeftMultiply(b, x);
+
+ if (per_solve_options.D != nullptr) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ std::unique_ptr<CompressedRowSparseMatrix> regularizer;
+ if (!A->col_blocks().empty()) {
+ regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
+ per_solve_options.D, A->col_blocks()));
+ } else {
+ regularizer.reset(
+ new CompressedRowSparseMatrix(per_solve_options.D, num_cols));
+ }
+ A->AppendRows(*regularizer);
+ }
+
+ LinearSolver::Summary summary;
+ switch (options_.sparse_linear_algebra_library_type) {
+ case SUITE_SPARSE:
+ summary = SolveImplUsingSuiteSparse(A, x);
+ break;
+ case CX_SPARSE:
+ summary = SolveImplUsingCXSparse(A, x);
+ break;
+ case EIGEN_SPARSE:
+ summary = SolveImplUsingEigen(A, x);
+ break;
+ default:
+ LOG(FATAL) << "Unsupported sparse linear algebra library for "
+ << "dynamic sparsity: "
+ << SparseLinearAlgebraLibraryTypeToString(
+ options_.sparse_linear_algebra_library_type);
+ }
+
+ if (per_solve_options.D != nullptr) {
+ A->DeleteRows(num_cols);
+ }
+
+ return summary;
+}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImplUsingEigen(
+ CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifndef CERES_USE_EIGEN_SPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
+ "because Ceres was not built with support for "
+ "Eigen's SimplicialLDLT decomposition. "
+ "This requires enabling building with -DEIGENSPARSE=ON.";
+ return summary;
+
+#else
+
+ EventLogger event_logger("DynamicSparseNormalCholeskySolver::Eigen::Solve");
+
+ Eigen::MappedSparseMatrix<double, Eigen::RowMajor> a(A->num_rows(),
+ A->num_cols(),
+ A->num_nonzeros(),
+ A->mutable_rows(),
+ A->mutable_cols(),
+ A->mutable_values());
+
+ Eigen::SparseMatrix<double> lhs = a.transpose() * a;
+ Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>> solver;
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
+ solver.analyzePattern(lhs);
+ if (VLOG_IS_ON(2)) {
+ std::stringstream ss;
+ solver.dumpMemory(ss);
+ VLOG(2) << "Symbolic Analysis\n" << ss.str();
+ }
+
+ event_logger.AddEvent("Analyze");
+ if (solver.info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message = "Eigen failure. Unable to find symbolic factorization.";
+ return summary;
+ }
+
+ solver.factorize(lhs);
+ event_logger.AddEvent("Factorize");
+ if (solver.info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "Eigen failure. Unable to find numeric factorization.";
+ return summary;
+ }
+
+ const Vector rhs = VectorRef(rhs_and_solution, lhs.cols());
+ VectorRef(rhs_and_solution, lhs.cols()) = solver.solve(rhs);
+ event_logger.AddEvent("Solve");
+ if (solver.info() != Eigen::Success) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "Eigen failure. Unable to do triangular solve.";
+ return summary;
+ }
+
+ return summary;
+#endif // CERES_USE_EIGEN_SPARSE
+}
+
+LinearSolver::Summary DynamicSparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifdef CERES_NO_CXSPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
+ "because Ceres was not built with support for CXSparse. "
+ "This requires enabling building with -DCXSPARSE=ON.";
+
+ return summary;
+
+#else
+ EventLogger event_logger(
+ "DynamicSparseNormalCholeskySolver::CXSparse::Solve");
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.message = "Success.";
+
+ CXSparse cxsparse;
+
+ // Wrap the augmented Jacobian in a compressed sparse column matrix.
+ cs_di a_transpose = cxsparse.CreateSparseMatrixTransposeView(A);
+
+ // Compute the normal equations. J'J delta = J'f and solve them
+ // using a sparse Cholesky factorization. Notice that when compared
+ // to SuiteSparse we have to explicitly compute the transpose of Jt,
+ // and then the normal equations before they can be
+ // factorized. CHOLMOD/SuiteSparse on the other hand can just work
+ // off of Jt to compute the Cholesky factorization of the normal
+ // equations.
+ cs_di* a = cxsparse.TransposeMatrix(&a_transpose);
+ cs_di* lhs = cxsparse.MatrixMatrixMultiply(&a_transpose, a);
+ cxsparse.Free(a);
+ event_logger.AddEvent("NormalEquations");
+
+ if (!cxsparse.SolveCholesky(lhs, rhs_and_solution)) {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "CXSparse::SolveCholesky failed";
+ }
+ event_logger.AddEvent("Solve");
+
+ cxsparse.Free(lhs);
+ event_logger.AddEvent("TearDown");
+ return summary;
+#endif
+}
+
+LinearSolver::Summary
+DynamicSparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A, double* rhs_and_solution) {
+#ifdef CERES_NO_SUITESPARSE
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ summary.message =
+ "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
+ "because Ceres was not built with support for SuiteSparse. "
+ "This requires enabling building with -DSUITESPARSE=ON.";
+ return summary;
+
+#else
+
+ EventLogger event_logger(
+ "DynamicSparseNormalCholeskySolver::SuiteSparse::Solve");
+ LinearSolver::Summary summary;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ summary.num_iterations = 1;
+ summary.message = "Success.";
+
+ SuiteSparse ss;
+ const int num_cols = A->num_cols();
+ cholmod_sparse lhs = ss.CreateSparseMatrixTransposeView(A);
+ event_logger.AddEvent("Setup");
+ cholmod_factor* factor = ss.AnalyzeCholesky(&lhs, &summary.message);
+ event_logger.AddEvent("Analysis");
+
+ if (factor == nullptr) {
+ summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
+ return summary;
+ }
+
+ summary.termination_type = ss.Cholesky(&lhs, factor, &summary.message);
+ if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
+ cholmod_dense cholmod_rhs =
+ ss.CreateDenseVectorView(rhs_and_solution, num_cols);
+ cholmod_dense* solution = ss.Solve(factor, &cholmod_rhs, &summary.message);
+ event_logger.AddEvent("Solve");
+ if (solution != nullptr) {
+ memcpy(
+ rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
+ ss.Free(solution);
+ } else {
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ }
+ }
+
+ ss.Free(factor);
+ event_logger.AddEvent("Teardown");
+ return summary;
+
+#endif
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
new file mode 100644
index 00000000000..4e31c7a8492
--- /dev/null
+++ b/extern/ceres/internal/ceres/dynamic_sparse_normal_cholesky_solver.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A solver for sparse linear least squares problem based on solving
+// the normal equations via a sparse cholesky factorization.
+
+#ifndef CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+
+// A variant of SparseNormalCholeskySolver in the case where matrix
+// sparsity is not constant across calls to Solve. This means that
+// there is no benefit to symbolically factorizing the matrix and
+// caching this factorization.
+//
+// TODO(alex): Add support for Accelerate sparse solvers:
+// https://github.com/ceres-solver/ceres-solver/issues/397
+class DynamicSparseNormalCholeskySolver
+ : public CompressedRowSparseMatrixSolver {
+ public:
+ explicit DynamicSparseNormalCholeskySolver(
+ const LinearSolver::Options& options);
+ virtual ~DynamicSparseNormalCholeskySolver() {}
+
+ private:
+ LinearSolver::Summary SolveImpl(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x) final;
+
+ LinearSolver::Summary SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
+
+ LinearSolver::Summary SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
+
+ LinearSolver::Summary SolveImplUsingEigen(
+ CompressedRowSparseMatrix* A,
+ double* rhs_and_solution);
+
+ const LinearSolver::Options options_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DYNAMIC_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/extern/ceres/internal/ceres/eigensparse.cc b/extern/ceres/internal/ceres/eigensparse.cc
new file mode 100644
index 00000000000..22ed2c43b5d
--- /dev/null
+++ b/extern/ceres/internal/ceres/eigensparse.cc
@@ -0,0 +1,190 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/eigensparse.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+#include <sstream>
+
+#include "Eigen/SparseCholesky"
+#include "Eigen/SparseCore"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+// TODO(sameeragarwal): Use enable_if to clean up the implementations
+// for when Scalar == double.
+template <typename Solver>
+class EigenSparseCholeskyTemplate : public SparseCholesky {
+ public:
+ EigenSparseCholeskyTemplate() : analyzed_(false) {}
+ virtual ~EigenSparseCholeskyTemplate() {}
+ CompressedRowSparseMatrix::StorageType StorageType() const final {
+ return CompressedRowSparseMatrix::LOWER_TRIANGULAR;
+ }
+
+ LinearSolverTerminationType Factorize(
+ const Eigen::SparseMatrix<typename Solver::Scalar>& lhs,
+ std::string* message) {
+ if (!analyzed_) {
+ solver_.analyzePattern(lhs);
+
+ if (VLOG_IS_ON(2)) {
+ std::stringstream ss;
+ solver_.dumpMemory(ss);
+ VLOG(2) << "Symbolic Analysis\n" << ss.str();
+ }
+
+ if (solver_.info() != Eigen::Success) {
+ *message = "Eigen failure. Unable to find symbolic factorization.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ analyzed_ = true;
+ }
+
+ solver_.factorize(lhs);
+ if (solver_.info() != Eigen::Success) {
+ *message = "Eigen failure. Unable to find numeric factorization.";
+ return LINEAR_SOLVER_FAILURE;
+ }
+ return LINEAR_SOLVER_SUCCESS;
+ }
+
+ LinearSolverTerminationType Solve(const double* rhs_ptr,
+ double* solution_ptr,
+ std::string* message) {
+ CHECK(analyzed_) << "Solve called without a call to Factorize first.";
+
+ scalar_rhs_ = ConstVectorRef(rhs_ptr, solver_.cols())
+ .template cast<typename Solver::Scalar>();
+
+ // The two casts are needed if the Scalar in this class is not
+ // double. For code simplicity we are going to assume that Eigen
+ // is smart enough to figure out that casting a double Vector to a
+ // double Vector is a straight copy. If this turns into a
+ // performance bottleneck (unlikely), we can revisit this.
+ scalar_solution_ = solver_.solve(scalar_rhs_);
+ VectorRef(solution_ptr, solver_.cols()) =
+ scalar_solution_.template cast<double>();
+
+ if (solver_.info() != Eigen::Success) {
+ *message = "Eigen failure. Unable to do triangular solve.";
+ return LINEAR_SOLVER_FAILURE;
+ }
+ return LINEAR_SOLVER_SUCCESS;
+ }
+
+ LinearSolverTerminationType Factorize(CompressedRowSparseMatrix* lhs,
+ std::string* message) final {
+ CHECK_EQ(lhs->storage_type(), StorageType());
+
+ typename Solver::Scalar* values_ptr = NULL;
+ if (std::is_same<typename Solver::Scalar, double>::value) {
+ values_ptr =
+ reinterpret_cast<typename Solver::Scalar*>(lhs->mutable_values());
+ } else {
+ // In the case where the scalar used in this class is not
+ // double. In that case, make a copy of the values array in the
+ // CompressedRowSparseMatrix and cast it to Scalar along the way.
+ values_ = ConstVectorRef(lhs->values(), lhs->num_nonzeros())
+ .cast<typename Solver::Scalar>();
+ values_ptr = values_.data();
+ }
+
+ Eigen::MappedSparseMatrix<typename Solver::Scalar, Eigen::ColMajor>
+ eigen_lhs(lhs->num_rows(),
+ lhs->num_rows(),
+ lhs->num_nonzeros(),
+ lhs->mutable_rows(),
+ lhs->mutable_cols(),
+ values_ptr);
+ return Factorize(eigen_lhs, message);
+ }
+
+ private:
+ Eigen::Matrix<typename Solver::Scalar, Eigen::Dynamic, 1> values_,
+ scalar_rhs_, scalar_solution_;
+ bool analyzed_;
+ Solver solver_;
+};
+
+std::unique_ptr<SparseCholesky> EigenSparseCholesky::Create(
+ const OrderingType ordering_type) {
+ std::unique_ptr<SparseCholesky> sparse_cholesky;
+
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+ Eigen::Upper,
+ Eigen::AMDOrdering<int>>
+ WithAMDOrdering;
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>,
+ Eigen::Upper,
+ Eigen::NaturalOrdering<int>>
+ WithNaturalOrdering;
+ if (ordering_type == AMD) {
+ sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+ } else {
+ sparse_cholesky.reset(
+ new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+ }
+ return sparse_cholesky;
+}
+
+EigenSparseCholesky::~EigenSparseCholesky() {}
+
+std::unique_ptr<SparseCholesky> FloatEigenSparseCholesky::Create(
+ const OrderingType ordering_type) {
+ std::unique_ptr<SparseCholesky> sparse_cholesky;
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+ Eigen::Upper,
+ Eigen::AMDOrdering<int>>
+ WithAMDOrdering;
+ typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<float>,
+ Eigen::Upper,
+ Eigen::NaturalOrdering<int>>
+ WithNaturalOrdering;
+ if (ordering_type == AMD) {
+ sparse_cholesky.reset(new EigenSparseCholeskyTemplate<WithAMDOrdering>());
+ } else {
+ sparse_cholesky.reset(
+ new EigenSparseCholeskyTemplate<WithNaturalOrdering>());
+ }
+ return sparse_cholesky;
+}
+
+FloatEigenSparseCholesky::~FloatEigenSparseCholesky() {}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_USE_EIGEN_SPARSE
diff --git a/extern/ceres/internal/ceres/eigensparse.h b/extern/ceres/internal/ceres/eigensparse.h
new file mode 100644
index 00000000000..2e6c6f01abb
--- /dev/null
+++ b/extern/ceres/internal/ceres/eigensparse.h
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// A simple C++ interface to the Eigen's Sparse Cholesky routines.
+
+#ifndef CERES_INTERNAL_EIGENSPARSE_H_
+#define CERES_INTERNAL_EIGENSPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_EIGEN_SPARSE
+
+#include <memory>
+#include <string>
+
+#include "Eigen/SparseCore"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+
+namespace ceres {
+namespace internal {
+
+class EigenSparseCholesky : public SparseCholesky {
+ public:
+ // Factory
+ static std::unique_ptr<SparseCholesky> Create(
+ const OrderingType ordering_type);
+
+ // SparseCholesky interface.
+ virtual ~EigenSparseCholesky();
+ virtual LinearSolverTerminationType Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+ virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+ virtual LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) = 0;
+};
+
+// Even though the input is double precision linear system, this class
+// solves it by computing a single precision Cholesky factorization.
+class FloatEigenSparseCholesky : public SparseCholesky {
+ public:
+ // Factory
+ static std::unique_ptr<SparseCholesky> Create(
+ const OrderingType ordering_type);
+
+ // SparseCholesky interface.
+ virtual ~FloatEigenSparseCholesky();
+ virtual LinearSolverTerminationType Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+ virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+ virtual LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) = 0;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_USE_EIGEN_SPARSE
+
+#endif // CERES_INTERNAL_EIGENSPARSE_H_
diff --git a/extern/ceres/internal/ceres/evaluator.cc b/extern/ceres/internal/ceres/evaluator.cc
index baba9afa11b..8387983553d 100644
--- a/extern/ceres/internal/ceres/evaluator.cc
+++ b/extern/ceres/internal/ceres/evaluator.cc
@@ -51,6 +51,8 @@ Evaluator::~Evaluator() {}
Evaluator* Evaluator::Create(const Evaluator::Options& options,
Program* program,
std::string* error) {
+ CHECK(options.context != NULL);
+
switch (options.linear_solver_type) {
case DENSE_QR:
case DENSE_NORMAL_CHOLESKY:
@@ -71,9 +73,9 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
DynamicCompressedRowJacobianFinalizer>(
options, program);
} else {
- return new ProgramEvaluator<ScratchEvaluatePreparer,
- CompressedRowJacobianWriter>(options,
- program);
+ return new ProgramEvaluator<BlockEvaluatePreparer,
+ BlockJacobianWriter>(options,
+ program);
}
default:
diff --git a/extern/ceres/internal/ceres/evaluator.h b/extern/ceres/internal/ceres/evaluator.h
index fea307919d0..b820958ed77 100644
--- a/extern/ceres/internal/ceres/evaluator.h
+++ b/extern/ceres/internal/ceres/evaluator.h
@@ -36,6 +36,7 @@
#include <string>
#include <vector>
+#include "ceres/context_impl.h"
#include "ceres/execution_summary.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
@@ -43,6 +44,7 @@
namespace ceres {
struct CRSMatrix;
+class EvaluationCallback;
namespace internal {
@@ -58,47 +60,18 @@ class Evaluator {
virtual ~Evaluator();
struct Options {
- Options()
- : num_threads(1),
- num_eliminate_blocks(-1),
- linear_solver_type(DENSE_QR),
- dynamic_sparsity(false) {}
-
- int num_threads;
- int num_eliminate_blocks;
- LinearSolverType linear_solver_type;
- bool dynamic_sparsity;
+ int num_threads = 1;
+ int num_eliminate_blocks = -1;
+ LinearSolverType linear_solver_type = DENSE_QR;
+ bool dynamic_sparsity = false;
+ ContextImpl* context = nullptr;
+ EvaluationCallback* evaluation_callback = nullptr;
};
static Evaluator* Create(const Options& options,
Program* program,
std::string* error);
- // This is used for computing the cost, residual and Jacobian for
- // returning to the user. For actually solving the optimization
- // problem, the optimization algorithm uses the ProgramEvaluator
- // objects directly.
- //
- // The residual, gradients and jacobian pointers can be NULL, in
- // which case they will not be evaluated. cost cannot be NULL.
- //
- // The parallelism of the evaluator is controlled by num_threads; it
- // should be at least 1.
- //
- // Note: That this function does not take a parameter vector as
- // input. The parameter blocks are evaluated on the values contained
- // in the arrays pointed to by their user_state pointers.
- //
- // Also worth noting is that this function mutates program by
- // calling Program::SetParameterOffsetsAndIndex() on it so that an
- // evaluator object can be constructed.
- static bool Evaluate(Program* program,
- int num_threads,
- double* cost,
- std::vector<double>* residuals,
- std::vector<double>* gradient,
- CRSMatrix* jacobian);
-
// Build and return a sparse matrix for storing and working with the Jacobian
// of the objective function. The jacobian has dimensions
// NumEffectiveParameters() by NumParameters(), and is typically extremely
@@ -117,16 +90,14 @@ class Evaluator {
// Schur complement based methods.
virtual SparseMatrix* CreateJacobian() const = 0;
-
// Options struct to control Evaluator::Evaluate;
struct EvaluateOptions {
- EvaluateOptions()
- : apply_loss_function(true) {
- }
-
// If false, the loss function correction is not applied to the
// residual blocks.
- bool apply_loss_function;
+ bool apply_loss_function = true;
+
+ // If false, this evaluation point is the same as the last one.
+ bool new_evaluation_point = true;
};
// Evaluate the cost function for the given state. Returns the cost,
@@ -190,12 +161,8 @@ class Evaluator {
// that the base class implementation does not have to worry about
// life time issues. Further, these calls are not expected to be
// frequent or performance sensitive.
- virtual std::map<std::string, int> CallStatistics() const {
- return std::map<std::string, int>();
- }
-
- virtual std::map<std::string, double> TimeStatistics() const {
- return std::map<std::string, double>();
+ virtual std::map<std::string, CallStatistics> Statistics() const {
+ return std::map<std::string, CallStatistics>();
}
};
diff --git a/extern/ceres/internal/ceres/execution_summary.h b/extern/ceres/internal/ceres/execution_summary.h
index aa9929d8974..17fd882af03 100644
--- a/extern/ceres/internal/ceres/execution_summary.h
+++ b/extern/ceres/internal/ceres/execution_summary.h
@@ -32,47 +32,45 @@
#define CERES_INTERNAL_EXECUTION_SUMMARY_H_
#include <map>
+#include <mutex>
#include <string>
#include "ceres/internal/port.h"
#include "ceres/wall_time.h"
-#include "ceres/mutex.h"
namespace ceres {
namespace internal {
-// Struct used by various objects to report statistics and other
-// information about their execution. e.g., ExecutionSummary::times
-// can be used for reporting times associated with various activities.
+struct CallStatistics {
+ CallStatistics() : time(0.), calls(0) {}
+ double time;
+ int calls;
+};
+
+// Struct used by various objects to report statistics about their
+// execution.
class ExecutionSummary {
public:
void IncrementTimeBy(const std::string& name, const double value) {
- CeresMutexLock l(&times_mutex_);
- times_[name] += value;
+ std::lock_guard<std::mutex> l(mutex_);
+ CallStatistics& call_stats = statistics_[name];
+ call_stats.time += value;
+ ++call_stats.calls;
}
- void IncrementCall(const std::string& name) {
- CeresMutexLock l(&calls_mutex_);
- calls_[name] += 1;
+ const std::map<std::string, CallStatistics>& statistics() const {
+ return statistics_;
}
- const std::map<std::string, double>& times() const { return times_; }
- const std::map<std::string, int>& calls() const { return calls_; }
-
private:
- Mutex times_mutex_;
- std::map<std::string, double> times_;
-
- Mutex calls_mutex_;
- std::map<std::string, int> calls_;
+ std::mutex mutex_;
+ std::map<std::string, CallStatistics> statistics_;
};
class ScopedExecutionTimer {
public:
ScopedExecutionTimer(const std::string& name, ExecutionSummary* summary)
- : start_time_(WallTimeInSeconds()),
- name_(name),
- summary_(summary) {}
+ : start_time_(WallTimeInSeconds()), name_(name), summary_(summary) {}
~ScopedExecutionTimer() {
summary_->IncrementTimeBy(name_, WallTimeInSeconds() - start_time_);
diff --git a/extern/ceres/internal/ceres/float_cxsparse.cc b/extern/ceres/internal/ceres/float_cxsparse.cc
new file mode 100644
index 00000000000..6c688303444
--- /dev/null
+++ b/extern/ceres/internal/ceres/float_cxsparse.cc
@@ -0,0 +1,47 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/float_cxsparse.h"
+
+#if !defined(CERES_NO_CXSPARSE)
+
+namespace ceres {
+namespace internal {
+
+std::unique_ptr<SparseCholesky> FloatCXSparseCholesky::Create(
+ OrderingType ordering_type) {
+ LOG(FATAL) << "FloatCXSparseCholesky is not available.";
+ return std::unique_ptr<SparseCholesky>();
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // !defined(CERES_NO_CXSPARSE)
diff --git a/extern/ceres/internal/ceres/float_cxsparse.h b/extern/ceres/internal/ceres/float_cxsparse.h
new file mode 100644
index 00000000000..57fc5e4c010
--- /dev/null
+++ b/extern/ceres/internal/ceres/float_cxsparse.h
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FLOAT_CXSPARSE_H_
+#define CERES_INTERNAL_FLOAT_CXSPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#if !defined(CERES_NO_CXSPARSE)
+
+#include <memory>
+#include "ceres/sparse_cholesky.h"
+
+namespace ceres {
+namespace internal {
+
+// Fake implementation of a single precision Sparse Cholesky using
+// CXSparse.
+class FloatCXSparseCholesky : public SparseCholesky {
+ public:
+ static std::unique_ptr<SparseCholesky> Create(
+ OrderingType ordering_type);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // !defined(CERES_NO_CXSPARSE)
+
+#endif // CERES_INTERNAL_FLOAT_CXSPARSE_H_
diff --git a/extern/ceres/internal/ceres/float_suitesparse.cc b/extern/ceres/internal/ceres/float_suitesparse.cc
new file mode 100644
index 00000000000..03604572b5c
--- /dev/null
+++ b/extern/ceres/internal/ceres/float_suitesparse.cc
@@ -0,0 +1,47 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/float_suitesparse.h"
+
+#if !defined(CERES_NO_SUITESPARSE)
+
+namespace ceres {
+namespace internal {
+
+std::unique_ptr<SparseCholesky> FloatSuiteSparseCholesky::Create(
+ OrderingType ordering_type) {
+ LOG(FATAL) << "FloatSuiteSparseCholesky is not available.";
+ return std::unique_ptr<SparseCholesky>();
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // !defined(CERES_NO_SUITESPARSE)
diff --git a/extern/ceres/internal/ceres/float_suitesparse.h b/extern/ceres/internal/ceres/float_suitesparse.h
new file mode 100644
index 00000000000..ac4d4090922
--- /dev/null
+++ b/extern/ceres/internal/ceres/float_suitesparse.h
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FLOAT_SUITESPARSE_H_
+#define CERES_INTERNAL_FLOAT_SUITESPARSE_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <memory>
+#include "ceres/sparse_cholesky.h"
+
+#if !defined(CERES_NO_SUITESPARSE)
+
+namespace ceres {
+namespace internal {
+
+// Fake implementation of a single precision Sparse Cholesky using
+// SuiteSparse.
+class FloatSuiteSparseCholesky : public SparseCholesky {
+ public:
+ static std::unique_ptr<SparseCholesky> Create(
+ OrderingType ordering_type);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // !defined(CERES_NO_SUITESPARSE)
+
+#endif // CERES_INTERNAL_FLOAT_SUITESPARSE_H_
diff --git a/extern/ceres/internal/ceres/integral_types.h b/extern/ceres/internal/ceres/function_sample.cc
index 98a746f13ff..2fd3dbdf7c5 100644
--- a/extern/ceres/internal/ceres/integral_types.h
+++ b/extern/ceres/internal/ceres/function_sample.cc
@@ -26,66 +26,48 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
-// Author: keir@google.com (Keir Mierle)
-//
-// Portable typedefs for various fixed-size integers. Uses template
-// metaprogramming instead of fragile compiler defines.
+// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_INTERNAL_INTEGRAL_TYPES_H_
-#define CERES_INTERNAL_INTEGRAL_TYPES_H_
+#include "ceres/function_sample.h"
+#include "ceres/stringprintf.h"
namespace ceres {
namespace internal {
-// Compile time ternary on types.
-template<bool kCondition, typename kTrueType, typename kFalseType>
-struct Ternary {
- typedef kTrueType type;
-};
-template<typename kTrueType, typename kFalseType>
-struct Ternary<false, kTrueType, kFalseType> {
- typedef kFalseType type;
-};
-
-#define CERES_INTSIZE(TYPE) \
- typename Ternary<sizeof(TYPE) * 8 == kBits, TYPE,
-
-template<int kBits>
-struct Integer {
- typedef
- CERES_INTSIZE(char)
- CERES_INTSIZE(short)
- CERES_INTSIZE(int)
- CERES_INTSIZE(long int)
- CERES_INTSIZE(long long)
- void>::type >::type >::type >::type >::type
- type;
-};
+FunctionSample::FunctionSample()
+ : x(0.0),
+ vector_x_is_valid(false),
+ value(0.0),
+ value_is_valid(false),
+ vector_gradient_is_valid(false),
+ gradient(0.0),
+ gradient_is_valid(false) {}
-template<int kBits>
-struct UnsignedInteger {
- typedef
- CERES_INTSIZE(unsigned char)
- CERES_INTSIZE(unsigned short)
- CERES_INTSIZE(unsigned int)
- CERES_INTSIZE(unsigned long int)
- CERES_INTSIZE(unsigned long long)
- void>::type >::type >::type >::type >::type
- type;
-};
+FunctionSample::FunctionSample(const double x, const double value)
+ : x(x),
+ vector_x_is_valid(false),
+ value(value),
+ value_is_valid(true),
+ vector_gradient_is_valid(false),
+ gradient(0.0),
+ gradient_is_valid(false) {}
-#undef CERES_INTSIZE
+FunctionSample::FunctionSample(const double x,
+ const double value,
+ const double gradient)
+ : x(x),
+ vector_x_is_valid(false),
+ value(value),
+ value_is_valid(true),
+ vector_gradient_is_valid(false),
+ gradient(gradient),
+ gradient_is_valid(true) {}
-typedef Integer< 8>::type int8;
-typedef Integer<32>::type int32;
-typedef Integer<64>::type int64;
-
-typedef UnsignedInteger< 8>::type uint8;
-typedef UnsignedInteger<16>::type uint16;
-typedef UnsignedInteger<32>::type uint32;
-typedef UnsignedInteger<64>::type uint64;
+std::string FunctionSample::ToDebugString() const {
+ return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
+ "value_is_valid: %d, gradient_is_valid: %d]",
+ x, value, gradient, value_is_valid, gradient_is_valid);
+}
} // namespace internal
} // namespace ceres
-
-#endif // CERES_INTERNAL_INTEGRAL_TYPES_H_
diff --git a/extern/ceres/internal/ceres/function_sample.h b/extern/ceres/internal/ceres/function_sample.h
new file mode 100644
index 00000000000..df79aef944f
--- /dev/null
+++ b/extern/ceres/internal/ceres/function_sample.h
@@ -0,0 +1,94 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_FUNCTION_SAMPLE_H_
+#define CERES_INTERNAL_FUNCTION_SAMPLE_H_
+
+#include <string>
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// FunctionSample is used by the line search routines to store and
+// communicate the value and (optionally) the gradient of the function
+// being minimized.
+//
+// Since line search, as the name implies, happens along a certain
+// line/direction, FunctionSample contains the information in two
+// ways: information in the ambient space and information along the
+// direction of search.
+struct FunctionSample {
+ FunctionSample();
+ FunctionSample(double x, double value);
+ FunctionSample(double x, double value, double gradient);
+
+ std::string ToDebugString() const;
+
+ // x is the location of the sample along the search direction.
+ double x;
+
+ // Let p be a point and d be the search direction then
+ //
+ // vector_x = p + x * d;
+ Vector vector_x;
+ // True if vector_x has been assigned a valid value.
+ bool vector_x_is_valid;
+
+ // value = f(vector_x)
+ double value;
+  // True if the evaluation was successful and value is a finite
+ // number.
+ bool value_is_valid;
+
+  // vector_gradient = Df(vector_x);
+ //
+ // D is the derivative operator.
+ Vector vector_gradient;
+ // True if the vector gradient was evaluated and the evaluation was
+ // successful (the value is a finite number).
+ bool vector_gradient_is_valid;
+
+ // gradient = d.transpose() * vector_gradient
+ //
+ // where d is the search direction.
+ double gradient;
+  // True if the evaluation of the gradient was successful and the
+ // value is a finite number.
+ bool gradient_is_valid;
+};
+
+
+
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_FUNCTION_SAMPLE_H_
diff --git a/extern/ceres/internal/ceres/generate_template_specializations.py b/extern/ceres/internal/ceres/generate_template_specializations.py
new file mode 100644
index 00000000000..5e91f8d2b6a
--- /dev/null
+++ b/extern/ceres/internal/ceres/generate_template_specializations.py
@@ -0,0 +1,246 @@
+# Ceres Solver - A fast non-linear least squares minimizer
+# Copyright 2015 Google Inc. All rights reserved.
+# http://ceres-solver.org/
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+# * Neither the name of Google Inc. nor the names of its contributors may be
+# used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specialization of the
+# SchurEliminator class. It is a rather large class
+# and the number of explicit instantiations is also large. Explicitly
+# generating these instantiations in separate .cc files breaks the
+# compilation into separate compilation unit rather than one large cc
+# file which takes 2+GB of RAM to compile.
+#
+# This script creates three sets of files.
+#
+# 1. schur_eliminator_x_x_x.cc and partitioned_matrix_view_x_x_x.cc
+# where, the x indicates the template parameters and
+#
+# 2. schur_eliminator.cc & partitioned_matrix_view.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# 3. schur_templates.cc
+#
+# that contains a function which can be queried to determine what
+# template specializations are available.
+#
+# The following list of tuples, specializations indicates the set of
+# specializations that is generated.
+SPECIALIZATIONS = [(2, 2, 2),
+ (2, 2, 3),
+ (2, 2, 4),
+ (2, 2, "Eigen::Dynamic"),
+ (2, 3, 3),
+ (2, 3, 4),
+ (2, 3, 6),
+ (2, 3, 9),
+ (2, 3, "Eigen::Dynamic"),
+ (2, 4, 3),
+ (2, 4, 4),
+ (2, 4, 6),
+ (2, 4, 8),
+ (2, 4, 9),
+ (2, 4, "Eigen::Dynamic"),
+ (2, "Eigen::Dynamic", "Eigen::Dynamic"),
+ (3, 3, 3),
+ (4, 4, 2),
+ (4, 4, 3),
+ (4, 4, 4),
+ (4, 4, "Eigen::Dynamic")]
+
+import schur_eliminator_template
+import partitioned_matrix_view_template
+import os
+import glob
+
+def SuffixForSize(size):
+ if size == "Eigen::Dynamic":
+ return "d"
+ return str(size)
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+ return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+ e_block_size,
+ f_block_size)))
+
+def GenerateFactoryConditional(row_block_size, e_block_size, f_block_size):
+ conditionals = []
+ if (row_block_size != "Eigen::Dynamic"):
+ conditionals.append("(options.row_block_size == %s)" % row_block_size)
+ if (e_block_size != "Eigen::Dynamic"):
+ conditionals.append("(options.e_block_size == %s)" % e_block_size)
+ if (f_block_size != "Eigen::Dynamic"):
+ conditionals.append("(options.f_block_size == %s)" % f_block_size)
+ if (len(conditionals) == 0):
+ return "%s"
+
+ if (len(conditionals) == 1):
+ return " if " + conditionals[0] + "{\n %s\n }\n"
+
+ return " if (" + " &&\n ".join(conditionals) + ") {\n %s\n }\n"
+
+def Specialize(name, data):
+ """
+ Generate specialization code and the conditionals to instantiate it.
+ """
+
+ # Specialization files
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ output = SpecializationFilename("generated/" + name,
+ row_block_size,
+ e_block_size,
+ f_block_size) + ".cc"
+
+ with open(output, "w") as f:
+ f.write(data["HEADER"])
+ f.write(data["SPECIALIZATION_FILE"] %
+ (row_block_size, e_block_size, f_block_size))
+
+ # Generate the _d_d_d specialization.
+ output = SpecializationFilename("generated/" + name,
+ "Eigen::Dynamic",
+ "Eigen::Dynamic",
+ "Eigen::Dynamic") + ".cc"
+ with open(output, "w") as f:
+ f.write(data["HEADER"])
+ f.write(data["DYNAMIC_FILE"] %
+ ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic"))
+
+ # Factory
+ with open(name + ".cc", "w") as f:
+ f.write(data["HEADER"])
+ f.write(data["FACTORY_FILE_HEADER"])
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ factory_conditional = GenerateFactoryConditional(
+ row_block_size, e_block_size, f_block_size)
+ factory = data["FACTORY"] % (row_block_size, e_block_size, f_block_size)
+ f.write(factory_conditional % factory);
+ f.write(data["FACTORY_FOOTER"])
+
+QUERY_HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// What template specializations are available.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+"""
+
+QUERY_FILE_HEADER = """
+#include "ceres/internal/eigen.h"
+#include "ceres/schur_templates.h"
+
+namespace ceres {
+namespace internal {
+
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+ int* e_block_size,
+ int* f_block_size) {
+ LinearSolver::Options options;
+ options.row_block_size = *row_block_size;
+ options.e_block_size = *e_block_size;
+ options.f_block_size = *f_block_size;
+ *row_block_size = Eigen::Dynamic;
+ *e_block_size = Eigen::Dynamic;
+ *f_block_size = Eigen::Dynamic;
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+QUERY_FOOTER = """
+#endif
+ return;
+}
+
+} // namespace internal
+} // namespace ceres
+"""
+
+QUERY_ACTION = """ *row_block_size = %s;
+ *e_block_size = %s;
+ *f_block_size = %s;
+ return;"""
+
+def GenerateQueryFile():
+ """
+ Generate file that allows querying for available template specializations.
+ """
+
+ with open("schur_templates.cc", "w") as f:
+ f.write(QUERY_HEADER)
+ f.write(QUERY_FILE_HEADER)
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ factory_conditional = GenerateFactoryConditional(
+ row_block_size, e_block_size, f_block_size)
+ action = QUERY_ACTION % (row_block_size, e_block_size, f_block_size)
+ f.write(factory_conditional % action)
+ f.write(QUERY_FOOTER)
+
+
+if __name__ == "__main__":
+ for f in glob.glob("generated/*"):
+ os.remove(f)
+
+ Specialize("schur_eliminator",
+ schur_eliminator_template.__dict__)
+ Specialize("partitioned_matrix_view",
+ partitioned_matrix_view_template.__dict__)
+ GenerateQueryFile()
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
index 500115b9897..86ad17b4f71 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
index 1384cb619e3..33018d573a4 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
index 030035ec97b..a429a546e3d 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
index c9501b50170..f6f03ea6dcc 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
index c2639bff69e..0b73e1a2aa8 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
index 693e43959c1..bc4a86194eb 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
index 7b9368ffefd..fe8f7dd37af 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_6.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
index e72c5f6937a..ac493fcd0c0 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
index c1f410eb64c..e29efaf4832 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
index 7292c333d5d..e61e0a31314 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
index 891d65a8646..2e1170da01f 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
new file mode 100644
index 00000000000..4a5590d9751
--- /dev/null
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<2, 4, 6>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
index 395f6bd4c13..83015f1ecc5 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_8.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
index 88952b10e34..25671f913dd 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_9.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
index 7733e1993eb..d259802bd5a 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
index 117a0cdb8c1..c9567595acd 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
new file mode 100644
index 00000000000..d3b20be70e7
--- /dev/null
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_3_3_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of PartitionedMatrixView.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/partitioned_matrix_view_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class PartitionedMatrixView<3, 3, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
index a620bb70dba..f08049c9653 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
index 2978630832c..9342612022f 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
index bcd03b02e3a..8b273fa0da0 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
index 6b541ecf0d9..e8b45e49eca 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
index 85111e722c4..3545b869d5f 100644
--- a/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
#include "ceres/partitioned_matrix_view_impl.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
index ac07a3f229e..79fcf437981 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_2.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
index 0ec09553f9e..edd7fb649b4 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
index 74a42cc4a16..692267dba46 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
index 5ce757fda5d..33d9c6de270 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_2_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
index 2e7ae28b4ea..4a5e2fe30c0 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
index 443207070cf..7ee63d069aa 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
index ac2f358b383..108760ef1f8 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_6.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
index 930ab440fa5..4fea2fa4417 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_9.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
index 486c53d36f4..0d13c99e7ca 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_3_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
index 6f247a7b832..3827c653a63 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
index c44cd045263..47bdfab1f22 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc
new file mode 100644
index 00000000000..3777be22707
--- /dev/null
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_6.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<2, 4, 6>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
index c9a0d5fc729..862c76a2a9c 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_8.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
index b0455b0bca0..5b5b7ccd415 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_9.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
index 3234380f23c..ce2d450b073 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_4_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
index 311f8556932..9b02bd9db5a 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc
new file mode 100644
index 00000000000..1cbeadf518f
--- /dev/null
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_3_3_3.cc
@@ -0,0 +1,58 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<3, 3, 3>;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
index bc40bd55296..10f709d7577 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_2.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
index cca88c802b0..bcbcc745519 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_3.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
index 33c94a907b9..44ecc87deba 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_4.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
index 1a1866f93a8..69c856304f0 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_4_4_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
diff --git a/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc b/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
index 6b18ef8c863..348708bb335 100644
--- a/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
+++ b/extern/ceres/internal/ceres/generated/schur_eliminator_d_d_d.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
#include "ceres/schur_eliminator_impl.h"
diff --git a/extern/ceres/internal/ceres/gradient_checker.cc b/extern/ceres/internal/ceres/gradient_checker.cc
index c16c141db09..ef56666970d 100644
--- a/extern/ceres/internal/ceres/gradient_checker.cc
+++ b/extern/ceres/internal/ceres/gradient_checker.cc
@@ -34,6 +34,7 @@
#include <algorithm>
#include <cmath>
+#include <cstdint>
#include <numeric>
#include <string>
#include <vector>
@@ -61,11 +62,11 @@ bool EvaluateCostFunction(
Vector* residuals,
std::vector<Matrix>* jacobians,
std::vector<Matrix>* local_jacobians) {
- CHECK_NOTNULL(residuals);
- CHECK_NOTNULL(jacobians);
- CHECK_NOTNULL(local_jacobians);
+ CHECK(residuals != nullptr);
+ CHECK(jacobians != nullptr);
+ CHECK(local_jacobians != nullptr);
- const vector<int32>& block_sizes = function->parameter_block_sizes();
+ const vector<int32_t>& block_sizes = function->parameter_block_sizes();
const int num_parameter_blocks = block_sizes.size();
// Allocate Jacobian matrices in local space.
@@ -110,7 +111,7 @@ bool EvaluateCostFunction(
Matrix global_J_local(global_size, local_size);
local_parameterizations.at(i)->ComputeJacobian(
parameters[i], global_J_local.data());
- local_jacobians->at(i) = jacobians->at(i) * global_J_local;
+ local_jacobians->at(i).noalias() = jacobians->at(i) * global_J_local;
}
}
return true;
@@ -122,20 +123,20 @@ GradientChecker::GradientChecker(
const vector<const LocalParameterization*>* local_parameterizations,
const NumericDiffOptions& options) :
function_(function) {
- CHECK_NOTNULL(function);
+ CHECK(function != nullptr);
if (local_parameterizations != NULL) {
local_parameterizations_ = *local_parameterizations;
} else {
local_parameterizations_.resize(function->parameter_block_sizes().size(),
NULL);
}
- DynamicNumericDiffCostFunction<CostFunction, CENTRAL>*
+ DynamicNumericDiffCostFunction<CostFunction, RIDDERS>*
finite_diff_cost_function =
- new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>(
+ new DynamicNumericDiffCostFunction<CostFunction, RIDDERS>(
function, DO_NOT_TAKE_OWNERSHIP, options);
finite_diff_cost_function_.reset(finite_diff_cost_function);
- const vector<int32>& parameter_block_sizes =
+ const vector<int32_t>& parameter_block_sizes =
function->parameter_block_sizes();
const int num_parameter_blocks = parameter_block_sizes.size();
for (int i = 0; i < num_parameter_blocks; ++i) {
diff --git a/extern/ceres/internal/ceres/gradient_checking_cost_function.cc b/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
index f2c73367891..13d6c58a6b7 100644
--- a/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
+++ b/extern/ceres/internal/ceres/gradient_checking_cost_function.cc
@@ -33,13 +33,13 @@
#include <algorithm>
#include <cmath>
+#include <cstdint>
#include <numeric>
#include <string>
#include <vector>
#include "ceres/gradient_checker.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
@@ -74,8 +74,8 @@ class GradientCheckingCostFunction : public CostFunction {
relative_precision_(relative_precision),
extra_info_(extra_info),
callback_(callback) {
- CHECK_NOTNULL(callback_);
- const vector<int32>& parameter_block_sizes =
+ CHECK(callback_ != nullptr);
+ const vector<int32_t>& parameter_block_sizes =
function->parameter_block_sizes();
*mutable_parameter_block_sizes() = parameter_block_sizes;
set_num_residuals(function->num_residuals());
@@ -83,9 +83,9 @@ class GradientCheckingCostFunction : public CostFunction {
virtual ~GradientCheckingCostFunction() { }
- virtual bool Evaluate(double const* const* parameters,
- double* residuals,
- double** jacobians) const {
+ bool Evaluate(double const* const* parameters,
+ double* residuals,
+ double** jacobians) const final {
if (!jacobians) {
// Nothing to check in this case; just forward.
return function_->Evaluate(parameters, residuals, NULL);
@@ -107,7 +107,7 @@ class GradientCheckingCostFunction : public CostFunction {
MatrixRef(residuals, num_residuals, 1) = results.residuals;
// Copy the original jacobian blocks into the jacobians array.
- const vector<int32>& block_sizes = function_->parameter_block_sizes();
+ const vector<int32_t>& block_sizes = function_->parameter_block_sizes();
for (int k = 0; k < block_sizes.size(); k++) {
if (jacobians[k] != NULL) {
MatrixRef(jacobians[k],
@@ -148,10 +148,9 @@ CallbackReturnType GradientCheckingIterationCallback::operator()(
}
void GradientCheckingIterationCallback::SetGradientErrorDetected(
std::string& error_log) {
- mutex_.Lock();
+ std::lock_guard<std::mutex> l(mutex_);
gradient_error_detected_ = true;
error_log_ += "\n" + error_log;
- mutex_.Unlock();
}
CostFunction* CreateGradientCheckingCostFunction(
@@ -176,7 +175,7 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
double relative_step_size,
double relative_precision,
GradientCheckingIterationCallback* callback) {
- CHECK_NOTNULL(callback);
+ CHECK(callback != nullptr);
// We create new CostFunctions by wrapping the original CostFunction
// in a gradient checking CostFunction. So its okay for the
// ProblemImpl to take ownership of it and destroy it. The
@@ -189,6 +188,7 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
DO_NOT_TAKE_OWNERSHIP;
gradient_checking_problem_options.local_parameterization_ownership =
DO_NOT_TAKE_OWNERSHIP;
+ gradient_checking_problem_options.context = problem_impl->context();
NumericDiffOptions numeric_diff_options;
numeric_diff_options.relative_step_size = relative_step_size;
@@ -212,6 +212,17 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
gradient_checking_problem_impl->SetParameterBlockConstant(
parameter_block->mutable_user_state());
}
+
+ for (int i = 0; i < parameter_block->Size(); ++i) {
+ gradient_checking_problem_impl->SetParameterUpperBound(
+ parameter_block->mutable_user_state(),
+ i,
+ parameter_block->UpperBound(i));
+ gradient_checking_problem_impl->SetParameterLowerBound(
+ parameter_block->mutable_user_state(),
+ i,
+ parameter_block->LowerBound(i));
+ }
}
// For every ResidualBlock in problem_impl, create a new
@@ -255,7 +266,8 @@ ProblemImpl* CreateGradientCheckingProblemImpl(
gradient_checking_problem_impl->AddResidualBlock(
gradient_checking_cost_function,
const_cast<LossFunction*>(residual_block->loss_function()),
- parameter_blocks);
+ parameter_blocks.data(),
+ static_cast<int>(parameter_blocks.size()));
}
// Normally, when a problem is given to the solver, we guarantee
diff --git a/extern/ceres/internal/ceres/gradient_checking_cost_function.h b/extern/ceres/internal/ceres/gradient_checking_cost_function.h
index 497f8e2a594..e9a34f7eb9f 100644
--- a/extern/ceres/internal/ceres/gradient_checking_cost_function.h
+++ b/extern/ceres/internal/ceres/gradient_checking_cost_function.h
@@ -32,12 +32,12 @@
#ifndef CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
#define CERES_INTERNAL_GRADIENT_CHECKING_COST_FUNCTION_H_
+#include <mutex>
#include <string>
#include "ceres/cost_function.h"
#include "ceres/iteration_callback.h"
#include "ceres/local_parameterization.h"
-#include "ceres/mutex.h"
namespace ceres {
namespace internal {
@@ -52,7 +52,7 @@ class GradientCheckingIterationCallback : public IterationCallback {
// Will return SOLVER_CONTINUE until a gradient error has been detected,
// then return SOLVER_ABORT.
- virtual CallbackReturnType operator()(const IterationSummary& summary);
+ CallbackReturnType operator()(const IterationSummary& summary) final;
// Notify this that a gradient error has occurred (thread safe).
void SetGradientErrorDetected(std::string& error_log);
@@ -63,8 +63,7 @@ class GradientCheckingIterationCallback : public IterationCallback {
private:
bool gradient_error_detected_;
std::string error_log_;
- // Mutex protecting member variables.
- ceres::internal::Mutex mutex_;
+ std::mutex mutex_;
};
// Creates a CostFunction that checks the Jacobians that cost_function computes
diff --git a/extern/ceres/internal/ceres/gradient_problem_evaluator.h b/extern/ceres/internal/ceres/gradient_problem_evaluator.h
index 2c562544768..c5ad1d71607 100644
--- a/extern/ceres/internal/ceres/gradient_problem_evaluator.h
+++ b/extern/ceres/internal/ceres/gradient_problem_evaluator.h
@@ -48,43 +48,46 @@ class GradientProblemEvaluator : public Evaluator {
explicit GradientProblemEvaluator(const GradientProblem& problem)
: problem_(problem) {}
virtual ~GradientProblemEvaluator() {}
- virtual SparseMatrix* CreateJacobian() const { return NULL; }
- virtual bool Evaluate(const EvaluateOptions& evaluate_options,
- const double* state,
- double* cost,
- double* residuals,
- double* gradient,
- SparseMatrix* jacobian) {
+ SparseMatrix* CreateJacobian() const final { return nullptr; }
+ bool Evaluate(const EvaluateOptions& evaluate_options,
+ const double* state,
+ double* cost,
+ double* residuals,
+ double* gradient,
+ SparseMatrix* jacobian) final {
CHECK(jacobian == NULL);
ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
+ // The reason we use Residual and Jacobian here even when we are
+ // only computing the cost and gradient has to do with the fact
+ // that the line search minimizer code is used by both the
+ // GradientProblemSolver and the main CeresSolver coder where the
+ // Evaluator evaluates the Jacobian, and these magic strings need
+ // to be consistent across the code base for the time accounting
+ // to work.
ScopedExecutionTimer call_type_timer(
- gradient == NULL ? "Evaluator::Cost" : "Evaluator::Gradient",
+ gradient == NULL ? "Evaluator::Residual" : "Evaluator::Jacobian",
&execution_summary_);
return problem_.Evaluate(state, cost, gradient);
}
- virtual bool Plus(const double* state,
- const double* delta,
- double* state_plus_delta) const {
+ bool Plus(const double* state,
+ const double* delta,
+ double* state_plus_delta) const final {
return problem_.Plus(state, delta, state_plus_delta);
}
- virtual int NumParameters() const {
+ int NumParameters() const final {
return problem_.NumParameters();
}
- virtual int NumEffectiveParameters() const {
+ int NumEffectiveParameters() const final {
return problem_.NumLocalParameters();
}
- virtual int NumResiduals() const { return 1; }
+ int NumResiduals() const final { return 1; }
- virtual std::map<std::string, int> CallStatistics() const {
- return execution_summary_.calls();
- }
-
- virtual std::map<std::string, double> TimeStatistics() const {
- return execution_summary_.times();
+ std::map<std::string, internal::CallStatistics> Statistics() const final {
+ return execution_summary_.statistics();
}
private:
diff --git a/extern/ceres/internal/ceres/gradient_problem_solver.cc b/extern/ceres/internal/ceres/gradient_problem_solver.cc
index 8709f8f3fbd..1639e30666c 100644
--- a/extern/ceres/internal/ceres/gradient_problem_solver.cc
+++ b/extern/ceres/internal/ceres/gradient_problem_solver.cc
@@ -30,6 +30,7 @@
#include "ceres/gradient_problem_solver.h"
+#include <memory>
#include "ceres/callbacks.h"
#include "ceres/gradient_problem.h"
#include "ceres/gradient_problem_evaluator.h"
@@ -72,6 +73,7 @@ Solver::Options GradientProblemSolverOptionsToSolverOptions(
COPY_OPTION(max_line_search_step_expansion);
COPY_OPTION(max_num_iterations);
COPY_OPTION(max_solver_time_in_seconds);
+ COPY_OPTION(parameter_tolerance);
COPY_OPTION(function_tolerance);
COPY_OPTION(gradient_tolerance);
COPY_OPTION(logging_type);
@@ -97,16 +99,18 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
const GradientProblem& problem,
double* parameters_ptr,
GradientProblemSolver::Summary* summary) {
- using internal::scoped_ptr;
- using internal::WallTimeInSeconds;
- using internal::Minimizer;
+ using internal::CallStatistics;
using internal::GradientProblemEvaluator;
+ using internal::GradientProblemSolverStateUpdatingCallback;
using internal::LoggingCallback;
+ using internal::Minimizer;
using internal::SetSummaryFinalCost;
+ using internal::WallTimeInSeconds;
double start_time = WallTimeInSeconds();
- *CHECK_NOTNULL(summary) = Summary();
+ CHECK(summary != nullptr);
+ *summary = Summary();
summary->num_parameters = problem.NumParameters();
summary->num_local_parameters = problem.NumLocalParameters();
summary->line_search_direction_type = options.line_search_direction_type; // NOLINT
@@ -121,6 +125,10 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
return;
}
+ VectorRef parameters(parameters_ptr, problem.NumParameters());
+ Vector solution(problem.NumParameters());
+ solution = parameters;
+
// TODO(sameeragarwal): This is a bit convoluted, we should be able
// to convert to minimizer options directly, but this will do for
// now.
@@ -128,7 +136,7 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
Minimizer::Options(GradientProblemSolverOptionsToSolverOptions(options));
minimizer_options.evaluator.reset(new GradientProblemEvaluator(problem));
- scoped_ptr<IterationCallback> logging_callback;
+ std::unique_ptr<IterationCallback> logging_callback;
if (options.logging_type != SILENT) {
logging_callback.reset(
new LoggingCallback(LINE_SEARCH, options.minimizer_progress_to_stdout));
@@ -136,10 +144,16 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
logging_callback.get());
}
- scoped_ptr<Minimizer> minimizer(Minimizer::Create(LINE_SEARCH));
- Vector solution(problem.NumParameters());
- VectorRef parameters(parameters_ptr, problem.NumParameters());
- solution = parameters;
+ std::unique_ptr<IterationCallback> state_updating_callback;
+ if (options.update_state_every_iteration) {
+ state_updating_callback.reset(
+ new GradientProblemSolverStateUpdatingCallback(
+ problem.NumParameters(), solution.data(), parameters_ptr));
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ state_updating_callback.get());
+ }
+
+ std::unique_ptr<Minimizer> minimizer(Minimizer::Create(LINE_SEARCH));
Solver::Summary solver_summary;
solver_summary.fixed_cost = 0.0;
@@ -162,34 +176,23 @@ void GradientProblemSolver::Solve(const GradientProblemSolver::Options& options,
SetSummaryFinalCost(summary);
}
- const std::map<string, double>& evaluator_time_statistics =
- minimizer_options.evaluator->TimeStatistics();
- summary->cost_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0);
- summary->gradient_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0);
+ const std::map<string, CallStatistics>& evaluator_statistics =
+ minimizer_options.evaluator->Statistics();
+ {
+ const CallStatistics& call_stats = FindWithDefault(
+ evaluator_statistics, "Evaluator::Residual", CallStatistics());
+ summary->cost_evaluation_time_in_seconds = call_stats.time;
+ summary->num_cost_evaluations = call_stats.calls;
+ }
- summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
-}
+ {
+ const CallStatistics& call_stats = FindWithDefault(
+ evaluator_statistics, "Evaluator::Jacobian", CallStatistics());
+ summary->gradient_evaluation_time_in_seconds = call_stats.time;
+ summary->num_gradient_evaluations = call_stats.calls;
+ }
-// Invalid values for most fields, to ensure that we are not
-// accidentally reporting default values.
-GradientProblemSolver::Summary::Summary()
- : termination_type(FAILURE),
- message("ceres::GradientProblemSolve was not called."),
- initial_cost(-1.0),
- final_cost(-1.0),
- total_time_in_seconds(-1.0),
- cost_evaluation_time_in_seconds(-1.0),
- gradient_evaluation_time_in_seconds(-1.0),
- line_search_polynomial_minimization_time_in_seconds(-1.0),
- num_parameters(-1),
- num_local_parameters(-1),
- line_search_direction_type(LBFGS),
- line_search_type(ARMIJO),
- line_search_interpolation_type(BISECTION),
- nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
- max_lbfgs_rank(-1) {
+ summary->total_time_in_seconds = WallTimeInSeconds() - start_time;
}
bool GradientProblemSolver::Summary::IsSolutionUsable() const {
@@ -256,15 +259,15 @@ string GradientProblemSolver::Summary::FullReport() const {
static_cast<int>(iterations.size()));
StringAppendF(&report, "\nTime (in seconds):\n");
-
- StringAppendF(&report, "\n Cost evaluation %23.4f\n",
- cost_evaluation_time_in_seconds);
- StringAppendF(&report, " Gradient evaluation %23.4f\n",
- gradient_evaluation_time_in_seconds);
- StringAppendF(&report, " Polynomial minimization %17.4f\n",
+ StringAppendF(&report, "\n Cost evaluation %23.6f (%d)\n",
+ cost_evaluation_time_in_seconds,
+ num_cost_evaluations);
+ StringAppendF(&report, " Gradient & cost evaluation %16.6f (%d)\n",
+ gradient_evaluation_time_in_seconds,
+ num_gradient_evaluations);
+ StringAppendF(&report, " Polynomial minimization %17.6f\n",
line_search_polynomial_minimization_time_in_seconds);
-
- StringAppendF(&report, "Total %25.4f\n\n",
+ StringAppendF(&report, "Total %25.6f\n\n",
total_time_in_seconds);
StringAppendF(&report, "Termination: %25s (%s)\n",
diff --git a/extern/ceres/internal/ceres/graph.h b/extern/ceres/internal/ceres/graph.h
index b96b67265cb..4e1fd81c1ea 100644
--- a/extern/ceres/internal/ceres/graph.h
+++ b/extern/ceres/internal/ceres/graph.h
@@ -32,11 +32,11 @@
#define CERES_INTERNAL_GRAPH_H_
#include <limits>
+#include <unordered_set>
+#include <unordered_map>
#include <utility>
-#include "ceres/integral_types.h"
#include "ceres/map_util.h"
-#include "ceres/collections_port.h"
-#include "ceres/internal/macros.h"
+#include "ceres/pair_hash.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -53,7 +53,7 @@ class Graph {
// Add a vertex.
void AddVertex(const Vertex& vertex) {
if (vertices_.insert(vertex).second) {
- edges_[vertex] = HashSet<Vertex>();
+ edges_[vertex] = std::unordered_set<Vertex>();
}
}
@@ -63,10 +63,9 @@ class Graph {
}
vertices_.erase(vertex);
- const HashSet<Vertex>& sinks = edges_[vertex];
- for (typename HashSet<Vertex>::const_iterator it = sinks.begin();
- it != sinks.end(); ++it) {
- edges_[*it].erase(vertex);
+ const std::unordered_set<Vertex>& sinks = edges_[vertex];
+ for (const Vertex& s : sinks) {
+ edges_[s].erase(vertex);
}
edges_.erase(vertex);
@@ -90,19 +89,17 @@ class Graph {
// Calling Neighbors on a vertex not in the graph will result in
// undefined behaviour.
- const HashSet<Vertex>& Neighbors(const Vertex& vertex) const {
+ const std::unordered_set<Vertex>& Neighbors(const Vertex& vertex) const {
return FindOrDie(edges_, vertex);
}
- const HashSet<Vertex>& vertices() const {
+ const std::unordered_set<Vertex>& vertices() const {
return vertices_;
}
private:
- HashSet<Vertex> vertices_;
- HashMap<Vertex, HashSet<Vertex> > edges_;
-
- CERES_DISALLOW_COPY_AND_ASSIGN(Graph);
+ std::unordered_set<Vertex> vertices_;
+ std::unordered_map<Vertex, std::unordered_set<Vertex>> edges_;
};
// A weighted undirected graph templated over the vertex ids. Vertex
@@ -117,7 +114,7 @@ class WeightedGraph {
void AddVertex(const Vertex& vertex, double weight) {
if (vertices_.find(vertex) == vertices_.end()) {
vertices_.insert(vertex);
- edges_[vertex] = HashSet<Vertex>();
+ edges_[vertex] = std::unordered_set<Vertex>();
}
vertex_weights_[vertex] = weight;
}
@@ -135,15 +132,14 @@ class WeightedGraph {
vertices_.erase(vertex);
vertex_weights_.erase(vertex);
- const HashSet<Vertex>& sinks = edges_[vertex];
- for (typename HashSet<Vertex>::const_iterator it = sinks.begin();
- it != sinks.end(); ++it) {
- if (vertex < *it) {
- edge_weights_.erase(std::make_pair(vertex, *it));
+ const std::unordered_set<Vertex>& sinks = edges_[vertex];
+ for (const Vertex& s : sinks) {
+ if (vertex < s) {
+ edge_weights_.erase(std::make_pair(vertex, s));
} else {
- edge_weights_.erase(std::make_pair(*it, vertex));
+ edge_weights_.erase(std::make_pair(s, vertex));
}
- edges_[*it].erase(vertex);
+ edges_[s].erase(vertex);
}
edges_.erase(vertex);
@@ -198,11 +194,11 @@ class WeightedGraph {
// Calling Neighbors on a vertex not in the graph will result in
// undefined behaviour.
- const HashSet<Vertex>& Neighbors(const Vertex& vertex) const {
+ const std::unordered_set<Vertex>& Neighbors(const Vertex& vertex) const {
return FindOrDie(edges_, vertex);
}
- const HashSet<Vertex>& vertices() const {
+ const std::unordered_set<Vertex>& vertices() const {
return vertices_;
}
@@ -211,12 +207,11 @@ class WeightedGraph {
}
private:
- HashSet<Vertex> vertices_;
- HashMap<Vertex, double> vertex_weights_;
- HashMap<Vertex, HashSet<Vertex> > edges_;
- HashMap<std::pair<Vertex, Vertex>, double> edge_weights_;
-
- CERES_DISALLOW_COPY_AND_ASSIGN(WeightedGraph);
+ std::unordered_set<Vertex> vertices_;
+ std::unordered_map<Vertex, double> vertex_weights_;
+ std::unordered_map<Vertex, std::unordered_set<Vertex>> edges_;
+ std::unordered_map<std::pair<Vertex, Vertex>, double, pair_hash>
+ edge_weights_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/graph_algorithms.h b/extern/ceres/internal/ceres/graph_algorithms.h
index d1d3f52cd22..b0629313159 100644
--- a/extern/ceres/internal/ceres/graph_algorithms.h
+++ b/extern/ceres/internal/ceres/graph_algorithms.h
@@ -34,9 +34,10 @@
#define CERES_INTERNAL_GRAPH_ALGORITHMS_H_
#include <algorithm>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include <utility>
-#include "ceres/collections_port.h"
#include "ceres/graph.h"
#include "ceres/wall_time.h"
#include "glog/logging.h"
@@ -96,10 +97,10 @@ class VertexDegreeLessThan {
template <typename Vertex>
int IndependentSetOrdering(const Graph<Vertex>& graph,
std::vector<Vertex>* ordering) {
- const HashSet<Vertex>& vertices = graph.vertices();
+ const std::unordered_set<Vertex>& vertices = graph.vertices();
const int num_vertices = vertices.size();
- CHECK_NOTNULL(ordering);
+ CHECK(ordering != nullptr);
ordering->clear();
ordering->reserve(num_vertices);
@@ -109,34 +110,29 @@ int IndependentSetOrdering(const Graph<Vertex>& graph,
const char kBlack = 2;
// Mark all vertices white.
- HashMap<Vertex, char> vertex_color;
+ std::unordered_map<Vertex, char> vertex_color;
std::vector<Vertex> vertex_queue;
- for (typename HashSet<Vertex>::const_iterator it = vertices.begin();
- it != vertices.end();
- ++it) {
- vertex_color[*it] = kWhite;
- vertex_queue.push_back(*it);
+ for (const Vertex& vertex : vertices) {
+ vertex_color[vertex] = kWhite;
+ vertex_queue.push_back(vertex);
}
-
- std::sort(vertex_queue.begin(), vertex_queue.end(),
+ std::sort(vertex_queue.begin(),
+ vertex_queue.end(),
VertexTotalOrdering<Vertex>(graph));
// Iterate over vertex_queue. Pick the first white vertex, add it
// to the independent set. Mark it black and its neighbors grey.
- for (int i = 0; i < vertex_queue.size(); ++i) {
- const Vertex& vertex = vertex_queue[i];
+ for (const Vertex& vertex : vertex_queue) {
if (vertex_color[vertex] != kWhite) {
continue;
}
ordering->push_back(vertex);
vertex_color[vertex] = kBlack;
- const HashSet<Vertex>& neighbors = graph.Neighbors(vertex);
- for (typename HashSet<Vertex>::const_iterator it = neighbors.begin();
- it != neighbors.end();
- ++it) {
- vertex_color[*it] = kGrey;
+ const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex);
+ for (const Vertex& neighbor : neighbors) {
+ vertex_color[neighbor] = kGrey;
}
}
@@ -145,10 +141,7 @@ int IndependentSetOrdering(const Graph<Vertex>& graph,
// Iterate over the vertices and add all the grey vertices to the
// ordering. At this stage there should only be black or grey
// vertices in the graph.
- for (typename std::vector<Vertex>::const_iterator it = vertex_queue.begin();
- it != vertex_queue.end();
- ++it) {
- const Vertex vertex = *it;
+ for (const Vertex& vertex : vertex_queue) {
DCHECK(vertex_color[vertex] != kWhite);
if (vertex_color[vertex] != kBlack) {
ordering->push_back(vertex);
@@ -172,8 +165,8 @@ int IndependentSetOrdering(const Graph<Vertex>& graph,
template <typename Vertex>
int StableIndependentSetOrdering(const Graph<Vertex>& graph,
std::vector<Vertex>* ordering) {
- CHECK_NOTNULL(ordering);
- const HashSet<Vertex>& vertices = graph.vertices();
+ CHECK(ordering != nullptr);
+ const std::unordered_set<Vertex>& vertices = graph.vertices();
const int num_vertices = vertices.size();
CHECK_EQ(vertices.size(), ordering->size());
@@ -188,11 +181,9 @@ int StableIndependentSetOrdering(const Graph<Vertex>& graph,
VertexDegreeLessThan<Vertex>(graph));
// Mark all vertices white.
- HashMap<Vertex, char> vertex_color;
- for (typename HashSet<Vertex>::const_iterator it = vertices.begin();
- it != vertices.end();
- ++it) {
- vertex_color[*it] = kWhite;
+ std::unordered_map<Vertex, char> vertex_color;
+ for (const Vertex& vertex : vertices) {
+ vertex_color[vertex] = kWhite;
}
ordering->clear();
@@ -207,11 +198,9 @@ int StableIndependentSetOrdering(const Graph<Vertex>& graph,
ordering->push_back(vertex);
vertex_color[vertex] = kBlack;
- const HashSet<Vertex>& neighbors = graph.Neighbors(vertex);
- for (typename HashSet<Vertex>::const_iterator it = neighbors.begin();
- it != neighbors.end();
- ++it) {
- vertex_color[*it] = kGrey;
+ const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex);
+ for (const Vertex& neighbor : neighbors) {
+ vertex_color[neighbor] = kGrey;
}
}
@@ -220,10 +209,7 @@ int StableIndependentSetOrdering(const Graph<Vertex>& graph,
// Iterate over the vertices and add all the grey vertices to the
// ordering. At this stage there should only be black or grey
// vertices in the graph.
- for (typename std::vector<Vertex>::const_iterator it = vertex_queue.begin();
- it != vertex_queue.end();
- ++it) {
- const Vertex vertex = *it;
+ for (const Vertex& vertex : vertex_queue) {
DCHECK(vertex_color[vertex] != kWhite);
if (vertex_color[vertex] != kBlack) {
ordering->push_back(vertex);
@@ -242,8 +228,8 @@ int StableIndependentSetOrdering(const Graph<Vertex>& graph,
// is what gives this data structure its efficiency.
template <typename Vertex>
Vertex FindConnectedComponent(const Vertex& vertex,
- HashMap<Vertex, Vertex>* union_find) {
- typename HashMap<Vertex, Vertex>::iterator it = union_find->find(vertex);
+ std::unordered_map<Vertex, Vertex>* union_find) {
+ auto it = union_find->find(vertex);
DCHECK(it != union_find->end());
if (it->second != vertex) {
it->second = FindConnectedComponent(it->second, union_find);
@@ -274,30 +260,24 @@ template <typename Vertex>
WeightedGraph<Vertex>*
Degree2MaximumSpanningForest(const WeightedGraph<Vertex>& graph) {
// Array of edges sorted in decreasing order of their weights.
- std::vector<std::pair<double, std::pair<Vertex, Vertex> > > weighted_edges;
+ std::vector<std::pair<double, std::pair<Vertex, Vertex>>> weighted_edges;
WeightedGraph<Vertex>* forest = new WeightedGraph<Vertex>();
// Disjoint-set to keep track of the connected components in the
// maximum spanning tree.
- HashMap<Vertex, Vertex> disjoint_set;
+ std::unordered_map<Vertex, Vertex> disjoint_set;
// Sort of the edges in the graph in decreasing order of their
// weight. Also add the vertices of the graph to the Maximum
// Spanning Tree graph and set each vertex to be its own connected
// component in the disjoint_set structure.
- const HashSet<Vertex>& vertices = graph.vertices();
- for (typename HashSet<Vertex>::const_iterator it = vertices.begin();
- it != vertices.end();
- ++it) {
- const Vertex vertex1 = *it;
+ const std::unordered_set<Vertex>& vertices = graph.vertices();
+ for (const Vertex& vertex1 : vertices) {
forest->AddVertex(vertex1, graph.VertexWeight(vertex1));
disjoint_set[vertex1] = vertex1;
- const HashSet<Vertex>& neighbors = graph.Neighbors(vertex1);
- for (typename HashSet<Vertex>::const_iterator it2 = neighbors.begin();
- it2 != neighbors.end();
- ++it2) {
- const Vertex vertex2 = *it2;
+ const std::unordered_set<Vertex>& neighbors = graph.Neighbors(vertex1);
+ for (const Vertex& vertex2 : neighbors) {
if (vertex1 >= vertex2) {
continue;
}
diff --git a/extern/ceres/internal/ceres/implicit_schur_complement.cc b/extern/ceres/internal/ceres/implicit_schur_complement.cc
index d05f03817b7..bf680d1d952 100644
--- a/extern/ceres/internal/ceres/implicit_schur_complement.cc
+++ b/extern/ceres/internal/ceres/implicit_schur_complement.cc
@@ -34,7 +34,6 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/types.h"
#include "glog/logging.h"
diff --git a/extern/ceres/internal/ceres/implicit_schur_complement.h b/extern/ceres/internal/ceres/implicit_schur_complement.h
index 5d822ebaeef..f4ddf724910 100644
--- a/extern/ceres/internal/ceres/implicit_schur_complement.h
+++ b/extern/ceres/internal/ceres/implicit_schur_complement.h
@@ -34,11 +34,11 @@
#ifndef CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
#define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_
+#include <memory>
#include "ceres/linear_operator.h"
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
@@ -113,11 +113,11 @@ class ImplicitSchurComplement : public LinearOperator {
void Init(const BlockSparseMatrix& A, const double* D, const double* b);
// y += Sx, where S is the Schur complement.
- virtual void RightMultiply(const double* x, double* y) const;
+ void RightMultiply(const double* x, double* y) const final;
// The Schur complement is a symmetric positive definite matrix,
// thus the left and right multiply operators are the same.
- virtual void LeftMultiply(const double* x, double* y) const {
+ void LeftMultiply(const double* x, double* y) const final {
RightMultiply(x, y);
}
@@ -127,8 +127,8 @@ class ImplicitSchurComplement : public LinearOperator {
// complement.
void BackSubstitute(const double* x, double* y);
- virtual int num_rows() const { return A_->num_cols_f(); }
- virtual int num_cols() const { return A_->num_cols_f(); }
+ int num_rows() const final { return A_->num_cols_f(); }
+ int num_cols() const final { return A_->num_cols_f(); }
const Vector& rhs() const { return rhs_; }
const BlockSparseMatrix* block_diagonal_EtE_inverse() const {
@@ -145,12 +145,12 @@ class ImplicitSchurComplement : public LinearOperator {
const LinearSolver::Options& options_;
- scoped_ptr<PartitionedMatrixViewBase> A_;
+ std::unique_ptr<PartitionedMatrixViewBase> A_;
const double* D_;
const double* b_;
- scoped_ptr<BlockSparseMatrix> block_diagonal_EtE_inverse_;
- scoped_ptr<BlockSparseMatrix> block_diagonal_FtF_inverse_;
+ std::unique_ptr<BlockSparseMatrix> block_diagonal_EtE_inverse_;
+ std::unique_ptr<BlockSparseMatrix> block_diagonal_FtF_inverse_;
Vector rhs_;
diff --git a/extern/ceres/internal/ceres/inner_product_computer.cc b/extern/ceres/internal/ceres/inner_product_computer.cc
new file mode 100644
index 00000000000..2bf88365d99
--- /dev/null
+++ b/extern/ceres/internal/ceres/inner_product_computer.cc
@@ -0,0 +1,330 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/inner_product_computer.h"
+
+#include <algorithm>
+#include "ceres/small_blas.h"
+
+namespace ceres {
+namespace internal {
+
+
+// Create the CompressedRowSparseMatrix matrix that will contain the
+// inner product.
+//
+// storage_type controls whether the result matrix contains the upper
+// or the lower triangular part of the product.
+//
+// num_nonzeros is the number of non-zeros in the result matrix.
+CompressedRowSparseMatrix* InnerProductComputer::CreateResultMatrix(
+ const CompressedRowSparseMatrix::StorageType storage_type,
+ const int num_nonzeros) {
+ CompressedRowSparseMatrix* matrix =
+ new CompressedRowSparseMatrix(m_.num_cols(), m_.num_cols(), num_nonzeros);
+ matrix->set_storage_type(storage_type);
+
+ const CompressedRowBlockStructure* bs = m_.block_structure();
+ const std::vector<Block>& blocks = bs->cols;
+ matrix->mutable_row_blocks()->resize(blocks.size());
+ matrix->mutable_col_blocks()->resize(blocks.size());
+ for (int i = 0; i < blocks.size(); ++i) {
+ (*(matrix->mutable_row_blocks()))[i] = blocks[i].size;
+ (*(matrix->mutable_col_blocks()))[i] = blocks[i].size;
+ }
+
+ return matrix;
+}
+
+// Given the set of product terms in the inner product, return the
+// total number of non-zeros in the result and for each row block of
+// the result matrix, compute the number of non-zeros in any one row
+// of the row block.
+int InnerProductComputer::ComputeNonzeros(
+ const std::vector<InnerProductComputer::ProductTerm>& product_terms,
+ std::vector<int>* row_nnz) {
+ const CompressedRowBlockStructure* bs = m_.block_structure();
+ const std::vector<Block>& blocks = bs->cols;
+
+ row_nnz->resize(blocks.size());
+ std::fill(row_nnz->begin(), row_nnz->end(), 0);
+
+ // First product term.
+ (*row_nnz)[product_terms[0].row] = blocks[product_terms[0].col].size;
+ int num_nonzeros =
+ blocks[product_terms[0].row].size * blocks[product_terms[0].col].size;
+
+ // Remaining product terms.
+ for (int i = 1; i < product_terms.size(); ++i) {
+ const ProductTerm& previous = product_terms[i - 1];
+ const ProductTerm& current = product_terms[i];
+
+ // Each (row, col) block counts only once.
+ // This check depends on product sorted on (row, col).
+ if (current.row != previous.row || current.col != previous.col) {
+ (*row_nnz)[current.row] += blocks[current.col].size;
+ num_nonzeros += blocks[current.row].size * blocks[current.col].size;
+ }
+ }
+
+ return num_nonzeros;
+}
+
+InnerProductComputer::InnerProductComputer(const BlockSparseMatrix& m,
+ const int start_row_block,
+ const int end_row_block)
+ : m_(m), start_row_block_(start_row_block), end_row_block_(end_row_block) {}
+
+// Compute the sparsity structure of the product m.transpose() * m
+// and create a CompressedRowSparseMatrix corresponding to it.
+//
+// Also compute the "program" vector, which for every term in the
+// block outer product provides the information for the entry in the
+// values array of the result matrix where it should be accumulated.
+//
+// Since the entries of the program are the same for rows with the
+// same sparsity structure, the program only stores the result for one
+// row per row block. The Compute function reuses this information for
+// each row in the row block.
+//
+// product_storage_type controls the form of the output matrix. It
+// can be LOWER_TRIANGULAR or UPPER_TRIANGULAR.
+InnerProductComputer* InnerProductComputer::Create(
+ const BlockSparseMatrix& m,
+ CompressedRowSparseMatrix::StorageType product_storage_type) {
+ return InnerProductComputer::Create(
+ m, 0, m.block_structure()->rows.size(), product_storage_type);
+}
+
+InnerProductComputer* InnerProductComputer::Create(
+ const BlockSparseMatrix& m,
+ const int start_row_block,
+ const int end_row_block,
+ CompressedRowSparseMatrix::StorageType product_storage_type) {
+ CHECK(product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR ||
+ product_storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+ CHECK_GT(m.num_nonzeros(), 0)
+ << "Congratulations, you found a bug in Ceres. Please report it.";
+ InnerProductComputer* inner_product_computer =
+ new InnerProductComputer(m, start_row_block, end_row_block);
+ inner_product_computer->Init(product_storage_type);
+ return inner_product_computer;
+}
+
+void InnerProductComputer::Init(
+ const CompressedRowSparseMatrix::StorageType product_storage_type) {
+ std::vector<InnerProductComputer::ProductTerm> product_terms;
+ const CompressedRowBlockStructure* bs = m_.block_structure();
+
+  // Given input matrix m in Block Sparse format
+ // (row_block, col_block)
+ // represent each block multiplication
+ // (row_block, col_block1)' X (row_block, col_block2)
+ // by its product term:
+ // (col_block1, col_block2, index)
+ for (int row_block = start_row_block_; row_block < end_row_block_;
+ ++row_block) {
+ const CompressedRow& row = bs->rows[row_block];
+ for (int c1 = 0; c1 < row.cells.size(); ++c1) {
+ const Cell& cell1 = row.cells[c1];
+ int c2_begin, c2_end;
+ if (product_storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+ c2_begin = 0;
+ c2_end = c1 + 1;
+ } else {
+ c2_begin = c1;
+ c2_end = row.cells.size();
+ }
+
+ for (int c2 = c2_begin; c2 < c2_end; ++c2) {
+ const Cell& cell2 = row.cells[c2];
+ product_terms.push_back(InnerProductComputer::ProductTerm(
+ cell1.block_id, cell2.block_id, product_terms.size()));
+ }
+ }
+ }
+
+ std::sort(product_terms.begin(), product_terms.end());
+ ComputeOffsetsAndCreateResultMatrix(product_storage_type, product_terms);
+}
+
+void InnerProductComputer::ComputeOffsetsAndCreateResultMatrix(
+ const CompressedRowSparseMatrix::StorageType product_storage_type,
+ const std::vector<InnerProductComputer::ProductTerm>& product_terms) {
+ const std::vector<Block>& col_blocks = m_.block_structure()->cols;
+
+ std::vector<int> row_block_nnz;
+ const int num_nonzeros = ComputeNonzeros(product_terms, &row_block_nnz);
+
+ result_.reset(CreateResultMatrix(product_storage_type, num_nonzeros));
+
+ // Populate the row non-zero counts in the result matrix.
+ int* crsm_rows = result_->mutable_rows();
+ crsm_rows[0] = 0;
+ for (int i = 0; i < col_blocks.size(); ++i) {
+ for (int j = 0; j < col_blocks[i].size; ++j, ++crsm_rows) {
+ *(crsm_rows + 1) = *crsm_rows + row_block_nnz[i];
+ }
+ }
+
+ // The following macro FILL_CRSM_COL_BLOCK is key to understanding
+ // how this class works.
+ //
+ // It does two things.
+ //
+ // Sets the value for the current term in the result_offsets_ array
+ // and populates the cols array of the result matrix.
+ //
+ // row_block and col_block as the names imply, refer to the row and
+ // column blocks of the current term.
+ //
+ // row_nnz is the number of nonzeros in the result_matrix at the
+ // beginning of the first row of row_block.
+ //
+ // col_nnz is the number of nonzeros in the first row of the row
+ // block that occur before the current column block, i.e. this is
+ // sum of the sizes of all the column blocks in this row block that
+ // came before this column block.
+ //
+ // Given these two numbers and the total number of nonzeros in this
+ // row (nnz_in_row), we can now populate the cols array as follows:
+ //
+ // nnz + j * nnz_in_row is the beginning of the j^th row.
+ //
+ // nnz + j * nnz_in_row + col_nnz is the beginning of the column
+ // block in the j^th row.
+ //
+ // nnz + j * nnz_in_row + col_nnz + k is then the j^th row and the
+ // k^th column of the product block, whose value is
+ //
+ // col_blocks[col_block].position + k, which is the column number of
+ // the k^th column of the current column block.
+#define FILL_CRSM_COL_BLOCK \
+ const int row_block = current->row; \
+ const int col_block = current->col; \
+ const int nnz_in_row = row_block_nnz[row_block]; \
+ int* crsm_cols = result_->mutable_cols(); \
+ result_offsets_[current->index] = nnz + col_nnz; \
+ for (int j = 0; j < col_blocks[row_block].size; ++j) { \
+ for (int k = 0; k < col_blocks[col_block].size; ++k) { \
+ crsm_cols[nnz + j * nnz_in_row + col_nnz + k] = \
+ col_blocks[col_block].position + k; \
+ } \
+ }
+
+ result_offsets_.resize(product_terms.size());
+ int col_nnz = 0;
+ int nnz = 0;
+
+ // Process the first term.
+ const InnerProductComputer::ProductTerm* current = &product_terms[0];
+ FILL_CRSM_COL_BLOCK;
+
+ // Process the rest of the terms.
+ for (int i = 1; i < product_terms.size(); ++i) {
+ current = &product_terms[i];
+ const InnerProductComputer::ProductTerm* previous = &product_terms[i - 1];
+
+ // If the current term is the same as the previous term, then it
+ // stores its product at the same location as the previous term.
+ if (previous->row == current->row && previous->col == current->col) {
+ result_offsets_[current->index] = result_offsets_[previous->index];
+ continue;
+ }
+
+ if (previous->row == current->row) {
+ // if the current and previous terms are in the same row block,
+ // then they differ in the column block, in which case advance
+      // col_nnz by the column size of the previous term.
+ col_nnz += col_blocks[previous->col].size;
+ } else {
+      // If we have moved to a new row-block, then col_nnz is zero,
+ // and nnz is set to the beginning of the row block.
+ col_nnz = 0;
+ nnz += row_block_nnz[previous->row] * col_blocks[previous->row].size;
+ }
+
+ FILL_CRSM_COL_BLOCK;
+ }
+}
+
+// Use the result_offsets_ array to numerically compute the product
+// m' * m and store it in result_.
+//
+// TODO(sameeragarwal): Multithreading support.
+void InnerProductComputer::Compute() {
+ const double* m_values = m_.values();
+ const CompressedRowBlockStructure* bs = m_.block_structure();
+
+ const CompressedRowSparseMatrix::StorageType storage_type =
+ result_->storage_type();
+ result_->SetZero();
+ double* values = result_->mutable_values();
+ const int* rows = result_->rows();
+ int cursor = 0;
+
+ // Iterate row blocks.
+ for (int r = start_row_block_; r < end_row_block_; ++r) {
+ const CompressedRow& m_row = bs->rows[r];
+ for (int c1 = 0; c1 < m_row.cells.size(); ++c1) {
+ const Cell& cell1 = m_row.cells[c1];
+ const int c1_size = bs->cols[cell1.block_id].size;
+ const int row_nnz = rows[bs->cols[cell1.block_id].position + 1] -
+ rows[bs->cols[cell1.block_id].position];
+
+ int c2_begin, c2_end;
+ if (storage_type == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+ c2_begin = 0;
+ c2_end = c1 + 1;
+ } else {
+ c2_begin = c1;
+ c2_end = m_row.cells.size();
+ }
+
+ for (int c2 = c2_begin; c2 < c2_end; ++c2, ++cursor) {
+ const Cell& cell2 = m_row.cells[c2];
+ const int c2_size = bs->cols[cell2.block_id].size;
+ MatrixTransposeMatrixMultiply<Eigen::Dynamic, Eigen::Dynamic,
+ Eigen::Dynamic, Eigen::Dynamic, 1>(
+ m_values + cell1.position,
+ m_row.block.size, c1_size,
+ m_values + cell2.position,
+ m_row.block.size, c2_size,
+ values + result_offsets_[cursor],
+ 0, 0, c1_size, row_nnz);
+ }
+ }
+ }
+
+ CHECK_EQ(cursor, result_offsets_.size());
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/inner_product_computer.h b/extern/ceres/internal/ceres/inner_product_computer.h
new file mode 100644
index 00000000000..73073f8ad06
--- /dev/null
+++ b/extern/ceres/internal/ceres/inner_product_computer.h
@@ -0,0 +1,157 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
+#define CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
+
+#include <memory>
+#include <vector>
+
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+// This class is used to repeatedly compute the inner product
+//
+// result = m' * m
+//
+// where the sparsity structure of m remains constant across calls.
+//
+// Upon creation, the class computes and caches information needed to
+// compute the result, and then uses it to efficiently compute the product
+// every time InnerProductComputer::Compute is called.
+//
+// See sparse_normal_cholesky_solver.cc for example usage.
+//
+// Note that the result matrix is a block upper or lower-triangular
+// matrix, i.e., it will contain entries in the upper or lower
+// triangular part of the matrix corresponding to the blocks that occur
+// along its diagonal.
+//
+// This is not a problem as sparse linear algebra libraries can ignore
+// these entries with ease and the space used is minimal/linear in the
+// size of the matrices.
+class InnerProductComputer {
+ public:
+ // Factory
+ //
+ // m is the input matrix
+ //
+ // Since m' * m is a symmetric matrix, we only compute half of the
+ // matrix and the value of storage_type which must be
+ // UPPER_TRIANGULAR or LOWER_TRIANGULAR determines which half is
+ // computed.
+ //
+ // The user must ensure that the matrix m is valid for the life time
+ // of this object.
+ static InnerProductComputer* Create(
+ const BlockSparseMatrix& m,
+ CompressedRowSparseMatrix::StorageType storage_type);
+
+ // This factory method allows the user control over range of row
+ // blocks of m that should be used to compute the inner product.
+ //
+ // a = m(start_row_block : end_row_block, :);
+ // result = a' * a;
+ static InnerProductComputer* Create(
+ const BlockSparseMatrix& m,
+ int start_row_block,
+ int end_row_block,
+ CompressedRowSparseMatrix::StorageType storage_type);
+
+ // Update result_ to be numerically equal to m' * m.
+ void Compute();
+
+ // Accessors for the result containing the inner product.
+ //
+ // Compute must be called before accessing this result for
+ // the first time.
+ const CompressedRowSparseMatrix& result() const { return *result_; }
+ CompressedRowSparseMatrix* mutable_result() const { return result_.get(); }
+
+ private:
+ // A ProductTerm is a term in the block inner product of a matrix
+ // with itself.
+ struct ProductTerm {
+ ProductTerm(const int row, const int col, const int index)
+ : row(row), col(col), index(index) {}
+
+ bool operator<(const ProductTerm& right) const {
+ if (row == right.row) {
+ if (col == right.col) {
+ return index < right.index;
+ }
+ return col < right.col;
+ }
+ return row < right.row;
+ }
+
+ int row;
+ int col;
+ int index;
+ };
+
+ InnerProductComputer(const BlockSparseMatrix& m,
+ int start_row_block,
+ int end_row_block);
+
+ void Init(CompressedRowSparseMatrix::StorageType storage_type);
+
+ CompressedRowSparseMatrix* CreateResultMatrix(
+ const CompressedRowSparseMatrix::StorageType storage_type,
+ int num_nonzeros);
+
+ int ComputeNonzeros(const std::vector<ProductTerm>& product_terms,
+ std::vector<int>* row_block_nnz);
+
+ void ComputeOffsetsAndCreateResultMatrix(
+ const CompressedRowSparseMatrix::StorageType storage_type,
+ const std::vector<ProductTerm>& product_terms);
+
+ const BlockSparseMatrix& m_;
+ const int start_row_block_;
+ const int end_row_block_;
+ std::unique_ptr<CompressedRowSparseMatrix> result_;
+
+ // For each term in the inner product, result_offsets_ contains the
+ // location in the values array of the result_ matrix where it
+ // should be stored.
+ //
+ // This is the principal look up table that allows this class to
+ // compute the inner product fast.
+ std::vector<int> result_offsets_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_INNER_PRODUCT_COMPUTER_H_
diff --git a/extern/ceres/internal/ceres/invert_psd_matrix.h b/extern/ceres/internal/ceres/invert_psd_matrix.h
new file mode 100644
index 00000000000..21d301a77ee
--- /dev/null
+++ b/extern/ceres/internal/ceres/invert_psd_matrix.h
@@ -0,0 +1,79 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+#define CERES_INTERNAL_INVERT_PSD_MATRIX_H_
+
+#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
+#include "Eigen/Dense"
+
+namespace ceres {
+namespace internal {
+
+// Helper routine to compute the inverse or pseudo-inverse of a
+// symmetric positive semi-definite matrix.
+//
+// assume_full_rank controls whether a Cholesky factorization or a
+// Singular Value Decomposition is used to compute the inverse and the
+// pseudo-inverse respectively.
+//
+// The template parameter kSize can either be Eigen::Dynamic or a
+// positive integer equal to the number of rows of m.
+template <int kSize>
+typename EigenTypes<kSize, kSize>::Matrix InvertPSDMatrix(
+ const bool assume_full_rank,
+ const typename EigenTypes<kSize, kSize>::Matrix& m) {
+ using MType = typename EigenTypes<kSize, kSize>::Matrix;
+ const int size = m.rows();
+
+ // If the matrix can be assumed to be full rank, then if it is small
+ // (< 5) and fixed size, use Eigen's optimized inverse()
+ // implementation.
+ //
+ // https://eigen.tuxfamily.org/dox/group__TutorialLinearAlgebra.html#title3
+ if (assume_full_rank) {
+ if (kSize > 0 && kSize < 5) {
+ return m.inverse();
+ }
+ return m.template selfadjointView<Eigen::Upper>().llt().solve(
+ MType::Identity(size, size));
+ }
+
+ // For a thin SVD the number of columns of the matrix needs to be dynamic.
+ using SVDMType = typename EigenTypes<kSize, Eigen::Dynamic>::Matrix;
+ Eigen::JacobiSVD<SVDMType> svd(m, Eigen::ComputeThinU | Eigen::ComputeThinV);
+ return svd.solve(MType::Identity(size, size));
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_INVERT_PSD_MATRIX_H_
diff --git a/extern/ceres/internal/ceres/iterative_refiner.cc b/extern/ceres/internal/ceres/iterative_refiner.cc
new file mode 100644
index 00000000000..fb0e45bdcdd
--- /dev/null
+++ b/extern/ceres/internal/ceres/iterative_refiner.cc
@@ -0,0 +1,74 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include <string>
+#include "ceres/iterative_refiner.h"
+
+#include "Eigen/Core"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/sparse_matrix.h"
+
+namespace ceres {
+namespace internal {
+
+IterativeRefiner::IterativeRefiner(const int max_num_iterations)
+ : max_num_iterations_(max_num_iterations) {}
+
+IterativeRefiner::~IterativeRefiner() {}
+
+void IterativeRefiner::Allocate(int num_cols) {
+ residual_.resize(num_cols);
+ correction_.resize(num_cols);
+ lhs_x_solution_.resize(num_cols);
+}
+
+void IterativeRefiner::Refine(const SparseMatrix& lhs,
+ const double* rhs_ptr,
+ SparseCholesky* sparse_cholesky,
+ double* solution_ptr) {
+ const int num_cols = lhs.num_cols();
+ Allocate(num_cols);
+ ConstVectorRef rhs(rhs_ptr, num_cols);
+ VectorRef solution(solution_ptr, num_cols);
+ for (int i = 0; i < max_num_iterations_; ++i) {
+ // residual = rhs - lhs * solution
+ lhs_x_solution_.setZero();
+ lhs.RightMultiply(solution_ptr, lhs_x_solution_.data());
+ residual_ = rhs - lhs_x_solution_;
+ // solution += lhs^-1 residual
+ std::string ignored_message;
+ sparse_cholesky->Solve(
+ residual_.data(), correction_.data(), &ignored_message);
+ solution += correction_;
+ }
+};
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/iterative_refiner.h b/extern/ceres/internal/ceres/iterative_refiner.h
new file mode 100644
index 00000000000..f969935ae46
--- /dev/null
+++ b/extern/ceres/internal/ceres/iterative_refiner.h
@@ -0,0 +1,93 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_ITERATIVE_REFINER_H_
+#define CERES_INTERNAL_ITERATIVE_REFINER_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+class SparseCholesky;
+class SparseMatrix;
+
+// Iterative refinement
+// (https://en.wikipedia.org/wiki/Iterative_refinement) is the process
+// of improving the solution to a linear system, by using the
+// following iteration.
+//
+// r_i = b - Ax_i
+// Ad_i = r_i
+// x_{i+1} = x_i + d_i
+//
+// IterativeRefiner implements this process for Symmetric Positive
+// Definite linear systems.
+//
+// The above iterative loop is run until max_num_iterations is reached.
+class IterativeRefiner {
+ public:
+ // max_num_iterations is the number of refinement iterations to
+ // perform.
+ IterativeRefiner(int max_num_iterations);
+
+ // Needed for mocking.
+ virtual ~IterativeRefiner();
+
+ // Given an initial estimate of the solution of lhs * x = rhs, use
+ // max_num_iterations rounds of iterative refinement to improve it.
+ //
+ // sparse_cholesky is assumed to contain an already computed
+ // factorization (or approximation thereof) of lhs.
+ //
+ // solution is expected to contain an approximation to the solution
+ // to lhs * x = rhs. It can be zero.
+ //
+ // This method is virtual to facilitate mocking.
+ virtual void Refine(const SparseMatrix& lhs,
+ const double* rhs,
+ SparseCholesky* sparse_cholesky,
+ double* solution);
+
+ private:
+ void Allocate(int num_cols);
+
+ int max_num_iterations_;
+ Vector residual_;
+ Vector correction_;
+ Vector lhs_x_solution_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_ITERATIVE_REFINER_H_
diff --git a/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc b/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
index 9d4e30d69d2..6076c38c71d 100644
--- a/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
+++ b/extern/ceres/internal/ceres/iterative_schur_complement_solver.cc
@@ -41,7 +41,6 @@
#include "ceres/detect_structure.h"
#include "ceres/implicit_schur_complement.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/preconditioner.h"
#include "ceres/schur_jacobi_preconditioner.h"
@@ -59,8 +58,7 @@ IterativeSchurComplementSolver::IterativeSchurComplementSolver(
: options_(options) {
}
-IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {
-}
+IterativeSchurComplementSolver::~IterativeSchurComplementSolver() {}
LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
BlockSparseMatrix* A,
@@ -69,7 +67,7 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
double* x) {
EventLogger event_logger("IterativeSchurComplementSolver::Solve");
- CHECK_NOTNULL(A->block_structure());
+ CHECK(A->block_structure() != nullptr);
const int num_eliminate_blocks = options_.elimination_groups[0];
// Initialize a ImplicitSchurComplement object.
if (schur_complement_ == NULL) {
@@ -86,29 +84,61 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
A->block_structure()->cols.size() - num_eliminate_blocks;
if (num_schur_complement_blocks == 0) {
VLOG(2) << "No parameter blocks left in the schur complement.";
- LinearSolver::Summary cg_summary;
- cg_summary.num_iterations = 0;
- cg_summary.termination_type = LINEAR_SOLVER_SUCCESS;
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_SUCCESS;
schur_complement_->BackSubstitute(NULL, x);
- return cg_summary;
+ return summary;
}
// Initialize the solution to the Schur complement system to zero.
reduced_linear_system_solution_.resize(schur_complement_->num_rows());
reduced_linear_system_solution_.setZero();
- // Instantiate a conjugate gradient solver that runs on the Schur
- // complement matrix with the block diagonal of the matrix F'F as
- // the preconditioner.
LinearSolver::Options cg_options;
cg_options.min_num_iterations = options_.min_num_iterations;
cg_options.max_num_iterations = options_.max_num_iterations;
ConjugateGradientsSolver cg_solver(cg_options);
- LinearSolver::PerSolveOptions cg_per_solve_options;
+ LinearSolver::PerSolveOptions cg_per_solve_options;
cg_per_solve_options.r_tolerance = per_solve_options.r_tolerance;
cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
+ CreatePreconditioner(A);
+ if (preconditioner_.get() != NULL) {
+ if (!preconditioner_->Update(*A, per_solve_options.D)) {
+ LinearSolver::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = LINEAR_SOLVER_FAILURE;
+ summary.message = "Preconditioner update failed.";
+ return summary;
+ }
+
+ cg_per_solve_options.preconditioner = preconditioner_.get();
+ }
+
+ event_logger.AddEvent("Setup");
+ LinearSolver::Summary summary =
+ cg_solver.Solve(schur_complement_.get(),
+ schur_complement_->rhs().data(),
+ cg_per_solve_options,
+ reduced_linear_system_solution_.data());
+ if (summary.termination_type != LINEAR_SOLVER_FAILURE &&
+ summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
+ schur_complement_->BackSubstitute(reduced_linear_system_solution_.data(),
+ x);
+ }
+ event_logger.AddEvent("Solve");
+ return summary;
+}
+
+void IterativeSchurComplementSolver::CreatePreconditioner(
+ BlockSparseMatrix* A) {
+ if (options_.preconditioner_type == IDENTITY ||
+ preconditioner_.get() != NULL) {
+ return;
+ }
+
Preconditioner::Options preconditioner_options;
preconditioner_options.type = options_.preconditioner_type;
preconditioner_options.visibility_clustering_type =
@@ -120,63 +150,27 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
preconditioner_options.e_block_size = options_.e_block_size;
preconditioner_options.f_block_size = options_.f_block_size;
preconditioner_options.elimination_groups = options_.elimination_groups;
+ CHECK(options_.context != NULL);
+ preconditioner_options.context = options_.context;
switch (options_.preconditioner_type) {
- case IDENTITY:
- break;
case JACOBI:
- preconditioner_.reset(
- new SparseMatrixPreconditionerWrapper(
- schur_complement_->block_diagonal_FtF_inverse()));
+ preconditioner_.reset(new SparseMatrixPreconditionerWrapper(
+ schur_complement_->block_diagonal_FtF_inverse()));
break;
case SCHUR_JACOBI:
- if (preconditioner_.get() == NULL) {
- preconditioner_.reset(
- new SchurJacobiPreconditioner(*A->block_structure(),
- preconditioner_options));
- }
+ preconditioner_.reset(new SchurJacobiPreconditioner(
+ *A->block_structure(), preconditioner_options));
break;
case CLUSTER_JACOBI:
case CLUSTER_TRIDIAGONAL:
- if (preconditioner_.get() == NULL) {
- preconditioner_.reset(
- new VisibilityBasedPreconditioner(*A->block_structure(),
- preconditioner_options));
- }
+ preconditioner_.reset(new VisibilityBasedPreconditioner(
+ *A->block_structure(), preconditioner_options));
break;
default:
LOG(FATAL) << "Unknown Preconditioner Type";
}
-
- bool preconditioner_update_was_successful = true;
- if (preconditioner_.get() != NULL) {
- preconditioner_update_was_successful =
- preconditioner_->Update(*A, per_solve_options.D);
- cg_per_solve_options.preconditioner = preconditioner_.get();
- }
- event_logger.AddEvent("Setup");
-
- LinearSolver::Summary cg_summary;
- cg_summary.num_iterations = 0;
- cg_summary.termination_type = LINEAR_SOLVER_FAILURE;
-
- // TODO(sameeragarwal): Refactor preconditioners to return a more
- // sane message.
- cg_summary.message = "Preconditioner update failed.";
- if (preconditioner_update_was_successful) {
- cg_summary = cg_solver.Solve(schur_complement_.get(),
- schur_complement_->rhs().data(),
- cg_per_solve_options,
- reduced_linear_system_solution_.data());
- if (cg_summary.termination_type != LINEAR_SOLVER_FAILURE &&
- cg_summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) {
- schur_complement_->BackSubstitute(
- reduced_linear_system_solution_.data(), x);
- }
- }
- event_logger.AddEvent("Solve");
- return cg_summary;
-}
+};
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/iterative_schur_complement_solver.h b/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
index e90d310de07..9aed94fcb1a 100644
--- a/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
+++ b/extern/ceres/internal/ceres/iterative_schur_complement_solver.h
@@ -31,9 +31,9 @@
#ifndef CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
#define CERES_INTERNAL_ITERATIVE_SCHUR_COMPLEMENT_SOLVER_H_
+#include <memory>
#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
@@ -70,20 +70,24 @@ class Preconditioner;
class IterativeSchurComplementSolver : public BlockSparseMatrixSolver {
public:
explicit IterativeSchurComplementSolver(const LinearSolver::Options& options);
+ IterativeSchurComplementSolver(const IterativeSchurComplementSolver&) = delete;
+ void operator=(const IterativeSchurComplementSolver&) = delete;
+
virtual ~IterativeSchurComplementSolver();
private:
- virtual LinearSolver::Summary SolveImpl(
+ LinearSolver::Summary SolveImpl(
BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& options,
- double* x);
+ double* x) final;
+
+ void CreatePreconditioner(BlockSparseMatrix* A);
LinearSolver::Options options_;
- scoped_ptr<internal::ImplicitSchurComplement> schur_complement_;
- scoped_ptr<Preconditioner> preconditioner_;
+ std::unique_ptr<internal::ImplicitSchurComplement> schur_complement_;
+ std::unique_ptr<Preconditioner> preconditioner_;
Vector reduced_linear_system_solution_;
- CERES_DISALLOW_COPY_AND_ASSIGN(IterativeSchurComplementSolver);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/lapack.cc b/extern/ceres/internal/ceres/lapack.cc
index 6fc23f4e658..37efbcd27d1 100644
--- a/extern/ceres/internal/ceres/lapack.cc
+++ b/extern/ceres/internal/ceres/lapack.cc
@@ -34,6 +34,7 @@
#include "ceres/linear_solver.h"
#include "glog/logging.h"
+#ifndef CERES_NO_LAPACK
// C interface to the LAPACK Cholesky factorization and triangular solve.
extern "C" void dpotrf_(char* uplo,
int* n,
@@ -61,7 +62,7 @@ extern "C" void dgels_(char* uplo,
double* work,
int* lwork,
int* info);
-
+#endif
namespace ceres {
namespace internal {
diff --git a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
index e9833805ef5..9eec631e6dd 100644
--- a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
+++ b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.cc
@@ -30,7 +30,9 @@
#include "ceres/levenberg_marquardt_strategy.h"
+#include <algorithm>
#include <cmath>
+
#include "Eigen/Core"
#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
@@ -53,7 +55,7 @@ LevenbergMarquardtStrategy::LevenbergMarquardtStrategy(
max_diagonal_(options.max_lm_diagonal),
decrease_factor_(2.0),
reuse_diagonal_(false) {
- CHECK_NOTNULL(linear_solver_);
+ CHECK(linear_solver_ != nullptr);
CHECK_GT(min_diagonal_, 0.0);
CHECK_LE(min_diagonal_, max_diagonal_);
CHECK_GT(max_radius_, 0.0);
@@ -67,9 +69,9 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
SparseMatrix* jacobian,
const double* residuals,
double* step) {
- CHECK_NOTNULL(jacobian);
- CHECK_NOTNULL(residuals);
- CHECK_NOTNULL(step);
+ CHECK(jacobian != nullptr);
+ CHECK(residuals != nullptr);
+ CHECK(step != nullptr);
const int num_parameters = jacobian->num_cols();
if (!reuse_diagonal_) {
@@ -98,7 +100,7 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
// Invalidate the output array lm_step, so that we can detect if
// the linear solver generated numerical garbage. This is known
// to happen for the DENSE_QR and then DENSE_SCHUR solver when
- // the Jacobin is severly rank deficient and mu is too small.
+ // the Jacobian is severely rank deficient and mu is too small.
InvalidateArray(num_parameters, step);
// Instead of solving Jx = -r, solve Jy = r.
diff --git a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
index c87a016c8f4..8fb37f32959 100644
--- a/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
+++ b/extern/ceres/internal/ceres/levenberg_marquardt_strategy.h
@@ -49,14 +49,14 @@ class LevenbergMarquardtStrategy : public TrustRegionStrategy {
virtual ~LevenbergMarquardtStrategy();
// TrustRegionStrategy interface
- virtual TrustRegionStrategy::Summary ComputeStep(
+ TrustRegionStrategy::Summary ComputeStep(
const TrustRegionStrategy::PerSolveOptions& per_solve_options,
SparseMatrix* jacobian,
const double* residuals,
- double* step);
- virtual void StepAccepted(double step_quality);
- virtual void StepRejected(double step_quality);
- virtual void StepIsInvalid() {
+ double* step) final;
+ void StepAccepted(double step_quality) final;
+ void StepRejected(double step_quality) final;
+ void StepIsInvalid() final {
// Treat the current step as a rejected step with no increase in
// solution quality. Since rejected steps lead to decrease in the
// size of the trust region, the next time ComputeStep is called,
@@ -64,7 +64,7 @@ class LevenbergMarquardtStrategy : public TrustRegionStrategy {
StepRejected(0.0);
}
- virtual double Radius() const;
+ double Radius() const final;
private:
LinearSolver* linear_solver_;
diff --git a/extern/ceres/internal/ceres/line_search.cc b/extern/ceres/internal/ceres/line_search.cc
index 9cdcb7b77e5..352c64f5057 100644
--- a/extern/ceres/internal/ceres/line_search.cc
+++ b/extern/ceres/internal/ceres/line_search.cc
@@ -30,17 +30,19 @@
#include "ceres/line_search.h"
+#include <algorithm>
+#include <cmath>
#include <iomanip>
#include <iostream> // NOLINT
-#include "glog/logging.h"
#include "ceres/evaluator.h"
+#include "ceres/function_sample.h"
#include "ceres/internal/eigen.h"
-#include "ceres/fpclassify.h"
#include "ceres/map_util.h"
#include "ceres/polynomial.h"
#include "ceres/stringprintf.h"
#include "ceres/wall_time.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -53,30 +55,8 @@ using std::vector;
namespace {
// Precision used for floating point values in error message output.
const int kErrorMessageNumericPrecision = 8;
-
-FunctionSample ValueSample(const double x, const double value) {
- FunctionSample sample;
- sample.x = x;
- sample.value = value;
- sample.value_is_valid = true;
- return sample;
-}
-
-FunctionSample ValueAndGradientSample(const double x,
- const double value,
- const double gradient) {
- FunctionSample sample;
- sample.x = x;
- sample.value = value;
- sample.gradient = gradient;
- sample.value_is_valid = true;
- sample.gradient_is_valid = true;
- return sample;
-}
-
} // namespace
-
ostream& operator<<(ostream &os, const FunctionSample& sample);
// Convenience stream operator for pushing FunctionSamples into log messages.
@@ -112,9 +92,7 @@ LineSearchFunction::LineSearchFunction(Evaluator* evaluator)
: evaluator_(evaluator),
position_(evaluator->NumParameters()),
direction_(evaluator->NumEffectiveParameters()),
- evaluation_point_(evaluator->NumParameters()),
scaled_direction_(evaluator->NumEffectiveParameters()),
- gradient_(evaluator->NumEffectiveParameters()),
initial_evaluator_residual_time_in_seconds(0.0),
initial_evaluator_jacobian_time_in_seconds(0.0) {}
@@ -124,27 +102,48 @@ void LineSearchFunction::Init(const Vector& position,
direction_ = direction;
}
-bool LineSearchFunction::Evaluate(double x, double* f, double* g) {
- scaled_direction_ = x * direction_;
+void LineSearchFunction::Evaluate(const double x,
+ const bool evaluate_gradient,
+ FunctionSample* output) {
+ output->x = x;
+ output->vector_x_is_valid = false;
+ output->value_is_valid = false;
+ output->gradient_is_valid = false;
+ output->vector_gradient_is_valid = false;
+
+ scaled_direction_ = output->x * direction_;
+ output->vector_x.resize(position_.rows(), 1);
if (!evaluator_->Plus(position_.data(),
scaled_direction_.data(),
- evaluation_point_.data())) {
- return false;
+ output->vector_x.data())) {
+ return;
}
+ output->vector_x_is_valid = true;
- if (g == NULL) {
- return (evaluator_->Evaluate(evaluation_point_.data(),
- f, NULL, NULL, NULL) &&
- IsFinite(*f));
+ double* gradient = NULL;
+ if (evaluate_gradient) {
+ output->vector_gradient.resize(direction_.rows(), 1);
+ gradient = output->vector_gradient.data();
}
+ const bool eval_status = evaluator_->Evaluate(
+ output->vector_x.data(), &(output->value), NULL, gradient, NULL);
- if (!evaluator_->Evaluate(evaluation_point_.data(),
- f, NULL, gradient_.data(), NULL)) {
- return false;
+ if (!eval_status || !std::isfinite(output->value)) {
+ return;
+ }
+
+ output->value_is_valid = true;
+ if (!evaluate_gradient) {
+ return;
+ }
+
+ output->gradient = direction_.dot(output->vector_gradient);
+ if (!std::isfinite(output->gradient)) {
+ return;
}
- *g = direction_.dot(gradient_);
- return IsFinite(*f) && IsFinite(*g);
+ output->gradient_is_valid = true;
+ output->vector_gradient_is_valid = true;
}
double LineSearchFunction::DirectionInfinityNorm() const {
@@ -152,21 +151,28 @@ double LineSearchFunction::DirectionInfinityNorm() const {
}
void LineSearchFunction::ResetTimeStatistics() {
- const map<string, double> evaluator_time_statistics =
- evaluator_->TimeStatistics();
+ const map<string, CallStatistics> evaluator_statistics =
+ evaluator_->Statistics();
+
initial_evaluator_residual_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0);
+ FindWithDefault(
+ evaluator_statistics, "Evaluator::Residual", CallStatistics())
+ .time;
initial_evaluator_jacobian_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0);
+ FindWithDefault(
+ evaluator_statistics, "Evaluator::Jacobian", CallStatistics())
+ .time;
}
void LineSearchFunction::TimeStatistics(
double* cost_evaluation_time_in_seconds,
double* gradient_evaluation_time_in_seconds) const {
- const map<string, double> evaluator_time_statistics =
- evaluator_->TimeStatistics();
+ const map<string, CallStatistics> evaluator_time_statistics =
+ evaluator_->Statistics();
*cost_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0) -
+ FindWithDefault(
+ evaluator_time_statistics, "Evaluator::Residual", CallStatistics())
+ .time -
initial_evaluator_residual_time_in_seconds;
// Strictly speaking this will slightly underestimate the time spent
// evaluating the gradient of the line search univariate cost function as it
@@ -175,7 +181,9 @@ void LineSearchFunction::TimeStatistics(
// allows direct subtraction of the timing information from the totals for
// the evaluator returned in the solver summary.
*gradient_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0) -
+ FindWithDefault(
+ evaluator_time_statistics, "Evaluator::Jacobian", CallStatistics())
+ .time -
initial_evaluator_jacobian_time_in_seconds;
}
@@ -184,12 +192,12 @@ void LineSearch::Search(double step_size_estimate,
double initial_gradient,
Summary* summary) const {
const double start_time = WallTimeInSeconds();
- *CHECK_NOTNULL(summary) = LineSearch::Summary();
+ CHECK(summary != nullptr);
+ *summary = LineSearch::Summary();
summary->cost_evaluation_time_in_seconds = 0.0;
summary->gradient_evaluation_time_in_seconds = 0.0;
summary->polynomial_minimization_time_in_seconds = 0.0;
-
options().function->ResetTimeStatistics();
this->DoSearch(step_size_estimate, initial_cost, initial_gradient, summary);
options().function->
@@ -243,12 +251,12 @@ double LineSearch::InterpolatingPolynomialMinimizingStepSize(
if (interpolation_type == QUADRATIC) {
// Two point interpolation using function values and the
// gradient at the lower bound.
- samples.push_back(ValueSample(current.x, current.value));
+ samples.push_back(FunctionSample(current.x, current.value));
if (previous.value_is_valid) {
// Three point interpolation, using function values and the
// gradient at the lower bound.
- samples.push_back(ValueSample(previous.x, previous.value));
+ samples.push_back(FunctionSample(previous.x, previous.value));
}
} else if (interpolation_type == CUBIC) {
// Two point interpolation using the function values and the gradients.
@@ -286,34 +294,26 @@ void ArmijoLineSearch::DoSearch(const double step_size_estimate,
// Note initial_cost & initial_gradient are evaluated at step_size = 0,
// not step_size_estimate, which is our starting guess.
- const FunctionSample initial_position =
- ValueAndGradientSample(0.0, initial_cost, initial_gradient);
+ FunctionSample initial_position(0.0, initial_cost, initial_gradient);
+ initial_position.vector_x = function->position();
+ initial_position.vector_x_is_valid = true;
- FunctionSample previous = ValueAndGradientSample(0.0, 0.0, 0.0);
- previous.value_is_valid = false;
-
- FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
- current.value_is_valid = false;
+ const double descent_direction_max_norm = function->DirectionInfinityNorm();
+ FunctionSample previous;
+ FunctionSample current;
// As the Armijo line search algorithm always uses the initial point, for
// which both the function value and derivative are known, when fitting a
// minimizing polynomial, we can fit up to a quadratic without requiring the
// gradient at the current query point.
- const bool interpolation_uses_gradient_at_current_sample =
- options().interpolation_type == CUBIC;
- const double descent_direction_max_norm = function->DirectionInfinityNorm();
+ const bool kEvaluateGradient = options().interpolation_type == CUBIC;
++summary->num_function_evaluations;
- if (interpolation_uses_gradient_at_current_sample) {
+ if (kEvaluateGradient) {
++summary->num_gradient_evaluations;
}
- current.value_is_valid =
- function->Evaluate(current.x,
- &current.value,
- interpolation_uses_gradient_at_current_sample
- ? &current.gradient : NULL);
- current.gradient_is_valid =
- interpolation_uses_gradient_at_current_sample && current.value_is_valid;
+
+ function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
while (!current.value_is_valid ||
current.value > (initial_cost
+ options().sufficient_decrease
@@ -354,22 +354,16 @@ void ArmijoLineSearch::DoSearch(const double step_size_estimate,
}
previous = current;
- current.x = step_size;
++summary->num_function_evaluations;
- if (interpolation_uses_gradient_at_current_sample) {
+ if (kEvaluateGradient) {
++summary->num_gradient_evaluations;
}
- current.value_is_valid =
- function->Evaluate(current.x,
- &current.value,
- interpolation_uses_gradient_at_current_sample
- ? &current.gradient : NULL);
- current.gradient_is_valid =
- interpolation_uses_gradient_at_current_sample && current.value_is_valid;
+
+ function->Evaluate(step_size, kEvaluateGradient, &current);
}
- summary->optimal_step_size = current.x;
+ summary->optimal_point = current;
summary->success = true;
}
@@ -391,9 +385,9 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
// Note initial_cost & initial_gradient are evaluated at step_size = 0,
// not step_size_estimate, which is our starting guess.
- const FunctionSample initial_position =
- ValueAndGradientSample(0.0, initial_cost, initial_gradient);
-
+ FunctionSample initial_position(0.0, initial_cost, initial_gradient);
+ initial_position.vector_x = options().function->position();
+ initial_position.vector_x_is_valid = true;
bool do_zoom_search = false;
// Important: The high/low in bracket_high & bracket_low refer to their
// _function_ values, not their step sizes i.e. it is _not_ required that
@@ -435,7 +429,7 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
// produce a valid point when ArmijoLineSearch would succeed, we return the
// point with the lowest cost found thus far which satsifies the Armijo
// condition (but not the Wolfe conditions).
- summary->optimal_step_size = bracket_low.x;
+ summary->optimal_point = bracket_low;
summary->success = true;
return;
}
@@ -481,11 +475,13 @@ void WolfeLineSearch::DoSearch(const double step_size_estimate,
// satisfies the strong Wolfe curvature condition, that we return the point
// amongst those found thus far, which minimizes f() and satisfies the Armijo
// condition.
- solution =
- solution.value_is_valid && solution.value <= bracket_low.value
- ? solution : bracket_low;
- summary->optimal_step_size = solution.x;
+ if (!solution.value_is_valid || solution.value > bracket_low.value) {
+ summary->optimal_point = bracket_low;
+ } else {
+ summary->optimal_point = solution;
+ }
+
summary->success = true;
}
@@ -515,8 +511,7 @@ bool WolfeLineSearch::BracketingPhase(
LineSearchFunction* function = options().function;
FunctionSample previous = initial_position;
- FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0);
- current.value_is_valid = false;
+ FunctionSample current;
const double descent_direction_max_norm =
function->DirectionInfinityNorm();
@@ -535,12 +530,8 @@ bool WolfeLineSearch::BracketingPhase(
// issues).
++summary->num_function_evaluations;
++summary->num_gradient_evaluations;
- current.value_is_valid =
- function->Evaluate(current.x,
- &current.value,
- &current.gradient);
- current.gradient_is_valid = current.value_is_valid;
-
+ const bool kEvaluateGradient = true;
+ function->Evaluate(step_size_estimate, kEvaluateGradient, &current);
while (true) {
++summary->num_iterations;
@@ -637,7 +628,18 @@ bool WolfeLineSearch::BracketingPhase(
// being a boundary of a bracket.
// If f(current) is valid, (but meets no criteria) expand the search by
- // increasing the step size.
+ // increasing the step size. If f(current) is invalid, contract the step
+ // size.
+ //
+ // In Nocedal & Wright [1] (p60), the step-size can only increase in the
+ // bracketing phase: step_size_{k+1} \in [step_size_k, step_size_k * factor].
+ // However this does not account for the function returning invalid values
+ // which we support, in which case we need to contract the step size whilst
+ // ensuring that we do not invert the bracket, i.e, we require that:
+ // step_size_{k-1} <= step_size_{k+1} < step_size_k.
+ const double min_step_size =
+ current.value_is_valid
+ ? current.x : previous.x;
const double max_step_size =
current.value_is_valid
? (current.x * options().max_step_expansion) : current.x;
@@ -656,7 +658,7 @@ bool WolfeLineSearch::BracketingPhase(
previous,
unused_previous,
current,
- previous.x,
+ min_step_size,
max_step_size);
summary->polynomial_minimization_time_in_seconds +=
(WallTimeInSeconds() - polynomial_minimization_start_time);
@@ -669,16 +671,14 @@ bool WolfeLineSearch::BracketingPhase(
return false;
}
+ // Only advance the lower boundary (in x) of the bracket if f(current)
+ // is valid such that we can support contracting the step size when
+ // f(current) is invalid without risking inverting the bracket in x, i.e.
+ // prevent previous.x > current.x.
previous = current.value_is_valid ? current : previous;
- current.x = step_size;
-
++summary->num_function_evaluations;
++summary->num_gradient_evaluations;
- current.value_is_valid =
- function->Evaluate(current.x,
- &current.value,
- &current.gradient);
- current.gradient_is_valid = current.value_is_valid;
+ function->Evaluate(step_size, kEvaluateGradient, &current);
}
// Ensure that even if a valid bracket was found, we will only mark a zoom
@@ -799,7 +799,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
const FunctionSample unused_previous;
DCHECK(!unused_previous.value_is_valid);
const double polynomial_minimization_start_time = WallTimeInSeconds();
- solution->x =
+ const double step_size =
this->InterpolatingPolynomialMinimizingStepSize(
options().interpolation_type,
lower_bound_step,
@@ -823,12 +823,9 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position,
// to numerical issues).
++summary->num_function_evaluations;
++summary->num_gradient_evaluations;
- solution->value_is_valid =
- function->Evaluate(solution->x,
- &solution->value,
- &solution->gradient);
- solution->gradient_is_valid = solution->value_is_valid;
- if (!solution->value_is_valid) {
+ const bool kEvaluateGradient = true;
+ function->Evaluate(step_size, kEvaluateGradient, solution);
+ if (!solution->value_is_valid || !solution->gradient_is_valid) {
summary->error =
StringPrintf("Line search failed: Wolfe Zoom phase found "
"step_size: %.5e, for which function is invalid, "
diff --git a/extern/ceres/internal/ceres/line_search.h b/extern/ceres/internal/ceres/line_search.h
index 6a21cbeac11..d59fd777367 100644
--- a/extern/ceres/internal/ceres/line_search.h
+++ b/extern/ceres/internal/ceres/line_search.h
@@ -35,6 +35,7 @@
#include <string>
#include <vector>
+#include "ceres/function_sample.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/types.h"
@@ -43,7 +44,6 @@ namespace ceres {
namespace internal {
class Evaluator;
-struct FunctionSample;
class LineSearchFunction;
// Line search is another name for a one dimensional optimization
@@ -51,7 +51,7 @@ class LineSearchFunction;
// dimensional optimization problems that arise as subproblems of
// general multidimensional optimization problems.
//
-// While finding the exact minimum of a one dimensionl function is
+// While finding the exact minimum of a one dimensional function is
// hard, instances of LineSearch find a point that satisfies a
// sufficient decrease condition. Depending on the particular
// condition used, we get a variety of different line search
@@ -61,21 +61,9 @@ class LineSearch {
struct Summary;
struct Options {
- Options()
- : interpolation_type(CUBIC),
- sufficient_decrease(1e-4),
- max_step_contraction(1e-3),
- min_step_contraction(0.9),
- min_step_size(1e-9),
- max_num_iterations(20),
- sufficient_curvature_decrease(0.9),
- max_step_expansion(10.0),
- is_silent(false),
- function(NULL) {}
-
// Degree of the polynomial used to approximate the objective
// function.
- LineSearchInterpolationType interpolation_type;
+ LineSearchInterpolationType interpolation_type = CUBIC;
// Armijo and Wolfe line search parameters.
@@ -88,7 +76,7 @@ class LineSearch {
// s.t.
//
// f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size
- double sufficient_decrease;
+ double sufficient_decrease = 1e-4;
// In each iteration of the Armijo / Wolfe line search,
//
@@ -98,7 +86,7 @@ class LineSearch {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double max_step_contraction;
+ double max_step_contraction = 1e-3;
// In each iteration of the Armijo / Wolfe line search,
//
@@ -107,16 +95,16 @@ class LineSearch {
//
// 0 < max_step_contraction < min_step_contraction < 1
//
- double min_step_contraction;
+ double min_step_contraction = 0.9;
// If during the line search, the step_size falls below this
// value, it is truncated to zero.
- double min_step_size;
+ double min_step_size = 1e-9;
// Maximum number of trial step size iterations during each line search,
// if a step size satisfying the search conditions cannot be found within
// this number of trials, the line search will terminate.
- int max_num_iterations;
+ int max_num_iterations = 20;
// Wolfe-specific line search parameters.
@@ -131,7 +119,7 @@ class LineSearch {
//
// Where f() is the line search objective and f'() is the derivative
// of f w.r.t step_size (d f / d step_size).
- double sufficient_curvature_decrease;
+ double sufficient_curvature_decrease = 0.9;
// During the bracketing phase of the Wolfe search, the step size is
// increased until either a point satisfying the Wolfe conditions is
@@ -142,43 +130,32 @@ class LineSearch {
// new_step_size <= max_step_expansion * step_size.
//
// By definition for expansion, max_step_expansion > 1.0.
- double max_step_expansion;
+ double max_step_expansion = 10;
- bool is_silent;
+ bool is_silent = false;
// The one dimensional function that the line search algorithm
// minimizes.
- LineSearchFunction* function;
+ LineSearchFunction* function = nullptr;
};
// Result of the line search.
struct Summary {
- Summary()
- : success(false),
- optimal_step_size(0.0),
- num_function_evaluations(0),
- num_gradient_evaluations(0),
- num_iterations(0),
- cost_evaluation_time_in_seconds(-1.0),
- gradient_evaluation_time_in_seconds(-1.0),
- polynomial_minimization_time_in_seconds(-1.0),
- total_time_in_seconds(-1.0) {}
-
- bool success;
- double optimal_step_size;
- int num_function_evaluations;
- int num_gradient_evaluations;
- int num_iterations;
+ bool success = false;
+ FunctionSample optimal_point;
+ int num_function_evaluations = 0;
+ int num_gradient_evaluations = 0;
+ int num_iterations = 0;
// Cumulative time spent evaluating the value of the cost function across
// all iterations.
- double cost_evaluation_time_in_seconds;
+ double cost_evaluation_time_in_seconds = 0.0;
// Cumulative time spent evaluating the gradient of the cost function across
// all iterations.
- double gradient_evaluation_time_in_seconds;
+ double gradient_evaluation_time_in_seconds = 0.0;
// Cumulative time spent minimizing the interpolating polynomial to compute
// the next candidate step size across all iterations.
- double polynomial_minimization_time_in_seconds;
- double total_time_in_seconds;
+ double polynomial_minimization_time_in_seconds = 0.0;
+ double total_time_in_seconds = 0.0;
std::string error;
};
@@ -234,6 +211,7 @@ class LineSearchFunction {
public:
explicit LineSearchFunction(Evaluator* evaluator);
void Init(const Vector& position, const Vector& direction);
+
// Evaluate the line search objective
//
// f(x) = p(position + x * direction)
@@ -241,27 +219,29 @@ class LineSearchFunction {
// Where, p is the objective function of the general optimization
// problem.
//
- // g is the gradient f'(x) at x.
+ // evaluate_gradient controls whether the gradient will be evaluated
+ // or not.
//
- // f must not be null. The gradient is computed only if g is not null.
- bool Evaluate(double x, double* f, double* g);
+ // On return output->*_is_valid indicate indicate whether the
+ // corresponding fields have numerically valid values or not.
+ void Evaluate(double x, bool evaluate_gradient, FunctionSample* output);
+
double DirectionInfinityNorm() const;
+
// Resets to now, the start point for the results from TimeStatistics().
void ResetTimeStatistics();
void TimeStatistics(double* cost_evaluation_time_in_seconds,
double* gradient_evaluation_time_in_seconds) const;
+ const Vector& position() const { return position_; }
+ const Vector& direction() const { return direction_; }
private:
Evaluator* evaluator_;
Vector position_;
Vector direction_;
- // evaluation_point = Evaluator::Plus(position_, x * direction_);
- Vector evaluation_point_;
-
// scaled_direction = x * direction_;
Vector scaled_direction_;
- Vector gradient_;
// We may not exclusively own the evaluator (e.g. in the Trust Region
// minimizer), hence we need to save the initial evaluation durations for the
@@ -282,10 +262,10 @@ class ArmijoLineSearch : public LineSearch {
virtual ~ArmijoLineSearch() {}
private:
- virtual void DoSearch(double step_size_estimate,
- double initial_cost,
- double initial_gradient,
- Summary* summary) const;
+ void DoSearch(double step_size_estimate,
+ double initial_cost,
+ double initial_gradient,
+ Summary* summary) const final;
};
// Bracketing / Zoom Strong Wolfe condition line search. This implementation
@@ -315,10 +295,10 @@ class WolfeLineSearch : public LineSearch {
Summary* summary) const;
private:
- virtual void DoSearch(double step_size_estimate,
- double initial_cost,
- double initial_gradient,
- Summary* summary) const;
+ void DoSearch(double step_size_estimate,
+ double initial_cost,
+ double initial_gradient,
+ Summary* summary) const final;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/line_search_minimizer.cc b/extern/ceres/internal/ceres/line_search_minimizer.cc
index fdde1ca9c86..931f56c960c 100644
--- a/extern/ceres/internal/ceres/line_search_minimizer.cc
+++ b/extern/ceres/internal/ceres/line_search_minimizer.cc
@@ -43,6 +43,7 @@
#include <algorithm>
#include <cstdlib>
#include <cmath>
+#include <memory>
#include <string>
#include <vector>
@@ -51,7 +52,6 @@
#include "ceres/evaluator.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/line_search.h"
#include "ceres/line_search_direction.h"
#include "ceres/stringprintf.h"
@@ -63,27 +63,14 @@ namespace ceres {
namespace internal {
namespace {
-// TODO(sameeragarwal): I think there is a small bug here, in that if
-// the evaluation fails, then the state can contain garbage. Look at
-// this more carefully.
-bool Evaluate(Evaluator* evaluator,
- const Vector& x,
- LineSearchMinimizer::State* state,
- std::string* message) {
- if (!evaluator->Evaluate(x.data(),
- &(state->cost),
- NULL,
- state->gradient.data(),
- NULL)) {
- *message = "Gradient evaluation failed.";
- return false;
- }
-
+bool EvaluateGradientNorms(Evaluator* evaluator,
+ const Vector& x,
+ LineSearchMinimizer::State* state,
+ std::string* message) {
Vector negative_gradient = -state->gradient;
Vector projected_gradient_step(x.size());
- if (!evaluator->Plus(x.data(),
- negative_gradient.data(),
- projected_gradient_step.data())) {
+ if (!evaluator->Plus(
+ x.data(), negative_gradient.data(), projected_gradient_step.data())) {
*message = "projected_gradient_step = Plus(x, -gradient) failed.";
return false;
}
@@ -103,7 +90,8 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
double start_time = WallTimeInSeconds();
double iteration_start_time = start_time;
- Evaluator* evaluator = CHECK_NOTNULL(options.evaluator.get());
+ CHECK(options.evaluator != nullptr);
+ Evaluator* evaluator = options.evaluator.get();
const int num_parameters = evaluator->NumParameters();
const int num_effective_parameters = evaluator->NumEffectiveParameters();
@@ -116,9 +104,6 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
State current_state(num_parameters, num_effective_parameters);
State previous_state(num_parameters, num_effective_parameters);
- Vector delta(num_effective_parameters);
- Vector x_plus_delta(num_parameters);
-
IterationSummary iteration_summary;
iteration_summary.iteration = 0;
iteration_summary.step_is_valid = false;
@@ -130,8 +115,19 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.linear_solver_iterations = 0;
iteration_summary.step_solver_time_in_seconds = 0;
- // Do initial cost and Jacobian evaluation.
- if (!Evaluate(evaluator, x, &current_state, &summary->message)) {
+ // Do initial cost and gradient evaluation.
+ if (!evaluator->Evaluate(x.data(),
+ &(current_state.cost),
+ nullptr,
+ current_state.gradient.data(),
+ nullptr)) {
+ summary->termination_type = FAILURE;
+ summary->message = "Initial cost and jacobian evaluation failed.";
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ return;
+ }
+
+ if (!EvaluateGradientNorms(evaluator, x, &current_state, &summary->message)) {
summary->termination_type = FAILURE;
summary->message = "Initial cost and jacobian evaluation failed. "
"More details: " + summary->message;
@@ -142,9 +138,8 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
summary->initial_cost = current_state.cost + summary->fixed_cost;
iteration_summary.cost = current_state.cost + summary->fixed_cost;
- iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
-
+ iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
if (iteration_summary.gradient_max_norm <= options.gradient_tolerance) {
summary->message = StringPrintf("Gradient tolerance reached. "
"Gradient max norm: %e <= %e",
@@ -170,7 +165,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
line_search_direction_options.max_lbfgs_rank = options.max_lbfgs_rank;
line_search_direction_options.use_approximate_eigenvalue_bfgs_scaling =
options.use_approximate_eigenvalue_bfgs_scaling;
- scoped_ptr<LineSearchDirection> line_search_direction(
+ std::unique_ptr<LineSearchDirection> line_search_direction(
LineSearchDirection::Create(line_search_direction_options));
LineSearchFunction line_search_function(evaluator);
@@ -194,11 +189,11 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
line_search_options.is_silent = options.is_silent;
line_search_options.function = &line_search_function;
- scoped_ptr<LineSearch>
+ std::unique_ptr<LineSearch>
line_search(LineSearch::Create(options.line_search_type,
line_search_options,
&summary->message));
- if (line_search.get() == NULL) {
+ if (line_search.get() == nullptr) {
summary->termination_type = FAILURE;
LOG_IF(ERROR, is_not_silent) << "Terminating: " << summary->message;
return;
@@ -326,28 +321,37 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
break;
}
- current_state.step_size = line_search_summary.optimal_step_size;
- delta = current_state.step_size * current_state.search_direction;
-
+ const FunctionSample& optimal_point = line_search_summary.optimal_point;
+ CHECK(optimal_point.vector_x_is_valid)
+ << "Congratulations, you found a bug in Ceres. Please report it.";
+ current_state.step_size = optimal_point.x;
previous_state = current_state;
iteration_summary.step_solver_time_in_seconds =
WallTimeInSeconds() - iteration_start_time;
- const double x_norm = x.norm();
-
- if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
- summary->termination_type = FAILURE;
- summary->message =
- "x_plus_delta = Plus(x, delta) failed. This should not happen "
- "as the step was valid when it was selected by the line search.";
- LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
- break;
+ if (optimal_point.vector_gradient_is_valid) {
+ current_state.cost = optimal_point.value;
+ current_state.gradient = optimal_point.vector_gradient;
+ } else {
+ Evaluator::EvaluateOptions evaluate_options;
+ evaluate_options.new_evaluation_point = false;
+ if (!evaluator->Evaluate(evaluate_options,
+ optimal_point.vector_x.data(),
+ &(current_state.cost),
+ nullptr,
+ current_state.gradient.data(),
+ nullptr)) {
+ summary->termination_type = FAILURE;
+ summary->message = "Cost and jacobian evaluation failed.";
+ LOG_IF(WARNING, is_not_silent) << "Terminating: " << summary->message;
+ return;
+ }
}
- if (!Evaluate(evaluator,
- x_plus_delta,
- &current_state,
- &summary->message)) {
+ if (!EvaluateGradientNorms(evaluator,
+ optimal_point.vector_x,
+ &current_state,
+ &summary->message)) {
summary->termination_type = FAILURE;
summary->message =
"Step failed to evaluate. This should not happen as the step was "
@@ -358,8 +362,9 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
}
// Compute the norm of the step in the ambient space.
- iteration_summary.step_norm = (x_plus_delta - x).norm();
- x = x_plus_delta;
+ iteration_summary.step_norm = (optimal_point.vector_x - x).norm();
+ const double x_norm = x.norm();
+ x = optimal_point.vector_x;
iteration_summary.gradient_max_norm = current_state.gradient_max_norm;
iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm);
@@ -380,6 +385,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
iteration_summary.cumulative_time_in_seconds =
WallTimeInSeconds() - start_time
+ summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
// Iterations inside the line search algorithm are considered
// 'steps' in the broader context, to distinguish these inner
@@ -423,20 +429,18 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options,
}
const double absolute_function_tolerance =
- options.function_tolerance * previous_state.cost;
- if (fabs(iteration_summary.cost_change) <= absolute_function_tolerance) {
- summary->message =
- StringPrintf("Function tolerance reached. "
- "|cost_change|/cost: %e <= %e",
- fabs(iteration_summary.cost_change) /
- previous_state.cost,
- options.function_tolerance);
+ options.function_tolerance * std::abs(previous_state.cost);
+ if (std::abs(iteration_summary.cost_change) <=
+ absolute_function_tolerance) {
+ summary->message = StringPrintf(
+ "Function tolerance reached. "
+ "|cost_change|/cost: %e <= %e",
+ std::abs(iteration_summary.cost_change) / previous_state.cost,
+ options.function_tolerance);
summary->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent) << "Terminating: " << summary->message;
break;
}
-
- summary->iterations.push_back(iteration_summary);
}
}
diff --git a/extern/ceres/internal/ceres/line_search_minimizer.h b/extern/ceres/internal/ceres/line_search_minimizer.h
index 54b7202e0c3..191128a933b 100644
--- a/extern/ceres/internal/ceres/line_search_minimizer.h
+++ b/extern/ceres/internal/ceres/line_search_minimizer.h
@@ -66,9 +66,9 @@ class LineSearchMinimizer : public Minimizer {
};
~LineSearchMinimizer() {}
- virtual void Minimize(const Minimizer::Options& options,
- double* parameters,
- Solver::Summary* summary);
+ void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) final;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/line_search_preprocessor.cc b/extern/ceres/internal/ceres/line_search_preprocessor.cc
index 831f5e8d079..5a21809e509 100644
--- a/extern/ceres/internal/ceres/line_search_preprocessor.cc
+++ b/extern/ceres/internal/ceres/line_search_preprocessor.cc
@@ -32,6 +32,8 @@
#include <numeric>
#include <string>
+#include "ceres/casts.h"
+#include "ceres/context_impl.h"
#include "ceres/evaluator.h"
#include "ceres/minimizer.h"
#include "ceres/problem_impl.h"
@@ -57,6 +59,9 @@ bool SetupEvaluator(PreprocessedProblem* pp) {
pp->evaluator_options.linear_solver_type = CGNR;
pp->evaluator_options.num_eliminate_blocks = 0;
pp->evaluator_options.num_threads = pp->options.num_threads;
+ pp->evaluator_options.context = pp->problem->context();
+ pp->evaluator_options.evaluation_callback =
+ pp->reduced_program->mutable_evaluation_callback();
pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
pp->reduced_program.get(),
&pp->error));
@@ -71,7 +76,7 @@ LineSearchPreprocessor::~LineSearchPreprocessor() {
bool LineSearchPreprocessor::Preprocess(const Solver::Options& options,
ProblemImpl* problem,
PreprocessedProblem* pp) {
- CHECK_NOTNULL(pp);
+ CHECK(pp != nullptr);
pp->options = options;
ChangeNumThreadsIfNeeded(&pp->options);
diff --git a/extern/ceres/internal/ceres/line_search_preprocessor.h b/extern/ceres/internal/ceres/line_search_preprocessor.h
index 132d83a0a9a..12ccb53e011 100644
--- a/extern/ceres/internal/ceres/line_search_preprocessor.h
+++ b/extern/ceres/internal/ceres/line_search_preprocessor.h
@@ -39,9 +39,9 @@ namespace internal {
class LineSearchPreprocessor : public Preprocessor {
public:
virtual ~LineSearchPreprocessor();
- virtual bool Preprocess(const Solver::Options& options,
- ProblemImpl* problem,
- PreprocessedProblem* preprocessed_problem);
+ bool Preprocess(const Solver::Options& options,
+ ProblemImpl* problem,
+ PreprocessedProblem* preprocessed_problem) final;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/linear_least_squares_problems.cc b/extern/ceres/internal/ceres/linear_least_squares_problems.cc
index 0a69375f7b5..7c523d399e1 100644
--- a/extern/ceres/internal/ceres/linear_least_squares_problems.cc
+++ b/extern/ceres/internal/ceres/linear_least_squares_problems.cc
@@ -31,13 +31,14 @@
#include "ceres/linear_least_squares_problems.h"
#include <cstdio>
+#include <memory>
#include <string>
#include <vector>
+
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/casts.h"
#include "ceres/file.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/stringprintf.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
@@ -297,7 +298,7 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem2() {
problem->num_eliminate_blocks = 2;
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
- scoped_array<double> values(new double[num_rows * num_cols]);
+ std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
for (int c = 0; c < num_cols; ++c) {
bs->cols.push_back(Block());
@@ -431,7 +432,7 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
problem->num_eliminate_blocks = 2;
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
- scoped_array<double> values(new double[num_rows * num_cols]);
+ std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
for (int c = 0; c < num_cols; ++c) {
bs->cols.push_back(Block());
@@ -538,7 +539,7 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem4() {
problem->num_eliminate_blocks = 1;
CompressedRowBlockStructure* bs = new CompressedRowBlockStructure;
- scoped_array<double> values(new double[num_rows * num_cols]);
+ std::unique_ptr<double[]> values(new double[num_rows * num_cols]);
// Column block structure
bs->cols.push_back(Block());
@@ -613,7 +614,7 @@ bool DumpLinearLeastSquaresProblemToConsole(const SparseMatrix* A,
const double* b,
const double* x,
int num_eliminate_blocks) {
- CHECK_NOTNULL(A);
+ CHECK(A != nullptr);
Matrix AA;
A->ToDenseMatrix(&AA);
LOG(INFO) << "A^T: \n" << AA.transpose();
@@ -636,10 +637,10 @@ bool DumpLinearLeastSquaresProblemToConsole(const SparseMatrix* A,
void WriteArrayToFileOrDie(const string& filename,
const double* x,
const int size) {
- CHECK_NOTNULL(x);
+ CHECK(x != nullptr);
VLOG(2) << "Writing array to: " << filename;
FILE* fptr = fopen(filename.c_str(), "w");
- CHECK_NOTNULL(fptr);
+ CHECK(fptr != nullptr);
for (int i = 0; i < size; ++i) {
fprintf(fptr, "%17f\n", x[i]);
}
@@ -652,7 +653,7 @@ bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
const double* b,
const double* x,
int num_eliminate_blocks) {
- CHECK_NOTNULL(A);
+ CHECK(A != nullptr);
LOG(INFO) << "writing to: " << filename_base << "*";
string matlab_script;
@@ -666,7 +667,7 @@ bool DumpLinearLeastSquaresProblemToTextFile(const string& filename_base,
{
string filename = filename_base + "_A.txt";
FILE* fptr = fopen(filename.c_str(), "w");
- CHECK_NOTNULL(fptr);
+ CHECK(fptr != nullptr);
A->ToTextFile(fptr);
fclose(fptr);
StringAppendF(&matlab_script,
diff --git a/extern/ceres/internal/ceres/linear_least_squares_problems.h b/extern/ceres/internal/ceres/linear_least_squares_problems.h
index 384efb59a2b..5dfcd34e109 100644
--- a/extern/ceres/internal/ceres/linear_least_squares_problems.h
+++ b/extern/ceres/internal/ceres/linear_least_squares_problems.h
@@ -31,11 +31,11 @@
#ifndef CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
#define CERES_INTERNAL_LINEAR_LEAST_SQUARES_PROBLEMS_H_
+#include <memory>
#include <string>
#include <vector>
#include "ceres/sparse_matrix.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
namespace ceres {
namespace internal {
@@ -44,21 +44,20 @@ namespace internal {
// ground truth solutions. To be used by various LinearSolver tests.
struct LinearLeastSquaresProblem {
LinearLeastSquaresProblem()
- : A(NULL), b(NULL), D(NULL), num_eliminate_blocks(0),
- x(NULL), x_D(NULL) {
+ : num_eliminate_blocks(0) {
}
- scoped_ptr<SparseMatrix> A;
- scoped_array<double> b;
- scoped_array<double> D;
+ std::unique_ptr<SparseMatrix> A;
+ std::unique_ptr<double[]> b;
+ std::unique_ptr<double[]> D;
// If using the schur eliminator then how many of the variable
// blocks are e_type blocks.
int num_eliminate_blocks;
// Solution to min_x |Ax - b|^2
- scoped_array<double> x;
+ std::unique_ptr<double[]> x;
// Solution to min_x |Ax - b|^2 + |Dx|^2
- scoped_array<double> x_D;
+ std::unique_ptr<double[]> x_D;
};
// Factories for linear least squares problem.
diff --git a/extern/ceres/internal/ceres/linear_solver.cc b/extern/ceres/internal/ceres/linear_solver.cc
index 38e4625f747..107af6afcc8 100644
--- a/extern/ceres/internal/ceres/linear_solver.cc
+++ b/extern/ceres/internal/ceres/linear_solver.cc
@@ -35,6 +35,7 @@
#include "ceres/dense_qr_solver.h"
#include "ceres/iterative_schur_complement_solver.h"
#include "ceres/schur_complement_solver.h"
+#include "ceres/dynamic_sparse_normal_cholesky_solver.h"
#include "ceres/sparse_normal_cholesky_solver.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -70,23 +71,25 @@ LinearSolverType LinearSolver::LinearSolverForZeroEBlocks(
}
LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
+ CHECK(options.context != NULL);
+
switch (options.type) {
case CGNR:
return new CgnrSolver(options);
case SPARSE_NORMAL_CHOLESKY:
-#if defined(CERES_NO_SUITESPARSE) && \
- defined(CERES_NO_CXSPARSE) && \
- !defined(CERES_USE_EIGEN_SPARSE)
+#if defined(CERES_NO_SPARSE)
return NULL;
#else
+ if (options.dynamic_sparsity) {
+ return new DynamicSparseNormalCholeskySolver(options);
+ }
+
return new SparseNormalCholeskySolver(options);
#endif
case SPARSE_SCHUR:
-#if defined(CERES_NO_SUITESPARSE) && \
- defined(CERES_NO_CXSPARSE) && \
- !defined(CERES_USE_EIGEN_SPARSE)
+#if defined(CERES_NO_SPARSE)
return NULL;
#else
return new SparseSchurComplementSolver(options);
diff --git a/extern/ceres/internal/ceres/linear_solver.h b/extern/ceres/internal/ceres/linear_solver.h
index fb9332ca6e3..cb624b372dd 100644
--- a/extern/ceres/internal/ceres/linear_solver.h
+++ b/extern/ceres/internal/ceres/linear_solver.h
@@ -41,6 +41,7 @@
#include "ceres/block_sparse_matrix.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
#include "ceres/dense_sparse_matrix.h"
#include "ceres/execution_summary.h"
#include "ceres/triplet_sparse_matrix.h"
@@ -69,6 +70,16 @@ enum LinearSolverTerminationType {
LINEAR_SOLVER_FATAL_ERROR
};
+// This enum controls the fill-reducing ordering a sparse linear
+// algebra library should use before computing a sparse factorization
+// (usually Cholesky).
+enum OrderingType {
+ NATURAL, // Do not re-order the matrix. This is useful when the
+ // matrix has been ordered using a fill-reducing ordering
+ // already.
+ AMD // Use the Approximate Minimum Degree algorithm to re-order
+ // the matrix.
+};
class LinearOperator;
@@ -91,42 +102,25 @@ class LinearOperator;
class LinearSolver {
public:
struct Options {
- Options()
- : type(SPARSE_NORMAL_CHOLESKY),
- preconditioner_type(JACOBI),
- visibility_clustering_type(CANONICAL_VIEWS),
- dense_linear_algebra_library_type(EIGEN),
- sparse_linear_algebra_library_type(SUITE_SPARSE),
- use_postordering(false),
- dynamic_sparsity(false),
- use_explicit_schur_complement(false),
- min_num_iterations(1),
- max_num_iterations(1),
- num_threads(1),
- residual_reset_period(10),
- row_block_size(Eigen::Dynamic),
- e_block_size(Eigen::Dynamic),
- f_block_size(Eigen::Dynamic) {
- }
-
- LinearSolverType type;
- PreconditionerType preconditioner_type;
- VisibilityClusteringType visibility_clustering_type;
- DenseLinearAlgebraLibraryType dense_linear_algebra_library_type;
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+ LinearSolverType type = SPARSE_NORMAL_CHOLESKY;
+ PreconditionerType preconditioner_type = JACOBI;
+ VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
+ DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type =
+ SUITE_SPARSE;
// See solver.h for information about these flags.
- bool use_postordering;
- bool dynamic_sparsity;
- bool use_explicit_schur_complement;
+ bool use_postordering = false;
+ bool dynamic_sparsity = false;
+ bool use_explicit_schur_complement = false;
// Number of internal iterations that the solver uses. This
// parameter only makes sense for iterative solvers like CG.
- int min_num_iterations;
- int max_num_iterations;
+ int min_num_iterations = 1;
+ int max_num_iterations = 1;
// If possible, how many threads can the solver use.
- int num_threads;
+ int num_threads = 1;
// Hints about the order in which the parameter blocks should be
// eliminated by the linear solver.
@@ -151,7 +145,7 @@ class LinearSolver {
// inaccurate over time. Thus for non-zero values of this
// parameter, the solver can be told to recalculate the value of
// the residual using a |b - Ax| evaluation.
- int residual_reset_period;
+ int residual_reset_period = 10;
// If the block sizes in a BlockSparseMatrix are fixed, then in
// some cases the Schur complement based solvers can detect and
@@ -162,20 +156,18 @@ class LinearSolver {
//
// Please see schur_complement_solver.h and schur_eliminator.h for
// more details.
- int row_block_size;
- int e_block_size;
- int f_block_size;
+ int row_block_size = Eigen::Dynamic;
+ int e_block_size = Eigen::Dynamic;
+ int f_block_size = Eigen::Dynamic;
+
+ bool use_mixed_precision_solves = false;
+ int max_num_refinement_iterations = 0;
+ int subset_preconditioner_start_row_block = -1;
+ ContextImpl* context = nullptr;
};
// Options for the Solve method.
struct PerSolveOptions {
- PerSolveOptions()
- : D(NULL),
- preconditioner(NULL),
- r_tolerance(0.0),
- q_tolerance(0.0) {
- }
-
// This option only makes sense for unsymmetric linear solvers
// that can solve rectangular linear systems.
//
@@ -199,7 +191,7 @@ class LinearSolver {
// does not have full column rank, the results returned by the
// solver cannot be relied on. D, if it is not null is an array of
// size n. b is an array of size m and x is an array of size n.
- double * D;
+ double* D = nullptr;
// This option only makes sense for iterative solvers.
//
@@ -221,7 +213,7 @@ class LinearSolver {
//
// A null preconditioner is equivalent to an identity matrix being
// used a preconditioner.
- LinearOperator* preconditioner;
+ LinearOperator* preconditioner = nullptr;
// The following tolerance related options only makes sense for
@@ -233,7 +225,7 @@ class LinearSolver {
//
// This is the most commonly used termination criterion for
// iterative solvers.
- double r_tolerance;
+ double r_tolerance = 0.0;
// For PSD matrices A, let
//
@@ -257,22 +249,16 @@ class LinearSolver {
// Journal of Computational and Applied Mathematics,
// 124(1-2), 45-59, 2000.
//
- double q_tolerance;
+ double q_tolerance = 0.0;
};
// Summary of a call to the Solve method. We should move away from
// the true/false method for determining solver success. We should
// let the summary object do the talking.
struct Summary {
- Summary()
- : residual_norm(0.0),
- num_iterations(-1),
- termination_type(LINEAR_SOLVER_FAILURE) {
- }
-
- double residual_norm;
- int num_iterations;
- LinearSolverTerminationType termination_type;
+ double residual_norm = -1.0;
+ int num_iterations = -1;
+ LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
std::string message;
};
@@ -292,16 +278,12 @@ class LinearSolver {
const PerSolveOptions& per_solve_options,
double* x) = 0;
- // The following two methods return copies instead of references so
- // that the base class implementation does not have to worry about
- // life time issues. Further, these calls are not expected to be
- // frequent or performance sensitive.
- virtual std::map<std::string, int> CallStatistics() const {
- return std::map<std::string, int>();
- }
-
- virtual std::map<std::string, double> TimeStatistics() const {
- return std::map<std::string, double>();
+ // This method returns copies instead of references so that the base
+ // class implementation does not have to worry about life time
+ // issues. Further, these calls are not expected to be frequent or
+ // performance sensitive.
+ virtual std::map<std::string, CallStatistics> Statistics() const {
+ return std::map<std::string, CallStatistics>();
}
// Factory
@@ -325,18 +307,14 @@ class TypedLinearSolver : public LinearSolver {
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
ScopedExecutionTimer total_time("LinearSolver::Solve", &execution_summary_);
- CHECK_NOTNULL(A);
- CHECK_NOTNULL(b);
- CHECK_NOTNULL(x);
+ CHECK(A != nullptr);
+ CHECK(b != nullptr);
+ CHECK(x != nullptr);
return SolveImpl(down_cast<MatrixType*>(A), b, per_solve_options, x);
}
- virtual std::map<std::string, int> CallStatistics() const {
- return execution_summary_.calls();
- }
-
- virtual std::map<std::string, double> TimeStatistics() const {
- return execution_summary_.times();
+ virtual std::map<std::string, CallStatistics> Statistics() const {
+ return execution_summary_.statistics();
}
private:
diff --git a/extern/ceres/internal/ceres/local_parameterization.cc b/extern/ceres/internal/ceres/local_parameterization.cc
index a6bf1f6ddcc..97fdbed3eda 100644
--- a/extern/ceres/internal/ceres/local_parameterization.cc
+++ b/extern/ceres/internal/ceres/local_parameterization.cc
@@ -31,10 +31,11 @@
#include "ceres/local_parameterization.h"
#include <algorithm>
+
#include "Eigen/Geometry"
-#include "ceres/householder_vector.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/householder_vector.h"
#include "ceres/rotation.h"
#include "glog/logging.h"
@@ -42,13 +43,16 @@ namespace ceres {
using std::vector;
-LocalParameterization::~LocalParameterization() {
-}
+LocalParameterization::~LocalParameterization() {}
bool LocalParameterization::MultiplyByJacobian(const double* x,
const int num_rows,
const double* global_matrix,
double* local_matrix) const {
+ if (LocalSize() == 0) {
+ return true;
+ }
+
Matrix jacobian(GlobalSize(), LocalSize());
if (!ComputeJacobian(x, jacobian.data())) {
return false;
@@ -74,7 +78,7 @@ bool IdentityParameterization::Plus(const double* x,
bool IdentityParameterization::ComputeJacobian(const double* x,
double* jacobian) const {
- MatrixRef(jacobian, size_, size_) = Matrix::Identity(size_, size_);
+ MatrixRef(jacobian, size_, size_).setIdentity();
return true;
}
@@ -82,9 +86,8 @@ bool IdentityParameterization::MultiplyByJacobian(const double* x,
const int num_cols,
const double* global_matrix,
double* local_matrix) const {
- std::copy(global_matrix,
- global_matrix + num_cols * GlobalSize(),
- local_matrix);
+ std::copy(
+ global_matrix, global_matrix + num_cols * GlobalSize(), local_matrix);
return true;
}
@@ -93,8 +96,8 @@ SubsetParameterization::SubsetParameterization(
: local_size_(size - constant_parameters.size()), constancy_mask_(size, 0) {
vector<int> constant = constant_parameters;
std::sort(constant.begin(), constant.end());
- CHECK_GE(constant.front(), 0)
- << "Indices indicating constant parameter must be greater than zero.";
+ CHECK_GE(constant.front(), 0) << "Indices indicating constant parameter must "
+ "be greater than equal to zero.";
CHECK_LT(constant.back(), size)
<< "Indices indicating constant parameter must be less than the size "
<< "of the parameter block.";
@@ -108,7 +111,8 @@ SubsetParameterization::SubsetParameterization(
bool SubsetParameterization::Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
- for (int i = 0, j = 0; i < constancy_mask_.size(); ++i) {
+ const int global_size = GlobalSize();
+ for (int i = 0, j = 0; i < global_size; ++i) {
if (constancy_mask_[i]) {
x_plus_delta[i] = x[i];
} else {
@@ -124,9 +128,10 @@ bool SubsetParameterization::ComputeJacobian(const double* x,
return true;
}
- MatrixRef m(jacobian, constancy_mask_.size(), local_size_);
+ const int global_size = GlobalSize();
+ MatrixRef m(jacobian, global_size, local_size_);
m.setZero();
- for (int i = 0, j = 0; i < constancy_mask_.size(); ++i) {
+ for (int i = 0, j = 0; i < global_size; ++i) {
if (!constancy_mask_[i]) {
m(i, j++) = 1.0;
}
@@ -135,18 +140,19 @@ bool SubsetParameterization::ComputeJacobian(const double* x,
}
bool SubsetParameterization::MultiplyByJacobian(const double* x,
- const int num_rows,
- const double* global_matrix,
- double* local_matrix) const {
+ const int num_cols,
+ const double* global_matrix,
+ double* local_matrix) const {
if (local_size_ == 0) {
return true;
}
- for (int row = 0; row < num_rows; ++row) {
- for (int col = 0, j = 0; col < constancy_mask_.size(); ++col) {
- if (!constancy_mask_[col]) {
- local_matrix[row * LocalSize() + j++] =
- global_matrix[row * GlobalSize() + col];
+ const int global_size = GlobalSize();
+ for (int col = 0; col < num_cols; ++col) {
+ for (int i = 0, j = 0; i < global_size; ++i) {
+ if (!constancy_mask_[i]) {
+ local_matrix[col * local_size_ + j++] =
+ global_matrix[col * global_size + i];
}
}
}
@@ -176,10 +182,12 @@ bool QuaternionParameterization::Plus(const double* x,
bool QuaternionParameterization::ComputeJacobian(const double* x,
double* jacobian) const {
- jacobian[0] = -x[1]; jacobian[1] = -x[2]; jacobian[2] = -x[3]; // NOLINT
- jacobian[3] = x[0]; jacobian[4] = x[3]; jacobian[5] = -x[2]; // NOLINT
- jacobian[6] = -x[3]; jacobian[7] = x[0]; jacobian[8] = x[1]; // NOLINT
- jacobian[9] = x[2]; jacobian[10] = -x[1]; jacobian[11] = x[0]; // NOLINT
+ // clang-format off
+ jacobian[0] = -x[1]; jacobian[1] = -x[2]; jacobian[2] = -x[3];
+ jacobian[3] = x[0]; jacobian[4] = x[3]; jacobian[5] = -x[2];
+ jacobian[6] = -x[3]; jacobian[7] = x[0]; jacobian[8] = x[1];
+ jacobian[9] = x[2]; jacobian[10] = -x[1]; jacobian[11] = x[0];
+ // clang-format on
return true;
}
@@ -209,10 +217,12 @@ bool EigenQuaternionParameterization::Plus(const double* x_ptr,
bool EigenQuaternionParameterization::ComputeJacobian(const double* x,
double* jacobian) const {
- jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1]; // NOLINT
- jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0]; // NOLINT
- jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3]; // NOLINT
- jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2]; // NOLINT
+ // clang-format off
+ jacobian[0] = x[3]; jacobian[1] = x[2]; jacobian[2] = -x[1];
+ jacobian[3] = -x[2]; jacobian[4] = x[3]; jacobian[5] = x[0];
+ jacobian[6] = x[1]; jacobian[7] = -x[0]; jacobian[8] = x[3];
+ jacobian[9] = -x[0]; jacobian[10] = -x[1]; jacobian[11] = -x[2];
+ // clang-format on
return true;
}
@@ -241,21 +251,26 @@ bool HomogeneousVectorParameterization::Plus(const double* x_ptr,
// (2nd Edition) for a detailed description. Note there is a typo on Page
// 625, line 4 so check the book errata.
const double norm_delta_div_2 = 0.5 * norm_delta;
- const double sin_delta_by_delta = sin(norm_delta_div_2) /
- norm_delta_div_2;
+ const double sin_delta_by_delta =
+ std::sin(norm_delta_div_2) / norm_delta_div_2;
Vector y(size_);
y.head(size_ - 1) = 0.5 * sin_delta_by_delta * delta;
- y(size_ - 1) = cos(norm_delta_div_2);
+ y(size_ - 1) = std::cos(norm_delta_div_2);
Vector v(size_);
double beta;
- internal::ComputeHouseholderVector<double>(x, &v, &beta);
+
+ // NOTE: The explicit template arguments are needed here because
+ // ComputeHouseholderVector is templated and some versions of MSVC
+ // have trouble deducing the type of v automatically.
+ internal::ComputeHouseholderVector<ConstVectorRef, double, Eigen::Dynamic>(
+ x, &v, &beta);
// Apply the delta update to remain on the unit sphere. See section A6.9.3
// on page 625 of Hartley & Zisserman (2nd Edition) for a detailed
// description.
- x_plus_delta = x.norm() * (y - v * (beta * (v.transpose() * y)));
+ x_plus_delta = x.norm() * (y - v * (beta * (v.transpose() * y)));
return true;
}
@@ -267,7 +282,12 @@ bool HomogeneousVectorParameterization::ComputeJacobian(
Vector v(size_);
double beta;
- internal::ComputeHouseholderVector<double>(x, &v, &beta);
+
+ // NOTE: The explicit template arguments are needed here because
+ // ComputeHouseholderVector is templated and some versions of MSVC
+ // have trouble deducing the type of v automatically.
+ internal::ComputeHouseholderVector<ConstVectorRef, double, Eigen::Dynamic>(
+ x, &v, &beta);
// The Jacobian is equal to J = 0.5 * H.leftCols(size_ - 1) where H is the
// Householder matrix (H = I - beta * v * v').
@@ -280,65 +300,14 @@ bool HomogeneousVectorParameterization::ComputeJacobian(
return true;
}
-ProductParameterization::ProductParameterization(
- LocalParameterization* local_param1,
- LocalParameterization* local_param2) {
- local_params_.push_back(local_param1);
- local_params_.push_back(local_param2);
- Init();
-}
-
-ProductParameterization::ProductParameterization(
- LocalParameterization* local_param1,
- LocalParameterization* local_param2,
- LocalParameterization* local_param3) {
- local_params_.push_back(local_param1);
- local_params_.push_back(local_param2);
- local_params_.push_back(local_param3);
- Init();
-}
-
-ProductParameterization::ProductParameterization(
- LocalParameterization* local_param1,
- LocalParameterization* local_param2,
- LocalParameterization* local_param3,
- LocalParameterization* local_param4) {
- local_params_.push_back(local_param1);
- local_params_.push_back(local_param2);
- local_params_.push_back(local_param3);
- local_params_.push_back(local_param4);
- Init();
-}
-
-ProductParameterization::~ProductParameterization() {
- for (int i = 0; i < local_params_.size(); ++i) {
- delete local_params_[i];
- }
-}
-
-void ProductParameterization::Init() {
- global_size_ = 0;
- local_size_ = 0;
- buffer_size_ = 0;
- for (int i = 0; i < local_params_.size(); ++i) {
- const LocalParameterization* param = local_params_[i];
- buffer_size_ = std::max(buffer_size_,
- param->LocalSize() * param->GlobalSize());
- global_size_ += param->GlobalSize();
- local_size_ += param->LocalSize();
- }
-}
-
bool ProductParameterization::Plus(const double* x,
const double* delta,
double* x_plus_delta) const {
int x_cursor = 0;
int delta_cursor = 0;
- for (int i = 0; i < local_params_.size(); ++i) {
- const LocalParameterization* param = local_params_[i];
- if (!param->Plus(x + x_cursor,
- delta + delta_cursor,
- x_plus_delta + x_cursor)) {
+ for (const auto& param : local_params_) {
+ if (!param->Plus(
+ x + x_cursor, delta + delta_cursor, x_plus_delta + x_cursor)) {
return false;
}
delta_cursor += param->LocalSize();
@@ -356,16 +325,15 @@ bool ProductParameterization::ComputeJacobian(const double* x,
int x_cursor = 0;
int delta_cursor = 0;
- for (int i = 0; i < local_params_.size(); ++i) {
- const LocalParameterization* param = local_params_[i];
+ for (const auto& param : local_params_) {
const int local_size = param->LocalSize();
const int global_size = param->GlobalSize();
- if (!param->ComputeJacobian(x + x_cursor, buffer.get())) {
+ if (!param->ComputeJacobian(x + x_cursor, buffer.data())) {
return false;
}
- jacobian.block(x_cursor, delta_cursor, global_size, local_size)
- = MatrixRef(buffer.get(), global_size, local_size);
+ jacobian.block(x_cursor, delta_cursor, global_size, local_size) =
+ MatrixRef(buffer.data(), global_size, local_size);
delta_cursor += local_size;
x_cursor += global_size;
diff --git a/extern/ceres/internal/ceres/loss_function.cc b/extern/ceres/internal/ceres/loss_function.cc
index eb5026784dd..2c21a7377ca 100644
--- a/extern/ceres/internal/ceres/loss_function.cc
+++ b/extern/ceres/internal/ceres/loss_function.cc
@@ -32,6 +32,7 @@
#include "ceres/loss_function.h"
+#include <algorithm>
#include <cmath>
#include <cstddef>
#include <limits>
@@ -101,7 +102,9 @@ void TolerantLoss::Evaluate(double s, double rho[3]) const {
// large, it will overflow. Since numerically 1 + e^x == e^x when the
// x is greater than about ln(2^53) for doubles, beyond this threshold
// we substitute x for ln(1 + e^x) as a numerically equivalent approximation.
- static const double kLog2Pow53 = 36.7; // ln(MathLimits<double>::kEpsilon).
+
+ // ln(MathLimits<double>::kEpsilon).
+ static constexpr double kLog2Pow53 = 36.7;
if (x > kLog2Pow53) {
rho[0] = s - a_ - c_;
rho[1] = 1.0;
@@ -119,12 +122,12 @@ void TukeyLoss::Evaluate(double s, double* rho) const {
// Inlier region.
const double value = 1.0 - s / a_squared_;
const double value_sq = value * value;
- rho[0] = a_squared_ / 6.0 * (1.0 - value_sq * value);
- rho[1] = 0.5 * value_sq;
- rho[2] = -1.0 / a_squared_ * value;
+ rho[0] = a_squared_ / 3.0 * (1.0 - value_sq * value);
+ rho[1] = value_sq;
+ rho[2] = -2.0 / a_squared_ * value;
} else {
// Outlier region.
- rho[0] = a_squared_ / 6.0;
+ rho[0] = a_squared_ / 3.0;
rho[1] = 0.0;
rho[2] = 0.0;
}
@@ -132,10 +135,12 @@ void TukeyLoss::Evaluate(double s, double* rho) const {
ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
const LossFunction* g, Ownership ownership_g)
- : f_(CHECK_NOTNULL(f)),
- g_(CHECK_NOTNULL(g)),
+ : f_(f),
+ g_(g),
ownership_f_(ownership_f),
ownership_g_(ownership_g) {
+ CHECK(f_ != nullptr);
+ CHECK(g_ != nullptr);
}
ComposedLoss::~ComposedLoss() {
diff --git a/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc b/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
index 1c6c9925f1c..f3953c46006 100644
--- a/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
+++ b/extern/ceres/internal/ceres/low_rank_inverse_hessian.cc
@@ -175,12 +175,10 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr,
<< "approximation.";
}
- for (list<int>::const_iterator it = indices_.begin();
- it != indices_.end();
- ++it) {
- const double beta = delta_gradient_history_.col(*it).dot(search_direction) /
- delta_x_dot_delta_gradient_(*it);
- search_direction += delta_x_history_.col(*it) * (alpha(*it) - beta);
+ for (const int i : indices_) {
+ const double beta = delta_gradient_history_.col(i).dot(search_direction) /
+ delta_x_dot_delta_gradient_(i);
+ search_direction += delta_x_history_.col(i) * (alpha(i) - beta);
}
}
diff --git a/extern/ceres/internal/ceres/low_rank_inverse_hessian.h b/extern/ceres/internal/ceres/low_rank_inverse_hessian.h
index 2c768c2ca53..0028a988923 100644
--- a/extern/ceres/internal/ceres/low_rank_inverse_hessian.h
+++ b/extern/ceres/internal/ceres/low_rank_inverse_hessian.h
@@ -54,7 +54,7 @@ namespace internal {
// enhanced with scaling rule by Byrd, Nocedal and Schanbel.
//
// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited
-// Storage". Mathematics of Computation 35 (151): 773–782.
+// Storage". Mathematics of Computation 35 (151): 773-782.
//
// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994).
// "Representations of Quasi-Newton Matrices and their use in
@@ -84,12 +84,12 @@ class LowRankInverseHessian : public LinearOperator {
bool Update(const Vector& delta_x, const Vector& delta_gradient);
// LinearOperator interface
- virtual void RightMultiply(const double* x, double* y) const;
- virtual void LeftMultiply(const double* x, double* y) const {
+ void RightMultiply(const double* x, double* y) const final;
+ void LeftMultiply(const double* x, double* y) const final {
RightMultiply(x, y);
}
- virtual int num_rows() const { return num_parameters_; }
- virtual int num_cols() const { return num_parameters_; }
+ int num_rows() const final { return num_parameters_; }
+ int num_cols() const final { return num_parameters_; }
private:
const int num_parameters_;
diff --git a/extern/ceres/internal/ceres/minimizer.h b/extern/ceres/internal/ceres/minimizer.h
index b59372806e7..afdd60d2944 100644
--- a/extern/ceres/internal/ceres/minimizer.h
+++ b/extern/ceres/internal/ceres/minimizer.h
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_MINIMIZER_H_
#define CERES_INTERNAL_MINIMIZER_H_
+#include <memory>
#include <string>
#include <vector>
#include "ceres/internal/port.h"
@@ -167,19 +168,19 @@ class Minimizer {
// Object responsible for evaluating the cost, residuals and
// Jacobian matrix.
- shared_ptr<Evaluator> evaluator;
+ std::shared_ptr<Evaluator> evaluator;
// Object responsible for actually computing the trust region
// step, and sizing the trust region radius.
- shared_ptr<TrustRegionStrategy> trust_region_strategy;
+ std::shared_ptr<TrustRegionStrategy> trust_region_strategy;
// Object holding the Jacobian matrix. It is assumed that the
// sparsity structure of the matrix has already been initialized
// and will remain constant for the life time of the
// optimization.
- shared_ptr<SparseMatrix> jacobian;
+ std::shared_ptr<SparseMatrix> jacobian;
- shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
+ std::shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
};
static Minimizer* Create(MinimizerType minimizer_type);
diff --git a/extern/ceres/internal/ceres/mutex.h b/extern/ceres/internal/ceres/mutex.h
deleted file mode 100644
index 2ce97772755..00000000000
--- a/extern/ceres/internal/ceres/mutex.h
+++ /dev/null
@@ -1,329 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
-// http://ceres-solver.org/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: Craig Silverstein.
-//
-// A simple mutex wrapper, supporting locks and read-write locks.
-// You should assume the locks are *not* re-entrant.
-//
-// This class is meant to be internal-only and should be wrapped by an
-// internal namespace. Before you use this module, please give the
-// name of your internal namespace for this module. Or, if you want
-// to expose it, you'll want to move it to the Google namespace. We
-// cannot put this class in global namespace because there can be some
-// problems when we have multiple versions of Mutex in each shared object.
-//
-// NOTE: by default, we have #ifdef'ed out the TryLock() method.
-// This is for two reasons:
-// 1) TryLock() under Windows is a bit annoying (it requires a
-// #define to be defined very early).
-// 2) TryLock() is broken for NO_THREADS mode, at least in NDEBUG
-// mode.
-// If you need TryLock(), and either these two caveats are not a
-// problem for you, or you're willing to work around them, then
-// feel free to #define GMUTEX_TRYLOCK, or to remove the #ifdefs
-// in the code below.
-//
-// CYGWIN NOTE: Cygwin support for rwlock seems to be buggy:
-// http://www.cygwin.com/ml/cygwin/2008-12/msg00017.html
-// Because of that, we might as well use windows locks for
-// cygwin. They seem to be more reliable than the cygwin pthreads layer.
-//
-// TRICKY IMPLEMENTATION NOTE:
-// This class is designed to be safe to use during
-// dynamic-initialization -- that is, by global constructors that are
-// run before main() starts. The issue in this case is that
-// dynamic-initialization happens in an unpredictable order, and it
-// could be that someone else's dynamic initializer could call a
-// function that tries to acquire this mutex -- but that all happens
-// before this mutex's constructor has run. (This can happen even if
-// the mutex and the function that uses the mutex are in the same .cc
-// file.) Basically, because Mutex does non-trivial work in its
-// constructor, it's not, in the naive implementation, safe to use
-// before dynamic initialization has run on it.
-//
-// The solution used here is to pair the actual mutex primitive with a
-// bool that is set to true when the mutex is dynamically initialized.
-// (Before that it's false.) Then we modify all mutex routines to
-// look at the bool, and not try to lock/unlock until the bool makes
-// it to true (which happens after the Mutex constructor has run.)
-//
-// This works because before main() starts -- particularly, during
-// dynamic initialization -- there are no threads, so a) it's ok that
-// the mutex operations are a no-op, since we don't need locking then
-// anyway; and b) we can be quite confident our bool won't change
-// state between a call to Lock() and a call to Unlock() (that would
-// require a global constructor in one translation unit to call Lock()
-// and another global constructor in another translation unit to call
-// Unlock() later, which is pretty perverse).
-//
-// That said, it's tricky, and can conceivably fail; it's safest to
-// avoid trying to acquire a mutex in a global constructor, if you
-// can. One way it can fail is that a really smart compiler might
-// initialize the bool to true at static-initialization time (too
-// early) rather than at dynamic-initialization time. To discourage
-// that, we set is_safe_ to true in code (not the constructor
-// colon-initializer) and set it to true via a function that always
-// evaluates to true, but that the compiler can't know always
-// evaluates to true. This should be good enough.
-
-#ifndef CERES_INTERNAL_MUTEX_H_
-#define CERES_INTERNAL_MUTEX_H_
-
-#include "ceres/internal/port.h"
-
-#if defined(CERES_NO_THREADS)
- typedef int MutexType; // to keep a lock-count
-#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
-# define CERES_WIN32_LEAN_AND_MEAN // We only need minimal includes
-# ifdef CERES_GMUTEX_TRYLOCK
- // We need Windows NT or later for TryEnterCriticalSection(). If you
- // don't need that functionality, you can remove these _WIN32_WINNT
- // lines, and change TryLock() to assert(0) or something.
-# ifndef _WIN32_WINNT
-# define _WIN32_WINNT 0x0400
-# endif
-# endif
-// Unfortunately, windows.h defines a bunch of macros with common
-// names. Two in particular need avoiding: ERROR and min/max.
-// To avoid macro definition of ERROR.
-# define NOGDI
-// To avoid macro definition of min/max.
-# ifndef NOMINMAX
-# define NOMINMAX
-# endif
-# include <windows.h>
- typedef CRITICAL_SECTION MutexType;
-#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
- // Needed for pthread_rwlock_*. If it causes problems, you could take it
- // out, but then you'd have to unset CERES_HAVE_RWLOCK (at least on linux --
- // it *does* cause problems for FreeBSD, or MacOSX, but isn't needed for
- // locking there.)
-# if defined(__linux__) && !defined(_XOPEN_SOURCE)
-# define _XOPEN_SOURCE 500 // may be needed to get the rwlock calls
-# endif
-# include <pthread.h>
- typedef pthread_rwlock_t MutexType;
-#elif defined(CERES_HAVE_PTHREAD)
-# include <pthread.h>
- typedef pthread_mutex_t MutexType;
-#else
-# error Need to implement mutex.h for your architecture, or #define NO_THREADS
-#endif
-
-// We need to include these header files after defining _XOPEN_SOURCE
-// as they may define the _XOPEN_SOURCE macro.
-#include <assert.h>
-#include <stdlib.h> // for abort()
-
-namespace ceres {
-namespace internal {
-
-class Mutex {
- public:
- // Create a Mutex that is not held by anybody. This constructor is
- // typically used for Mutexes allocated on the heap or the stack.
- // See below for a recommendation for constructing global Mutex
- // objects.
- inline Mutex();
-
- // Destructor
- inline ~Mutex();
-
- inline void Lock(); // Block if needed until free then acquire exclusively
- inline void Unlock(); // Release a lock acquired via Lock()
-#ifdef CERES_GMUTEX_TRYLOCK
- inline bool TryLock(); // If free, Lock() and return true, else return false
-#endif
- // Note that on systems that don't support read-write locks, these may
- // be implemented as synonyms to Lock() and Unlock(). So you can use
- // these for efficiency, but don't use them anyplace where being able
- // to do shared reads is necessary to avoid deadlock.
- inline void ReaderLock(); // Block until free or shared then acquire a share
- inline void ReaderUnlock(); // Release a read share of this Mutex
- inline void WriterLock() { Lock(); } // Acquire an exclusive lock
- inline void WriterUnlock() { Unlock(); } // Release a lock from WriterLock()
-
- // TODO(hamaji): Do nothing, implement correctly.
- inline void AssertHeld() {}
-
- private:
- MutexType mutex_;
- // We want to make sure that the compiler sets is_safe_ to true only
- // when we tell it to, and never makes assumptions is_safe_ is
- // always true. volatile is the most reliable way to do that.
- volatile bool is_safe_;
-
- inline void SetIsSafe() { is_safe_ = true; }
-
- // Catch the error of writing Mutex when intending MutexLock.
- Mutex(Mutex* /*ignored*/) {}
- // Disallow "evil" constructors
- Mutex(const Mutex&);
- void operator=(const Mutex&);
-};
-
-// Now the implementation of Mutex for various systems
-#if defined(CERES_NO_THREADS)
-
-// When we don't have threads, we can be either reading or writing,
-// but not both. We can have lots of readers at once (in no-threads
-// mode, that's most likely to happen in recursive function calls),
-// but only one writer. We represent this by having mutex_ be -1 when
-// writing and a number > 0 when reading (and 0 when no lock is held).
-//
-// In debug mode, we assert these invariants, while in non-debug mode
-// we do nothing, for efficiency. That's why everything is in an
-// assert.
-
-Mutex::Mutex() : mutex_(0) { }
-Mutex::~Mutex() { assert(mutex_ == 0); }
-void Mutex::Lock() { assert(--mutex_ == -1); }
-void Mutex::Unlock() { assert(mutex_++ == -1); }
-#ifdef CERES_GMUTEX_TRYLOCK
-bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
-#endif
-void Mutex::ReaderLock() { assert(++mutex_ > 0); }
-void Mutex::ReaderUnlock() { assert(mutex_-- > 0); }
-
-#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
-
-Mutex::Mutex() { InitializeCriticalSection(&mutex_); SetIsSafe(); }
-Mutex::~Mutex() { DeleteCriticalSection(&mutex_); }
-void Mutex::Lock() { if (is_safe_) EnterCriticalSection(&mutex_); }
-void Mutex::Unlock() { if (is_safe_) LeaveCriticalSection(&mutex_); }
-#ifdef GMUTEX_TRYLOCK
-bool Mutex::TryLock() { return is_safe_ ?
- TryEnterCriticalSection(&mutex_) != 0 : true; }
-#endif
-void Mutex::ReaderLock() { Lock(); } // we don't have read-write locks
-void Mutex::ReaderUnlock() { Unlock(); }
-
-#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
-
-#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
- if (is_safe_ && fncall(&mutex_) != 0) abort(); \
-} while (0)
-
-Mutex::Mutex() {
- SetIsSafe();
- if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_rwlock_destroy); }
-void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_rwlock_wrlock); }
-void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
-#ifdef CERES_GMUTEX_TRYLOCK
-bool Mutex::TryLock() { return is_safe_ ?
- pthread_rwlock_trywrlock(&mutex_) == 0 :
- true; }
-#endif
-void Mutex::ReaderLock() { CERES_SAFE_PTHREAD(pthread_rwlock_rdlock); }
-void Mutex::ReaderUnlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
-#undef CERES_SAFE_PTHREAD
-
-#elif defined(CERES_HAVE_PTHREAD)
-
-#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
- if (is_safe_ && fncall(&mutex_) != 0) abort(); \
-} while (0)
-
-Mutex::Mutex() {
- SetIsSafe();
- if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
-}
-Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_mutex_destroy); }
-void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_mutex_lock); }
-void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_mutex_unlock); }
-#ifdef CERES_GMUTEX_TRYLOCK
-bool Mutex::TryLock() { return is_safe_ ?
- pthread_mutex_trylock(&mutex_) == 0 : true; }
-#endif
-void Mutex::ReaderLock() { Lock(); }
-void Mutex::ReaderUnlock() { Unlock(); }
-#undef CERES_SAFE_PTHREAD
-
-#endif
-
-// --------------------------------------------------------------------------
-// Some helper classes
-
-// Note: The weird "Ceres" prefix for the class is a workaround for having two
-// similar mutex.h files included in the same translation unit. This is a
-// problem because macros do not respect C++ namespaces, and as a result, this
-// does not work well (e.g. inside Chrome). The offending macros are
-// "MutexLock(x) COMPILE_ASSERT(false)". To work around this, "Ceres" is
-// prefixed to the class names; this permits defining the classes.
-
-// CeresMutexLock(mu) acquires mu when constructed and releases it
-// when destroyed.
-class CeresMutexLock {
- public:
- explicit CeresMutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
- ~CeresMutexLock() { mu_->Unlock(); }
- private:
- Mutex * const mu_;
- // Disallow "evil" constructors
- CeresMutexLock(const CeresMutexLock&);
- void operator=(const CeresMutexLock&);
-};
-
-// CeresReaderMutexLock and CeresWriterMutexLock do the same, for rwlocks
-class CeresReaderMutexLock {
- public:
- explicit CeresReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
- ~CeresReaderMutexLock() { mu_->ReaderUnlock(); }
- private:
- Mutex * const mu_;
- // Disallow "evil" constructors
- CeresReaderMutexLock(const CeresReaderMutexLock&);
- void operator=(const CeresReaderMutexLock&);
-};
-
-class CeresWriterMutexLock {
- public:
- explicit CeresWriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
- ~CeresWriterMutexLock() { mu_->WriterUnlock(); }
- private:
- Mutex * const mu_;
- // Disallow "evil" constructors
- CeresWriterMutexLock(const CeresWriterMutexLock&);
- void operator=(const CeresWriterMutexLock&);
-};
-
-// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
-#define CeresMutexLock(x) \
- COMPILE_ASSERT(0, ceres_mutex_lock_decl_missing_var_name)
-#define CeresReaderMutexLock(x) \
- COMPILE_ASSERT(0, ceres_rmutex_lock_decl_missing_var_name)
-#define CeresWriterMutexLock(x) \
- COMPILE_ASSERT(0, ceres_wmutex_lock_decl_missing_var_name)
-
-} // namespace internal
-} // namespace ceres
-
-#endif // CERES_INTERNAL_MUTEX_H_
diff --git a/extern/ceres/internal/ceres/normal_prior.cc b/extern/ceres/internal/ceres/normal_prior.cc
index b3666cd702f..a3d5d8ed772 100644
--- a/extern/ceres/internal/ceres/normal_prior.cc
+++ b/extern/ceres/internal/ceres/normal_prior.cc
@@ -33,7 +33,6 @@
#include <cstddef>
#include <vector>
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
#include "glog/logging.h"
diff --git a/extern/ceres/internal/ceres/pair_hash.h b/extern/ceres/internal/ceres/pair_hash.h
new file mode 100644
index 00000000000..80453bae7db
--- /dev/null
+++ b/extern/ceres/internal/ceres/pair_hash.h
@@ -0,0 +1,112 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// A hasher for std::pair<T, T>.
+
+#ifndef CERES_INTERNAL_PAIR_HASH_H_
+#define CERES_INTERNAL_PAIR_HASH_H_
+
+#include "ceres/internal/port.h"
+#include <cstdint>
+#include <utility>
+
+namespace ceres {
+namespace internal {
+
+#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
+#define GG_LONGLONG(x) x##I64
+#define GG_ULONGLONG(x) x##UI64
+#else
+#define GG_LONGLONG(x) x##LL
+#define GG_ULONGLONG(x) x##ULL
+#endif
+
+// The hash function is due to Bob Jenkins (see
+// http://burtleburtle.net/bob/hash/index.html). Each mix takes 36 instructions,
+// in 18 cycles if you're lucky. On x86 architectures, this requires 45
+// instructions in 27 cycles, if you're lucky.
+//
+// 32bit version
+inline void hash_mix(uint32_t& a, uint32_t& b, uint32_t& c) {
+ a -= b; a -= c; a ^= (c>>13);
+ b -= c; b -= a; b ^= (a<<8);
+ c -= a; c -= b; c ^= (b>>13);
+ a -= b; a -= c; a ^= (c>>12);
+ b -= c; b -= a; b ^= (a<<16);
+ c -= a; c -= b; c ^= (b>>5);
+ a -= b; a -= c; a ^= (c>>3);
+ b -= c; b -= a; b ^= (a<<10);
+ c -= a; c -= b; c ^= (b>>15);
+}
+
+// 64bit version
+inline void hash_mix(uint64_t& a, uint64_t& b, uint64_t& c) {
+ a -= b; a -= c; a ^= (c>>43);
+ b -= c; b -= a; b ^= (a<<9);
+ c -= a; c -= b; c ^= (b>>8);
+ a -= b; a -= c; a ^= (c>>38);
+ b -= c; b -= a; b ^= (a<<23);
+ c -= a; c -= b; c ^= (b>>5);
+ a -= b; a -= c; a ^= (c>>35);
+ b -= c; b -= a; b ^= (a<<49);
+ c -= a; c -= b; c ^= (b>>11);
+}
+
+inline uint32_t Hash32NumWithSeed(uint32_t num, uint32_t c) {
+ // The golden ratio; an arbitrary value.
+ uint32_t b = 0x9e3779b9UL;
+ hash_mix(num, b, c);
+ return c;
+}
+
+inline uint64_t Hash64NumWithSeed(uint64_t num, uint64_t c) {
+ // More of the golden ratio.
+ uint64_t b = GG_ULONGLONG(0xe08c1d668b756f82);
+ hash_mix(num, b, c);
+ return c;
+}
+
+// Hasher for STL pairs. Requires hashers for both members to be defined.
+struct pair_hash {
+ public:
+ template <typename T>
+ std::size_t operator()(const std::pair<T, T>& p) const {
+ const std::size_t h1 = std::hash<T>()(p.first);
+ const std::size_t h2 = std::hash<T>()(p.second);
+ // The decision below is at compile time
+ return (sizeof(h1) <= sizeof(uint32_t)) ? Hash32NumWithSeed(h1, h2)
+ : Hash64NumWithSeed(h1, h2);
+ }
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PAIR_HASH_H_
diff --git a/extern/ceres/internal/ceres/parallel_for.h b/extern/ceres/internal/ceres/parallel_for.h
new file mode 100644
index 00000000000..2da2320c137
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_for.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_PARALLEL_FOR_
+#define CERES_INTERNAL_PARALLEL_FOR_
+
+#include <functional>
+
+#include "ceres/context_impl.h"
+
+namespace ceres {
+namespace internal {
+
+// Returns the maximum number of threads supported by the threading backend
+// Ceres was compiled with.
+int MaxNumThreadsAvailable();
+
+// Execute the function for every element in the range [start, end) with at most
+// num_threads. It will execute all the work on the calling thread if
+// num_threads is 1.
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int)>& function);
+
+// Execute the function for every element in the range [start, end) with at most
+// num_threads. It will execute all the work on the calling thread if
+// num_threads is 1. Each invocation of function() will be passed a thread_id
+// in [0, num_threads) that is guaranteed to be distinct from the value passed
+// to any concurrent execution of function().
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int thread_id, int i)>& function);
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PARALLEL_FOR_H_
diff --git a/extern/ceres/internal/ceres/parallel_for_cxx.cc b/extern/ceres/internal/ceres/parallel_for_cxx.cc
new file mode 100644
index 00000000000..8e358f5900b
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_for_cxx.cc
@@ -0,0 +1,247 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX_THREADS
+
+#include "ceres/parallel_for.h"
+
+#include <cmath>
+#include <condition_variable>
+#include <memory>
+#include <mutex>
+
+#include "ceres/concurrent_queue.h"
+#include "ceres/scoped_thread_token.h"
+#include "ceres/thread_token_provider.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+// This class creates a thread safe barrier which will block until a
+// pre-specified number of threads call Finished. This allows us to block the
+// main thread until all the parallel threads are finished processing all the
+// work.
+class BlockUntilFinished {
+ public:
+ explicit BlockUntilFinished(int num_total)
+ : num_finished_(0), num_total_(num_total) {}
+
+ // Increment the number of jobs that have finished and signal the blocking
+ // thread if all jobs have finished.
+ void Finished() {
+ std::lock_guard<std::mutex> lock(mutex_);
+ ++num_finished_;
+ CHECK_LE(num_finished_, num_total_);
+ if (num_finished_ == num_total_) {
+ condition_.notify_one();
+ }
+ }
+
+ // Block until all threads have signaled they are finished.
+ void Block() {
+ std::unique_lock<std::mutex> lock(mutex_);
+ condition_.wait(lock, [&]() { return num_finished_ == num_total_; });
+ }
+
+ private:
+ std::mutex mutex_;
+ std::condition_variable condition_;
+ // The current number of jobs finished.
+ int num_finished_;
+ // The total number of jobs.
+ int num_total_;
+};
+
+// Shared state between the parallel tasks. Each thread will use this
+// information to get the next block of work to be performed.
+struct SharedState {
+ SharedState(int start, int end, int num_work_items)
+ : start(start),
+ end(end),
+ num_work_items(num_work_items),
+ i(0),
+ thread_token_provider(num_work_items),
+ block_until_finished(num_work_items) {}
+
+ // The start and end index of the for loop.
+ const int start;
+ const int end;
+ // The number of blocks that need to be processed.
+ const int num_work_items;
+
+ // The next block of work to be assigned to a worker. The parallel for loop
+ // range is split into num_work_items blocks of work, i.e. a single block of
+ // work is:
+ // for (int j = start + i; j < end; j += num_work_items) { ... }.
+ int i;
+ std::mutex mutex_i;
+
+ // Provides a unique thread ID among all active threads working on the same
+ // group of tasks. Thread-safe.
+ ThreadTokenProvider thread_token_provider;
+
+ // Used to signal when all the work has been completed. Thread safe.
+ BlockUntilFinished block_until_finished;
+};
+
+} // namespace
+
+int MaxNumThreadsAvailable() {
+ return ThreadPool::MaxNumThreadsAvailable();
+}
+
+// See ParallelFor (below) for more details.
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int)>& function) {
+ CHECK_GT(num_threads, 0);
+ CHECK(context != NULL);
+ if (end <= start) {
+ return;
+ }
+
+ // Fast path for when it is single threaded.
+ if (num_threads == 1) {
+ for (int i = start; i < end; ++i) {
+ function(i);
+ }
+ return;
+ }
+
+ ParallelFor(context, start, end, num_threads,
+ [&function](int /*thread_id*/, int i) { function(i); });
+}
+
+// This implementation uses a fixed size max worker pool with a shared task
+// queue. The problem of executing the function for the interval of [start, end)
+// is broken up into at most num_threads blocks and added to the thread pool. To
+// avoid deadlocks, the calling thread is allowed to steal work from the worker
+// pool. This is implemented via a shared state between the tasks. In order for
+// the calling thread or thread pool to get a block of work, it will query the
+// shared state for the next block of work to be done. If there is nothing left,
+// it will return. We will exit the ParallelFor call when all of the work has
+// been done, not when all of the tasks have been popped off the task queue.
+//
+// A unique thread ID among all active tasks will be acquired once for each
+// block of work. This avoids the significant performance penalty for acquiring
+// it on every iteration of the for loop. The thread ID is guaranteed to be in
+// [0, num_threads).
+//
+// A performance analysis has shown this implementation is onpar with OpenMP and
+// TBB.
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int thread_id, int i)>& function) {
+ CHECK_GT(num_threads, 0);
+ CHECK(context != NULL);
+ if (end <= start) {
+ return;
+ }
+
+ // Fast path for when it is single threaded.
+ if (num_threads == 1) {
+ // Even though we only have one thread, use the thread token provider to
+ // guarantee the exact same behavior when running with multiple threads.
+ ThreadTokenProvider thread_token_provider(num_threads);
+ const ScopedThreadToken scoped_thread_token(&thread_token_provider);
+ const int thread_id = scoped_thread_token.token();
+ for (int i = start; i < end; ++i) {
+ function(thread_id, i);
+ }
+ return;
+ }
+
+ // We use a std::shared_ptr because the main thread can finish all
+ // the work before the tasks have been popped off the queue. So the
+ // shared state needs to exist for the duration of all the tasks.
+ const int num_work_items = std::min((end - start), num_threads);
+ std::shared_ptr<SharedState> shared_state(
+ new SharedState(start, end, num_work_items));
+
+ // A function which tries to perform a chunk of work. This returns false if
+ // there is no work to be done.
+ auto task_function = [shared_state, &function]() {
+ int i = 0;
+ {
+ // Get the next available chunk of work to be performed. If there is no
+ // work, return false.
+ std::lock_guard<std::mutex> lock(shared_state->mutex_i);
+ if (shared_state->i >= shared_state->num_work_items) {
+ return false;
+ }
+ i = shared_state->i;
+ ++shared_state->i;
+ }
+
+ const ScopedThreadToken scoped_thread_token(
+ &shared_state->thread_token_provider);
+ const int thread_id = scoped_thread_token.token();
+
+ // Perform each task.
+ for (int j = shared_state->start + i;
+ j < shared_state->end;
+ j += shared_state->num_work_items) {
+ function(thread_id, j);
+ }
+ shared_state->block_until_finished.Finished();
+ return true;
+ };
+
+ // Add all the tasks to the thread pool.
+ for (int i = 0; i < num_work_items; ++i) {
+ // Note we are taking the task_function as value so the shared_state
+ // shared pointer is copied and the ref count is increased. This is to
+ // prevent it from being deleted when the main thread finishes all the
+ // work and exits before the threads finish.
+ context->thread_pool.AddTask([task_function]() { task_function(); });
+ }
+
+ // Try to do any available work on the main thread. This may steal work from
+ // the thread pool, but when there is no work left the thread pool tasks
+ // will be no-ops.
+ while (task_function()) {
+ }
+
+ // Wait until all tasks have finished.
+ shared_state->block_until_finished.Block();
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_USE_CXX_THREADS
diff --git a/extern/ceres/internal/ceres/parallel_for_nothreads.cc b/extern/ceres/internal/ceres/parallel_for_nothreads.cc
new file mode 100644
index 00000000000..e8f450a714d
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_for_nothreads.cc
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: alexs.mac@gmail.com (Alex Stewart)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_NO_THREADS
+
+#include "ceres/parallel_for.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+int MaxNumThreadsAvailable() { return 1; }
+
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int)>& function) {
+ CHECK_GT(num_threads, 0);
+ CHECK(context != NULL);
+ if (end <= start) {
+ return;
+ }
+ for (int i = start; i < end; ++i) {
+ function(i);
+ }
+}
+
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int thread_id, int i)>& function) {
+ CHECK_GT(num_threads, 0);
+ CHECK(context != NULL);
+ if (end <= start) {
+ return;
+ }
+ const int thread_id = 0;
+ for (int i = start; i < end; ++i) {
+ function(thread_id, i);
+ }
+}
+
+}  // namespace internal
+}  // namespace ceres
+
+#endif // CERES_NO_THREADS
diff --git a/extern/ceres/internal/ceres/parallel_for_openmp.cc b/extern/ceres/internal/ceres/parallel_for_openmp.cc
new file mode 100644
index 00000000000..8afe3b11f8d
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_for_openmp.cc
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#if defined(CERES_USE_OPENMP)
+
+#include "ceres/parallel_for.h"
+
+#include "ceres/scoped_thread_token.h"
+#include "ceres/thread_token_provider.h"
+#include "glog/logging.h"
+#include "omp.h"
+
+namespace ceres {
+namespace internal {
+
+int MaxNumThreadsAvailable() {
+ return omp_get_max_threads();
+}
+
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int)>& function) {
+ CHECK_GT(num_threads, 0);
+ CHECK(context != NULL);
+ if (end <= start) {
+ return;
+ }
+
+#ifdef CERES_USE_OPENMP
+#pragma omp parallel for num_threads(num_threads) \
+ schedule(dynamic) if (num_threads > 1)
+#endif // CERES_USE_OPENMP
+ for (int i = start; i < end; ++i) {
+ function(i);
+ }
+}
+
+void ParallelFor(ContextImpl* context,
+ int start,
+ int end,
+ int num_threads,
+ const std::function<void(int thread_id, int i)>& function) {
+ CHECK(context != NULL);
+
+ ThreadTokenProvider thread_token_provider(num_threads);
+ ParallelFor(context, start, end, num_threads, [&](int i) {
+ const ScopedThreadToken scoped_thread_token(&thread_token_provider);
+ const int thread_id = scoped_thread_token.token();
+ function(thread_id, i);
+ });
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // defined(CERES_USE_OPENMP)
diff --git a/extern/ceres/internal/ceres/parallel_utils.cc b/extern/ceres/internal/ceres/parallel_utils.cc
new file mode 100644
index 00000000000..e1cb5f979ec
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_utils.cc
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+
+#include "ceres/parallel_utils.h"
+
+namespace ceres {
+namespace internal {
+
+void LinearIndexToUpperTriangularIndex(int k, int n, int* i, int* j) {
+ // This works by unfolding a rectangle into a triangle.
+ // Say n is even. 4 is a nice even number. The 10 i,j pairs that we
+ // want to produce are:
+ // 0,0 0,1 0,2 0,3
+ // 1,1 1,2 1,3
+ // 2,2 2,3
+ // 3,3
+ // This triangle can be folded into a 5x2 rectangle:
+ // 3,3 0,0 0,1 0,2 0,3
+ // 2,2 2,3 1,1 1,2 1,3
+
+ // If N is odd, say 5, then the 15 i,j pairs are:
+ // 0,0 0,1 0,2 0,3 0,4
+ // 1,1 1,2 1,3 1,4
+  //  2,2 2,3 2,4
+ // 3,3 3,4
+ // 4,4
+ // which folds to a 5x3 rectangle:
+ // 0,0 0,1 0,2 0,3 0,4
+ // 4,4 1,1 1,2 1,3 1,4
+ // 3,3 3,4 2,2 2,3 2,4
+
+ // All this function does is map the linear iteration position to a
+ // location in the rectangle and work out the appropriate (i, j) for that
+ // location.
+ if (n & 1) {
+ // Odd n. The tip of the triangle is on row 1.
+ int w = n; // Width of the rectangle to unfold
+ int i0 = k / w;
+ int j0 = k % w;
+ if (j0 >= i0) {
+ *i = i0;
+ *j = j0;
+ } else {
+ *i = n - i0;
+ *j = *i + j0;
+ }
+ } else {
+ // Even n. The tip of the triangle is on row 0, making it one wider.
+ int w = n + 1;
+ int i0 = k / w;
+ int j0 = k % w;
+ if (j0 > i0) {
+ *i = i0;
+ *j = j0 - 1;
+ } else {
+ *i = n - 1 - i0;
+ *j = *i + j0;
+ }
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/parallel_utils.h b/extern/ceres/internal/ceres/parallel_utils.h
new file mode 100644
index 00000000000..1291428228a
--- /dev/null
+++ b/extern/ceres/internal/ceres/parallel_utils.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wjr@google.com (William Rucklidge)
+
+#ifndef CERES_INTERNAL_PARALLEL_UTILS_H_
+#define CERES_INTERNAL_PARALLEL_UTILS_H_
+
+namespace ceres {
+namespace internal {
+
+// Converts a linear iteration order into a triangular iteration order.
+// Suppose you have nested loops that look like
+// for (int i = 0; i < n; i++) {
+// for (int j = i; j < n; j++) {
+// ... use i and j
+// }
+// }
+// Naively using ParallelFor to parallelise those loops might look like
+// ParallelFor(..., 0, n * n, num_threads,
+// [](int thread_id, int k) {
+// int i = k / n, j = k % n;
+// if (j < i) return;
+// ...
+// });
+// but these empty work items can lead to very unbalanced threading. Instead,
+// do this:
+// int actual_work_items = (n * (n + 1)) / 2;
+// ParallelFor(..., 0, actual_work_items, num_threads,
+// [](int thread_id, int k) {
+// int i, j;
+//          LinearIndexToUpperTriangularIndex(k, n, &i, &j);
+// ...
+// });
+// which in each iteration will produce i and j satisfying
+// 0 <= i <= j < n
+void LinearIndexToUpperTriangularIndex(int k, int n, int* i, int* j);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_PARALLEL_UTILS_H_
diff --git a/extern/ceres/internal/ceres/parameter_block.h b/extern/ceres/internal/ceres/parameter_block.h
index 8e21553c668..88943dfbcff 100644
--- a/extern/ceres/internal/ceres/parameter_block.h
+++ b/extern/ceres/internal/ceres/parameter_block.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,15 +32,16 @@
#define CERES_INTERNAL_PARAMETER_BLOCK_H_
#include <algorithm>
+#include <cstdint>
#include <cstdlib>
#include <limits>
+#include <memory>
#include <string>
+#include <unordered_set>
+
#include "ceres/array_utils.h"
-#include "ceres/collections_port.h"
-#include "ceres/integral_types.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
#include "ceres/stringprintf.h"
#include "glog/logging.h"
@@ -62,29 +63,28 @@ class ResidualBlock;
// responsible for the proper disposal of the local parameterization.
class ParameterBlock {
public:
- // TODO(keir): Decide what data structure is best here. Should this be a set?
- // Probably not, because sets are memory inefficient. However, if it's a
- // vector, you can get into pathological linear performance when removing a
- // residual block from a problem where all the residual blocks depend on one
- // parameter; for example, shared focal length in a bundle adjustment
- // problem. It might be worth making a custom structure that is just an array
- // when it is small, but transitions to a hash set when it has more elements.
- //
- // For now, use a hash set.
- typedef HashSet<ResidualBlock*> ResidualBlockSet;
+ typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
// Create a parameter block with the user state, size, and index specified.
// The size is the size of the parameter block and the index is the position
// of the parameter block inside a Program (if any).
- ParameterBlock(double* user_state, int size, int index) {
- Init(user_state, size, index, NULL);
- }
+ ParameterBlock(double* user_state, int size, int index)
+ : user_state_(user_state),
+ size_(size),
+ state_(user_state),
+ index_(index) {}
ParameterBlock(double* user_state,
int size,
int index,
- LocalParameterization* local_parameterization) {
- Init(user_state, size, index, local_parameterization);
+ LocalParameterization* local_parameterization)
+ : user_state_(user_state),
+ size_(size),
+ state_(user_state),
+ index_(index) {
+ if (local_parameterization != nullptr) {
+ SetParameterization(local_parameterization);
+ }
}
// The size of the parameter block.
@@ -92,12 +92,10 @@ class ParameterBlock {
// Manipulate the parameter state.
bool SetState(const double* x) {
- CHECK(x != NULL)
- << "Tried to set the state of constant parameter "
- << "with user location " << user_state_;
- CHECK(!is_constant_)
- << "Tried to set the state of constant parameter "
- << "with user location " << user_state_;
+ CHECK(x != nullptr) << "Tried to set the state of constant parameter "
+ << "with user location " << user_state_;
+ CHECK(!IsConstant()) << "Tried to set the state of constant parameter "
+ << "with user location " << user_state_;
state_ = x;
return UpdateLocalParameterizationJacobian();
@@ -106,9 +104,9 @@ class ParameterBlock {
// Copy the current parameter state out to x. This is "GetState()" rather than
// simply "state()" since it is actively copying the data into the passed
// pointer.
- void GetState(double *x) const {
+ void GetState(double* x) const {
if (x != state_) {
- memcpy(x, state_, sizeof(*state_) * size_);
+ std::copy(state_, state_ + size_, x);
}
}
@@ -116,7 +114,7 @@ class ParameterBlock {
const double* state() const { return state_; }
const double* user_state() const { return user_state_; }
double* mutable_user_state() { return user_state_; }
- LocalParameterization* local_parameterization() const {
+ const LocalParameterization* local_parameterization() const {
return local_parameterization_;
}
LocalParameterization* mutable_local_parameterization() {
@@ -124,9 +122,22 @@ class ParameterBlock {
}
// Set this parameter block to vary or not.
- void SetConstant() { is_constant_ = true; }
- void SetVarying() { is_constant_ = false; }
- bool IsConstant() const { return is_constant_; }
+ void SetConstant() { is_set_constant_ = true; }
+ void SetVarying() { is_set_constant_ = false; }
+ bool IsConstant() const { return (is_set_constant_ || LocalSize() == 0); }
+
+ double UpperBound(int index) const {
+ return (upper_bounds_ ? upper_bounds_[index]
+ : std::numeric_limits<double>::max());
+ }
+
+ double LowerBound(int index) const {
+ return (lower_bounds_ ? lower_bounds_[index]
+ : -std::numeric_limits<double>::max());
+ }
+
+  // True iff SetUpperBound/SetLowerBound has materialized a bounds array,
+  // i.e. the parameter block actually carries finite bound constraints.
+  // (A null bounds pointer means the block is unconstrained on that side,
+  // consistent with UpperBound()/LowerBound() above.)
+  bool IsUpperBounded() const { return (upper_bounds_ != nullptr); }
+  bool IsLowerBounded() const { return (lower_bounds_ != nullptr); }
// This parameter block's index in an array.
int index() const { return index_; }
@@ -142,7 +153,7 @@ class ParameterBlock {
// Methods relating to the parameter block's parameterization.
- // The local to global jacobian. Returns NULL if there is no local
+ // The local to global jacobian. Returns nullptr if there is no local
// parameterization for this parameter block. The returned matrix is row-major
// and has Size() rows and LocalSize() columns.
const double* LocalParameterizationJacobian() const {
@@ -150,27 +161,24 @@ class ParameterBlock {
}
int LocalSize() const {
- return (local_parameterization_ == NULL)
- ? size_
- : local_parameterization_->LocalSize();
+ return (local_parameterization_ == nullptr)
+ ? size_
+ : local_parameterization_->LocalSize();
}
- // Set the parameterization. The parameterization can be set exactly once;
- // multiple calls to set the parameterization to different values will crash.
- // It is an error to pass NULL for the parameterization. The parameter block
- // does not take ownership of the parameterization.
+ // Set the parameterization. The parameter block does not take
+ // ownership of the parameterization.
void SetParameterization(LocalParameterization* new_parameterization) {
- CHECK(new_parameterization != NULL) << "NULL parameterization invalid.";
// Nothing to do if the new parameterization is the same as the
// old parameterization.
if (new_parameterization == local_parameterization_) {
return;
}
- CHECK(local_parameterization_ == NULL)
- << "Can't re-set the local parameterization; it leads to "
- << "ambiguous ownership. Current local parameterization is: "
- << local_parameterization_;
+ if (new_parameterization == nullptr) {
+ local_parameterization_ = nullptr;
+ return;
+ }
CHECK(new_parameterization->GlobalSize() == size_)
<< "Invalid parameterization for parameter block. The parameter block "
@@ -178,9 +186,9 @@ class ParameterBlock {
<< "size of " << new_parameterization->GlobalSize() << ". Did you "
<< "accidentally use the wrong parameter block or parameterization?";
- CHECK_GT(new_parameterization->LocalSize(), 0)
- << "Invalid parameterization. Parameterizations must have a positive "
- << "dimensional tangent space.";
+ CHECK_GE(new_parameterization->LocalSize(), 0)
+ << "Invalid parameterization. Parameterizations must have a "
+ << "non-negative dimensional tangent space.";
local_parameterization_ = new_parameterization;
local_parameterization_jacobian_.reset(
@@ -194,7 +202,11 @@ class ParameterBlock {
void SetUpperBound(int index, double upper_bound) {
CHECK_LT(index, size_);
- if (upper_bounds_.get() == NULL) {
+ if (upper_bound >= std::numeric_limits<double>::max() && !upper_bounds_) {
+ return;
+ }
+
+ if (!upper_bounds_) {
upper_bounds_.reset(new double[size_]);
std::fill(upper_bounds_.get(),
upper_bounds_.get() + size_,
@@ -207,7 +219,11 @@ class ParameterBlock {
void SetLowerBound(int index, double lower_bound) {
CHECK_LT(index, size_);
- if (lower_bounds_.get() == NULL) {
+ if (lower_bound <= -std::numeric_limits<double>::max() && !lower_bounds_) {
+ return;
+ }
+
+ if (!lower_bounds_) {
lower_bounds_.reset(new double[size_]);
std::fill(lower_bounds_.get(),
lower_bounds_.get() + size_,
@@ -220,24 +236,24 @@ class ParameterBlock {
// Generalization of the addition operation. This is the same as
// LocalParameterization::Plus() followed by projection onto the
// hyper cube implied by the bounds constraints.
- bool Plus(const double *x, const double* delta, double* x_plus_delta) {
- if (local_parameterization_ != NULL) {
+ bool Plus(const double* x, const double* delta, double* x_plus_delta) {
+ if (local_parameterization_ != nullptr) {
if (!local_parameterization_->Plus(x, delta, x_plus_delta)) {
return false;
}
} else {
- VectorRef(x_plus_delta, size_) = ConstVectorRef(x, size_) +
- ConstVectorRef(delta, size_);
+ VectorRef(x_plus_delta, size_) =
+ ConstVectorRef(x, size_) + ConstVectorRef(delta, size_);
}
// Project onto the box constraints.
- if (lower_bounds_.get() != NULL) {
+ if (lower_bounds_.get() != nullptr) {
for (int i = 0; i < size_; ++i) {
x_plus_delta[i] = std::max(x_plus_delta[i], lower_bounds_[i]);
}
}
- if (upper_bounds_.get() != NULL) {
+ if (upper_bounds_.get() != nullptr) {
for (int i = 0; i < size_; ++i) {
x_plus_delta[i] = std::min(x_plus_delta[i], upper_bounds_[i]);
}
@@ -247,35 +263,36 @@ class ParameterBlock {
}
std::string ToString() const {
- return StringPrintf("{ this=%p, user_state=%p, state=%p, size=%d, "
- "constant=%d, index=%d, state_offset=%d, "
- "delta_offset=%d }",
- this,
- user_state_,
- state_,
- size_,
- is_constant_,
- index_,
- state_offset_,
- delta_offset_);
+ return StringPrintf(
+ "{ this=%p, user_state=%p, state=%p, size=%d, "
+ "constant=%d, index=%d, state_offset=%d, "
+ "delta_offset=%d }",
+ this,
+ user_state_,
+ state_,
+ size_,
+ is_set_constant_,
+ index_,
+ state_offset_,
+ delta_offset_);
}
void EnableResidualBlockDependencies() {
- CHECK(residual_blocks_.get() == NULL)
+ CHECK(residual_blocks_.get() == nullptr)
<< "Ceres bug: There is already a residual block collection "
<< "for parameter block: " << ToString();
residual_blocks_.reset(new ResidualBlockSet);
}
void AddResidualBlock(ResidualBlock* residual_block) {
- CHECK(residual_blocks_.get() != NULL)
+ CHECK(residual_blocks_.get() != nullptr)
<< "Ceres bug: The residual block collection is null for parameter "
<< "block: " << ToString();
residual_blocks_->insert(residual_block);
}
void RemoveResidualBlock(ResidualBlock* residual_block) {
- CHECK(residual_blocks_.get() != NULL)
+ CHECK(residual_blocks_.get() != nullptr)
<< "Ceres bug: The residual block collection is null for parameter "
<< "block: " << ToString();
CHECK(residual_blocks_->find(residual_block) != residual_blocks_->end())
@@ -285,12 +302,10 @@ class ParameterBlock {
// This is only intended for iterating; perhaps this should only expose
// .begin() and .end().
- ResidualBlockSet* mutable_residual_blocks() {
- return residual_blocks_.get();
- }
+ ResidualBlockSet* mutable_residual_blocks() { return residual_blocks_.get(); }
double LowerBoundForParameter(int index) const {
- if (lower_bounds_.get() == NULL) {
+ if (lower_bounds_.get() == nullptr) {
return -std::numeric_limits<double>::max();
} else {
return lower_bounds_[index];
@@ -298,7 +313,7 @@ class ParameterBlock {
}
double UpperBoundForParameter(int index) const {
- if (upper_bounds_.get() == NULL) {
+ if (upper_bounds_.get() == nullptr) {
return std::numeric_limits<double>::max();
} else {
return upper_bounds_[index];
@@ -306,27 +321,8 @@ class ParameterBlock {
}
private:
- void Init(double* user_state,
- int size,
- int index,
- LocalParameterization* local_parameterization) {
- user_state_ = user_state;
- size_ = size;
- index_ = index;
- is_constant_ = false;
- state_ = user_state_;
-
- local_parameterization_ = NULL;
- if (local_parameterization != NULL) {
- SetParameterization(local_parameterization);
- }
-
- state_offset_ = -1;
- delta_offset_ = -1;
- }
-
bool UpdateLocalParameterizationJacobian() {
- if (local_parameterization_ == NULL) {
+ if (local_parameterization_ == nullptr) {
return true;
}
@@ -335,13 +331,12 @@ class ParameterBlock {
// at that time.
const int jacobian_size = Size() * LocalSize();
- InvalidateArray(jacobian_size,
- local_parameterization_jacobian_.get());
+ InvalidateArray(jacobian_size, local_parameterization_jacobian_.get());
if (!local_parameterization_->ComputeJacobian(
- state_,
- local_parameterization_jacobian_.get())) {
+ state_, local_parameterization_jacobian_.get())) {
LOG(WARNING) << "Local parameterization Jacobian computation failed"
- "for x: " << ConstVectorRef(state_, Size()).transpose();
+ "for x: "
+ << ConstVectorRef(state_, Size()).transpose();
return false;
}
@@ -358,30 +353,30 @@ class ParameterBlock {
return true;
}
- double* user_state_;
- int size_;
- bool is_constant_;
- LocalParameterization* local_parameterization_;
+ double* user_state_ = nullptr;
+ int size_ = -1;
+ bool is_set_constant_ = false;
+ LocalParameterization* local_parameterization_ = nullptr;
// The "state" of the parameter. These fields are only needed while the
// solver is running. While at first glance using mutable is a bad idea, this
// ends up simplifying the internals of Ceres enough to justify the potential
// pitfalls of using "mutable."
- mutable const double* state_;
- mutable scoped_array<double> local_parameterization_jacobian_;
+ mutable const double* state_ = nullptr;
+ mutable std::unique_ptr<double[]> local_parameterization_jacobian_;
// The index of the parameter. This is used by various other parts of Ceres to
// permit switching from a ParameterBlock* to an index in another array.
- int32 index_;
+ int index_ = -1;
// The offset of this parameter block inside a larger state vector.
- int32 state_offset_;
+ int state_offset_ = -1;
// The offset of this parameter block inside a larger delta vector.
- int32 delta_offset_;
+ int delta_offset_ = -1;
// If non-null, contains the residual blocks this parameter block is in.
- scoped_ptr<ResidualBlockSet> residual_blocks_;
+ std::unique_ptr<ResidualBlockSet> residual_blocks_;
// Upper and lower bounds for the parameter block. SetUpperBound
// and SetLowerBound lazily initialize the upper_bounds_ and
@@ -394,8 +389,8 @@ class ParameterBlock {
// std::numeric_limits<double>::max() and
// -std::numeric_limits<double>::max() respectively which correspond
// to the parameter block being unconstrained.
- scoped_array<double> upper_bounds_;
- scoped_array<double> lower_bounds_;
+ std::unique_ptr<double[]> upper_bounds_;
+ std::unique_ptr<double[]> lower_bounds_;
// Necessary so ProblemImpl can clean up the parameterizations.
friend class ProblemImpl;
diff --git a/extern/ceres/internal/ceres/parameter_block_ordering.cc b/extern/ceres/internal/ceres/parameter_block_ordering.cc
index efba339977c..ef521c0e11b 100644
--- a/extern/ceres/internal/ceres/parameter_block_ordering.cc
+++ b/extern/ceres/internal/ceres/parameter_block_ordering.cc
@@ -30,9 +30,11 @@
#include "ceres/parameter_block_ordering.h"
+#include <memory>
+#include <unordered_set>
+
#include "ceres/graph.h"
#include "ceres/graph_algorithms.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
@@ -49,13 +51,14 @@ using std::vector;
int ComputeStableSchurOrdering(const Program& program,
vector<ParameterBlock*>* ordering) {
- CHECK_NOTNULL(ordering)->clear();
+ CHECK(ordering != nullptr);
+ ordering->clear();
EventLogger event_logger("ComputeStableSchurOrdering");
- scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
event_logger.AddEvent("CreateHessianGraph");
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
- const HashSet<ParameterBlock*>& vertices = graph->vertices();
+ const std::unordered_set<ParameterBlock*>& vertices = graph->vertices();
for (int i = 0; i < parameter_blocks.size(); ++i) {
if (vertices.count(parameter_blocks[i]) > 0) {
ordering->push_back(parameter_blocks[i]);
@@ -80,9 +83,10 @@ int ComputeStableSchurOrdering(const Program& program,
int ComputeSchurOrdering(const Program& program,
vector<ParameterBlock*>* ordering) {
- CHECK_NOTNULL(ordering)->clear();
+ CHECK(ordering != nullptr);
+ ordering->clear();
- scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
int independent_set_size = IndependentSetOrdering(*graph, ordering);
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
@@ -99,9 +103,10 @@ int ComputeSchurOrdering(const Program& program,
void ComputeRecursiveIndependentSetOrdering(const Program& program,
ParameterBlockOrdering* ordering) {
- CHECK_NOTNULL(ordering)->Clear();
+ CHECK(ordering != nullptr);
+ ordering->Clear();
const vector<ParameterBlock*> parameter_blocks = program.parameter_blocks();
- scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
+ std::unique_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program));
int num_covered = 0;
int round = 0;
@@ -120,7 +125,8 @@ void ComputeRecursiveIndependentSetOrdering(const Program& program,
}
Graph<ParameterBlock*>* CreateHessianGraph(const Program& program) {
- Graph<ParameterBlock*>* graph = CHECK_NOTNULL(new Graph<ParameterBlock*>);
+ Graph<ParameterBlock*>* graph = new Graph<ParameterBlock*>;
+ CHECK(graph != nullptr);
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
ParameterBlock* parameter_block = parameter_blocks[i];
@@ -155,17 +161,16 @@ Graph<ParameterBlock*>* CreateHessianGraph(const Program& program) {
void OrderingToGroupSizes(const ParameterBlockOrdering* ordering,
vector<int>* group_sizes) {
- CHECK_NOTNULL(group_sizes)->clear();
+ CHECK(group_sizes != nullptr);
+ group_sizes->clear();
if (ordering == NULL) {
return;
}
- const map<int, set<double*> >& group_to_elements =
+ const map<int, set<double*>>& group_to_elements =
ordering->group_to_elements();
- for (map<int, set<double*> >::const_iterator it = group_to_elements.begin();
- it != group_to_elements.end();
- ++it) {
- group_sizes->push_back(it->second.size());
+ for (const auto& g_t_e : group_to_elements) {
+ group_sizes->push_back(g_t_e.second.size());
}
}
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view.cc b/extern/ceres/internal/ceres/partitioned_matrix_view.cc
index 8054964e039..d7a998d68a3 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view.cc
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
#include "ceres/linear_solver.h"
#include "ceres/partitioned_matrix_view.h"
@@ -51,126 +50,105 @@ PartitionedMatrixViewBase*
PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 2)) {
- return new PartitionedMatrixView<2, 2, 2>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 2, 3>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 2, 4>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 3, 3>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 3, 4>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 6)) {
- return new PartitionedMatrixView<2, 3, 6>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 9)) {
- return new PartitionedMatrixView<2, 3, 9>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 3)) {
- return new PartitionedMatrixView<2, 4, 3>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 4)) {
- return new PartitionedMatrixView<2, 4, 4>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 8)) {
- return new PartitionedMatrixView<2, 4, 8>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 9)) {
- return new PartitionedMatrixView<2, 4, 9>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == Eigen::Dynamic) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 2)) {
- return new PartitionedMatrixView<4, 4, 2>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 3)) {
- return new PartitionedMatrixView<4, 4, 3>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 4)) {
- return new PartitionedMatrixView<4, 4, 4>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
- if ((options.row_block_size == Eigen::Dynamic) &&
- (options.e_block_size == Eigen::Dynamic) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(
- matrix, options.elimination_groups[0]);
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 2)) {
+ return new PartitionedMatrixView<2, 2, 2>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 2, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 2, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2)) {
+ return new PartitionedMatrixView<2, 2, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 3, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 3, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 6)) {
+ return new PartitionedMatrixView<2, 3, 6>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 9)) {
+ return new PartitionedMatrixView<2, 3, 9>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3)) {
+ return new PartitionedMatrixView<2, 3, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<2, 4, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<2, 4, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 6)) {
+ return new PartitionedMatrixView<2, 4, 6>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 8)) {
+ return new PartitionedMatrixView<2, 4, 8>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 9)) {
+ return new PartitionedMatrixView<2, 4, 9>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4)) {
+ return new PartitionedMatrixView<2, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if (options.row_block_size == 2){
+ return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 3) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<3, 3, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 2)) {
+ return new PartitionedMatrixView<4, 4, 2>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new PartitionedMatrixView<4, 4, 3>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new PartitionedMatrixView<4, 4, 4>(matrix, options.elimination_groups[0]);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4)) {
+ return new PartitionedMatrixView<4, 4, Eigen::Dynamic>(matrix, options.elimination_groups[0]);
+ }
#endif
VLOG(1) << "Template specializations not found for <"
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view.h b/extern/ceres/internal/ceres/partitioned_matrix_view.h
index 6e75060a47e..3853ea10dd6 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view.h
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view.h
@@ -119,20 +119,20 @@ class PartitionedMatrixView : public PartitionedMatrixViewBase {
PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e);
virtual ~PartitionedMatrixView();
- virtual void LeftMultiplyE(const double* x, double* y) const;
- virtual void LeftMultiplyF(const double* x, double* y) const;
- virtual void RightMultiplyE(const double* x, double* y) const;
- virtual void RightMultiplyF(const double* x, double* y) const;
- virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const;
- virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const;
- virtual void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const;
- virtual void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const;
- virtual int num_col_blocks_e() const { return num_col_blocks_e_; }
- virtual int num_col_blocks_f() const { return num_col_blocks_f_; }
- virtual int num_cols_e() const { return num_cols_e_; }
- virtual int num_cols_f() const { return num_cols_f_; }
- virtual int num_rows() const { return matrix_.num_rows(); }
- virtual int num_cols() const { return matrix_.num_cols(); }
+ void LeftMultiplyE(const double* x, double* y) const final;
+ void LeftMultiplyF(const double* x, double* y) const final;
+ void RightMultiplyE(const double* x, double* y) const final;
+ void RightMultiplyF(const double* x, double* y) const final;
+ BlockSparseMatrix* CreateBlockDiagonalEtE() const final;
+ BlockSparseMatrix* CreateBlockDiagonalFtF() const final;
+ void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const final;
+ void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const final;
+ int num_col_blocks_e() const final { return num_col_blocks_e_; }
+ int num_col_blocks_f() const final { return num_col_blocks_f_; }
+ int num_cols_e() const final { return num_cols_e_; }
+ int num_cols_f() const final { return num_cols_f_; }
+ int num_rows() const final { return matrix_.num_rows(); }
+ int num_cols() const final { return matrix_.num_cols(); }
private:
BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block,
diff --git a/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h b/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
index 86fb278fa27..f3f548c7a80 100644
--- a/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view_impl.h
@@ -50,7 +50,7 @@ PartitionedMatrixView(
: matrix_(matrix),
num_col_blocks_e_(num_col_blocks_e) {
const CompressedRowBlockStructure* bs = matrix_.block_structure();
- CHECK_NOTNULL(bs);
+ CHECK(bs != nullptr);
num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_;
diff --git a/extern/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py b/extern/ceres/internal/ceres/partitioned_matrix_view_template.py
index c4ac3cf2332..7894523cdea 100644
--- a/extern/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py
+++ b/extern/ceres/internal/ceres/partitioned_matrix_view_template.py
@@ -46,29 +46,8 @@
# The list of tuples, specializations indicates the set of
# specializations that is generated.
-# Set of template specializations to generate
-SPECIALIZATIONS = [(2, 2, 2),
- (2, 2, 3),
- (2, 2, 4),
- (2, 2, "Eigen::Dynamic"),
- (2, 3, 3),
- (2, 3, 4),
- (2, 3, 6),
- (2, 3, 9),
- (2, 3, "Eigen::Dynamic"),
- (2, 4, 3),
- (2, 4, 4),
- (2, 4, 8),
- (2, 4, 9),
- (2, 4, "Eigen::Dynamic"),
- (2, "Eigen::Dynamic", "Eigen::Dynamic"),
- (4, 4, 2),
- (4, 4, 3),
- (4, 4, 4),
- (4, 4, "Eigen::Dynamic"),
- ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -106,8 +85,7 @@ HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_partitioned_matrix_view_specializations.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
"""
DYNAMIC_FILE = """
@@ -157,14 +135,7 @@ PartitionedMatrixViewBase::Create(const LinearSolver::Options& options,
const BlockSparseMatrix& matrix) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
-
-FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
- (options.e_block_size == %s) &&
- (options.f_block_size == %s)) {
- return new PartitionedMatrixView<%s, %s, %s>(
- matrix, options.elimination_groups[0]);
- }
-"""
+FACTORY = """ return new PartitionedMatrixView<%s, %s, %s>(matrix, options.elimination_groups[0]);"""
FACTORY_FOOTER = """
#endif
@@ -179,54 +150,3 @@ FACTORY_FOOTER = """
} // namespace internal
} // namespace ceres
"""
-
-
-def SuffixForSize(size):
- if size == "Eigen::Dynamic":
- return "d"
- return str(size)
-
-
-def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
- return "_".join([prefix] + map(SuffixForSize, (row_block_size,
- e_block_size,
- f_block_size)))
-
-
-def Specialize():
- """
- Generate specialization code and the conditionals to instantiate it.
- """
- f = open("partitioned_matrix_view.cc", "w")
- f.write(HEADER)
- f.write(FACTORY_FILE_HEADER)
-
- for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
- output = SpecializationFilename("generated/partitioned_matrix_view",
- row_block_size,
- e_block_size,
- f_block_size) + ".cc"
- fptr = open(output, "w")
- fptr.write(HEADER)
-
- template = SPECIALIZATION_FILE
- if (row_block_size == "Eigen::Dynamic" and
- e_block_size == "Eigen::Dynamic" and
- f_block_size == "Eigen::Dynamic"):
- template = DYNAMIC_FILE
-
- fptr.write(template % (row_block_size, e_block_size, f_block_size))
- fptr.close()
-
- f.write(FACTORY_CONDITIONAL % (row_block_size,
- e_block_size,
- f_block_size,
- row_block_size,
- e_block_size,
- f_block_size))
- f.write(FACTORY_FOOTER)
- f.close()
-
-
-if __name__ == "__main__":
- Specialize()
diff --git a/extern/ceres/internal/ceres/polynomial.cc b/extern/ceres/internal/ceres/polynomial.cc
index 13bf8edeee6..20812f4de81 100644
--- a/extern/ceres/internal/ceres/polynomial.cc
+++ b/extern/ceres/internal/ceres/polynomial.cc
@@ -36,14 +36,13 @@
#include <vector>
#include "Eigen/Dense"
+#include "ceres/function_sample.h"
#include "ceres/internal/port.h"
-#include "ceres/stringprintf.h"
#include "glog/logging.h"
namespace ceres {
namespace internal {
-using std::string;
using std::vector;
namespace {
@@ -53,7 +52,7 @@ namespace {
// In: Numerische Mathematik, Volume 13, Number 4 (1969), 293-304,
// Springer Berlin / Heidelberg. DOI: 10.1007/BF02165404
void BalanceCompanionMatrix(Matrix* companion_matrix_ptr) {
- CHECK_NOTNULL(companion_matrix_ptr);
+ CHECK(companion_matrix_ptr != nullptr);
Matrix& companion_matrix = *companion_matrix_ptr;
Matrix companion_matrix_offdiagonal = companion_matrix;
companion_matrix_offdiagonal.diagonal().setZero();
@@ -105,7 +104,7 @@ void BalanceCompanionMatrix(Matrix* companion_matrix_ptr) {
void BuildCompanionMatrix(const Vector& polynomial,
Matrix* companion_matrix_ptr) {
- CHECK_NOTNULL(companion_matrix_ptr);
+ CHECK(companion_matrix_ptr != nullptr);
Matrix& companion_matrix = *companion_matrix_ptr;
const int degree = polynomial.size() - 1;
@@ -327,12 +326,6 @@ void MinimizePolynomial(const Vector& polynomial,
}
}
-string FunctionSample::ToDebugString() const {
- return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, "
- "value_is_valid: %d, gradient_is_valid: %d]",
- x, value, gradient, value_is_valid, gradient_is_valid);
-}
-
Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
const int num_samples = samples.size();
int num_constraints = 0;
@@ -370,7 +363,10 @@ Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) {
}
}
- return lhs.fullPivLu().solve(rhs);
+ // TODO(sameeragarwal): This is a hack.
+ // https://github.com/ceres-solver/ceres-solver/issues/248
+ Eigen::FullPivLU<Matrix> lu(lhs);
+ return lu.setThreshold(0.0).solve(rhs);
}
void MinimizeInterpolatingPolynomial(const vector<FunctionSample>& samples,
diff --git a/extern/ceres/internal/ceres/polynomial.h b/extern/ceres/internal/ceres/polynomial.h
index 09a64c577f5..3e09bae3d0f 100644
--- a/extern/ceres/internal/ceres/polynomial.h
+++ b/extern/ceres/internal/ceres/polynomial.h
@@ -32,7 +32,6 @@
#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
-#include <string>
#include <vector>
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
@@ -40,6 +39,8 @@
namespace ceres {
namespace internal {
+struct FunctionSample;
+
// All polynomials are assumed to be the form
//
// sum_{i=0}^N polynomial(i) x^{N-i}.
@@ -84,27 +85,6 @@ void MinimizePolynomial(const Vector& polynomial,
double* optimal_x,
double* optimal_value);
-// Structure for storing sample values of a function.
-//
-// Clients can use this struct to communicate the value of the
-// function and or its gradient at a given point x.
-struct FunctionSample {
- FunctionSample()
- : x(0.0),
- value(0.0),
- value_is_valid(false),
- gradient(0.0),
- gradient_is_valid(false) {
- }
- std::string ToDebugString() const;
-
- double x;
- double value; // value = f(x)
- bool value_is_valid;
- double gradient; // gradient = f'(x)
- bool gradient_is_valid;
-};
-
// Given a set of function value and/or gradient samples, find a
// polynomial whose value and gradients are exactly equal to the ones
// in samples.
diff --git a/extern/ceres/internal/ceres/preconditioner.cc b/extern/ceres/internal/ceres/preconditioner.cc
index 82621dae50c..f98374e0cf8 100644
--- a/extern/ceres/internal/ceres/preconditioner.cc
+++ b/extern/ceres/internal/ceres/preconditioner.cc
@@ -49,7 +49,8 @@ PreconditionerType Preconditioner::PreconditionerForZeroEBlocks(
SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper(
const SparseMatrix* matrix)
- : matrix_(CHECK_NOTNULL(matrix)) {
+ : matrix_(matrix) {
+ CHECK(matrix != nullptr);
}
SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() {
diff --git a/extern/ceres/internal/ceres/preconditioner.h b/extern/ceres/internal/ceres/preconditioner.h
index a248eae060d..3e46ed83db2 100644
--- a/extern/ceres/internal/ceres/preconditioner.h
+++ b/extern/ceres/internal/ceres/preconditioner.h
@@ -34,6 +34,7 @@
#include <vector>
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
#include "ceres/linear_operator.h"
#include "ceres/sparse_matrix.h"
#include "ceres/types.h"
@@ -47,22 +48,27 @@ class SparseMatrix;
class Preconditioner : public LinearOperator {
public:
struct Options {
- Options()
- : type(JACOBI),
- visibility_clustering_type(CANONICAL_VIEWS),
- sparse_linear_algebra_library_type(SUITE_SPARSE),
- num_threads(1),
- row_block_size(Eigen::Dynamic),
- e_block_size(Eigen::Dynamic),
- f_block_size(Eigen::Dynamic) {
- }
-
- PreconditionerType type;
- VisibilityClusteringType visibility_clustering_type;
- SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type;
+ PreconditionerType type = JACOBI;
+ VisibilityClusteringType visibility_clustering_type = CANONICAL_VIEWS;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = SUITE_SPARSE;
+
+ // When using the subset preconditioner, all row blocks starting
+ // from this row block are used to construct the preconditioner.
+ //
+ // i.e., the Jacobian matrix A is horizontally partitioned as
+ //
+ // A = [P]
+ // [Q]
+ //
+ // where P has subset_preconditioner_start_row_block row blocks,
+ // and the preconditioner is the inverse of the matrix Q'Q.
+ int subset_preconditioner_start_row_block = -1;
+
+ // See solver.h for information about these flags.
+ bool use_postordering = false;
// If possible, how many threads the preconditioner can use.
- int num_threads;
+ int num_threads = 1;
// Hints about the order in which the parameter blocks should be
// eliminated by the linear solver.
@@ -91,9 +97,11 @@ class Preconditioner : public LinearOperator {
//
// Please see schur_complement_solver.h and schur_eliminator.h for
// more details.
- int row_block_size;
- int e_block_size;
- int f_block_size;
+ int row_block_size = Eigen::Dynamic;
+ int e_block_size = Eigen::Dynamic;
+ int f_block_size = Eigen::Dynamic;
+
+ ContextImpl* context = nullptr;
};
// If the optimization problem is such that there are no remaining
@@ -123,13 +131,13 @@ class Preconditioner : public LinearOperator {
// LeftMultiply and num_cols are just calls to RightMultiply and
// num_rows respectively. Update() must be called before
// RightMultiply can be called.
- virtual void RightMultiply(const double* x, double* y) const = 0;
- virtual void LeftMultiply(const double* x, double* y) const {
+ void RightMultiply(const double* x, double* y) const override = 0;
+ void LeftMultiply(const double* x, double* y) const override {
return RightMultiply(x, y);
}
- virtual int num_rows() const = 0;
- virtual int num_cols() const {
+ int num_rows() const override = 0;
+ int num_cols() const override {
return num_rows();
}
};
@@ -141,7 +149,7 @@ template <typename MatrixType>
class TypedPreconditioner : public Preconditioner {
public:
virtual ~TypedPreconditioner() {}
- virtual bool Update(const LinearOperator& A, const double* D) {
+ bool Update(const LinearOperator& A, const double* D) final {
return UpdateImpl(*down_cast<const MatrixType*>(&A), D);
}
@@ -149,7 +157,7 @@ class TypedPreconditioner : public Preconditioner {
virtual bool UpdateImpl(const MatrixType& A, const double* D) = 0;
};
-// Preconditioners that depend on acccess to the low level structure
+// Preconditioners that depend on access to the low level structure
// of a SparseMatrix.
typedef TypedPreconditioner<SparseMatrix> SparseMatrixPreconditioner; // NOLINT
typedef TypedPreconditioner<BlockSparseMatrix> BlockSparseMatrixPreconditioner; // NOLINT
diff --git a/extern/ceres/internal/ceres/preprocessor.cc b/extern/ceres/internal/ceres/preprocessor.cc
index 4aba6a39ce8..02219147d75 100644
--- a/extern/ceres/internal/ceres/preprocessor.cc
+++ b/extern/ceres/internal/ceres/preprocessor.cc
@@ -31,6 +31,7 @@
#include "ceres/callbacks.h"
#include "ceres/gradient_checking_cost_function.h"
#include "ceres/line_search_preprocessor.h"
+#include "ceres/parallel_for.h"
#include "ceres/preprocessor.h"
#include "ceres/problem_impl.h"
#include "ceres/solver.h"
@@ -56,25 +57,15 @@ Preprocessor::~Preprocessor() {
}
void ChangeNumThreadsIfNeeded(Solver::Options* options) {
-#ifndef CERES_USE_OPENMP
- if (options->num_threads > 1) {
+ const int num_threads_available = MaxNumThreadsAvailable();
+ if (options->num_threads > num_threads_available) {
LOG(WARNING)
- << "OpenMP support is not compiled into this binary; "
- << "only options.num_threads = 1 is supported. Switching "
- << "to single threaded mode.";
- options->num_threads = 1;
+ << "Specified options.num_threads: " << options->num_threads
+ << " exceeds maximum available from the threading model Ceres "
+ << "was compiled with: " << num_threads_available
+ << ". Bounding to maximum number available.";
+ options->num_threads = num_threads_available;
}
-
- // Only the Trust Region solver currently uses a linear solver.
- if (options->minimizer_type == TRUST_REGION &&
- options->num_linear_solver_threads > 1) {
- LOG(WARNING)
- << "OpenMP support is not compiled into this binary; "
- << "only options.num_linear_solver_threads=1 is supported. Switching "
- << "to single threaded mode.";
- options->num_linear_solver_threads = 1;
- }
-#endif // CERES_USE_OPENMP
}
void SetupCommonMinimizerOptions(PreprocessedProblem* pp) {
diff --git a/extern/ceres/internal/ceres/preprocessor.h b/extern/ceres/internal/ceres/preprocessor.h
index ff53d6f0d3f..99bd6c0c5dd 100644
--- a/extern/ceres/internal/ceres/preprocessor.h
+++ b/extern/ceres/internal/ceres/preprocessor.h
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_PREPROCESSOR_H_
#define CERES_INTERNAL_PREPROCESSOR_H_
+#include <memory>
#include <string>
#include <vector>
@@ -38,7 +39,6 @@
#include "ceres/evaluator.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/iteration_callback.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
@@ -91,14 +91,14 @@ struct PreprocessedProblem {
Minimizer::Options minimizer_options;
ProblemImpl* problem;
- scoped_ptr<ProblemImpl> gradient_checking_problem;
- scoped_ptr<Program> reduced_program;
- scoped_ptr<LinearSolver> linear_solver;
- scoped_ptr<IterationCallback> logging_callback;
- scoped_ptr<IterationCallback> state_updating_callback;
+ std::unique_ptr<ProblemImpl> gradient_checking_problem;
+ std::unique_ptr<Program> reduced_program;
+ std::unique_ptr<LinearSolver> linear_solver;
+ std::unique_ptr<IterationCallback> logging_callback;
+ std::unique_ptr<IterationCallback> state_updating_callback;
- shared_ptr<Evaluator> evaluator;
- shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
+ std::shared_ptr<Evaluator> evaluator;
+ std::shared_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer;
std::vector<double*> removed_parameter_blocks;
Vector reduced_parameters;
@@ -107,8 +107,9 @@ struct PreprocessedProblem {
// Common functions used by various preprocessors.
-// If OpenMP support is not available and user has requested more than
-// one thread, then set the *_num_threads options as needed to 1.
+// If the user has specified a num_threads > the maximum number of threads
+// available from the compiled threading model, bound the number of threads
+// to the maximum.
void ChangeNumThreadsIfNeeded(Solver::Options* options);
// Extract the effective parameter vector from the preprocessed
diff --git a/extern/ceres/internal/ceres/problem.cc b/extern/ceres/internal/ceres/problem.cc
index 730ce642036..767fe977296 100644
--- a/extern/ceres/internal/ceres/problem.cc
+++ b/extern/ceres/internal/ceres/problem.cc
@@ -32,6 +32,7 @@
#include "ceres/problem.h"
#include <vector>
+
#include "ceres/crs_matrix.h"
#include "ceres/problem_impl.h"
@@ -39,166 +40,90 @@ namespace ceres {
using std::vector;
-Problem::Problem() : problem_impl_(new internal::ProblemImpl) {}
+Problem::Problem() : impl_(new internal::ProblemImpl) {}
Problem::Problem(const Problem::Options& options)
- : problem_impl_(new internal::ProblemImpl(options)) {}
+ : impl_(new internal::ProblemImpl(options)) {}
+// Not inline defaulted in declaration due to use of std::unique_ptr.
+Problem::Problem(Problem&&) = default;
+Problem& Problem::operator=(Problem&&) = default;
Problem::~Problem() {}
ResidualBlockId Problem::AddResidualBlock(
CostFunction* cost_function,
LossFunction* loss_function,
const vector<double*>& parameter_blocks) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- parameter_blocks);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3, x4);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3, x4, x5);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3, x4, x5, x6);
+ return impl_->AddResidualBlock(cost_function,
+ loss_function,
+ parameter_blocks.data(),
+ static_cast<int>(parameter_blocks.size()));
}
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3, x4, x5, x6, x7);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8) {
- return problem_impl_->AddResidualBlock(cost_function,
- loss_function,
- x0, x1, x2, x3, x4, x5, x6, x7, x8);
-}
-
-ResidualBlockId Problem::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8, double* x9) {
- return problem_impl_->AddResidualBlock(
- cost_function,
- loss_function,
- x0, x1, x2, x3, x4, x5, x6, x7, x8, x9);
+ResidualBlockId Problem::AddResidualBlock(CostFunction* cost_function,
+ LossFunction* loss_function,
+ double* const* const parameter_blocks,
+ int num_parameter_blocks) {
+ return impl_->AddResidualBlock(
+ cost_function, loss_function, parameter_blocks, num_parameter_blocks);
}
void Problem::AddParameterBlock(double* values, int size) {
- problem_impl_->AddParameterBlock(values, size);
+ impl_->AddParameterBlock(values, size);
}
void Problem::AddParameterBlock(double* values,
int size,
LocalParameterization* local_parameterization) {
- problem_impl_->AddParameterBlock(values, size, local_parameterization);
+ impl_->AddParameterBlock(values, size, local_parameterization);
}
void Problem::RemoveResidualBlock(ResidualBlockId residual_block) {
- problem_impl_->RemoveResidualBlock(residual_block);
+ impl_->RemoveResidualBlock(residual_block);
}
-void Problem::RemoveParameterBlock(double* values) {
- problem_impl_->RemoveParameterBlock(values);
+void Problem::RemoveParameterBlock(const double* values) {
+ impl_->RemoveParameterBlock(values);
}
-void Problem::SetParameterBlockConstant(double* values) {
- problem_impl_->SetParameterBlockConstant(values);
+void Problem::SetParameterBlockConstant(const double* values) {
+ impl_->SetParameterBlockConstant(values);
}
void Problem::SetParameterBlockVariable(double* values) {
- problem_impl_->SetParameterBlockVariable(values);
+ impl_->SetParameterBlockVariable(values);
}
-bool Problem::IsParameterBlockConstant(double* values) const {
- return problem_impl_->IsParameterBlockConstant(values);
+bool Problem::IsParameterBlockConstant(const double* values) const {
+ return impl_->IsParameterBlockConstant(values);
}
void Problem::SetParameterization(
- double* values,
- LocalParameterization* local_parameterization) {
- problem_impl_->SetParameterization(values, local_parameterization);
+ double* values, LocalParameterization* local_parameterization) {
+ impl_->SetParameterization(values, local_parameterization);
}
const LocalParameterization* Problem::GetParameterization(
- double* values) const {
- return problem_impl_->GetParameterization(values);
+ const double* values) const {
+ return impl_->GetParameterization(values);
}
void Problem::SetParameterLowerBound(double* values,
int index,
double lower_bound) {
- problem_impl_->SetParameterLowerBound(values, index, lower_bound);
+ impl_->SetParameterLowerBound(values, index, lower_bound);
}
void Problem::SetParameterUpperBound(double* values,
int index,
double upper_bound) {
- problem_impl_->SetParameterUpperBound(values, index, upper_bound);
+ impl_->SetParameterUpperBound(values, index, upper_bound);
+}
+
+double Problem::GetParameterUpperBound(const double* values, int index) const {
+ return impl_->GetParameterUpperBound(values, index);
+}
+
+double Problem::GetParameterLowerBound(const double* values, int index) const {
+ return impl_->GetParameterLowerBound(values, index);
}
bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
@@ -206,72 +131,66 @@ bool Problem::Evaluate(const EvaluateOptions& evaluate_options,
vector<double>* residuals,
vector<double>* gradient,
CRSMatrix* jacobian) {
- return problem_impl_->Evaluate(evaluate_options,
- cost,
- residuals,
- gradient,
- jacobian);
+ return impl_->Evaluate(evaluate_options, cost, residuals, gradient, jacobian);
}
-int Problem::NumParameterBlocks() const {
- return problem_impl_->NumParameterBlocks();
+bool Problem::EvaluateResidualBlock(ResidualBlockId residual_block_id,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const {
+ return impl_->EvaluateResidualBlock(
+ residual_block_id, apply_loss_function, cost, residuals, jacobians);
}
-int Problem::NumParameters() const {
- return problem_impl_->NumParameters();
-}
+int Problem::NumParameterBlocks() const { return impl_->NumParameterBlocks(); }
-int Problem::NumResidualBlocks() const {
- return problem_impl_->NumResidualBlocks();
-}
+int Problem::NumParameters() const { return impl_->NumParameters(); }
-int Problem::NumResiduals() const {
- return problem_impl_->NumResiduals();
-}
+int Problem::NumResidualBlocks() const { return impl_->NumResidualBlocks(); }
+
+int Problem::NumResiduals() const { return impl_->NumResiduals(); }
int Problem::ParameterBlockSize(const double* parameter_block) const {
- return problem_impl_->ParameterBlockSize(parameter_block);
+ return impl_->ParameterBlockSize(parameter_block);
}
int Problem::ParameterBlockLocalSize(const double* parameter_block) const {
- return problem_impl_->ParameterBlockLocalSize(parameter_block);
+ return impl_->ParameterBlockLocalSize(parameter_block);
}
bool Problem::HasParameterBlock(const double* values) const {
- return problem_impl_->HasParameterBlock(values);
+ return impl_->HasParameterBlock(values);
}
void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const {
- problem_impl_->GetParameterBlocks(parameter_blocks);
+ impl_->GetParameterBlocks(parameter_blocks);
}
void Problem::GetResidualBlocks(
vector<ResidualBlockId>* residual_blocks) const {
- problem_impl_->GetResidualBlocks(residual_blocks);
+ impl_->GetResidualBlocks(residual_blocks);
}
void Problem::GetParameterBlocksForResidualBlock(
const ResidualBlockId residual_block,
vector<double*>* parameter_blocks) const {
- problem_impl_->GetParameterBlocksForResidualBlock(residual_block,
- parameter_blocks);
+ impl_->GetParameterBlocksForResidualBlock(residual_block, parameter_blocks);
}
const CostFunction* Problem::GetCostFunctionForResidualBlock(
const ResidualBlockId residual_block) const {
- return problem_impl_->GetCostFunctionForResidualBlock(residual_block);
+ return impl_->GetCostFunctionForResidualBlock(residual_block);
}
const LossFunction* Problem::GetLossFunctionForResidualBlock(
const ResidualBlockId residual_block) const {
- return problem_impl_->GetLossFunctionForResidualBlock(residual_block);
+ return impl_->GetLossFunctionForResidualBlock(residual_block);
}
void Problem::GetResidualBlocksForParameterBlock(
- const double* values,
- vector<ResidualBlockId>* residual_blocks) const {
- problem_impl_->GetResidualBlocksForParameterBlock(values,
- residual_blocks);
+ const double* values, vector<ResidualBlockId>* residual_blocks) const {
+ impl_->GetResidualBlocksForParameterBlock(values, residual_blocks);
}
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/problem_impl.cc b/extern/ceres/internal/ceres/problem_impl.cc
index 4abea8b33ee..6cc4d336c6a 100644
--- a/extern/ceres/internal/ceres/problem_impl.cc
+++ b/extern/ceres/internal/ceres/problem_impl.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,21 +33,31 @@
#include <algorithm>
#include <cstddef>
+#include <cstdint>
#include <iterator>
+#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>
+
#include "ceres/casts.h"
+#include "ceres/compressed_row_jacobian_writer.h"
#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/context_impl.h"
#include "ceres/cost_function.h"
#include "ceres/crs_matrix.h"
+#include "ceres/evaluation_callback.h"
#include "ceres/evaluator.h"
+#include "ceres/internal/fixed_array.h"
+#include "ceres/internal/port.h"
#include "ceres/loss_function.h"
#include "ceres/map_util.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
+#include "ceres/program_evaluator.h"
#include "ceres/residual_block.h"
+#include "ceres/scratch_evaluate_preparer.h"
#include "ceres/stl_util.h"
#include "ceres/stringprintf.h"
#include "glog/logging.h"
@@ -58,36 +68,66 @@ namespace internal {
using std::map;
using std::string;
using std::vector;
-typedef std::map<double*, internal::ParameterBlock*> ParameterMap;
namespace {
// Returns true if two regions of memory, a and b, with sizes size_a and size_b
// respectively, overlap.
-bool RegionsAlias(const double* a, int size_a,
- const double* b, int size_b) {
- return (a < b) ? b < (a + size_a)
- : a < (b + size_b);
+bool RegionsAlias(const double* a, int size_a, const double* b, int size_b) {
+ return (a < b) ? b < (a + size_a) : a < (b + size_b);
}
void CheckForNoAliasing(double* existing_block,
int existing_block_size,
double* new_block,
int new_block_size) {
- CHECK(!RegionsAlias(existing_block, existing_block_size,
- new_block, new_block_size))
+ CHECK(!RegionsAlias(
+ existing_block, existing_block_size, new_block, new_block_size))
<< "Aliasing detected between existing parameter block at memory "
- << "location " << existing_block
- << " and has size " << existing_block_size << " with new parameter "
+ << "location " << existing_block << " and has size "
+ << existing_block_size << " with new parameter "
<< "block that has memory address " << new_block << " and would have "
<< "size " << new_block_size << ".";
}
+template <typename KeyType>
+void DecrementValueOrDeleteKey(const KeyType key,
+ std::map<KeyType, int>* container) {
+ auto it = container->find(key);
+ if (it->second == 1) {
+ delete key;
+ container->erase(it);
+ } else {
+ --it->second;
+ }
+}
+
+template <typename ForwardIterator>
+void STLDeleteContainerPairFirstPointers(ForwardIterator begin,
+ ForwardIterator end) {
+ while (begin != end) {
+ delete begin->first;
+ ++begin;
+ }
+}
+
+void InitializeContext(Context* context,
+ ContextImpl** context_impl,
+ bool* context_impl_owned) {
+ if (context == nullptr) {
+ *context_impl_owned = true;
+ *context_impl = new ContextImpl;
+ } else {
+ *context_impl_owned = false;
+ *context_impl = down_cast<ContextImpl*>(context);
+ }
+}
+
} // namespace
ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
int size) {
- CHECK(values != NULL) << "Null pointer passed to AddParameterBlock "
- << "for a parameter with size " << size;
+ CHECK(values != nullptr) << "Null pointer passed to AddParameterBlock "
+ << "for a parameter with size " << size;
// Ignore the request if there is a block for the given pointer already.
ParameterMap::iterator it = parameter_block_map_.find(values);
@@ -97,8 +137,7 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
CHECK(size == existing_size)
<< "Tried adding a parameter block with the same double pointer, "
<< values << ", twice, but with different block sizes. Original "
- << "size was " << existing_size << " but new size is "
- << size;
+ << "size was " << existing_size << " but new size is " << size;
}
return it->second;
}
@@ -113,18 +152,13 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
if (lb != parameter_block_map_.begin()) {
ParameterMap::iterator previous = lb;
--previous;
- CheckForNoAliasing(previous->first,
- previous->second->Size(),
- values,
- size);
+ CheckForNoAliasing(
+ previous->first, previous->second->Size(), values, size);
}
// If lb is not off the end, check lb for aliasing.
if (lb != parameter_block_map_.end()) {
- CheckForNoAliasing(lb->first,
- lb->second->Size(),
- values,
- size);
+ CheckForNoAliasing(lb->first, lb->second->Size(), values, size);
}
}
}
@@ -145,7 +179,7 @@ ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values,
}
void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
- CHECK_NOTNULL(residual_block);
+ CHECK(residual_block != nullptr);
// Perform no check on the validity of residual_block, that is handled in
// the public method: RemoveResidualBlock().
@@ -154,8 +188,8 @@ void ProblemImpl::InternalRemoveResidualBlock(ResidualBlock* residual_block) {
const int num_parameter_blocks_for_residual =
residual_block->NumParameterBlocks();
for (int i = 0; i < num_parameter_blocks_for_residual; ++i) {
- residual_block->parameter_blocks()[i]
- ->RemoveResidualBlock(residual_block);
+ residual_block->parameter_blocks()[i]->RemoveResidualBlock(
+ residual_block);
}
ResidualBlockSet::iterator it = residual_block_set_.find(residual_block);
@@ -173,16 +207,19 @@ void ProblemImpl::DeleteBlock(ResidualBlock* residual_block) {
// The const casts here are legit, since ResidualBlock holds these
// pointers as const pointers but we have ownership of them and
// have the right to destroy them when the destructor is called.
- if (options_.cost_function_ownership == TAKE_OWNERSHIP &&
- residual_block->cost_function() != NULL) {
- cost_functions_to_delete_.push_back(
- const_cast<CostFunction*>(residual_block->cost_function()));
+ CostFunction* cost_function =
+ const_cast<CostFunction*>(residual_block->cost_function());
+ if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+ DecrementValueOrDeleteKey(cost_function, &cost_function_ref_count_);
}
+
+ LossFunction* loss_function =
+ const_cast<LossFunction*>(residual_block->loss_function());
if (options_.loss_function_ownership == TAKE_OWNERSHIP &&
- residual_block->loss_function() != NULL) {
- loss_functions_to_delete_.push_back(
- const_cast<LossFunction*>(residual_block->loss_function()));
+ loss_function != nullptr) {
+ DecrementValueOrDeleteKey(loss_function, &loss_function_ref_count_);
}
+
delete residual_block;
}
@@ -193,7 +230,7 @@ void ProblemImpl::DeleteBlock(ResidualBlock* residual_block) {
// without doing a full scan.
void ProblemImpl::DeleteBlock(ParameterBlock* parameter_block) {
if (options_.local_parameterization_ownership == TAKE_OWNERSHIP &&
- parameter_block->local_parameterization() != NULL) {
+ parameter_block->local_parameterization() != nullptr) {
local_parameterizations_to_delete_.push_back(
parameter_block->mutable_local_parameterization());
}
@@ -201,18 +238,29 @@ void ProblemImpl::DeleteBlock(ParameterBlock* parameter_block) {
delete parameter_block;
}
-ProblemImpl::ProblemImpl() : program_(new internal::Program) {}
+ProblemImpl::ProblemImpl()
+ : options_(Problem::Options()), program_(new internal::Program) {
+ InitializeContext(options_.context, &context_impl_, &context_impl_owned_);
+}
+
ProblemImpl::ProblemImpl(const Problem::Options& options)
- : options_(options),
- program_(new internal::Program) {}
+ : options_(options), program_(new internal::Program) {
+ program_->evaluation_callback_ = options.evaluation_callback;
+ InitializeContext(options_.context, &context_impl_, &context_impl_owned_);
+}
ProblemImpl::~ProblemImpl() {
- // Collect the unique cost/loss functions and delete the residuals.
- const int num_residual_blocks = program_->residual_blocks_.size();
- cost_functions_to_delete_.reserve(num_residual_blocks);
- loss_functions_to_delete_.reserve(num_residual_blocks);
- for (int i = 0; i < program_->residual_blocks_.size(); ++i) {
- DeleteBlock(program_->residual_blocks_[i]);
+ STLDeleteContainerPointers(program_->residual_blocks_.begin(),
+ program_->residual_blocks_.end());
+
+ if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+ STLDeleteContainerPairFirstPointers(cost_function_ref_count_.begin(),
+ cost_function_ref_count_.end());
+ }
+
+ if (options_.loss_function_ownership == TAKE_OWNERSHIP) {
+ STLDeleteContainerPairFirstPointers(loss_function_ref_count_.begin(),
+ loss_function_ref_count_.end());
}
// Collect the unique parameterizations and delete the parameters.
@@ -220,57 +268,57 @@ ProblemImpl::~ProblemImpl() {
DeleteBlock(program_->parameter_blocks_[i]);
}
- // Delete the owned cost/loss functions and parameterizations.
+ // Delete the owned parameterizations.
STLDeleteUniqueContainerPointers(local_parameterizations_to_delete_.begin(),
local_parameterizations_to_delete_.end());
- STLDeleteUniqueContainerPointers(cost_functions_to_delete_.begin(),
- cost_functions_to_delete_.end());
- STLDeleteUniqueContainerPointers(loss_functions_to_delete_.begin(),
- loss_functions_to_delete_.end());
+
+ if (context_impl_owned_) {
+ delete context_impl_;
+ }
}
-ResidualBlock* ProblemImpl::AddResidualBlock(
+ResidualBlockId ProblemImpl::AddResidualBlock(
CostFunction* cost_function,
LossFunction* loss_function,
- const vector<double*>& parameter_blocks) {
- CHECK_NOTNULL(cost_function);
- CHECK_EQ(parameter_blocks.size(),
- cost_function->parameter_block_sizes().size());
+ double* const* const parameter_blocks,
+ int num_parameter_blocks) {
+ CHECK(cost_function != nullptr);
+ CHECK_EQ(num_parameter_blocks, cost_function->parameter_block_sizes().size());
// Check the sizes match.
- const vector<int32>& parameter_block_sizes =
+ const vector<int32_t>& parameter_block_sizes =
cost_function->parameter_block_sizes();
if (!options_.disable_all_safety_checks) {
- CHECK_EQ(parameter_block_sizes.size(), parameter_blocks.size())
+ CHECK_EQ(parameter_block_sizes.size(), num_parameter_blocks)
<< "Number of blocks input is different than the number of blocks "
<< "that the cost function expects.";
// Check for duplicate parameter blocks.
- vector<double*> sorted_parameter_blocks(parameter_blocks);
+ vector<double*> sorted_parameter_blocks(
+ parameter_blocks, parameter_blocks + num_parameter_blocks);
sort(sorted_parameter_blocks.begin(), sorted_parameter_blocks.end());
const bool has_duplicate_items =
(std::adjacent_find(sorted_parameter_blocks.begin(),
- sorted_parameter_blocks.end())
- != sorted_parameter_blocks.end());
+ sorted_parameter_blocks.end()) !=
+ sorted_parameter_blocks.end());
if (has_duplicate_items) {
string blocks;
- for (int i = 0; i < parameter_blocks.size(); ++i) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
blocks += StringPrintf(" %p ", parameter_blocks[i]);
}
LOG(FATAL) << "Duplicate parameter blocks in a residual parameter "
- << "are not allowed. Parameter block pointers: ["
- << blocks << "]";
+ << "are not allowed. Parameter block pointers: [" << blocks
+ << "]";
}
}
// Add parameter blocks and convert the double*'s to parameter blocks.
- vector<ParameterBlock*> parameter_block_ptrs(parameter_blocks.size());
- for (int i = 0; i < parameter_blocks.size(); ++i) {
- parameter_block_ptrs[i] =
- InternalAddParameterBlock(parameter_blocks[i],
- parameter_block_sizes[i]);
+ vector<ParameterBlock*> parameter_block_ptrs(num_parameter_blocks);
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ parameter_block_ptrs[i] = InternalAddParameterBlock(
+ parameter_blocks[i], parameter_block_sizes[i]);
}
if (!options_.disable_all_safety_checks) {
@@ -279,8 +327,8 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
for (int i = 0; i < parameter_block_ptrs.size(); ++i) {
CHECK_EQ(cost_function->parameter_block_sizes()[i],
parameter_block_ptrs[i]->Size())
- << "The cost function expects parameter block " << i
- << " of size " << cost_function->parameter_block_sizes()[i]
+ << "The cost function expects parameter block " << i << " of size "
+ << cost_function->parameter_block_sizes()[i]
<< " but was given a block of size "
<< parameter_block_ptrs[i]->Size();
}
@@ -294,7 +342,7 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
// Add dependencies on the residual to the parameter blocks.
if (options_.enable_fast_removal) {
- for (int i = 0; i < parameter_blocks.size(); ++i) {
+ for (int i = 0; i < num_parameter_blocks; ++i) {
parameter_block_ptrs[i]->AddResidualBlock(new_residual_block);
}
}
@@ -305,148 +353,19 @@ ResidualBlock* ProblemImpl::AddResidualBlock(
residual_block_set_.insert(new_residual_block);
}
- return new_residual_block;
-}
-
-// Unfortunately, macros don't help much to reduce this code, and var args don't
-// work because of the ambiguous case that there is no loss function.
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
+ if (options_.cost_function_ownership == TAKE_OWNERSHIP) {
+ // Increment the reference count, creating an entry in the table if
+ // needed. Note: C++ maps guarantee that new entries have default
+ // constructed values; this implies integers are zero initialized.
+ ++cost_function_ref_count_[cost_function];
+ }
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
+ if (options_.loss_function_ownership == TAKE_OWNERSHIP &&
+ loss_function != nullptr) {
+ ++loss_function_ref_count_[loss_function];
+ }
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- residual_parameters.push_back(x5);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- residual_parameters.push_back(x5);
- residual_parameters.push_back(x6);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- residual_parameters.push_back(x5);
- residual_parameters.push_back(x6);
- residual_parameters.push_back(x7);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- residual_parameters.push_back(x5);
- residual_parameters.push_back(x6);
- residual_parameters.push_back(x7);
- residual_parameters.push_back(x8);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
-}
-
-ResidualBlock* ProblemImpl::AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2, double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8, double* x9) {
- vector<double*> residual_parameters;
- residual_parameters.push_back(x0);
- residual_parameters.push_back(x1);
- residual_parameters.push_back(x2);
- residual_parameters.push_back(x3);
- residual_parameters.push_back(x4);
- residual_parameters.push_back(x5);
- residual_parameters.push_back(x6);
- residual_parameters.push_back(x7);
- residual_parameters.push_back(x8);
- residual_parameters.push_back(x9);
- return AddResidualBlock(cost_function, loss_function, residual_parameters);
+ return new_residual_block;
}
void ProblemImpl::AddParameterBlock(double* values, int size) {
@@ -454,12 +373,9 @@ void ProblemImpl::AddParameterBlock(double* values, int size) {
}
void ProblemImpl::AddParameterBlock(
- double* values,
- int size,
- LocalParameterization* local_parameterization) {
- ParameterBlock* parameter_block =
- InternalAddParameterBlock(values, size);
- if (local_parameterization != NULL) {
+ double* values, int size, LocalParameterization* local_parameterization) {
+ ParameterBlock* parameter_block = InternalAddParameterBlock(values, size);
+ if (local_parameterization != nullptr) {
parameter_block->SetParameterization(local_parameterization);
}
}
@@ -468,13 +384,12 @@ void ProblemImpl::AddParameterBlock(
// This is done in constant time by moving an element from the end of the
// vector over the element to remove, then popping the last element. It
// destroys the ordering in the interest of speed.
-template<typename Block>
+template <typename Block>
void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
Block* block_to_remove) {
CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove)
<< "You found a Ceres bug! \n"
- << "Block requested: "
- << block_to_remove->ToString() << "\n"
+ << "Block requested: " << block_to_remove->ToString() << "\n"
<< "Block present: "
<< (*mutable_blocks)[block_to_remove->index()]->ToString();
@@ -493,24 +408,23 @@ void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks,
}
void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
- CHECK_NOTNULL(residual_block);
+ CHECK(residual_block != nullptr);
// Verify that residual_block identifies a residual in the current problem.
- const string residual_not_found_message =
- StringPrintf("Residual block to remove: %p not found. This usually means "
- "one of three things have happened:\n"
- " 1) residual_block is uninitialised and points to a random "
- "area in memory.\n"
- " 2) residual_block represented a residual that was added to"
- " the problem, but referred to a parameter block which has "
- "since been removed, which removes all residuals which "
- "depend on that parameter block, and was thus removed.\n"
- " 3) residual_block referred to a residual that has already "
- "been removed from the problem (by the user).",
- residual_block);
+ const string residual_not_found_message = StringPrintf(
+ "Residual block to remove: %p not found. This usually means "
+ "one of three things have happened:\n"
+ " 1) residual_block is uninitialised and points to a random "
+ "area in memory.\n"
+ " 2) residual_block represented a residual that was added to"
+ " the problem, but referred to a parameter block which has "
+ "since been removed, which removes all residuals which "
+ "depend on that parameter block, and was thus removed.\n"
+ " 3) residual_block referred to a residual that has already "
+ "been removed from the problem (by the user).",
+ residual_block);
if (options_.enable_fast_removal) {
- CHECK(residual_block_set_.find(residual_block) !=
- residual_block_set_.end())
+ CHECK(residual_block_set_.find(residual_block) != residual_block_set_.end())
<< residual_not_found_message;
} else {
// Perform a full search over all current residuals.
@@ -523,10 +437,10 @@ void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) {
InternalRemoveResidualBlock(residual_block);
}
-void ProblemImpl::RemoveParameterBlock(double* values) {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+void ProblemImpl::RemoveParameterBlock(const double* values) {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "it can be removed.";
@@ -561,10 +475,10 @@ void ProblemImpl::RemoveParameterBlock(double* values) {
DeleteBlockInVector(program_->mutable_parameter_blocks(), parameter_block);
}
-void ProblemImpl::SetParameterBlockConstant(double* values) {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+void ProblemImpl::SetParameterBlockConstant(const double* values) {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "it can be set constant.";
@@ -573,20 +487,19 @@ void ProblemImpl::SetParameterBlockConstant(double* values) {
parameter_block->SetConstant();
}
-bool ProblemImpl::IsParameterBlockConstant(double* values) const {
- const ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- CHECK(parameter_block != NULL)
- << "Parameter block not found: " << values << ". You must add the "
- << "parameter block to the problem before it can be queried.";
-
+bool ProblemImpl::IsParameterBlockConstant(const double* values) const {
+ const ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ CHECK(parameter_block != nullptr)
+ << "Parameter block not found: " << values << ". You must add the "
+ << "parameter block to the problem before it can be queried.";
return parameter_block->IsConstant();
}
void ProblemImpl::SetParameterBlockVariable(double* values) {
ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+ FindWithDefault(parameter_block_map_, values, nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "it can be set varying.";
@@ -596,24 +509,32 @@ void ProblemImpl::SetParameterBlockVariable(double* values) {
}
void ProblemImpl::SetParameterization(
- double* values,
- LocalParameterization* local_parameterization) {
+ double* values, LocalParameterization* local_parameterization) {
ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+ FindWithDefault(parameter_block_map_, values, nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can set its local parameterization.";
}
+ // If the parameter block already has a local parameterization and
+ // we are to take ownership of local parameterizations, then add it
+ // to local_parameterizations_to_delete_ for eventual deletion.
+ if (parameter_block->local_parameterization_ &&
+ options_.local_parameterization_ownership == TAKE_OWNERSHIP) {
+ local_parameterizations_to_delete_.push_back(
+ parameter_block->local_parameterization_);
+ }
+
parameter_block->SetParameterization(local_parameterization);
}
const LocalParameterization* ProblemImpl::GetParameterization(
- double* values) const {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+ const double* values) const {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can get its local parameterization.";
@@ -626,8 +547,8 @@ void ProblemImpl::SetParameterLowerBound(double* values,
int index,
double lower_bound) {
ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+ FindWithDefault(parameter_block_map_, values, nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can set a lower bound on one of its components.";
@@ -640,8 +561,8 @@ void ProblemImpl::SetParameterUpperBound(double* values,
int index,
double upper_bound) {
ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, values, NULL);
- if (parameter_block == NULL) {
+ FindWithDefault(parameter_block_map_, values, nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can set an upper bound on one of its components.";
@@ -649,15 +570,37 @@ void ProblemImpl::SetParameterUpperBound(double* values,
parameter_block->SetUpperBound(index, upper_bound);
}
+double ProblemImpl::GetParameterLowerBound(const double* values,
+ int index) const {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
+ LOG(FATAL) << "Parameter block not found: " << values
+ << ". You must add the parameter block to the problem before "
+ << "you can get the lower bound on one of its components.";
+ }
+ return parameter_block->LowerBound(index);
+}
+
+double ProblemImpl::GetParameterUpperBound(const double* values,
+ int index) const {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
+ LOG(FATAL) << "Parameter block not found: " << values
+ << ". You must add the parameter block to the problem before "
+ << "you can set an upper bound on one of its components.";
+ }
+ return parameter_block->UpperBound(index);
+}
+
bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
double* cost,
vector<double>* residuals,
vector<double>* gradient,
CRSMatrix* jacobian) {
- if (cost == NULL &&
- residuals == NULL &&
- gradient == NULL &&
- jacobian == NULL) {
+ if (cost == nullptr && residuals == nullptr && gradient == nullptr &&
+ jacobian == nullptr) {
LOG(INFO) << "Nothing to do.";
return true;
}
@@ -667,7 +610,8 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
Program program;
*program.mutable_residual_blocks() =
((evaluate_options.residual_blocks.size() > 0)
- ? evaluate_options.residual_blocks : program_->residual_blocks());
+ ? evaluate_options.residual_blocks
+ : program_->residual_blocks());
const vector<double*>& parameter_block_ptrs =
evaluate_options.parameter_blocks;
@@ -688,10 +632,9 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
// 1. Convert double* into ParameterBlock*
parameter_blocks.resize(parameter_block_ptrs.size());
for (int i = 0; i < parameter_block_ptrs.size(); ++i) {
- parameter_blocks[i] = FindWithDefault(parameter_block_map_,
- parameter_block_ptrs[i],
- NULL);
- if (parameter_blocks[i] == NULL) {
+ parameter_blocks[i] = FindWithDefault(
+ parameter_block_map_, parameter_block_ptrs[i], nullptr);
+ if (parameter_blocks[i] == nullptr) {
LOG(FATAL) << "No known parameter block for "
<< "Problem::Evaluate::Options.parameter_blocks[" << i << "]"
<< " = " << parameter_block_ptrs[i];
@@ -742,45 +685,36 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
// the Evaluator decides the storage for the Jacobian based on the
// type of linear solver being used.
evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-#ifndef CERES_USE_OPENMP
+#ifdef CERES_NO_THREADS
LOG_IF(WARNING, evaluate_options.num_threads > 1)
- << "OpenMP support is not compiled into this binary; "
+ << "No threading support is compiled into this binary; "
<< "only evaluate_options.num_threads = 1 is supported. Switching "
<< "to single threaded mode.";
evaluator_options.num_threads = 1;
#else
evaluator_options.num_threads = evaluate_options.num_threads;
-#endif // CERES_USE_OPENMP
-
- string error;
- scoped_ptr<Evaluator> evaluator(
- Evaluator::Create(evaluator_options, &program, &error));
- if (evaluator.get() == NULL) {
- LOG(ERROR) << "Unable to create an Evaluator object. "
- << "Error: " << error
- << "This is a Ceres bug; please contact the developers!";
-
- // Make the parameter blocks that were temporarily marked
- // constant, variable again.
- for (int i = 0; i < variable_parameter_blocks.size(); ++i) {
- variable_parameter_blocks[i]->SetVarying();
- }
-
- program_->SetParameterBlockStatePtrsToUserStatePtrs();
- program_->SetParameterOffsetsAndIndex();
- return false;
- }
-
- if (residuals !=NULL) {
+#endif // CERES_NO_THREADS
+
+ // The main thread also does work so we only need to launch num_threads - 1.
+ context_impl_->EnsureMinimumThreads(evaluator_options.num_threads - 1);
+ evaluator_options.context = context_impl_;
+ evaluator_options.evaluation_callback =
+ program_->mutable_evaluation_callback();
+ std::unique_ptr<Evaluator> evaluator(
+ new ProgramEvaluator<ScratchEvaluatePreparer,
+ CompressedRowJacobianWriter>(evaluator_options,
+ &program));
+
+ if (residuals != nullptr) {
residuals->resize(evaluator->NumResiduals());
}
- if (gradient != NULL) {
+ if (gradient != nullptr) {
gradient->resize(evaluator->NumEffectiveParameters());
}
- scoped_ptr<CompressedRowSparseMatrix> tmp_jacobian;
- if (jacobian != NULL) {
+ std::unique_ptr<CompressedRowSparseMatrix> tmp_jacobian;
+ if (jacobian != nullptr) {
tmp_jacobian.reset(
down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
}
@@ -804,12 +738,13 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
Evaluator::EvaluateOptions evaluator_evaluate_options;
evaluator_evaluate_options.apply_loss_function =
evaluate_options.apply_loss_function;
- bool status = evaluator->Evaluate(evaluator_evaluate_options,
- parameters.data(),
- &tmp_cost,
- residuals != NULL ? &(*residuals)[0] : NULL,
- gradient != NULL ? &(*gradient)[0] : NULL,
- tmp_jacobian.get());
+ bool status =
+ evaluator->Evaluate(evaluator_evaluate_options,
+ parameters.data(),
+ &tmp_cost,
+ residuals != nullptr ? &(*residuals)[0] : nullptr,
+ gradient != nullptr ? &(*gradient)[0] : nullptr,
+ tmp_jacobian.get());
// Make the parameter blocks that were temporarily marked constant,
// variable again.
@@ -818,10 +753,10 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
}
if (status) {
- if (cost != NULL) {
+ if (cost != nullptr) {
*cost = tmp_cost;
}
- if (jacobian != NULL) {
+ if (jacobian != nullptr) {
tmp_jacobian->ToCRSMatrix(jacobian);
}
}
@@ -831,26 +766,53 @@ bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options,
return status;
}
+bool ProblemImpl::EvaluateResidualBlock(ResidualBlock* residual_block,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const {
+ ParameterBlock* const* parameter_blocks = residual_block->parameter_blocks();
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int i = 0; i < num_parameter_blocks; ++i) {
+ ParameterBlock* parameter_block = parameter_blocks[i];
+ if (parameter_block->IsConstant()) {
+ if (jacobians != nullptr && jacobians[i] != nullptr) {
+ LOG(ERROR) << "Jacobian requested for parameter block : " << i
+ << ". But the parameter block is marked constant.";
+ return false;
+ }
+ } else {
+ CHECK(parameter_block->SetState(parameter_block->user_state()))
+ << "Congratulations, you found a Ceres bug! Please report this error "
+ << "to the developers.";
+ }
+ }
+
+ double dummy_cost = 0.0;
+ FixedArray<double> scratch(residual_block->NumScratchDoublesForEvaluate());
+ return residual_block->Evaluate(apply_loss_function,
+ cost ? cost : &dummy_cost,
+ residuals,
+ jacobians,
+ scratch.data());
+}
+
int ProblemImpl::NumParameterBlocks() const {
return program_->NumParameterBlocks();
}
-int ProblemImpl::NumParameters() const {
- return program_->NumParameters();
-}
+int ProblemImpl::NumParameters() const { return program_->NumParameters(); }
int ProblemImpl::NumResidualBlocks() const {
return program_->NumResidualBlocks();
}
-int ProblemImpl::NumResiduals() const {
- return program_->NumResiduals();
-}
+int ProblemImpl::NumResiduals() const { return program_->NumResiduals(); }
int ProblemImpl::ParameterBlockSize(const double* values) const {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
- if (parameter_block == NULL) {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can get its size.";
@@ -860,9 +822,9 @@ int ProblemImpl::ParameterBlockSize(const double* values) const {
}
int ProblemImpl::ParameterBlockLocalSize(const double* values) const {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
- if (parameter_block == NULL) {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can get its local size.";
@@ -877,18 +839,17 @@ bool ProblemImpl::HasParameterBlock(const double* parameter_block) const {
}
void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const {
- CHECK_NOTNULL(parameter_blocks);
+ CHECK(parameter_blocks != nullptr);
parameter_blocks->resize(0);
- for (ParameterMap::const_iterator it = parameter_block_map_.begin();
- it != parameter_block_map_.end();
- ++it) {
- parameter_blocks->push_back(it->first);
+ parameter_blocks->reserve(parameter_block_map_.size());
+ for (const auto& entry : parameter_block_map_) {
+ parameter_blocks->push_back(entry.first);
}
}
void ProblemImpl::GetResidualBlocks(
vector<ResidualBlockId>* residual_blocks) const {
- CHECK_NOTNULL(residual_blocks);
+ CHECK(residual_blocks != nullptr);
*residual_blocks = program().residual_blocks();
}
@@ -896,7 +857,8 @@ void ProblemImpl::GetParameterBlocksForResidualBlock(
const ResidualBlockId residual_block,
vector<double*>* parameter_blocks) const {
int num_parameter_blocks = residual_block->NumParameterBlocks();
- CHECK_NOTNULL(parameter_blocks)->resize(num_parameter_blocks);
+ CHECK(parameter_blocks != nullptr);
+ parameter_blocks->resize(num_parameter_blocks);
for (int i = 0; i < num_parameter_blocks; ++i) {
(*parameter_blocks)[i] =
residual_block->parameter_blocks()[i]->mutable_user_state();
@@ -914,11 +876,10 @@ const LossFunction* ProblemImpl::GetLossFunctionForResidualBlock(
}
void ProblemImpl::GetResidualBlocksForParameterBlock(
- const double* values,
- vector<ResidualBlockId>* residual_blocks) const {
- ParameterBlock* parameter_block =
- FindWithDefault(parameter_block_map_, const_cast<double*>(values), NULL);
- if (parameter_block == NULL) {
+ const double* values, vector<ResidualBlockId>* residual_blocks) const {
+ ParameterBlock* parameter_block = FindWithDefault(
+ parameter_block_map_, const_cast<double*>(values), nullptr);
+ if (parameter_block == nullptr) {
LOG(FATAL) << "Parameter block not found: " << values
<< ". You must add the parameter block to the problem before "
<< "you can get the residual blocks that depend on it.";
@@ -927,8 +888,8 @@ void ProblemImpl::GetResidualBlocksForParameterBlock(
if (options_.enable_fast_removal) {
// In this case the residual blocks that depend on the parameter block are
// stored in the parameter block already, so just copy them out.
- CHECK_NOTNULL(residual_blocks)->resize(
- parameter_block->mutable_residual_blocks()->size());
+ CHECK(residual_blocks != nullptr);
+ residual_blocks->resize(parameter_block->mutable_residual_blocks()->size());
std::copy(parameter_block->mutable_residual_blocks()->begin(),
parameter_block->mutable_residual_blocks()->end(),
residual_blocks->begin());
@@ -936,11 +897,11 @@ void ProblemImpl::GetResidualBlocksForParameterBlock(
}
// Find residual blocks that depend on the parameter block.
- CHECK_NOTNULL(residual_blocks)->clear();
+ CHECK(residual_blocks != nullptr);
+ residual_blocks->clear();
const int num_residual_blocks = NumResidualBlocks();
for (int i = 0; i < num_residual_blocks; ++i) {
- ResidualBlock* residual_block =
- (*(program_->mutable_residual_blocks()))[i];
+ ResidualBlock* residual_block = (*(program_->mutable_residual_blocks()))[i];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
for (int j = 0; j < num_parameter_blocks; ++j) {
if (residual_block->parameter_blocks()[j] == parameter_block) {
diff --git a/extern/ceres/internal/ceres/problem_impl.h b/extern/ceres/internal/ceres/problem_impl.h
index a4689c362f6..8bbe7238d27 100644
--- a/extern/ceres/internal/ceres/problem_impl.h
+++ b/extern/ceres/internal/ceres/problem_impl.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -39,19 +39,21 @@
#ifndef CERES_PUBLIC_PROBLEM_IMPL_H_
#define CERES_PUBLIC_PROBLEM_IMPL_H_
+#include <array>
#include <map>
+#include <memory>
+#include <unordered_set>
#include <vector>
-#include "ceres/internal/macros.h"
+#include "ceres/context_impl.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/collections_port.h"
#include "ceres/problem.h"
#include "ceres/types.h"
namespace ceres {
class CostFunction;
+class EvaluationCallback;
class LossFunction;
class LocalParameterization;
struct CRSMatrix;
@@ -64,78 +66,55 @@ class ResidualBlock;
class ProblemImpl {
public:
typedef std::map<double*, ParameterBlock*> ParameterMap;
- typedef HashSet<ResidualBlock*> ResidualBlockSet;
+ typedef std::unordered_set<ResidualBlock*> ResidualBlockSet;
+ typedef std::map<CostFunction*, int> CostFunctionRefCount;
+ typedef std::map<LossFunction*, int> LossFunctionRefCount;
ProblemImpl();
explicit ProblemImpl(const Problem::Options& options);
+ ProblemImpl(const ProblemImpl&) = delete;
+ void operator=(const ProblemImpl&) = delete;
~ProblemImpl();
// See the public problem.h file for description of these methods.
- ResidualBlockId AddResidualBlock(
- CostFunction* cost_function,
- LossFunction* loss_function,
- const std::vector<double*>& parameter_blocks);
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
- double* x0);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7);
- ResidualBlockId AddResidualBlock(CostFunction* cost_function,
- LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8);
+ double* const* const parameter_blocks,
+ int num_parameter_blocks);
+
+ template <typename... Ts>
ResidualBlockId AddResidualBlock(CostFunction* cost_function,
LossFunction* loss_function,
- double* x0, double* x1, double* x2,
- double* x3, double* x4, double* x5,
- double* x6, double* x7, double* x8,
- double* x9);
+ double* x0,
+ Ts*... xs) {
+ const std::array<double*, sizeof...(Ts) + 1> parameter_blocks{{x0, xs...}};
+ return AddResidualBlock(cost_function,
+ loss_function,
+ parameter_blocks.data(),
+ static_cast<int>(parameter_blocks.size()));
+ }
+
void AddParameterBlock(double* values, int size);
void AddParameterBlock(double* values,
int size,
LocalParameterization* local_parameterization);
void RemoveResidualBlock(ResidualBlock* residual_block);
- void RemoveParameterBlock(double* values);
+ void RemoveParameterBlock(const double* values);
- void SetParameterBlockConstant(double* values);
+ void SetParameterBlockConstant(const double* values);
void SetParameterBlockVariable(double* values);
- bool IsParameterBlockConstant(double* values) const;
+ bool IsParameterBlockConstant(const double* values) const;
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
- const LocalParameterization* GetParameterization(double* values) const;
+ const LocalParameterization* GetParameterization(const double* values) const;
void SetParameterLowerBound(double* values, int index, double lower_bound);
void SetParameterUpperBound(double* values, int index, double upper_bound);
+ double GetParameterLowerBound(const double* values, int index) const;
+ double GetParameterUpperBound(const double* values, int index) const;
bool Evaluate(const Problem::EvaluateOptions& options,
double* cost,
@@ -143,6 +122,12 @@ class ProblemImpl {
std::vector<double>* gradient,
CRSMatrix* jacobian);
+ bool EvaluateResidualBlock(ResidualBlock* residual_block,
+ bool apply_loss_function,
+ double* cost,
+ double* residuals,
+ double** jacobians) const;
+
int NumParameterBlocks() const;
int NumParameters() const;
int NumResidualBlocks() const;
@@ -179,20 +164,16 @@ class ProblemImpl {
return residual_block_set_;
}
+ ContextImpl* context() { return context_impl_; }
+
private:
ParameterBlock* InternalAddParameterBlock(double* values, int size);
void InternalRemoveResidualBlock(ResidualBlock* residual_block);
- bool InternalEvaluate(Program* program,
- double* cost,
- std::vector<double>* residuals,
- std::vector<double>* gradient,
- CRSMatrix* jacobian);
-
// Delete the arguments in question. These differ from the Remove* functions
// in that they do not clean up references to the block to delete; they
// merely delete them.
- template<typename Block>
+ template <typename Block>
void DeleteBlockInVector(std::vector<Block*>* mutable_blocks,
Block* block_to_remove);
void DeleteBlock(ResidualBlock* residual_block);
@@ -200,26 +181,32 @@ class ProblemImpl {
const Problem::Options options_;
+ bool context_impl_owned_;
+ ContextImpl* context_impl_;
+
// The mapping from user pointers to parameter blocks.
- std::map<double*, ParameterBlock*> parameter_block_map_;
+ ParameterMap parameter_block_map_;
// Iff enable_fast_removal is enabled, contains the current residual blocks.
ResidualBlockSet residual_block_set_;
// The actual parameter and residual blocks.
- internal::scoped_ptr<internal::Program> program_;
+ std::unique_ptr<internal::Program> program_;
- // When removing residual and parameter blocks, cost/loss functions and
- // parameterizations have ambiguous ownership. Instead of scanning the entire
- // problem to see if the cost/loss/parameterization is shared with other
- // residual or parameter blocks, buffer them until destruction.
+ // When removing parameter blocks, parameterizations have ambiguous
+ // ownership. Instead of scanning the entire problem to see if the
+ // parameterization is shared with other parameter blocks, buffer
+ // them until destruction.
//
// TODO(keir): See if it makes sense to use sets instead.
- std::vector<CostFunction*> cost_functions_to_delete_;
- std::vector<LossFunction*> loss_functions_to_delete_;
std::vector<LocalParameterization*> local_parameterizations_to_delete_;
- CERES_DISALLOW_COPY_AND_ASSIGN(ProblemImpl);
+ // For each cost function and loss function in the problem, a count
+ // of the number of residual blocks that refer to them. When the
+ // count goes to zero and the problem owns these objects, they are
+ // destroyed.
+ CostFunctionRefCount cost_function_ref_count_;
+ LossFunctionRefCount loss_function_ref_count_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/program.cc b/extern/ceres/internal/ceres/program.cc
index 8e97f072113..f1ded2e5d5a 100644
--- a/extern/ceres/internal/ceres/program.cc
+++ b/extern/ceres/internal/ceres/program.cc
@@ -30,8 +30,11 @@
#include "ceres/program.h"
+#include <algorithm>
#include <map>
+#include <memory>
#include <vector>
+
#include "ceres/array_utils.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
@@ -59,8 +62,8 @@ Program::Program() {}
Program::Program(const Program& program)
: parameter_blocks_(program.parameter_blocks_),
- residual_blocks_(program.residual_blocks_) {
-}
+ residual_blocks_(program.residual_blocks_),
+ evaluation_callback_(program.evaluation_callback_) {}
const vector<ParameterBlock*>& Program::parameter_blocks() const {
return parameter_blocks_;
@@ -78,7 +81,11 @@ vector<ResidualBlock*>* Program::mutable_residual_blocks() {
return &residual_blocks_;
}
-bool Program::StateVectorToParameterBlocks(const double *state) {
+EvaluationCallback* Program::mutable_evaluation_callback() {
+ return evaluation_callback_;
+}
+
+bool Program::StateVectorToParameterBlocks(const double* state) {
for (int i = 0; i < parameter_blocks_.size(); ++i) {
if (!parameter_blocks_[i]->IsConstant() &&
!parameter_blocks_[i]->SetState(state)) {
@@ -89,7 +96,7 @@ bool Program::StateVectorToParameterBlocks(const double *state) {
return true;
}
-void Program::ParameterBlocksToStateVector(double *state) const {
+void Program::ParameterBlocksToStateVector(double* state) const {
for (int i = 0; i < parameter_blocks_.size(); ++i) {
parameter_blocks_[i]->GetState(state);
state += parameter_blocks_[i]->Size();
@@ -178,7 +185,7 @@ bool Program::IsValid() const {
}
bool Program::ParameterBlocksAreFinite(string* message) const {
- CHECK_NOTNULL(message);
+ CHECK(message != nullptr);
for (int i = 0; i < parameter_blocks_.size(); ++i) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
const double* array = parameter_block->user_state();
@@ -189,7 +196,9 @@ bool Program::ParameterBlocksAreFinite(string* message) const {
"ParameterBlock: %p with size %d has at least one invalid value.\n"
"First invalid value is at index: %d.\n"
"Parameter block values: ",
- array, size, invalid_index);
+ array,
+ size,
+ invalid_index);
AppendArrayToString(size, array, message);
return false;
}
@@ -217,7 +226,7 @@ bool Program::IsBoundsConstrained() const {
}
bool Program::IsFeasible(string* message) const {
- CHECK_NOTNULL(message);
+ CHECK(message != nullptr);
for (int i = 0; i < parameter_blocks_.size(); ++i) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
const double* parameters = parameter_block->user_state();
@@ -236,7 +245,12 @@ bool Program::IsFeasible(string* message) const {
"\nFirst infeasible value is at index: %d."
"\nLower bound: %e, value: %e, upper bound: %e"
"\nParameter block values: ",
- parameters, size, j, lower_bound, parameters[j], upper_bound);
+ parameters,
+ size,
+ j,
+ lower_bound,
+ parameters[j],
+ upper_bound);
AppendArrayToString(size, parameters, message);
return false;
}
@@ -255,7 +269,11 @@ bool Program::IsFeasible(string* message) const {
"\nFirst infeasible bound is at index: %d."
"\nLower bound: %e, upper bound: %e"
"\nParameter block values: ",
- parameters, size, j, lower_bound, upper_bound);
+ parameters,
+ size,
+ j,
+ lower_bound,
+ upper_bound);
AppendArrayToString(size, parameters, message);
return false;
}
@@ -270,15 +288,14 @@ Program* Program::CreateReducedProgram(
vector<double*>* removed_parameter_blocks,
double* fixed_cost,
string* error) const {
- CHECK_NOTNULL(removed_parameter_blocks);
- CHECK_NOTNULL(fixed_cost);
- CHECK_NOTNULL(error);
-
- scoped_ptr<Program> reduced_program(new Program(*this));
- if (!reduced_program->RemoveFixedBlocks(removed_parameter_blocks,
- fixed_cost,
- error)) {
- return NULL;
+ CHECK(removed_parameter_blocks != nullptr);
+ CHECK(fixed_cost != nullptr);
+ CHECK(error != nullptr);
+
+ std::unique_ptr<Program> reduced_program(new Program(*this));
+ if (!reduced_program->RemoveFixedBlocks(
+ removed_parameter_blocks, fixed_cost, error)) {
+ return nullptr;
}
reduced_program->SetParameterOffsetsAndIndex();
@@ -288,15 +305,17 @@ Program* Program::CreateReducedProgram(
bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
double* fixed_cost,
string* error) {
- CHECK_NOTNULL(removed_parameter_blocks);
- CHECK_NOTNULL(fixed_cost);
- CHECK_NOTNULL(error);
+ CHECK(removed_parameter_blocks != nullptr);
+ CHECK(fixed_cost != nullptr);
+ CHECK(error != nullptr);
- scoped_array<double> residual_block_evaluate_scratch;
+ std::unique_ptr<double[]> residual_block_evaluate_scratch;
residual_block_evaluate_scratch.reset(
new double[MaxScratchDoublesNeededForEvaluate()]);
*fixed_cost = 0.0;
+ bool need_to_call_prepare_for_evaluation = evaluation_callback_ != nullptr;
+
// Mark all the parameters as unused. Abuse the index member of the
// parameter blocks for the marking.
for (int i = 0; i < parameter_blocks_.size(); ++i) {
@@ -326,18 +345,45 @@ bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
continue;
}
+ // This is an exceedingly rare case, where the user has residual
+ // blocks which are effectively constant but they are also
+ // performance sensitive enough to add an EvaluationCallback.
+ //
+ // In this case before we evaluate the cost of the constant
+ // residual blocks, we must call
+ // EvaluationCallback::PrepareForEvaluation(). Because this call
+ // can be costly, we only call this if we actually encounter a
+ // residual block with all constant parameter blocks.
+ //
+ // It is worth nothing that there is a minor inefficiency here,
+ // that the iteration 0 of TrustRegionMinimizer will also cause
+ // PrepareForEvaluation to be called on the same point, but with
+ // evaluate_jacobians = true. We could try and optimize this here,
+ // but given the rarity of this case, the additional complexity
+ // and long range dependency is not worth it.
+ if (need_to_call_prepare_for_evaluation) {
+ constexpr bool kNewPoint = true;
+ constexpr bool kDoNotEvaluateJacobians = false;
+ evaluation_callback_->PrepareForEvaluation(kDoNotEvaluateJacobians,
+ kNewPoint);
+ need_to_call_prepare_for_evaluation = false;
+ }
+
// The residual is constant and will be removed, so its cost is
// added to the variable fixed_cost.
double cost = 0.0;
if (!residual_block->Evaluate(true,
&cost,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
residual_block_evaluate_scratch.get())) {
- *error = StringPrintf("Evaluation of the residual %d failed during "
- "removal of fixed residual blocks.", i);
+ *error = StringPrintf(
+ "Evaluation of the residual %d failed during "
+ "removal of fixed residual blocks.",
+ i);
return false;
}
+
*fixed_cost += cost;
}
residual_blocks_.resize(num_active_residual_blocks);
@@ -356,11 +402,9 @@ bool Program::RemoveFixedBlocks(vector<double*>* removed_parameter_blocks,
}
parameter_blocks_.resize(num_active_parameter_blocks);
- if (!(((NumResidualBlocks() == 0) &&
- (NumParameterBlocks() == 0)) ||
- ((NumResidualBlocks() != 0) &&
- (NumParameterBlocks() != 0)))) {
- *error = "Congratulations, you found a bug in Ceres. Please report it.";
+ if (!(((NumResidualBlocks() == 0) && (NumParameterBlocks() == 0)) ||
+ ((NumResidualBlocks() != 0) && (NumParameterBlocks() != 0)))) {
+ *error = "Congratulations, you found a bug in Ceres. Please report it.";
return false;
}
@@ -373,15 +417,13 @@ bool Program::IsParameterBlockSetIndependent(
// blocks in the same residual block are part of
// parameter_block_ptrs as that would violate the assumption that it
// is an independent set in the Hessian matrix.
- for (vector<ResidualBlock*>::const_iterator it = residual_blocks_.begin();
- it != residual_blocks_.end();
- ++it) {
- ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks();
- const int num_parameter_blocks = (*it)->NumParameterBlocks();
+ for (const ResidualBlock* residual_block : residual_blocks_) {
+ ParameterBlock* const* parameter_blocks =
+ residual_block->parameter_blocks();
+ const int num_parameter_blocks = residual_block->NumParameterBlocks();
int count = 0;
for (int i = 0; i < num_parameter_blocks; ++i) {
- count += independent_set.count(
- parameter_blocks[i]->mutable_user_state());
+ count += independent_set.count(parameter_blocks[i]->mutable_user_state());
}
if (count > 1) {
return false;
@@ -390,18 +432,20 @@ bool Program::IsParameterBlockSetIndependent(
return true;
}
-TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
+std::unique_ptr<TripletSparseMatrix>
+Program::CreateJacobianBlockSparsityTranspose(int start_residual_block) const {
// Matrix to store the block sparsity structure of the Jacobian.
- TripletSparseMatrix* tsm =
- new TripletSparseMatrix(NumParameterBlocks(),
- NumResidualBlocks(),
- 10 * NumResidualBlocks());
+ const int num_rows = NumParameterBlocks();
+ const int num_cols = NumResidualBlocks() - start_residual_block;
+
+ std::unique_ptr<TripletSparseMatrix> tsm(
+ new TripletSparseMatrix(num_rows, num_cols, 10 * num_cols));
int num_nonzeros = 0;
int* rows = tsm->mutable_rows();
int* cols = tsm->mutable_cols();
double* values = tsm->mutable_values();
- for (int c = 0; c < residual_blocks_.size(); ++c) {
+ for (int c = start_residual_block; c < residual_blocks_.size(); ++c) {
const ResidualBlock* residual_block = residual_blocks_[c];
const int num_parameter_blocks = residual_block->NumParameterBlocks();
ParameterBlock* const* parameter_blocks =
@@ -423,7 +467,7 @@ TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
const int r = parameter_blocks[j]->index();
rows[num_nonzeros] = r;
- cols[num_nonzeros] = c;
+ cols[num_nonzeros] = c - start_residual_block;
values[num_nonzeros] = 1.0;
++num_nonzeros;
}
@@ -433,13 +477,9 @@ TripletSparseMatrix* Program::CreateJacobianBlockSparsityTranspose() const {
return tsm;
}
-int Program::NumResidualBlocks() const {
- return residual_blocks_.size();
-}
+int Program::NumResidualBlocks() const { return residual_blocks_.size(); }
-int Program::NumParameterBlocks() const {
- return parameter_blocks_.size();
-}
+int Program::NumParameterBlocks() const { return parameter_blocks_.size(); }
int Program::NumResiduals() const {
int num_residuals = 0;
@@ -465,6 +505,9 @@ int Program::NumEffectiveParameters() const {
return num_parameters;
}
+// TODO(sameeragarwal): The following methods should just be updated
+// incrementally and the values cached, rather than the linear
+// complexity we have right now on every call.
int Program::MaxScratchDoublesNeededForEvaluate() const {
// Compute the scratch space needed for evaluate.
int max_scratch_bytes_for_evaluate = 0;
@@ -494,8 +537,8 @@ int Program::MaxDerivativesPerResidualBlock() const {
int Program::MaxParametersPerResidualBlock() const {
int max_parameters = 0;
for (int i = 0; i < residual_blocks_.size(); ++i) {
- max_parameters = max(max_parameters,
- residual_blocks_[i]->NumParameterBlocks());
+ max_parameters =
+ max(max_parameters, residual_blocks_[i]->NumParameterBlocks());
}
return max_parameters;
}
@@ -514,8 +557,8 @@ string Program::ToString() const {
ret += StringPrintf("Number of parameters: %d\n", NumParameters());
ret += "Parameters:\n";
for (int i = 0; i < parameter_blocks_.size(); ++i) {
- ret += StringPrintf("%d: %s\n",
- i, parameter_blocks_[i]->ToString().c_str());
+ ret +=
+ StringPrintf("%d: %s\n", i, parameter_blocks_[i]->ToString().c_str());
}
return ret;
}
diff --git a/extern/ceres/internal/ceres/program.h b/extern/ceres/internal/ceres/program.h
index 38c958fe34a..797129980e3 100644
--- a/extern/ceres/internal/ceres/program.h
+++ b/extern/ceres/internal/ceres/program.h
@@ -31,10 +31,13 @@
#ifndef CERES_INTERNAL_PROGRAM_H_
#define CERES_INTERNAL_PROGRAM_H_
+#include <memory>
#include <set>
#include <string>
#include <vector>
+
#include "ceres/internal/port.h"
+#include "ceres/evaluation_callback.h"
namespace ceres {
namespace internal {
@@ -64,6 +67,7 @@ class Program {
const std::vector<ResidualBlock*>& residual_blocks() const;
std::vector<ParameterBlock*>* mutable_parameter_blocks();
std::vector<ResidualBlock*>* mutable_residual_blocks();
+ EvaluationCallback* mutable_evaluation_callback();
// Serialize to/from the program and update states.
//
@@ -71,8 +75,8 @@ class Program {
// computation of the Jacobian of its local parameterization. If
// this computation fails for some reason, then this method returns
// false and the state of the parameter blocks cannot be trusted.
- bool StateVectorToParameterBlocks(const double *state);
- void ParameterBlocksToStateVector(double *state) const;
+ bool StateVectorToParameterBlocks(const double* state);
+ void ParameterBlocksToStateVector(double* state) const;
// Copy internal state to the user's parameters.
void CopyParameterBlockStateToUserState();
@@ -127,8 +131,10 @@ class Program {
// structure corresponding to the block sparsity of the transpose of
// the Jacobian matrix.
//
- // Caller owns the result.
- TripletSparseMatrix* CreateJacobianBlockSparsityTranspose() const;
+ // start_residual_block which allows the user to ignore the first
+ // start_residual_block residuals.
+ std::unique_ptr<TripletSparseMatrix> CreateJacobianBlockSparsityTranspose(
+ int start_residual_block = 0) const;
// Create a copy of this program and removes constant parameter
// blocks and residual blocks with no varying parameter blocks while
@@ -182,6 +188,7 @@ class Program {
// The Program does not own the ParameterBlock or ResidualBlock objects.
std::vector<ParameterBlock*> parameter_blocks_;
std::vector<ResidualBlock*> residual_blocks_;
+ EvaluationCallback* evaluation_callback_ = nullptr;
friend class ProblemImpl;
};
diff --git a/extern/ceres/internal/ceres/program_evaluator.h b/extern/ceres/internal/ceres/program_evaluator.h
index 74a812adeef..97ee590fbab 100644
--- a/extern/ceres/internal/ceres/program_evaluator.h
+++ b/extern/ceres/internal/ceres/program_evaluator.h
@@ -43,7 +43,7 @@
// residual jacobians are written directly into their final position in the
// block sparse matrix by the user's CostFunction; there is no copying.
//
-// The evaluation is threaded with OpenMP.
+// The evaluation is threaded with OpenMP or C++ threads.
//
// The EvaluatePreparer and JacobianWriter interfaces are as follows:
//
@@ -82,16 +82,16 @@
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
-#ifdef CERES_USE_OPENMP
-#include <omp.h>
-#endif
-
+#include <atomic>
#include <map>
+#include <memory>
#include <string>
#include <vector>
+
+#include "ceres/evaluation_callback.h"
#include "ceres/execution_summary.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/parallel_for.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
@@ -104,34 +104,33 @@ struct NullJacobianFinalizer {
void operator()(SparseMatrix* jacobian, int num_parameters) {}
};
-template<typename EvaluatePreparer,
- typename JacobianWriter,
- typename JacobianFinalizer = NullJacobianFinalizer>
+template <typename EvaluatePreparer,
+ typename JacobianWriter,
+ typename JacobianFinalizer = NullJacobianFinalizer>
class ProgramEvaluator : public Evaluator {
public:
- ProgramEvaluator(const Evaluator::Options &options, Program* program)
+ ProgramEvaluator(const Evaluator::Options& options, Program* program)
: options_(options),
program_(program),
jacobian_writer_(options, program),
evaluate_preparers_(
jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) {
-#ifndef CERES_USE_OPENMP
+#ifdef CERES_NO_THREADS
if (options_.num_threads > 1) {
- LOG(WARNING)
- << "OpenMP support is not compiled into this binary; "
- << "only options.num_threads = 1 is supported. Switching "
- << "to single threaded mode.";
+ LOG(WARNING) << "No threading support is compiled into this binary; "
+ << "only options.num_threads = 1 is supported. Switching "
+ << "to single threaded mode.";
options_.num_threads = 1;
}
-#endif
+#endif // CERES_NO_THREADS
BuildResidualLayout(*program, &residual_layout_);
- evaluate_scratch_.reset(CreateEvaluatorScratch(*program,
- options.num_threads));
+ evaluate_scratch_.reset(
+ CreateEvaluatorScratch(*program, options.num_threads));
}
// Implementation of Evaluator interface.
- SparseMatrix* CreateJacobian() const {
+ SparseMatrix* CreateJacobian() const final {
return jacobian_writer_.CreateJacobian();
}
@@ -140,133 +139,133 @@ class ProgramEvaluator : public Evaluator {
double* cost,
double* residuals,
double* gradient,
- SparseMatrix* jacobian) {
+ SparseMatrix* jacobian) final {
ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
- ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
- ? "Evaluator::Residual"
- : "Evaluator::Jacobian",
- &execution_summary_);
+ ScopedExecutionTimer call_type_timer(
+ gradient == nullptr && jacobian == nullptr ? "Evaluator::Residual"
+ : "Evaluator::Jacobian",
+ &execution_summary_);
// The parameters are stateful, so set the state before evaluating.
if (!program_->StateVectorToParameterBlocks(state)) {
return false;
}
- if (residuals != NULL) {
+ // Notify the user about a new evaluation point if they are interested.
+ if (options_.evaluation_callback != nullptr) {
+ program_->CopyParameterBlockStateToUserState();
+ options_.evaluation_callback->PrepareForEvaluation(
+ /*jacobians=*/(gradient != nullptr || jacobian != nullptr),
+ evaluate_options.new_evaluation_point);
+ }
+
+ if (residuals != nullptr) {
VectorRef(residuals, program_->NumResiduals()).setZero();
}
- if (jacobian != NULL) {
+ if (jacobian != nullptr) {
jacobian->SetZero();
}
// Each thread gets it's own cost and evaluate scratch space.
for (int i = 0; i < options_.num_threads; ++i) {
evaluate_scratch_[i].cost = 0.0;
- if (gradient != NULL) {
+ if (gradient != nullptr) {
VectorRef(evaluate_scratch_[i].gradient.get(),
- program_->NumEffectiveParameters()).setZero();
+ program_->NumEffectiveParameters())
+ .setZero();
}
}
- // This bool is used to disable the loop if an error is encountered
- // without breaking out of it. The remaining loop iterations are still run,
- // but with an empty body, and so will finish quickly.
- bool abort = false;
- int num_residual_blocks = program_->NumResidualBlocks();
-#pragma omp parallel for num_threads(options_.num_threads)
- for (int i = 0; i < num_residual_blocks; ++i) {
-// Disable the loop instead of breaking, as required by OpenMP.
-#pragma omp flush(abort)
- if (abort) {
- continue;
- }
+ const int num_residual_blocks = program_->NumResidualBlocks();
+ // This bool is used to disable the loop if an error is encountered without
+ // breaking out of it. The remaining loop iterations are still run, but with
+ // an empty body, and so will finish quickly.
+ std::atomic_bool abort(false);
+ ParallelFor(
+ options_.context,
+ 0,
+ num_residual_blocks,
+ options_.num_threads,
+ [&](int thread_id, int i) {
+ if (abort) {
+ return;
+ }
-#ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-#else
- int thread_id = 0;
-#endif
- EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
- EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
-
- // Prepare block residuals if requested.
- const ResidualBlock* residual_block = program_->residual_blocks()[i];
- double* block_residuals = NULL;
- if (residuals != NULL) {
- block_residuals = residuals + residual_layout_[i];
- } else if (gradient != NULL) {
- block_residuals = scratch->residual_block_residuals.get();
- }
+ EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
+ EvaluateScratch* scratch = &evaluate_scratch_[thread_id];
- // Prepare block jacobians if requested.
- double** block_jacobians = NULL;
- if (jacobian != NULL || gradient != NULL) {
- preparer->Prepare(residual_block,
- i,
- jacobian,
- scratch->jacobian_block_ptrs.get());
- block_jacobians = scratch->jacobian_block_ptrs.get();
- }
+ // Prepare block residuals if requested.
+ const ResidualBlock* residual_block = program_->residual_blocks()[i];
+ double* block_residuals = nullptr;
+ if (residuals != nullptr) {
+ block_residuals = residuals + residual_layout_[i];
+ } else if (gradient != nullptr) {
+ block_residuals = scratch->residual_block_residuals.get();
+ }
- // Evaluate the cost, residuals, and jacobians.
- double block_cost;
- if (!residual_block->Evaluate(
- evaluate_options.apply_loss_function,
- &block_cost,
- block_residuals,
- block_jacobians,
- scratch->residual_block_evaluate_scratch.get())) {
- abort = true;
-// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
-// the flush inside the failure case so that there is usually only one
-// synchronization point per loop iteration instead of two.
-#pragma omp flush(abort)
- continue;
- }
+ // Prepare block jacobians if requested.
+ double** block_jacobians = nullptr;
+ if (jacobian != nullptr || gradient != nullptr) {
+ preparer->Prepare(residual_block,
+ i,
+ jacobian,
+ scratch->jacobian_block_ptrs.get());
+ block_jacobians = scratch->jacobian_block_ptrs.get();
+ }
- scratch->cost += block_cost;
+ // Evaluate the cost, residuals, and jacobians.
+ double block_cost;
+ if (!residual_block->Evaluate(
+ evaluate_options.apply_loss_function,
+ &block_cost,
+ block_residuals,
+ block_jacobians,
+ scratch->residual_block_evaluate_scratch.get())) {
+ abort = true;
+ return;
+ }
- // Store the jacobians, if they were requested.
- if (jacobian != NULL) {
- jacobian_writer_.Write(i,
- residual_layout_[i],
- block_jacobians,
- jacobian);
- }
+ scratch->cost += block_cost;
- // Compute and store the gradient, if it was requested.
- if (gradient != NULL) {
- int num_residuals = residual_block->NumResiduals();
- int num_parameter_blocks = residual_block->NumParameterBlocks();
- for (int j = 0; j < num_parameter_blocks; ++j) {
- const ParameterBlock* parameter_block =
- residual_block->parameter_blocks()[j];
- if (parameter_block->IsConstant()) {
- continue;
+ // Store the jacobians, if they were requested.
+ if (jacobian != nullptr) {
+ jacobian_writer_.Write(
+ i, residual_layout_[i], block_jacobians, jacobian);
}
- MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
- block_jacobians[j],
- num_residuals,
- parameter_block->LocalSize(),
- block_residuals,
- scratch->gradient.get() + parameter_block->delta_offset());
- }
- }
- }
+ // Compute and store the gradient, if it was requested.
+ if (gradient != nullptr) {
+ int num_residuals = residual_block->NumResiduals();
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+
+ MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
+ block_jacobians[j],
+ num_residuals,
+ parameter_block->LocalSize(),
+ block_residuals,
+ scratch->gradient.get() + parameter_block->delta_offset());
+ }
+ }
+ });
if (!abort) {
const int num_parameters = program_->NumEffectiveParameters();
// Sum the cost and gradient (if requested) from each thread.
(*cost) = 0.0;
- if (gradient != NULL) {
+ if (gradient != nullptr) {
VectorRef(gradient, num_parameters).setZero();
}
for (int i = 0; i < options_.num_threads; ++i) {
(*cost) += evaluate_scratch_[i].cost;
- if (gradient != NULL) {
+ if (gradient != nullptr) {
VectorRef(gradient, num_parameters) +=
VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
}
@@ -276,7 +275,7 @@ class ProgramEvaluator : public Evaluator {
// `num_parameters` is passed to the finalizer so that additional
// storage can be reserved for additional diagonal elements if
// necessary.
- if (jacobian != NULL) {
+ if (jacobian != nullptr) {
JacobianFinalizer f;
f(jacobian, num_parameters);
}
@@ -286,27 +285,19 @@ class ProgramEvaluator : public Evaluator {
bool Plus(const double* state,
const double* delta,
- double* state_plus_delta) const {
+ double* state_plus_delta) const final {
return program_->Plus(state, delta, state_plus_delta);
}
- int NumParameters() const {
- return program_->NumParameters();
- }
- int NumEffectiveParameters() const {
+ int NumParameters() const final { return program_->NumParameters(); }
+ int NumEffectiveParameters() const final {
return program_->NumEffectiveParameters();
}
- int NumResiduals() const {
- return program_->NumResiduals();
- }
-
- virtual std::map<std::string, int> CallStatistics() const {
- return execution_summary_.calls();
- }
+ int NumResiduals() const final { return program_->NumResiduals(); }
- virtual std::map<std::string, double> TimeStatistics() const {
- return execution_summary_.times();
+ std::map<std::string, CallStatistics> Statistics() const final {
+ return execution_summary_.statistics();
}
private:
@@ -322,17 +313,16 @@ class ProgramEvaluator : public Evaluator {
VectorRef(gradient.get(), num_parameters).setZero();
residual_block_residuals.reset(
new double[max_residuals_per_residual_block]);
- jacobian_block_ptrs.reset(
- new double*[max_parameters_per_residual_block]);
+ jacobian_block_ptrs.reset(new double*[max_parameters_per_residual_block]);
}
double cost;
- scoped_array<double> residual_block_evaluate_scratch;
+ std::unique_ptr<double[]> residual_block_evaluate_scratch;
// The gradient in the local parameterization.
- scoped_array<double> gradient;
+ std::unique_ptr<double[]> gradient;
// Enough space to store the residual for the largest residual block.
- scoped_array<double> residual_block_residuals;
- scoped_array<double*> jacobian_block_ptrs;
+ std::unique_ptr<double[]> residual_block_residuals;
+ std::unique_ptr<double*[]> jacobian_block_ptrs;
};
static void BuildResidualLayout(const Program& program,
@@ -372,8 +362,8 @@ class ProgramEvaluator : public Evaluator {
Evaluator::Options options_;
Program* program_;
JacobianWriter jacobian_writer_;
- scoped_array<EvaluatePreparer> evaluate_preparers_;
- scoped_array<EvaluateScratch> evaluate_scratch_;
+ std::unique_ptr<EvaluatePreparer[]> evaluate_preparers_;
+ std::unique_ptr<EvaluateScratch[]> evaluate_scratch_;
std::vector<int> residual_layout_;
::ceres::internal::ExecutionSummary execution_summary_;
};
diff --git a/extern/ceres/internal/ceres/random.h b/extern/ceres/internal/ceres/random.h
index 2a025600609..87d9d77d90d 100644
--- a/extern/ceres/internal/ceres/random.h
+++ b/extern/ceres/internal/ceres/random.h
@@ -43,7 +43,11 @@ inline void SetRandomState(int state) {
}
inline int Uniform(int n) {
- return rand() % n;
+ if (n) {
+ return rand() % n;
+ } else {
+ return 0;
+ }
}
inline double RandDouble() {
diff --git a/extern/ceres/internal/ceres/reorder_program.cc b/extern/ceres/internal/ceres/reorder_program.cc
index a7c37107591..aa6032a9e9e 100644
--- a/extern/ceres/internal/ceres/reorder_program.cc
+++ b/extern/ceres/internal/ceres/reorder_program.cc
@@ -31,6 +31,7 @@
#include "ceres/reorder_program.h"
#include <algorithm>
+#include <memory>
#include <numeric>
#include <vector>
@@ -84,7 +85,7 @@ static int MinParameterBlock(const ResidualBlock* residual_block,
return min_parameter_block_position;
}
-#if EIGEN_VERSION_AT_LEAST(3, 2, 2) && defined(CERES_USE_EIGEN_SPARSE)
+#if defined(CERES_USE_EIGEN_SPARSE)
Eigen::SparseMatrix<int> CreateBlockJacobian(
const TripletSparseMatrix& block_jacobian_transpose) {
typedef Eigen::SparseMatrix<int> SparseMatrix;
@@ -178,7 +179,6 @@ void OrderingForSparseNormalCholeskyUsingCXSparse(
}
-#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
void OrderingForSparseNormalCholeskyUsingEigenSparse(
const TripletSparseMatrix& tsm_block_jacobian_transpose,
int* ordering) {
@@ -211,7 +211,6 @@ void OrderingForSparseNormalCholeskyUsingEigenSparse(
}
#endif // CERES_USE_EIGEN_SPARSE
}
-#endif
} // namespace
@@ -233,24 +232,19 @@ bool ApplyOrdering(const ProblemImpl::ParameterMap& parameter_map,
program->mutable_parameter_blocks();
parameter_blocks->clear();
- const map<int, set<double*> >& groups = ordering.group_to_elements();
- for (map<int, set<double*> >::const_iterator group_it = groups.begin();
- group_it != groups.end();
- ++group_it) {
- const set<double*>& group = group_it->second;
- for (set<double*>::const_iterator parameter_block_ptr_it = group.begin();
- parameter_block_ptr_it != group.end();
- ++parameter_block_ptr_it) {
- ProblemImpl::ParameterMap::const_iterator parameter_block_it =
- parameter_map.find(*parameter_block_ptr_it);
- if (parameter_block_it == parameter_map.end()) {
+ const map<int, set<double*>>& groups = ordering.group_to_elements();
+ for (const auto& p : groups) {
+ const set<double*>& group = p.second;
+ for (double* parameter_block_ptr : group) {
+ auto it = parameter_map.find(parameter_block_ptr);
+ if (it == parameter_map.end()) {
*error = StringPrintf("User specified ordering contains a pointer "
"to a double that is not a parameter block in "
"the problem. The invalid double is in group: %d",
- group_it->first);
+ p.first);
return false;
}
- parameter_blocks->push_back(parameter_block_it->second);
+ parameter_blocks->push_back(it->second);
}
}
return true;
@@ -339,7 +333,7 @@ bool LexicographicallyOrderResidualBlocks(
// Pre-order the columns corresponding to the schur complement if
// possible.
-void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
+static void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
const ParameterBlockOrdering& parameter_block_ordering,
Program* program) {
#ifndef CERES_NO_SUITESPARSE
@@ -364,7 +358,7 @@ void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
MapValuesToContiguousRange(constraints.size(), &constraints[0]);
// Compute a block sparse presentation of J'.
- scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+ std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
program->CreateJacobianBlockSparsityTranspose());
cholmod_sparse* block_jacobian_transpose =
@@ -385,15 +379,12 @@ void MaybeReorderSchurComplementColumnsUsingSuiteSparse(
#endif
}
-void MaybeReorderSchurComplementColumnsUsingEigen(
+static void MaybeReorderSchurComplementColumnsUsingEigen(
const int size_of_first_elimination_group,
const ProblemImpl::ParameterMap& parameter_map,
Program* program) {
-#if !EIGEN_VERSION_AT_LEAST(3, 2, 2) || !defined(CERES_USE_EIGEN_SPARSE)
- return;
-#else
-
- scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+#if defined(CERES_USE_EIGEN_SPARSE)
+ std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
program->CreateJacobianBlockSparsityTranspose());
typedef Eigen::SparseMatrix<int> SparseMatrix;
@@ -531,18 +522,14 @@ bool ReorderProgramForSchurTypeLinearSolver(
// Schur type solvers also require that their residual blocks be
// lexicographically ordered.
- if (!LexicographicallyOrderResidualBlocks(size_of_first_elimination_group,
- program,
- error)) {
- return false;
- }
-
- return true;
+ return LexicographicallyOrderResidualBlocks(
+ size_of_first_elimination_group, program, error);
}
-bool ReorderProgramForSparseNormalCholesky(
+bool ReorderProgramForSparseCholesky(
const SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
const ParameterBlockOrdering& parameter_block_ordering,
+ int start_row_block,
Program* program,
string* error) {
if (parameter_block_ordering.NumElements() != program->NumParameterBlocks()) {
@@ -555,8 +542,8 @@ bool ReorderProgramForSparseNormalCholesky(
}
// Compute a block sparse presentation of J'.
- scoped_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
- program->CreateJacobianBlockSparsityTranspose());
+ std::unique_ptr<TripletSparseMatrix> tsm_block_jacobian_transpose(
+ program->CreateJacobianBlockSparsityTranspose(start_row_block));
vector<int> ordering(program->NumParameterBlocks(), 0);
vector<ParameterBlock*>& parameter_blocks =
@@ -572,19 +559,19 @@ bool ReorderProgramForSparseNormalCholesky(
OrderingForSparseNormalCholeskyUsingCXSparse(
*tsm_block_jacobian_transpose,
&ordering[0]);
+ } else if (sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
+ // Accelerate does not provide a function to perform reordering without
+ // performing a full symbolic factorisation. As such, we have nothing
+ // to gain from trying to reorder the problem here, as it will happen
+ // in AppleAccelerateCholesky::Factorize() (once) and reordering here
+ // would involve performing two symbolic factorisations instead of one
+ // which would have a negative overall impact on performance.
+ return true;
+
} else if (sparse_linear_algebra_library_type == EIGEN_SPARSE) {
-#if EIGEN_VERSION_AT_LEAST(3, 2, 2)
- OrderingForSparseNormalCholeskyUsingEigenSparse(
+ OrderingForSparseNormalCholeskyUsingEigenSparse(
*tsm_block_jacobian_transpose,
&ordering[0]);
-#else
- // For Eigen versions less than 3.2.2, there is nothing to do as
- // older versions of Eigen do not expose a method for doing
- // symbolic analysis on pre-ordered matrices, so a block
- // pre-ordering is a bit pointless.
-
- return true;
-#endif
}
// Apply ordering.
@@ -597,5 +584,17 @@ bool ReorderProgramForSparseNormalCholesky(
return true;
}
+int ReorderResidualBlocksByPartition(
+ const std::unordered_set<ResidualBlockId>& bottom_residual_blocks,
+ Program* program) {
+ auto residual_blocks = program->mutable_residual_blocks();
+ auto it = std::partition(
+ residual_blocks->begin(), residual_blocks->end(),
+ [&bottom_residual_blocks](ResidualBlock* r) {
+ return bottom_residual_blocks.count(r) == 0;
+ });
+ return it - residual_blocks->begin();
+}
+
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/reorder_program.h b/extern/ceres/internal/ceres/reorder_program.h
index 36e5d1637a9..88cbee3af21 100644
--- a/extern/ceres/internal/ceres/reorder_program.h
+++ b/extern/ceres/internal/ceres/reorder_program.h
@@ -89,12 +89,27 @@ bool ReorderProgramForSchurTypeLinearSolver(
// fill-reducing ordering is available in the sparse linear algebra
// library (SuiteSparse version >= 4.2.0) then the fill reducing
// ordering will take it into account, otherwise it will be ignored.
-bool ReorderProgramForSparseNormalCholesky(
+bool ReorderProgramForSparseCholesky(
SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type,
const ParameterBlockOrdering& parameter_block_ordering,
+ int start_row_block,
Program* program,
std::string* error);
+// Reorder the residual blocks in the program so that all the residual
+// blocks in bottom_residual_blocks are at the bottom. The return
+// value is the number of residual blocks in the program in "top" part
+// of the Program, i.e., the ones not included in
+// bottom_residual_blocks.
+//
+// This number can be different from program->NumResidualBlocks() -
+// bottom_residual_blocks.size() because we allow
+// bottom_residual_blocks to contain residual blocks not present in
+// the Program.
+int ReorderResidualBlocksByPartition(
+ const std::unordered_set<ResidualBlockId>& bottom_residual_blocks,
+ Program* program);
+
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/residual_block.cc b/extern/ceres/internal/ceres/residual_block.cc
index 9a123cf132e..0bf30bcf446 100644
--- a/extern/ceres/internal/ceres/residual_block.cc
+++ b/extern/ceres/internal/ceres/residual_block.cc
@@ -35,13 +35,13 @@
#include <cstddef>
#include <vector>
#include "ceres/corrector.h"
-#include "ceres/parameter_block.h"
-#include "ceres/residual_block_utils.h"
#include "ceres/cost_function.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
#include "ceres/local_parameterization.h"
#include "ceres/loss_function.h"
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block_utils.h"
#include "ceres/small_blas.h"
using Eigen::Dynamic;
@@ -50,16 +50,14 @@ namespace ceres {
namespace internal {
ResidualBlock::ResidualBlock(
- const CostFunction* cost_function,
- const LossFunction* loss_function,
- const std::vector<ParameterBlock*>& parameter_blocks,
- int index)
+ const CostFunction* cost_function, const LossFunction* loss_function,
+ const std::vector<ParameterBlock*>& parameter_blocks, int index)
: cost_function_(cost_function),
loss_function_(loss_function),
parameter_blocks_(
- new ParameterBlock* [
- cost_function->parameter_block_sizes().size()]),
+ new ParameterBlock*[cost_function->parameter_block_sizes().size()]),
index_(index) {
+ CHECK(cost_function_ != nullptr);
std::copy(parameter_blocks.begin(),
parameter_blocks.end(),
parameter_blocks_.get());
@@ -82,11 +80,11 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
// Put pointers into the scratch space into global_jacobians as appropriate.
FixedArray<double*, 8> global_jacobians(num_parameter_blocks);
- if (jacobians != NULL) {
+ if (jacobians != nullptr) {
for (int i = 0; i < num_parameter_blocks; ++i) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
- if (jacobians[i] != NULL &&
- parameter_block->LocalParameterizationJacobian() != NULL) {
+ if (jacobians[i] != nullptr &&
+ parameter_block->LocalParameterizationJacobian() != nullptr) {
global_jacobians[i] = scratch;
scratch += num_residuals * parameter_block->Size();
} else {
@@ -96,7 +94,7 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
}
// If the caller didn't request residuals, use the scratch space for them.
- bool outputting_residuals = (residuals != NULL);
+ bool outputting_residuals = (residuals != nullptr);
if (!outputting_residuals) {
residuals = scratch;
}
@@ -104,16 +102,17 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
// Invalidate the evaluation buffers so that we can check them after
// the CostFunction::Evaluate call, to see if all the return values
// that were required were written to and that they are finite.
- double** eval_jacobians = (jacobians != NULL) ? global_jacobians.get() : NULL;
+ double** eval_jacobians =
+ (jacobians != nullptr) ? global_jacobians.data() : nullptr;
InvalidateEvaluation(*this, cost, residuals, eval_jacobians);
- if (!cost_function_->Evaluate(parameters.get(), residuals, eval_jacobians)) {
+ if (!cost_function_->Evaluate(parameters.data(), residuals, eval_jacobians)) {
return false;
}
if (!IsEvaluationValid(*this,
- parameters.get(),
+ parameters.data(),
cost,
residuals,
eval_jacobians)) {
@@ -124,7 +123,7 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
"residual and jacobians that were requested or there was a non-finite value (nan/infinite)\n" // NOLINT
"generated during the or jacobian computation. \n\n" +
EvaluationToString(*this,
- parameters.get(),
+ parameters.data(),
cost,
residuals,
eval_jacobians);
@@ -135,13 +134,13 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
double squared_norm = VectorRef(residuals, num_residuals).squaredNorm();
// Update the jacobians with the local parameterizations.
- if (jacobians != NULL) {
+ if (jacobians != nullptr) {
for (int i = 0; i < num_parameter_blocks; ++i) {
- if (jacobians[i] != NULL) {
+ if (jacobians[i] != nullptr) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
// Apply local reparameterization to the jacobians.
- if (parameter_block->LocalParameterizationJacobian() != NULL) {
+ if (parameter_block->LocalParameterizationJacobian() != nullptr) {
// jacobians[i] = global_jacobians[i] * global_to_local_jacobian.
MatrixMatrixMultiply<Dynamic, Dynamic, Dynamic, Dynamic, 0>(
global_jacobians[i],
@@ -156,7 +155,7 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
}
}
- if (loss_function_ == NULL || !apply_loss_function) {
+ if (loss_function_ == nullptr || !apply_loss_function) {
*cost = 0.5 * squared_norm;
return true;
}
@@ -167,16 +166,16 @@ bool ResidualBlock::Evaluate(const bool apply_loss_function,
// No jacobians and not outputting residuals? All done. Doing an early exit
// here avoids constructing the "Corrector" object below in a common case.
- if (jacobians == NULL && !outputting_residuals) {
+ if (jacobians == nullptr && !outputting_residuals) {
return true;
}
// Correct for the effects of the loss function. The jacobians need to be
// corrected before the residuals, since they use the uncorrected residuals.
Corrector correct(squared_norm, rho);
- if (jacobians != NULL) {
+ if (jacobians != nullptr) {
for (int i = 0; i < num_parameter_blocks; ++i) {
- if (jacobians[i] != NULL) {
+ if (jacobians[i] != nullptr) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
// Correct the jacobians for the loss function.
@@ -206,8 +205,7 @@ int ResidualBlock::NumScratchDoublesForEvaluate() const {
int scratch_doubles = 1;
for (int i = 0; i < num_parameters; ++i) {
const ParameterBlock* parameter_block = parameter_blocks_[i];
- if (!parameter_block->IsConstant() &&
- parameter_block->LocalParameterizationJacobian() != NULL) {
+ if (parameter_block->LocalParameterizationJacobian() != nullptr) {
scratch_doubles += parameter_block->Size();
}
}
diff --git a/extern/ceres/internal/ceres/residual_block.h b/extern/ceres/internal/ceres/residual_block.h
index a32f1c36cd3..a2e4425b911 100644
--- a/extern/ceres/internal/ceres/residual_block.h
+++ b/extern/ceres/internal/ceres/residual_block.h
@@ -34,12 +34,13 @@
#ifndef CERES_INTERNAL_RESIDUAL_BLOCK_H_
#define CERES_INTERNAL_RESIDUAL_BLOCK_H_
+#include <cstdint>
+#include <memory>
#include <string>
#include <vector>
#include "ceres/cost_function.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
@@ -81,12 +82,14 @@ class ResidualBlock {
// computed. If jacobians[i] is NULL, then the jacobian for that parameter is
// not computed.
//
+ // cost must not be null.
+ //
// Evaluate needs scratch space which must be supplied by the caller via
// scratch. The array should have at least NumScratchDoublesForEvaluate()
// space available.
//
// The return value indicates the success or failure. If the function returns
- // false, the caller should expect the the output memory locations to have
+ // false, the caller should expect the output memory locations to have
// been modified.
//
// The returned cost and jacobians have had robustification and local
@@ -134,12 +137,12 @@ class ResidualBlock {
private:
const CostFunction* cost_function_;
const LossFunction* loss_function_;
- scoped_array<ParameterBlock*> parameter_blocks_;
+ std::unique_ptr<ParameterBlock*[]> parameter_blocks_;
// The index of the residual, typically in a Program. This is only to permit
// switching from a ResidualBlock* to an index in the Program's array, needed
// to do efficient removals.
- int32 index_;
+ int32_t index_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/residual_block_utils.cc b/extern/ceres/internal/ceres/residual_block_utils.cc
index dd2bd73a6ac..35e928bbcc1 100644
--- a/extern/ceres/internal/ceres/residual_block_utils.cc
+++ b/extern/ceres/internal/ceres/residual_block_utils.cc
@@ -68,8 +68,8 @@ string EvaluationToString(const ResidualBlock& block,
double* cost,
double* residuals,
double** jacobians) {
- CHECK_NOTNULL(cost);
- CHECK_NOTNULL(residuals);
+ CHECK(cost != nullptr);
+ CHECK(residuals != nullptr);
const int num_parameter_blocks = block.NumParameterBlocks();
const int num_residuals = block.NumResiduals();
diff --git a/extern/ceres/internal/ceres/schur_complement_solver.cc b/extern/ceres/internal/ceres/schur_complement_solver.cc
index 65449832c4c..0083300b036 100644
--- a/extern/ceres/internal/ceres/schur_complement_solver.cc
+++ b/extern/ceres/internal/ceres/schur_complement_solver.cc
@@ -28,33 +28,30 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include "ceres/internal/port.h"
+#include "ceres/schur_complement_solver.h"
#include <algorithm>
#include <ctime>
+#include <memory>
#include <set>
-#include <sstream>
#include <vector>
+#include "Eigen/Dense"
+#include "Eigen/SparseCore"
#include "ceres/block_random_access_dense_matrix.h"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_random_access_sparse_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/conjugate_gradients_solver.h"
-#include "ceres/cxsparse.h"
#include "ceres/detect_structure.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/lapack.h"
#include "ceres/linear_solver.h"
-#include "ceres/schur_complement_solver.h"
-#include "ceres/suitesparse.h"
+#include "ceres/sparse_cholesky.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
-#include "Eigen/Dense"
-#include "Eigen/SparseCore"
namespace ceres {
namespace internal {
@@ -70,23 +67,22 @@ class BlockRandomAccessSparseMatrixAdapter : public LinearOperator {
public:
explicit BlockRandomAccessSparseMatrixAdapter(
const BlockRandomAccessSparseMatrix& m)
- : m_(m) {
- }
+ : m_(m) {}
virtual ~BlockRandomAccessSparseMatrixAdapter() {}
// y = y + Ax;
- virtual void RightMultiply(const double* x, double* y) const {
+ void RightMultiply(const double* x, double* y) const final {
m_.SymmetricRightMultiply(x, y);
}
// y = y + A'x;
- virtual void LeftMultiply(const double* x, double* y) const {
+ void LeftMultiply(const double* x, double* y) const final {
m_.SymmetricRightMultiply(x, y);
}
- virtual int num_rows() const { return m_.num_rows(); }
- virtual int num_cols() const { return m_.num_rows(); }
+ int num_rows() const final { return m_.num_rows(); }
+ int num_cols() const final { return m_.num_rows(); }
private:
const BlockRandomAccessSparseMatrix& m_;
@@ -96,29 +92,28 @@ class BlockRandomAccessDiagonalMatrixAdapter : public LinearOperator {
public:
explicit BlockRandomAccessDiagonalMatrixAdapter(
const BlockRandomAccessDiagonalMatrix& m)
- : m_(m) {
- }
+ : m_(m) {}
virtual ~BlockRandomAccessDiagonalMatrixAdapter() {}
// y = y + Ax;
- virtual void RightMultiply(const double* x, double* y) const {
+ void RightMultiply(const double* x, double* y) const final {
m_.RightMultiply(x, y);
}
// y = y + A'x;
- virtual void LeftMultiply(const double* x, double* y) const {
+ void LeftMultiply(const double* x, double* y) const final {
m_.RightMultiply(x, y);
}
- virtual int num_rows() const { return m_.num_rows(); }
- virtual int num_cols() const { return m_.num_rows(); }
+ int num_rows() const final { return m_.num_rows(); }
+ int num_cols() const final { return m_.num_rows(); }
private:
const BlockRandomAccessDiagonalMatrix& m_;
};
-} // namespace
+} // namespace
LinearSolver::Summary SchurComplementSolver::SolveImpl(
BlockSparseMatrix* A,
@@ -127,20 +122,45 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
double* x) {
EventLogger event_logger("SchurComplementSolver::Solve");
+ const CompressedRowBlockStructure* bs = A->block_structure();
if (eliminator_.get() == NULL) {
- InitStorage(A->block_structure());
- DetectStructure(*A->block_structure(),
- options_.elimination_groups[0],
+ const int num_eliminate_blocks = options_.elimination_groups[0];
+ const int num_f_blocks = bs->cols.size() - num_eliminate_blocks;
+
+ InitStorage(bs);
+ DetectStructure(*bs,
+ num_eliminate_blocks,
&options_.row_block_size,
&options_.e_block_size,
&options_.f_block_size);
- eliminator_.reset(CHECK_NOTNULL(SchurEliminatorBase::Create(options_)));
- eliminator_->Init(options_.elimination_groups[0], A->block_structure());
- };
+
+ // For the special case of the static structure <2,3,6> with
+ // exactly one f block use the SchurEliminatorForOneFBlock.
+ //
+ // TODO(sameeragarwal): A more scalable template specialization
+ // mechanism that does not cause binary bloat.
+ if (options_.row_block_size == 2 &&
+ options_.e_block_size == 3 &&
+ options_.f_block_size == 6 &&
+ num_f_blocks == 1) {
+ eliminator_.reset(new SchurEliminatorForOneFBlock<2, 3, 6>);
+ } else {
+ eliminator_.reset(SchurEliminatorBase::Create(options_));
+ }
+
+ CHECK(eliminator_);
+ const bool kFullRankETE = true;
+ eliminator_->Init(num_eliminate_blocks, kFullRankETE, bs);
+ }
+
std::fill(x, x + A->num_cols(), 0.0);
event_logger.AddEvent("Setup");
- eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get());
+ eliminator_->Eliminate(BlockSparseMatrixData(*A),
+ b,
+ per_solve_options.D,
+ lhs_.get(),
+ rhs_.get());
event_logger.AddEvent("Eliminate");
double* reduced_solution = x + A->num_cols() - lhs_->num_cols();
@@ -149,7 +169,8 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
event_logger.AddEvent("ReducedSolve");
if (summary.termination_type == LINEAR_SOLVER_SUCCESS) {
- eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x);
+ eliminator_->BackSubstitute(
+ BlockSparseMatrixData(*A), b, per_solve_options.D, reduced_solution, x);
event_logger.AddEvent("BackSubstitute");
}
@@ -164,9 +185,7 @@ void DenseSchurComplementSolver::InitStorage(
const int num_col_blocks = bs->cols.size();
vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
- for (int i = num_eliminate_blocks, j = 0;
- i < num_col_blocks;
- ++i, ++j) {
+ for (int i = num_eliminate_blocks, j = 0; i < num_col_blocks; ++i, ++j) {
blocks[j] = bs->cols[i].size;
}
@@ -177,10 +196,8 @@ void DenseSchurComplementSolver::InitStorage(
// Solve the system Sx = r, assuming that the matrix S is stored in a
// BlockRandomAccessDenseMatrix. The linear system is solved using
// Eigen's Cholesky factorization.
-LinearSolver::Summary
-DenseSchurComplementSolver::SolveReducedLinearSystem(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
+LinearSolver::Summary DenseSchurComplementSolver::SolveReducedLinearSystem(
+ const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
LinearSolver::Summary summary;
summary.num_iterations = 0;
summary.termination_type = LINEAR_SOLVER_SUCCESS;
@@ -201,8 +218,8 @@ DenseSchurComplementSolver::SolveReducedLinearSystem(
if (options().dense_linear_algebra_library_type == EIGEN) {
Eigen::LLT<Matrix, Eigen::Upper> llt =
ConstMatrixRef(m->values(), num_rows, num_rows)
- .selfadjointView<Eigen::Upper>()
- .llt();
+ .selfadjointView<Eigen::Upper>()
+ .llt();
if (llt.info() != Eigen::Success) {
summary.termination_type = LINEAR_SOLVER_FAILURE;
summary.message =
@@ -213,11 +230,8 @@ DenseSchurComplementSolver::SolveReducedLinearSystem(
VectorRef(solution, num_rows) = llt.solve(ConstVectorRef(rhs(), num_rows));
} else {
VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
- summary.termination_type =
- LAPACK::SolveInPlaceUsingCholesky(num_rows,
- m->values(),
- solution,
- &summary.message);
+ summary.termination_type = LAPACK::SolveInPlaceUsingCholesky(
+ num_rows, m->values(), solution, &summary.message);
}
return summary;
@@ -225,23 +239,14 @@ DenseSchurComplementSolver::SolveReducedLinearSystem(
SparseSchurComplementSolver::SparseSchurComplementSolver(
const LinearSolver::Options& options)
- : SchurComplementSolver(options),
- factor_(NULL),
- cxsparse_factor_(NULL) {
-}
-
-SparseSchurComplementSolver::~SparseSchurComplementSolver() {
- if (factor_ != NULL) {
- ss_.Free(factor_);
- factor_ = NULL;
- }
-
- if (cxsparse_factor_ != NULL) {
- cxsparse_.Free(cxsparse_factor_);
- cxsparse_factor_ = NULL;
+ : SchurComplementSolver(options) {
+ if (options.type != ITERATIVE_SCHUR) {
+ sparse_cholesky_ = SparseCholesky::Create(options);
}
}
+SparseSchurComplementSolver::~SparseSchurComplementSolver() {}
+
// Determine the non-zero blocks in the Schur Complement matrix, and
// initialize a BlockRandomAccessSparseMatrix object.
void SparseSchurComplementSolver::InitStorage(
@@ -255,7 +260,7 @@ void SparseSchurComplementSolver::InitStorage(
blocks_[i - num_eliminate_blocks] = bs->cols[i].size;
}
- set<pair<int, int> > block_pairs;
+ set<pair<int, int>> block_pairs;
for (int i = 0; i < blocks_.size(); ++i) {
block_pairs.insert(make_pair(i, i));
}
@@ -293,7 +298,7 @@ void SparseSchurComplementSolver::InitStorage(
}
}
- // Remaing rows do not contribute to the chunks and directly go
+ // Remaining rows do not contribute to the chunks and directly go
// into the schur complement via an outer product.
for (; r < num_row_blocks; ++r) {
const CompressedRow& row = bs->rows[r];
@@ -313,296 +318,49 @@ void SparseSchurComplementSolver::InitStorage(
set_rhs(new double[lhs()->num_rows()]);
}
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystem(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
+LinearSolver::Summary SparseSchurComplementSolver::SolveReducedLinearSystem(
+ const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
if (options().type == ITERATIVE_SCHUR) {
- CHECK(options().use_explicit_schur_complement);
return SolveReducedLinearSystemUsingConjugateGradients(per_solve_options,
solution);
}
- switch (options().sparse_linear_algebra_library_type) {
- case SUITE_SPARSE:
- return SolveReducedLinearSystemUsingSuiteSparse(per_solve_options,
- solution);
- case CX_SPARSE:
- return SolveReducedLinearSystemUsingCXSparse(per_solve_options,
- solution);
- case EIGEN_SPARSE:
- return SolveReducedLinearSystemUsingEigen(per_solve_options,
- solution);
- default:
- LOG(FATAL) << "Unknown sparse linear algebra library : "
- << options().sparse_linear_algebra_library_type;
- }
-
- return LinearSolver::Summary();
-}
-
-// Solve the system Sx = r, assuming that the matrix S is stored in a
-// BlockRandomAccessSparseMatrix. The linear system is solved using
-// CHOLMOD's sparse cholesky factorization routines.
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
-#ifdef CERES_NO_SUITESPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message = "Ceres was not built with SuiteSparse support. "
- "Therefore, SPARSE_SCHUR cannot be used with SUITE_SPARSE";
- return summary;
-
-#else
-
LinearSolver::Summary summary;
summary.num_iterations = 0;
summary.termination_type = LINEAR_SOLVER_SUCCESS;
summary.message = "Success.";
- TripletSparseMatrix* tsm =
- const_cast<TripletSparseMatrix*>(
- down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
- const int num_rows = tsm->num_rows();
-
- // The case where there are no f blocks, and the system is block
- // diagonal.
- if (num_rows == 0) {
+ const TripletSparseMatrix* tsm =
+ down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix();
+ if (tsm->num_rows() == 0) {
return summary;
}
- summary.num_iterations = 1;
- cholmod_sparse* cholmod_lhs = NULL;
- if (options().use_postordering) {
- // If we are going to do a full symbolic analysis of the schur
- // complement matrix from scratch and not rely on the
- // pre-ordering, then the fastest path in cholmod_factorize is the
- // one corresponding to upper triangular matrices.
-
- // Create a upper triangular symmetric matrix.
- cholmod_lhs = ss_.CreateSparseMatrix(tsm);
- cholmod_lhs->stype = 1;
-
- if (factor_ == NULL) {
- factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs,
- blocks_,
- blocks_,
- &summary.message);
- }
+ std::unique_ptr<CompressedRowSparseMatrix> lhs;
+ const CompressedRowSparseMatrix::StorageType storage_type =
+ sparse_cholesky_->StorageType();
+ if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+ lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+ lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
} else {
- // If we are going to use the natural ordering (i.e. rely on the
- // pre-ordering computed by solver_impl.cc), then the fastest
- // path in cholmod_factorize is the one corresponding to lower
- // triangular matrices.
-
- // Create a upper triangular symmetric matrix.
- cholmod_lhs = ss_.CreateSparseMatrixTranspose(tsm);
- cholmod_lhs->stype = -1;
-
- if (factor_ == NULL) {
- factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs,
- &summary.message);
- }
- }
-
- if (factor_ == NULL) {
- ss_.Free(cholmod_lhs);
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- // No need to set message as it has already been set by the
- // symbolic analysis routines above.
- return summary;
+ lhs.reset(
+ CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+ lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
}
- summary.termination_type =
- ss_.Cholesky(cholmod_lhs, factor_, &summary.message);
-
- ss_.Free(cholmod_lhs);
-
- if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
- // No need to set message as it has already been set by the
- // numeric factorization routine above.
- return summary;
- }
-
- cholmod_dense* cholmod_rhs =
- ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
- cholmod_dense* cholmod_solution = ss_.Solve(factor_,
- cholmod_rhs,
- &summary.message);
- ss_.Free(cholmod_rhs);
-
- if (cholmod_solution == NULL) {
- summary.message =
- "SuiteSparse failure. Unable to perform triangular solve.";
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- return summary;
- }
-
- VectorRef(solution, num_rows)
- = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
- ss_.Free(cholmod_solution);
- return summary;
-#endif // CERES_NO_SUITESPARSE
-}
-
-// Solve the system Sx = r, assuming that the matrix S is stored in a
-// BlockRandomAccessSparseMatrix. The linear system is solved using
-// CXSparse's sparse cholesky factorization routines.
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
-#ifdef CERES_NO_CXSPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message = "Ceres was not built with CXSparse support. "
- "Therefore, SPARSE_SCHUR cannot be used with CX_SPARSE";
- return summary;
-
-#else
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_SUCCESS;
- summary.message = "Success.";
-
- // Extract the TripletSparseMatrix that is used for actually storing S.
- TripletSparseMatrix* tsm =
- const_cast<TripletSparseMatrix*>(
- down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
- const int num_rows = tsm->num_rows();
-
- // The case where there are no f blocks, and the system is block
- // diagonal.
- if (num_rows == 0) {
- return summary;
- }
-
- cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
- VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
-
- // Compute symbolic factorization if not available.
- if (cxsparse_factor_ == NULL) {
- cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_);
- }
-
- if (cxsparse_factor_ == NULL) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "CXSparse failure. Unable to find symbolic factorization.";
- } else if (!cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution)) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "CXSparse::SolveCholesky failed.";
- }
-
- cxsparse_.Free(lhs);
- return summary;
-#endif // CERES_NO_CXPARSE
-}
-
-// Solve the system Sx = r, assuming that the matrix S is stored in a
-// BlockRandomAccessSparseMatrix. The linear system is solved using
-// Eigen's sparse cholesky factorization routines.
-LinearSolver::Summary
-SparseSchurComplementSolver::SolveReducedLinearSystemUsingEigen(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
-#ifndef CERES_USE_EIGEN_SPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "SPARSE_SCHUR cannot be used with EIGEN_SPARSE. "
- "Ceres was not built with support for "
- "Eigen's SimplicialLDLT decomposition. "
- "This requires enabling building with -DEIGENSPARSE=ON.";
- return summary;
-
-#else
- EventLogger event_logger("SchurComplementSolver::EigenSolve");
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_SUCCESS;
- summary.message = "Success.";
-
- // Extract the TripletSparseMatrix that is used for actually storing S.
- TripletSparseMatrix* tsm =
- const_cast<TripletSparseMatrix*>(
- down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
- const int num_rows = tsm->num_rows();
-
- // The case where there are no f blocks, and the system is block
- // diagonal.
- if (num_rows == 0) {
- return summary;
- }
-
- // This is an upper triangular matrix.
- CompressedRowSparseMatrix crsm(*tsm);
- // Map this to a column major, lower triangular matrix.
- Eigen::MappedSparseMatrix<double, Eigen::ColMajor> eigen_lhs(
- crsm.num_rows(),
- crsm.num_rows(),
- crsm.num_nonzeros(),
- crsm.mutable_rows(),
- crsm.mutable_cols(),
- crsm.mutable_values());
- event_logger.AddEvent("ToCompressedRowSparseMatrix");
-
- // Compute symbolic factorization if one does not exist.
- if (simplicial_ldlt_.get() == NULL) {
- simplicial_ldlt_.reset(new SimplicialLDLT);
- // This ordering is quite bad. The scalar ordering produced by the
- // AMD algorithm is quite bad and can be an order of magnitude
- // worse than the one computed using the block version of the
- // algorithm.
- simplicial_ldlt_->analyzePattern(eigen_lhs);
- if (VLOG_IS_ON(2)) {
- std::stringstream ss;
- simplicial_ldlt_->dumpMemory(ss);
- VLOG(2) << "Symbolic Analysis\n"
- << ss.str();
- }
- event_logger.AddEvent("Analysis");
- if (simplicial_ldlt_->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "Eigen failure. Unable to find symbolic factorization.";
- return summary;
- }
- }
-
- simplicial_ldlt_->factorize(eigen_lhs);
- event_logger.AddEvent("Factorize");
- if (simplicial_ldlt_->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "Eigen failure. Unable to find numeric factoriztion.";
- return summary;
- }
-
- VectorRef(solution, num_rows) =
- simplicial_ldlt_->solve(ConstVectorRef(rhs(), num_rows));
- event_logger.AddEvent("Solve");
- if (simplicial_ldlt_->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "Eigen failure. Unable to do triangular solve.";
- }
+ *lhs->mutable_col_blocks() = blocks_;
+ *lhs->mutable_row_blocks() = blocks_;
+ summary.num_iterations = 1;
+ summary.termination_type = sparse_cholesky_->FactorAndSolve(
+ lhs.get(), rhs(), solution, &summary.message);
return summary;
-#endif // CERES_USE_EIGEN_SPARSE
}
LinearSolver::Summary
SparseSchurComplementSolver::SolveReducedLinearSystemUsingConjugateGradients(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution) {
+ const LinearSolver::PerSolveOptions& per_solve_options, double* solution) {
+ CHECK(options().use_explicit_schur_complement);
const int num_rows = lhs()->num_rows();
// The case where there are no f blocks, and the system is block
// diagonal.
@@ -621,27 +379,24 @@ SparseSchurComplementSolver::SolveReducedLinearSystemUsingConjugateGradients(
preconditioner_.reset(new BlockRandomAccessDiagonalMatrix(blocks_));
}
- BlockRandomAccessSparseMatrix* sc =
- down_cast<BlockRandomAccessSparseMatrix*>(
- const_cast<BlockRandomAccessMatrix*>(lhs()));
+ BlockRandomAccessSparseMatrix* sc = down_cast<BlockRandomAccessSparseMatrix*>(
+ const_cast<BlockRandomAccessMatrix*>(lhs()));
// Extract block diagonal from the Schur complement to construct the
// schur_jacobi preconditioner.
- for (int i = 0; i < blocks_.size(); ++i) {
+ for (int i = 0; i < blocks_.size(); ++i) {
const int block_size = blocks_[i];
int sc_r, sc_c, sc_row_stride, sc_col_stride;
CellInfo* sc_cell_info =
- CHECK_NOTNULL(sc->GetCell(i, i,
- &sc_r, &sc_c,
- &sc_row_stride, &sc_col_stride));
+ sc->GetCell(i, i, &sc_r, &sc_c, &sc_row_stride, &sc_col_stride);
+ CHECK(sc_cell_info != nullptr);
MatrixRef sc_m(sc_cell_info->values, sc_row_stride, sc_col_stride);
int pre_r, pre_c, pre_row_stride, pre_col_stride;
- CellInfo* pre_cell_info = CHECK_NOTNULL(
- preconditioner_->GetCell(i, i,
- &pre_r, &pre_c,
- &pre_row_stride, &pre_col_stride));
+ CellInfo* pre_cell_info = preconditioner_->GetCell(
+ i, i, &pre_r, &pre_c, &pre_row_stride, &pre_col_stride);
+ CHECK(pre_cell_info != nullptr);
MatrixRef pre_m(pre_cell_info->values, pre_row_stride, pre_col_stride);
pre_m.block(pre_r, pre_c, block_size, block_size) =
@@ -651,12 +406,11 @@ SparseSchurComplementSolver::SolveReducedLinearSystemUsingConjugateGradients(
VectorRef(solution, num_rows).setZero();
- scoped_ptr<LinearOperator> lhs_adapter(
+ std::unique_ptr<LinearOperator> lhs_adapter(
new BlockRandomAccessSparseMatrixAdapter(*sc));
- scoped_ptr<LinearOperator> preconditioner_adapter(
+ std::unique_ptr<LinearOperator> preconditioner_adapter(
new BlockRandomAccessDiagonalMatrixAdapter(*preconditioner_));
-
LinearSolver::Options cg_options;
cg_options.min_num_iterations = options().min_num_iterations;
cg_options.max_num_iterations = options().max_num_iterations;
@@ -667,10 +421,8 @@ SparseSchurComplementSolver::SolveReducedLinearSystemUsingConjugateGradients(
cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance;
cg_per_solve_options.preconditioner = preconditioner_adapter.get();
- return cg_solver.Solve(lhs_adapter.get(),
- rhs(),
- cg_per_solve_options,
- solution);
+ return cg_solver.Solve(
+ lhs_adapter.get(), rhs(), cg_per_solve_options, solution);
}
} // namespace internal
diff --git a/extern/ceres/internal/ceres/schur_complement_solver.h b/extern/ceres/internal/ceres/schur_complement_solver.h
index 714dafc5b0c..87f04785794 100644
--- a/extern/ceres/internal/ceres/schur_complement_solver.h
+++ b/extern/ceres/internal/ceres/schur_complement_solver.h
@@ -31,22 +31,19 @@
#ifndef CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
#define CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+#include <memory>
#include <set>
#include <utility>
#include <vector>
-#include "ceres/internal/port.h"
-
+#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
-#include "ceres/cxsparse.h"
+#include "ceres/internal/port.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
-#include "ceres/suitesparse.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
-#include "ceres/block_random_access_diagonal_matrix.h"
#ifdef CERES_USE_EIGEN_SPARSE
#include "Eigen/SparseCholesky"
@@ -57,6 +54,7 @@ namespace ceres {
namespace internal {
class BlockSparseMatrix;
+class SparseCholesky;
// Base class for Schur complement based linear least squares
// solvers. It assumes that the input linear system Ax = b can be
@@ -96,33 +94,37 @@ class BlockSparseMatrix;
// be used for problems with upto a few hundred cameras.
//
// SparseSchurComplementSolver: For problems where the Schur
-// complement matrix is large and sparse. It requires that
-// CHOLMOD/SuiteSparse be installed, as it uses CHOLMOD to find a
-// sparse Cholesky factorization of the Schur complement. This solver
-// can be used for solving structure from motion problems with tens of
-// thousands of cameras, though depending on the exact sparsity
-// structure, it maybe better to use an iterative solver.
+// complement matrix is large and sparse. It requires that Ceres be
+// build with at least one sparse linear algebra library, as it
+// computes a sparse Cholesky factorization of the Schur complement.
+//
+// This solver can be used for solving structure from motion problems
+// with tens of thousands of cameras, though depending on the exact
+// sparsity structure, it maybe better to use an iterative solver.
//
// The two solvers can be instantiated by calling
// LinearSolver::CreateLinearSolver with LinearSolver::Options::type
// set to DENSE_SCHUR and SPARSE_SCHUR
-// respectively. LinearSolver::Options::elimination_groups[0] should be
-// at least 1.
+// respectively. LinearSolver::Options::elimination_groups[0] should
+// be at least 1.
class SchurComplementSolver : public BlockSparseMatrixSolver {
public:
explicit SchurComplementSolver(const LinearSolver::Options& options)
: options_(options) {
CHECK_GT(options.elimination_groups.size(), 1);
CHECK_GT(options.elimination_groups[0], 0);
+ CHECK(options.context != NULL);
}
+ SchurComplementSolver(const SchurComplementSolver&) = delete;
+ void operator=(const SchurComplementSolver&) = delete;
// LinearSolver methods
virtual ~SchurComplementSolver() {}
- virtual LinearSolver::Summary SolveImpl(
+ LinearSolver::Summary SolveImpl(
BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double* x);
+ double* x) override;
protected:
const LinearSolver::Options& options() const { return options_; }
@@ -140,11 +142,9 @@ class SchurComplementSolver : public BlockSparseMatrixSolver {
LinearSolver::Options options_;
- scoped_ptr<SchurEliminatorBase> eliminator_;
- scoped_ptr<BlockRandomAccessMatrix> lhs_;
- scoped_array<double> rhs_;
-
- CERES_DISALLOW_COPY_AND_ASSIGN(SchurComplementSolver);
+ std::unique_ptr<SchurEliminatorBase> eliminator_;
+ std::unique_ptr<BlockRandomAccessMatrix> lhs_;
+ std::unique_ptr<double[]> rhs_;
};
// Dense Cholesky factorization based solver.
@@ -152,72 +152,40 @@ class DenseSchurComplementSolver : public SchurComplementSolver {
public:
explicit DenseSchurComplementSolver(const LinearSolver::Options& options)
: SchurComplementSolver(options) {}
+ DenseSchurComplementSolver(const DenseSchurComplementSolver&) = delete;
+ void operator=(const DenseSchurComplementSolver&) = delete;
+
virtual ~DenseSchurComplementSolver() {}
private:
- virtual void InitStorage(const CompressedRowBlockStructure* bs);
- virtual LinearSolver::Summary SolveReducedLinearSystem(
+ void InitStorage(const CompressedRowBlockStructure* bs) final;
+ LinearSolver::Summary SolveReducedLinearSystem(
const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
-
- CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
+ double* solution) final;
};
// Sparse Cholesky factorization based solver.
class SparseSchurComplementSolver : public SchurComplementSolver {
public:
explicit SparseSchurComplementSolver(const LinearSolver::Options& options);
+ SparseSchurComplementSolver(const SparseSchurComplementSolver&) = delete;
+ void operator=(const SparseSchurComplementSolver&) = delete;
+
virtual ~SparseSchurComplementSolver();
private:
- virtual void InitStorage(const CompressedRowBlockStructure* bs);
- virtual LinearSolver::Summary SolveReducedLinearSystem(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
- LinearSolver::Summary SolveReducedLinearSystemUsingSuiteSparse(
+ void InitStorage(const CompressedRowBlockStructure* bs) final;
+ LinearSolver::Summary SolveReducedLinearSystem(
const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
- LinearSolver::Summary SolveReducedLinearSystemUsingCXSparse(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
- LinearSolver::Summary SolveReducedLinearSystemUsingEigen(
- const LinearSolver::PerSolveOptions& per_solve_options,
- double* solution);
+ double* solution) final;
LinearSolver::Summary SolveReducedLinearSystemUsingConjugateGradients(
const LinearSolver::PerSolveOptions& per_solve_options,
double* solution);
// Size of the blocks in the Schur complement.
std::vector<int> blocks_;
-
- SuiteSparse ss_;
- // Symbolic factorization of the reduced linear system. Precomputed
- // once and reused in subsequent calls.
- cholmod_factor* factor_;
-
- CXSparse cxsparse_;
- // Cached factorization
- cs_dis* cxsparse_factor_;
-
-#ifdef CERES_USE_EIGEN_SPARSE
-
- // The preprocessor gymnastics here are dealing with the fact that
- // before version 3.2.2, Eigen did not support a third template
- // parameter to specify the ordering.
-#if EIGEN_VERSION_AT_LEAST(3,2,2)
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Lower,
- Eigen::NaturalOrdering<int> >
- SimplicialLDLT;
-#else
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Lower>
- SimplicialLDLT;
-#endif
-
- scoped_ptr<SimplicialLDLT> simplicial_ldlt_;
-#endif
-
- scoped_ptr<BlockRandomAccessDiagonalMatrix> preconditioner_;
- CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
+ std::unique_ptr<SparseCholesky> sparse_cholesky_;
+ std::unique_ptr<BlockRandomAccessDiagonalMatrix> preconditioner_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/schur_eliminator.cc b/extern/ceres/internal/ceres/schur_eliminator.cc
index ec0e2a020e5..bc8ced4bc7c 100644
--- a/extern/ceres/internal/ceres/schur_eliminator.cc
+++ b/extern/ceres/internal/ceres/schur_eliminator.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -37,8 +37,7 @@
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
@@ -50,106 +49,105 @@ namespace internal {
SchurEliminatorBase*
SchurEliminatorBase::Create(const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 2)) {
- return new SchurEliminator<2, 2, 2>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 3)) {
- return new SchurEliminator<2, 2, 3>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == 4)) {
- return new SchurEliminator<2, 2, 4>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 2) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 3)) {
- return new SchurEliminator<2, 3, 3>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 4)) {
- return new SchurEliminator<2, 3, 4>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 6)) {
- return new SchurEliminator<2, 3, 6>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == 9)) {
- return new SchurEliminator<2, 3, 9>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 3) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 3)) {
- return new SchurEliminator<2, 4, 3>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 4)) {
- return new SchurEliminator<2, 4, 4>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 8)) {
- return new SchurEliminator<2, 4, 8>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 9)) {
- return new SchurEliminator<2, 4, 9>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 2) &&
- (options.e_block_size == Eigen::Dynamic) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 2)) {
- return new SchurEliminator<4, 4, 2>(options);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 3)) {
- return new SchurEliminator<4, 4, 3>(options);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == 4)) {
- return new SchurEliminator<4, 4, 4>(options);
- }
- if ((options.row_block_size == 4) &&
- (options.e_block_size == 4) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
- }
- if ((options.row_block_size == Eigen::Dynamic) &&
- (options.e_block_size == Eigen::Dynamic) &&
- (options.f_block_size == Eigen::Dynamic)) {
- return new SchurEliminator<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>(options);
- }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 2)) {
+ return new SchurEliminator<2, 2, 2>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 2, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 2, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2)) {
+ return new SchurEliminator<2, 2, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 3, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 3, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 6)) {
+ return new SchurEliminator<2, 3, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 9)) {
+ return new SchurEliminator<2, 3, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3)) {
+ return new SchurEliminator<2, 3, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<2, 4, 3>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<2, 4, 4>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 6)) {
+ return new SchurEliminator<2, 4, 6>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 8)) {
+ return new SchurEliminator<2, 4, 8>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 9)) {
+ return new SchurEliminator<2, 4, 9>(options);
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4)) {
+ return new SchurEliminator<2, 4, Eigen::Dynamic>(options);
+ }
+ if (options.row_block_size == 2){
+ return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options);
+ }
+ if ((options.row_block_size == 3) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<3, 3, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 2)) {
+ return new SchurEliminator<4, 4, 2>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ return new SchurEliminator<4, 4, 3>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ return new SchurEliminator<4, 4, 4>(options);
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4)) {
+ return new SchurEliminator<4, 4, Eigen::Dynamic>(options);
+ }
#endif
VLOG(1) << "Template specializations not found for <"
diff --git a/extern/ceres/internal/ceres/schur_eliminator.h b/extern/ceres/internal/ceres/schur_eliminator.h
index 761b58adc7f..66fcb4d58e8 100644
--- a/extern/ceres/internal/ceres/schur_eliminator.h
+++ b/extern/ceres/internal/ceres/schur_eliminator.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2019 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -32,14 +32,17 @@
#define CERES_INTERNAL_SCHUR_ELIMINATOR_H_
#include <map>
+#include <memory>
+#include <mutex>
#include <vector>
-#include "ceres/mutex.h"
+
+#include "Eigen/Dense"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
-#include "ceres/linear_solver.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/internal/port.h"
+#include "ceres/linear_solver.h"
namespace ceres {
namespace internal {
@@ -50,7 +53,7 @@ namespace internal {
//
// E y + F z = b
//
-// Where x = [y;z] is a partition of the variables. The paritioning
+// Where x = [y;z] is a partition of the variables. The partitioning
// of the variables is such that, E'E is a block diagonal matrix. Or
// in other words, the parameter blocks in E form an independent set
// of the of the graph implied by the block matrix A'A. Then, this
@@ -147,7 +150,7 @@ namespace internal {
// Where the sum is over chunks and E_k'E_k is dense matrix of size y1
// x y1.
//
-// Advanced usage. Uptil now it has been assumed that the user would
+// Advanced usage. Until now it has been assumed that the user would
// be interested in all of the Schur Complement S. However, it is also
// possible to use this eliminator to obtain an arbitrary submatrix of
// the full Schur complement. When the eliminator is generating the
@@ -171,7 +174,14 @@ class SchurEliminatorBase {
// CompressedRowBlockStructure object passed to this method is the
// same one (or is equivalent to) the one associated with the
// BlockSparseMatrix objects below.
+ //
+ // assume_full_rank_ete controls how the eliminator inverts with the
+ // diagonal blocks corresponding to e blocks in A'A. If
+ // assume_full_rank_ete is true, then a Cholesky factorization is
+ // used to compute the inverse, otherwise a singular value
+ // decomposition is used to compute the pseudo inverse.
virtual void Init(int num_eliminate_blocks,
+ bool assume_full_rank_ete,
const CompressedRowBlockStructure* bs) = 0;
// Compute the Schur complement system from the augmented linear
@@ -185,7 +195,7 @@ class SchurEliminatorBase {
//
// Since the Schur complement is a symmetric matrix, only the upper
// triangular part of the Schur complement is computed.
- virtual void Eliminate(const BlockSparseMatrix* A,
+ virtual void Eliminate(const BlockSparseMatrixData& A,
const double* b,
const double* D,
BlockRandomAccessMatrix* lhs,
@@ -194,7 +204,7 @@ class SchurEliminatorBase {
// Given values for the variables z in the F block of A, solve for
// the optimal values of the variables y corresponding to the E
// block in A.
- virtual void BackSubstitute(const BlockSparseMatrix* A,
+ virtual void BackSubstitute(const BlockSparseMatrixData& A,
const double* b,
const double* D,
const double* z,
@@ -210,32 +220,31 @@ class SchurEliminatorBase {
// parameters as template arguments so that they are visible to the
// compiler and can be used for compile time optimization of the low
// level linear algebra routines.
-//
-// This implementation is mulithreaded using OpenMP. The level of
-// parallelism is controlled by LinearSolver::Options::num_threads.
template <int kRowBlockSize = Eigen::Dynamic,
int kEBlockSize = Eigen::Dynamic,
- int kFBlockSize = Eigen::Dynamic >
+ int kFBlockSize = Eigen::Dynamic>
class SchurEliminator : public SchurEliminatorBase {
public:
explicit SchurEliminator(const LinearSolver::Options& options)
- : num_threads_(options.num_threads) {
+ : num_threads_(options.num_threads), context_(options.context) {
+ CHECK(context_ != nullptr);
}
// SchurEliminatorBase Interface
virtual ~SchurEliminator();
- virtual void Init(int num_eliminate_blocks,
- const CompressedRowBlockStructure* bs);
- virtual void Eliminate(const BlockSparseMatrix* A,
- const double* b,
- const double* D,
- BlockRandomAccessMatrix* lhs,
- double* rhs);
- virtual void BackSubstitute(const BlockSparseMatrix* A,
- const double* b,
- const double* D,
- const double* z,
- double* y);
+ void Init(int num_eliminate_blocks,
+ bool assume_full_rank_ete,
+ const CompressedRowBlockStructure* bs) final;
+ void Eliminate(const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs) final;
+ void BackSubstitute(const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ const double* z,
+ double* y) final;
private:
// Chunk objects store combinatorial information needed to
@@ -273,7 +282,7 @@ class SchurEliminator : public SchurEliminatorBase {
void ChunkDiagonalBlockAndGradient(
const Chunk& chunk,
- const BlockSparseMatrix* A,
+ const BlockSparseMatrixData& A,
const double* b,
int row_block_counter,
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* eet,
@@ -282,33 +291,36 @@ class SchurEliminator : public SchurEliminatorBase {
BlockRandomAccessMatrix* lhs);
void UpdateRhs(const Chunk& chunk,
- const BlockSparseMatrix* A,
+ const BlockSparseMatrixData& A,
const double* b,
int row_block_counter,
const double* inverse_ete_g,
double* rhs);
- void ChunkOuterProduct(const CompressedRowBlockStructure* bs,
+ void ChunkOuterProduct(int thread_id,
+ const CompressedRowBlockStructure* bs,
const Matrix& inverse_eet,
const double* buffer,
const BufferLayoutType& buffer_layout,
BlockRandomAccessMatrix* lhs);
- void EBlockRowOuterProduct(const BlockSparseMatrix* A,
+ void EBlockRowOuterProduct(const BlockSparseMatrixData& A,
int row_block_index,
BlockRandomAccessMatrix* lhs);
+ void NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
+ const double* b,
+ int row_block_counter,
+ BlockRandomAccessMatrix* lhs,
+ double* rhs);
- void NoEBlockRowsUpdate(const BlockSparseMatrix* A,
- const double* b,
- int row_block_counter,
- BlockRandomAccessMatrix* lhs,
- double* rhs);
-
- void NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
+ void NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
int row_block_index,
BlockRandomAccessMatrix* lhs);
+ int num_threads_;
+ ContextImpl* context_;
int num_eliminate_blocks_;
+ bool assume_full_rank_ete_;
// Block layout of the columns of the reduced linear system. Since
// the f blocks can be of varying size, this vector stores the
@@ -330,7 +342,7 @@ class SchurEliminator : public SchurEliminatorBase {
//
// [thread_id * buffer_size_ , (thread_id + 1) * buffer_size_]
//
- scoped_array<double> buffer_;
+ std::unique_ptr<double[]> buffer_;
// Buffer to store per thread matrix matrix products used by
// ChunkOuterProduct. Like buffer_ it is of size num_threads *
@@ -338,15 +350,275 @@ class SchurEliminator : public SchurEliminatorBase {
//
// [thread_id * buffer_size_ , (thread_id + 1) * buffer_size_ -1]
//
- scoped_array<double> chunk_outer_product_buffer_;
+ std::unique_ptr<double[]> chunk_outer_product_buffer_;
int buffer_size_;
- int num_threads_;
int uneliminated_row_begins_;
// Locks for the blocks in the right hand side of the reduced linear
// system.
- std::vector<Mutex*> rhs_locks_;
+ std::vector<std::mutex*> rhs_locks_;
+};
+
+// SchurEliminatorForOneFBlock specializes the SchurEliminatorBase interface for
+// the case where there is exactly one f-block and all three parameters
+// kRowBlockSize, kEBlockSize and KFBlockSize are known at compile time. This is
+// the case for some two view bundle adjustment problems which have very
+// stringent latency requirements.
+//
+// Under these assumptions, we can simplify the more general algorithm
+// implemented by SchurEliminatorImpl significantly. Two of the major
+// contributors to the increased performance are:
+//
+// 1. Simpler loop structure and less use of dynamic memory. Almost everything
+// is on the stack and benefits from aligned memory as well as fixed sized
+// vectorization. We are also able to reason about temporaries and control
+// their lifetimes better.
+// 2. Use of inverse() over llt().solve(Identity).
+template <int kRowBlockSize = Eigen::Dynamic,
+ int kEBlockSize = Eigen::Dynamic,
+ int kFBlockSize = Eigen::Dynamic>
+class SchurEliminatorForOneFBlock : public SchurEliminatorBase {
+ public:
+ virtual ~SchurEliminatorForOneFBlock() {}
+ void Init(int num_eliminate_blocks,
+ bool assume_full_rank_ete,
+ const CompressedRowBlockStructure* bs) override {
+ CHECK_GT(num_eliminate_blocks, 0)
+ << "SchurComplementSolver cannot be initialized with "
+ << "num_eliminate_blocks = 0.";
+ CHECK_EQ(bs->cols.size() - num_eliminate_blocks, 1);
+
+ num_eliminate_blocks_ = num_eliminate_blocks;
+ const int num_row_blocks = bs->rows.size();
+ chunks_.clear();
+ int r = 0;
+ // Iterate over the row blocks of A, and detect the chunks. The
+ // matrix should already have been ordered so that all rows
+ // containing the same y block are vertically contiguous.
+ while (r < num_row_blocks) {
+ const int e_block_id = bs->rows[r].cells.front().block_id;
+ if (e_block_id >= num_eliminate_blocks_) {
+ break;
+ }
+
+ chunks_.push_back(Chunk());
+ Chunk& chunk = chunks_.back();
+ chunk.num_rows = 0;
+ chunk.start = r;
+ // Add to the chunk until the first block in the row is
+ // different than the one in the first row for the chunk.
+ while (r + chunk.num_rows < num_row_blocks) {
+ const CompressedRow& row = bs->rows[r + chunk.num_rows];
+ if (row.cells.front().block_id != e_block_id) {
+ break;
+ }
+ ++chunk.num_rows;
+ }
+ r += chunk.num_rows;
+ }
+
+ const Chunk& last_chunk = chunks_.back();
+ uneliminated_row_begins_ = last_chunk.start + last_chunk.num_rows;
+ e_t_e_inverse_matrices_.resize(kEBlockSize * kEBlockSize * chunks_.size());
+ std::fill(
+ e_t_e_inverse_matrices_.begin(), e_t_e_inverse_matrices_.end(), 0.0);
+ }
+
+ void Eliminate(const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ BlockRandomAccessMatrix* lhs_bram,
+ double* rhs_ptr) override {
+ // Since there is only one f-block, we can call GetCell once, and cache its
+ // output.
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info =
+ lhs_bram->GetCell(0, 0, &r, &c, &row_stride, &col_stride);
+ typename EigenTypes<kFBlockSize, kFBlockSize>::MatrixRef lhs(
+ cell_info->values, kFBlockSize, kFBlockSize);
+ typename EigenTypes<kFBlockSize>::VectorRef rhs(rhs_ptr, kFBlockSize);
+
+ lhs.setZero();
+ rhs.setZero();
+
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+
+ // Add the diagonal to the schur complement.
+ if (D != nullptr) {
+ typename EigenTypes<kFBlockSize>::ConstVectorRef diag(
+ D + bs->cols[num_eliminate_blocks_].position, kFBlockSize);
+ lhs.diagonal() = diag.array().square().matrix();
+ }
+
+ Eigen::Matrix<double, kEBlockSize, kFBlockSize> tmp;
+ Eigen::Matrix<double, kEBlockSize, 1> tmp2;
+
+ // The following loop works on a block matrix which looks as follows
+ // (number of rows can be anything):
+ //
+ // [e_1 | f_1] = [b1]
+ // [e_2 | f_2] = [b2]
+ // [e_3 | f_3] = [b3]
+ // [e_4 | f_4] = [b4]
+ //
+ // and computes the following
+ //
+ // e_t_e = sum_i e_i^T * e_i
+ // e_t_e_inverse = inverse(e_t_e)
+ // e_t_f = sum_i e_i^T f_i
+ // e_t_b = sum_i e_i^T b_i
+ // f_t_b = sum_i f_i^T b_i
+ //
+ // lhs += sum_i f_i^T * f_i - e_t_f^T * e_t_e_inverse * e_t_f
+ // rhs += f_t_b - e_t_f^T * e_t_e_inverse * e_t_b
+ for (int i = 0; i < chunks_.size(); ++i) {
+ const Chunk& chunk = chunks_[i];
+ const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+
+ // Naming covention, e_t_e = e_block.transpose() * e_block;
+ Eigen::Matrix<double, kEBlockSize, kEBlockSize> e_t_e;
+ Eigen::Matrix<double, kEBlockSize, kFBlockSize> e_t_f;
+ Eigen::Matrix<double, kEBlockSize, 1> e_t_b;
+ Eigen::Matrix<double, kFBlockSize, 1> f_t_b;
+
+ // Add the square of the diagonal to e_t_e.
+ if (D != NULL) {
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(
+ D + bs->cols[e_block_id].position, kEBlockSize);
+ e_t_e = diag.array().square().matrix().asDiagonal();
+ } else {
+ e_t_e.setZero();
+ }
+ e_t_f.setZero();
+ e_t_b.setZero();
+ f_t_b.setZero();
+
+ for (int j = 0; j < chunk.num_rows; ++j) {
+ const int row_id = chunk.start + j;
+ const auto& row = bs->rows[row_id];
+ const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef
+ e_block(values + row.cells[0].position, kRowBlockSize, kEBlockSize);
+ const typename EigenTypes<kRowBlockSize>::ConstVectorRef b_block(
+ b + row.block.position, kRowBlockSize);
+
+ e_t_e.noalias() += e_block.transpose() * e_block;
+ e_t_b.noalias() += e_block.transpose() * b_block;
+
+ if (row.cells.size() == 1) {
+ // There is no f block, so there is nothing more to do.
+ continue;
+ }
+
+ const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ f_block(values + row.cells[1].position, kRowBlockSize, kFBlockSize);
+ e_t_f.noalias() += e_block.transpose() * f_block;
+ lhs.noalias() += f_block.transpose() * f_block;
+ f_t_b.noalias() += f_block.transpose() * b_block;
+ }
+
+ // BackSubstitute computes the same inverse, and this is the key workload
+ // there, so caching these inverses makes BackSubstitute essentially free.
+ typename EigenTypes<kEBlockSize, kEBlockSize>::MatrixRef e_t_e_inverse(
+ &e_t_e_inverse_matrices_[kEBlockSize * kEBlockSize * i],
+ kEBlockSize,
+ kEBlockSize);
+
+ // e_t_e is a symmetric positive definite matrix, so the standard way to
+ // compute its inverse is via the Cholesky factorization by calling
+ // e_t_e.llt().solve(Identity()). However, the inverse() method even
+ // though it is not optimized for symmetric matrices is significantly
+ // faster for small fixed size (up to 4x4) matrices.
+ //
+ // https://eigen.tuxfamily.org/dox/group__TutorialLinearAlgebra.html#title3
+ e_t_e_inverse.noalias() = e_t_e.inverse();
+
+ // The use of these two pre-allocated tmp vectors saves temporaries in the
+ // expressions for lhs and rhs updates below and has a significant impact
+ // on the performance of this method.
+ tmp.noalias() = e_t_e_inverse * e_t_f;
+ tmp2.noalias() = e_t_e_inverse * e_t_b;
+
+ lhs.noalias() -= e_t_f.transpose() * tmp;
+ rhs.noalias() += f_t_b - e_t_f.transpose() * tmp2;
+ }
+
+ // The rows without any e-blocks can have arbitrary size but only contain
+ // the f-block.
+ //
+ // lhs += f_i^T f_i
+ // rhs += f_i^T b_i
+ for (int row_id = uneliminated_row_begins_; row_id < bs->rows.size();
+ ++row_id) {
+ const auto& row = bs->rows[row_id];
+ const auto& cell = row.cells[0];
+ const typename EigenTypes<Eigen::Dynamic, kFBlockSize>::ConstMatrixRef
+ f_block(values + cell.position, row.block.size, kFBlockSize);
+ const typename EigenTypes<Eigen::Dynamic>::ConstVectorRef b_block(
+ b + row.block.position, row.block.size);
+ lhs.noalias() += f_block.transpose() * f_block;
+ rhs.noalias() += f_block.transpose() * b_block;
+ }
+ }
+
+ // This implementation of BackSubstitute depends on Eliminate being called
+ // before this. SchurComplementSolver always does this.
+ //
+ // y_i = e_t_e_inverse * sum_i e_i^T * (b_i - f_i * z);
+ void BackSubstitute(const BlockSparseMatrixData& A,
+ const double* b,
+ const double* D,
+ const double* z_ptr,
+ double* y) override {
+ typename EigenTypes<kFBlockSize>::ConstVectorRef z(z_ptr, kFBlockSize);
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+ Eigen::Matrix<double, kEBlockSize, 1> tmp;
+ for (int i = 0; i < chunks_.size(); ++i) {
+ const Chunk& chunk = chunks_[i];
+ const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+ tmp.setZero();
+ for (int j = 0; j < chunk.num_rows; ++j) {
+ const int row_id = chunk.start + j;
+ const auto& row = bs->rows[row_id];
+ const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef
+ e_block(values + row.cells[0].position, kRowBlockSize, kEBlockSize);
+ const typename EigenTypes<kRowBlockSize>::ConstVectorRef b_block(
+ b + row.block.position, kRowBlockSize);
+
+ if (row.cells.size() == 1) {
+ // There is no f block.
+ tmp += e_block.transpose() * b_block;
+ } else {
+ typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef
+ f_block(
+ values + row.cells[1].position, kRowBlockSize, kFBlockSize);
+ tmp += e_block.transpose() * (b_block - f_block * z);
+ }
+ }
+
+ typename EigenTypes<kEBlockSize, kEBlockSize>::MatrixRef e_t_e_inverse(
+ &e_t_e_inverse_matrices_[kEBlockSize * kEBlockSize * i],
+ kEBlockSize,
+ kEBlockSize);
+
+ typename EigenTypes<kEBlockSize>::VectorRef y_block(
+ y + bs->cols[e_block_id].position, kEBlockSize);
+ y_block.noalias() = e_t_e_inverse * tmp;
+ }
+ }
+
+ private:
+ struct Chunk {
+ int start = 0;
+ int num_rows = 0;
+ };
+
+ std::vector<Chunk> chunks_;
+ int num_eliminate_blocks_;
+ int uneliminated_row_begins_;
+ std::vector<double> e_t_e_inverse_matrices_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/schur_eliminator_impl.h b/extern/ceres/internal/ceres/schur_eliminator_impl.h
index f2535880f15..bd0881eec1e 100644
--- a/extern/ceres/internal/ceres/schur_eliminator_impl.h
+++ b/extern/ceres/internal/ceres/schur_eliminator_impl.h
@@ -48,23 +48,23 @@
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
-#ifdef CERES_USE_OPENMP
-#include <omp.h>
-#endif
-
#include <algorithm>
#include <map>
+
+#include "Eigen/Dense"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/fixed_array.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/invert_psd_matrix.h"
#include "ceres/map_util.h"
+#include "ceres/parallel_for.h"
#include "ceres/schur_eliminator.h"
+#include "ceres/scoped_thread_token.h"
#include "ceres/small_blas.h"
#include "ceres/stl_util.h"
-#include "Eigen/Dense"
+#include "ceres/thread_token_provider.h"
#include "glog/logging.h"
namespace ceres {
@@ -76,14 +76,16 @@ SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() {
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
-void
-SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
+void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::Init(
+ int num_eliminate_blocks,
+ bool assume_full_rank_ete,
+ const CompressedRowBlockStructure* bs) {
CHECK_GT(num_eliminate_blocks, 0)
<< "SchurComplementSolver cannot be initialized with "
<< "num_eliminate_blocks = 0.";
num_eliminate_blocks_ = num_eliminate_blocks;
+ assume_full_rank_ete_ = assume_full_rank_ete;
const int num_col_blocks = bs->cols.size();
const int num_row_blocks = bs->rows.size();
@@ -102,6 +104,13 @@ Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
lhs_num_rows += bs->cols[i].size;
}
+ // TODO(sameeragarwal): Now that we may have subset block structure,
+ // we need to make sure that we account for the fact that somep
+ // point blocks only have a "diagonal" row and nothing more.
+ //
+ // This likely requires a slightly different algorithm, which works
+ // off of the number of elimination blocks.
+
int r = 0;
// Iterate over the row blocks of A, and detect the chunks. The
// matrix should already have been ordered so that all rows
@@ -143,15 +152,12 @@ Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
++chunk.size;
}
- CHECK_GT(chunk.size, 0);
+ CHECK_GT(chunk.size, 0); // This check will need to be resolved.
r += chunk.size;
}
const Chunk& chunk = chunks_.back();
uneliminated_row_begins_ = chunk.start + chunk.size;
- if (num_threads_ > 1) {
- random_shuffle(chunks_.begin(), chunks_.end());
- }
buffer_.reset(new double[buffer_size_ * num_threads_]);
@@ -163,46 +169,51 @@ Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) {
STLDeleteElements(&rhs_locks_);
rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_);
for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) {
- rhs_locks_[i] = new Mutex;
+ rhs_locks_[i] = new std::mutex;
}
}
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-Eliminate(const BlockSparseMatrix* A,
+Eliminate(const BlockSparseMatrixData& A,
const double* b,
const double* D,
BlockRandomAccessMatrix* lhs,
double* rhs) {
if (lhs->num_rows() > 0) {
lhs->SetZero();
- VectorRef(rhs, lhs->num_rows()).setZero();
+ if (rhs) {
+ VectorRef(rhs, lhs->num_rows()).setZero();
+ }
}
- const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRowBlockStructure* bs = A.block_structure();
const int num_col_blocks = bs->cols.size();
// Add the diagonal to the schur complement.
if (D != NULL) {
-#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
- for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) {
- const int block_id = i - num_eliminate_blocks_;
- int r, c, row_stride, col_stride;
- CellInfo* cell_info = lhs->GetCell(block_id, block_id,
- &r, &c,
- &row_stride, &col_stride);
- if (cell_info != NULL) {
- const int block_size = bs->cols[i].size;
- typename EigenTypes<Eigen::Dynamic>::ConstVectorRef
- diag(D + bs->cols[i].position, block_size);
-
- CeresMutexLock l(&cell_info->m);
- MatrixRef m(cell_info->values, row_stride, col_stride);
- m.block(r, c, block_size, block_size).diagonal()
- += diag.array().square().matrix();
- }
- }
+ ParallelFor(
+ context_,
+ num_eliminate_blocks_,
+ num_col_blocks,
+ num_threads_,
+ [&](int i) {
+ const int block_id = i - num_eliminate_blocks_;
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c,
+ &row_stride, &col_stride);
+ if (cell_info != NULL) {
+ const int block_size = bs->cols[i].size;
+ typename EigenTypes<Eigen::Dynamic>::ConstVectorRef diag(
+ D + bs->cols[i].position, block_size);
+
+ std::lock_guard<std::mutex> l(cell_info->m);
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block_size, block_size).diagonal() +=
+ diag.array().square().matrix();
+ }
+ });
}
// Eliminate y blocks one chunk at a time. For each chunk, compute
@@ -218,79 +229,78 @@ Eliminate(const BlockSparseMatrix* A,
// z blocks that share a row block/residual term with the y
// block. EliminateRowOuterProduct does the corresponding operation
// for the lhs of the reduced linear system.
-#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
- for (int i = 0; i < chunks_.size(); ++i) {
-#ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-#else
- int thread_id = 0;
-#endif
- double* buffer = buffer_.get() + thread_id * buffer_size_;
- const Chunk& chunk = chunks_[i];
- const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
- const int e_block_size = bs->cols[e_block_id].size;
-
- VectorRef(buffer, buffer_size_).setZero();
-
- typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
- ete(e_block_size, e_block_size);
+ ParallelFor(
+ context_,
+ 0,
+ int(chunks_.size()),
+ num_threads_,
+ [&](int thread_id, int i) {
+ double* buffer = buffer_.get() + thread_id * buffer_size_;
+ const Chunk& chunk = chunks_[i];
+ const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
+ const int e_block_size = bs->cols[e_block_id].size;
+
+ VectorRef(buffer, buffer_size_).setZero();
+
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
+ ete(e_block_size, e_block_size);
+
+ if (D != NULL) {
+ const typename EigenTypes<kEBlockSize>::ConstVectorRef
+ diag(D + bs->cols[e_block_id].position, e_block_size);
+ ete = diag.array().square().matrix().asDiagonal();
+ } else {
+ ete.setZero();
+ }
- if (D != NULL) {
- const typename EigenTypes<kEBlockSize>::ConstVectorRef
- diag(D + bs->cols[e_block_id].position, e_block_size);
- ete = diag.array().square().matrix().asDiagonal();
- } else {
- ete.setZero();
- }
+ FixedArray<double, 8> g(e_block_size);
+ typename EigenTypes<kEBlockSize>::VectorRef gref(g.data(),
+ e_block_size);
+ gref.setZero();
+
+ // We are going to be computing
+ //
+ // S += F'F - F'E(E'E)^{-1}E'F
+ //
+ // for each Chunk. The computation is broken down into a number of
+ // function calls as below.
+
+ // Compute the outer product of the e_blocks with themselves (ete
+ // = E'E). Compute the product of the e_blocks with the
+ // corresponding f_blocks (buffer = E'F), the gradient of the terms
+ // in this chunk (g) and add the outer product of the f_blocks to
+ // Schur complement (S += F'F).
+ ChunkDiagonalBlockAndGradient(
+ chunk, A, b, chunk.start, &ete, g.data(), buffer, lhs);
+
+ // Normally one wouldn't compute the inverse explicitly, but
+ // e_block_size will typically be a small number like 3, in
+ // which case its much faster to compute the inverse once and
+ // use it to multiply other matrices/vectors instead of doing a
+ // Solve call over and over again.
+ typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
+ InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete);
+
+ // For the current chunk compute and update the rhs of the reduced
+ // linear system.
+ //
+ // rhs = F'b - F'E(E'E)^(-1) E'b
+
+ if (rhs) {
+ FixedArray<double, 8> inverse_ete_g(e_block_size);
+ MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
+ inverse_ete.data(),
+ e_block_size,
+ e_block_size,
+ g.data(),
+ inverse_ete_g.data());
+ UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.data(), rhs);
+ }
- FixedArray<double, 8> g(e_block_size);
- typename EigenTypes<kEBlockSize>::VectorRef gref(g.get(), e_block_size);
- gref.setZero();
-
- // We are going to be computing
- //
- // S += F'F - F'E(E'E)^{-1}E'F
- //
- // for each Chunk. The computation is broken down into a number of
- // function calls as below.
-
- // Compute the outer product of the e_blocks with themselves (ete
- // = E'E). Compute the product of the e_blocks with the
- // corresonding f_blocks (buffer = E'F), the gradient of the terms
- // in this chunk (g) and add the outer product of the f_blocks to
- // Schur complement (S += F'F).
- ChunkDiagonalBlockAndGradient(
- chunk, A, b, chunk.start, &ete, g.get(), buffer, lhs);
-
- // Normally one wouldn't compute the inverse explicitly, but
- // e_block_size will typically be a small number like 3, in
- // which case its much faster to compute the inverse once and
- // use it to multiply other matrices/vectors instead of doing a
- // Solve call over and over again.
- typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete =
- ete
- .template selfadjointView<Eigen::Upper>()
- .llt()
- .solve(Matrix::Identity(e_block_size, e_block_size));
-
- // For the current chunk compute and update the rhs of the reduced
- // linear system.
- //
- // rhs = F'b - F'E(E'E)^(-1) E'b
-
- FixedArray<double, 8> inverse_ete_g(e_block_size);
- MatrixVectorMultiply<kEBlockSize, kEBlockSize, 0>(
- inverse_ete.data(),
- e_block_size,
- e_block_size,
- g.get(),
- inverse_ete_g.get());
-
- UpdateRhs(chunk, A, b, chunk.start, inverse_ete_g.get(), rhs);
-
- // S -= F'E(E'E)^{-1}E'F
- ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
- }
+ // S -= F'E(E'E)^{-1}E'F
+ ChunkOuterProduct(
+ thread_id, bs, inverse_ete, buffer, chunk.buffer_layout, lhs);
+ });
// For rows with no e_blocks, the schur complement update reduces to
// S += F'F.
@@ -300,19 +310,25 @@ Eliminate(const BlockSparseMatrix* A,
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-BackSubstitute(const BlockSparseMatrix* A,
+BackSubstitute(const BlockSparseMatrixData& A,
const double* b,
const double* D,
const double* z,
double* y) {
- const CompressedRowBlockStructure* bs = A->block_structure();
-#pragma omp parallel for num_threads(num_threads_) schedule(dynamic)
- for (int i = 0; i < chunks_.size(); ++i) {
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+
+ ParallelFor(
+ context_,
+ 0,
+ int(chunks_.size()),
+ num_threads_,
+ [&](int i) {
const Chunk& chunk = chunks_[i];
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
- double* y_ptr = y + bs->cols[e_block_id].position;
+ double* y_ptr = y + bs->cols[e_block_id].position;
typename EigenTypes<kEBlockSize>::VectorRef y_block(y_ptr, e_block_size);
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix
@@ -325,7 +341,6 @@ BackSubstitute(const BlockSparseMatrix* A,
ete.setZero();
}
- const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[chunk.start + j];
const Cell& e_cell = row.cells.front();
@@ -333,9 +348,9 @@ BackSubstitute(const BlockSparseMatrix* A,
FixedArray<double, 8> sj(row.block.size);
- typename EigenTypes<kRowBlockSize>::VectorRef(sj.get(), row.block.size) =
- typename EigenTypes<kRowBlockSize>::ConstVectorRef
- (b + bs->rows[chunk.start + j].block.position, row.block.size);
+ typename EigenTypes<kRowBlockSize>::VectorRef(sj.data(), row.block.size) =
+ typename EigenTypes<kRowBlockSize>::ConstVectorRef(
+ b + bs->rows[chunk.start + j].block.position, row.block.size);
for (int c = 1; c < row.cells.size(); ++c) {
const int f_block_id = row.cells[c].block_id;
@@ -345,23 +360,24 @@ BackSubstitute(const BlockSparseMatrix* A,
MatrixVectorMultiply<kRowBlockSize, kFBlockSize, -1>(
values + row.cells[c].position, row.block.size, f_block_size,
z + lhs_row_layout_[r_block],
- sj.get());
+ sj.data());
}
MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
values + e_cell.position, row.block.size, e_block_size,
- sj.get(),
+ sj.data(),
y_ptr);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>(
- values + e_cell.position, row.block.size, e_block_size,
- values + e_cell.position, row.block.size, e_block_size,
- ete.data(), 0, 0, e_block_size, e_block_size);
+ values + e_cell.position, row.block.size, e_block_size,
+ values + e_cell.position, row.block.size, e_block_size,
+ ete.data(), 0, 0, e_block_size, e_block_size);
}
- ete.llt().solveInPlace(y_block);
- }
+ y_block =
+ InvertPSDMatrix<kEBlockSize>(assume_full_rank_ete_, ete) * y_block;
+ });
}
// Update the rhs of the reduced linear system. Compute
@@ -371,17 +387,17 @@ template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
UpdateRhs(const Chunk& chunk,
- const BlockSparseMatrix* A,
+ const BlockSparseMatrixData& A,
const double* b,
int row_block_counter,
const double* inverse_ete_g,
double* rhs) {
- const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+
const int e_block_id = bs->rows[chunk.start].cells.front().block_id;
const int e_block_size = bs->cols[e_block_id].size;
-
int b_pos = bs->rows[row_block_counter].block.position;
- const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
const Cell& e_cell = row.cells.front();
@@ -398,7 +414,7 @@ UpdateRhs(const Chunk& chunk,
const int block_id = row.cells[c].block_id;
const int block_size = bs->cols[block_id].size;
const int block = block_id - num_eliminate_blocks_;
- CeresMutexLock l(rhs_locks_[block]);
+ std::lock_guard<std::mutex> l(*rhs_locks_[block]);
MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>(
values + row.cells[c].position,
row.block.size, block_size,
@@ -432,14 +448,15 @@ void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
ChunkDiagonalBlockAndGradient(
const Chunk& chunk,
- const BlockSparseMatrix* A,
+ const BlockSparseMatrixData& A,
const double* b,
int row_block_counter,
typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete,
double* g,
double* buffer,
BlockRandomAccessMatrix* lhs) {
- const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
int b_pos = bs->rows[row_block_counter].block.position;
const int e_block_size = ete->rows();
@@ -448,7 +465,6 @@ ChunkDiagonalBlockAndGradient(
// contribution of its F blocks to the Schur complement, the
// contribution of its E block to the matrix EE' (ete), and the
// corresponding block in the gradient vector.
- const double* values = A->values();
for (int j = 0; j < chunk.size; ++j) {
const CompressedRow& row = bs->rows[row_block_counter + j];
@@ -464,12 +480,13 @@ ChunkDiagonalBlockAndGradient(
values + e_cell.position, row.block.size, e_block_size,
ete->data(), 0, 0, e_block_size, e_block_size);
- // g += E_i' b_i
- MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
- values + e_cell.position, row.block.size, e_block_size,
- b + b_pos,
- g);
-
+ if (b) {
+ // g += E_i' b_i
+ MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>(
+ values + e_cell.position, row.block.size, e_block_size,
+ b + b_pos,
+ g);
+ }
// buffer = E'F. This computation is done by iterating over the
// f_blocks for each row in the chunk.
@@ -495,7 +512,8 @@ ChunkDiagonalBlockAndGradient(
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-ChunkOuterProduct(const CompressedRowBlockStructure* bs,
+ChunkOuterProduct(int thread_id,
+ const CompressedRowBlockStructure* bs,
const Matrix& inverse_ete,
const double* buffer,
const BufferLayoutType& buffer_layout,
@@ -507,11 +525,6 @@ ChunkOuterProduct(const CompressedRowBlockStructure* bs,
const int e_block_size = inverse_ete.rows();
BufferLayoutType::const_iterator it1 = buffer_layout.begin();
-#ifdef CERES_USE_OPENMP
- int thread_id = omp_get_thread_num();
-#else
- int thread_id = 0;
-#endif
double* b1_transpose_inverse_ete =
chunk_outer_product_buffer_.get() + thread_id * buffer_size_;
@@ -535,7 +548,7 @@ ChunkOuterProduct(const CompressedRowBlockStructure* bs,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[it2->first].size;
- CeresMutexLock l(&cell_info->m);
+ std::lock_guard<std::mutex> l(cell_info->m);
MatrixMatrixMultiply
<kFBlockSize, kEBlockSize, kEBlockSize, kFBlockSize, -1>(
b1_transpose_inverse_ete, block1_size, e_block_size,
@@ -552,14 +565,18 @@ ChunkOuterProduct(const CompressedRowBlockStructure* bs,
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowsUpdate(const BlockSparseMatrix* A,
+NoEBlockRowsUpdate(const BlockSparseMatrixData& A,
const double* b,
int row_block_counter,
BlockRandomAccessMatrix* lhs,
double* rhs) {
- const CompressedRowBlockStructure* bs = A->block_structure();
- const double* values = A->values();
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
for (; row_block_counter < bs->rows.size(); ++row_block_counter) {
+ NoEBlockRowOuterProduct(A, row_block_counter, lhs);
+ if (!rhs) {
+ continue;
+ }
const CompressedRow& row = bs->rows[row_block_counter];
for (int c = 0; c < row.cells.size(); ++c) {
const int block_id = row.cells[c].block_id;
@@ -570,7 +587,6 @@ NoEBlockRowsUpdate(const BlockSparseMatrix* A,
b + row.block.position,
rhs + lhs_row_layout_[block]);
}
- NoEBlockRowOuterProduct(A, row_block_counter, lhs);
}
}
@@ -582,7 +598,7 @@ NoEBlockRowsUpdate(const BlockSparseMatrix* A,
// one difference. It does not use any of the template
// parameters. This is because the algorithm used for detecting the
// static structure of the matrix A only pays attention to rows with
-// e_blocks. This is becase rows without e_blocks are rare and
+// e_blocks. This is because rows without e_blocks are rare and
// typically arise from regularization terms in the original
// optimization problem, and have a very different structure than the
// rows with e_blocks. Including them in the static structure
@@ -592,12 +608,13 @@ NoEBlockRowsUpdate(const BlockSparseMatrix* A,
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
+NoEBlockRowOuterProduct(const BlockSparseMatrixData& A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
- const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+
const CompressedRow& row = bs->rows[row_block_index];
- const double* values = A->values();
for (int i = 0; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
@@ -608,7 +625,7 @@ NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
- CeresMutexLock l(&cell_info->m);
+ std::lock_guard<std::mutex> l(cell_info->m);
// This multiply currently ignores the fact that this is a
// symmetric outer product.
MatrixTransposeMatrixMultiply
@@ -628,7 +645,7 @@ NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
&row_stride, &col_stride);
if (cell_info != NULL) {
const int block2_size = bs->cols[row.cells[j].block_id].size;
- CeresMutexLock l(&cell_info->m);
+ std::lock_guard<std::mutex> l(cell_info->m);
MatrixTransposeMatrixMultiply
<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>(
values + row.cells[i].position, row.block.size, block1_size,
@@ -639,18 +656,19 @@ NoEBlockRowOuterProduct(const BlockSparseMatrix* A,
}
}
-// For a row with an e_block, compute the contribition S += F'F. This
+// For a row with an e_block, compute the contribution S += F'F. This
// function has the same structure as NoEBlockRowOuterProduct, except
// that this function uses the template parameters.
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize>
void
SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::
-EBlockRowOuterProduct(const BlockSparseMatrix* A,
+EBlockRowOuterProduct(const BlockSparseMatrixData& A,
int row_block_index,
BlockRandomAccessMatrix* lhs) {
- const CompressedRowBlockStructure* bs = A->block_structure();
+ const CompressedRowBlockStructure* bs = A.block_structure();
+ const double* values = A.values();
+
const CompressedRow& row = bs->rows[row_block_index];
- const double* values = A->values();
for (int i = 1; i < row.cells.size(); ++i) {
const int block1 = row.cells[i].block_id - num_eliminate_blocks_;
DCHECK_GE(block1, 0);
@@ -661,7 +679,7 @@ EBlockRowOuterProduct(const BlockSparseMatrix* A,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
- CeresMutexLock l(&cell_info->m);
+ std::lock_guard<std::mutex> l(cell_info->m);
// block += b1.transpose() * b1;
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
@@ -681,7 +699,7 @@ EBlockRowOuterProduct(const BlockSparseMatrix* A,
&row_stride, &col_stride);
if (cell_info != NULL) {
// block += b1.transpose() * b2;
- CeresMutexLock l(&cell_info->m);
+ std::lock_guard<std::mutex> l(cell_info->m);
MatrixTransposeMatrixMultiply
<kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>(
values + row.cells[i].position, row.block.size, block1_size,
diff --git a/extern/ceres/internal/ceres/generate_eliminator_specialization.py b/extern/ceres/internal/ceres/schur_eliminator_template.py
index e89e7a48c98..2f38cf5ad8f 100644
--- a/extern/ceres/internal/ceres/generate_eliminator_specialization.py
+++ b/extern/ceres/internal/ceres/schur_eliminator_template.py
@@ -1,5 +1,5 @@
# Ceres Solver - A fast non-linear least squares minimizer
-# Copyright 2015 Google Inc. All rights reserved.
+# Copyright 2017 Google Inc. All rights reserved.
# http://ceres-solver.org/
#
# Redistribution and use in source and binary forms, with or without
@@ -49,28 +49,9 @@
# specializations that is generated.
# Set of template specializations to generate
-SPECIALIZATIONS = [(2, 2, 2),
- (2, 2, 3),
- (2, 2, 4),
- (2, 2, "Eigen::Dynamic"),
- (2, 3, 3),
- (2, 3, 4),
- (2, 3, 6),
- (2, 3, 9),
- (2, 3, "Eigen::Dynamic"),
- (2, 4, 3),
- (2, 4, 4),
- (2, 4, 8),
- (2, 4, 9),
- (2, 4, "Eigen::Dynamic"),
- (2, "Eigen::Dynamic", "Eigen::Dynamic"),
- (4, 4, 2),
- (4, 4, 3),
- (4, 4, 4),
- (4, 4, "Eigen::Dynamic"),
- ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")]
+
HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -108,8 +89,7 @@ HEADER = """// Ceres Solver - A fast non-linear least squares minimizer
// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
//=========================================
//
-// This file is generated using generate_eliminator_specialization.py.
-// Editing it manually is not recommended.
+// This file is generated using generate_template_specializations.py.
"""
DYNAMIC_FILE = """
@@ -159,12 +139,7 @@ SchurEliminatorBase::Create(const LinearSolver::Options& options) {
#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
"""
-FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
- (options.e_block_size == %s) &&
- (options.f_block_size == %s)) {
- return new SchurEliminator<%s, %s, %s>(options);
- }
-"""
+FACTORY = """ return new SchurEliminator<%s, %s, %s>(options);"""
FACTORY_FOOTER = """
#endif
@@ -178,54 +153,3 @@ FACTORY_FOOTER = """
} // namespace internal
} // namespace ceres
"""
-
-
-def SuffixForSize(size):
- if size == "Eigen::Dynamic":
- return "d"
- return str(size)
-
-
-def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
- return "_".join([prefix] + map(SuffixForSize, (row_block_size,
- e_block_size,
- f_block_size)))
-
-
-def Specialize():
- """
- Generate specialization code and the conditionals to instantiate it.
- """
- f = open("schur_eliminator.cc", "w")
- f.write(HEADER)
- f.write(FACTORY_FILE_HEADER)
-
- for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
- output = SpecializationFilename("generated/schur_eliminator",
- row_block_size,
- e_block_size,
- f_block_size) + ".cc"
- fptr = open(output, "w")
- fptr.write(HEADER)
-
- template = SPECIALIZATION_FILE
- if (row_block_size == "Eigen::Dynamic" and
- e_block_size == "Eigen::Dynamic" and
- f_block_size == "Eigen::Dynamic"):
- template = DYNAMIC_FILE
-
- fptr.write(template % (row_block_size, e_block_size, f_block_size))
- fptr.close()
-
- f.write(FACTORY_CONDITIONAL % (row_block_size,
- e_block_size,
- f_block_size,
- row_block_size,
- e_block_size,
- f_block_size))
- f.write(FACTORY_FOOTER)
- f.close()
-
-
-if __name__ == "__main__":
- Specialize()
diff --git a/extern/ceres/internal/ceres/schur_jacobi_preconditioner.cc b/extern/ceres/internal/ceres/schur_jacobi_preconditioner.cc
index 3e6cc90f63c..89d770b405a 100644
--- a/extern/ceres/internal/ceres/schur_jacobi_preconditioner.cc
+++ b/extern/ceres/internal/ceres/schur_jacobi_preconditioner.cc
@@ -32,10 +32,9 @@
#include <utility>
#include <vector>
+
#include "ceres/block_random_access_diagonal_matrix.h"
#include "ceres/block_sparse_matrix.h"
-#include "ceres/collections_port.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
#include "glog/logging.h"
@@ -50,9 +49,9 @@ SchurJacobiPreconditioner::SchurJacobiPreconditioner(
CHECK_GT(options_.elimination_groups.size(), 1);
CHECK_GT(options_.elimination_groups[0], 0);
const int num_blocks = bs.cols.size() - options_.elimination_groups[0];
- CHECK_GT(num_blocks, 0)
- << "Jacobian should have atleast 1 f_block for "
- << "SCHUR_JACOBI preconditioner.";
+ CHECK_GT(num_blocks, 0) << "Jacobian should have at least 1 f_block for "
+ << "SCHUR_JACOBI preconditioner.";
+ CHECK(options_.context != NULL);
std::vector<int> blocks(num_blocks);
for (int i = 0; i < num_blocks; ++i) {
@@ -63,8 +62,7 @@ SchurJacobiPreconditioner::SchurJacobiPreconditioner(
InitEliminator(bs);
}
-SchurJacobiPreconditioner::~SchurJacobiPreconditioner() {
-}
+SchurJacobiPreconditioner::~SchurJacobiPreconditioner() {}
// Initialize the SchurEliminator.
void SchurJacobiPreconditioner::InitEliminator(
@@ -75,8 +73,11 @@ void SchurJacobiPreconditioner::InitEliminator(
eliminator_options.e_block_size = options_.e_block_size;
eliminator_options.f_block_size = options_.f_block_size;
eliminator_options.row_block_size = options_.row_block_size;
+ eliminator_options.context = options_.context;
eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
- eliminator_->Init(eliminator_options.elimination_groups[0], &bs);
+ const bool kFullRankETE = true;
+ eliminator_->Init(
+ eliminator_options.elimination_groups[0], kFullRankETE, &bs);
}
// Update the values of the preconditioner matrix and factorize it.
@@ -85,19 +86,9 @@ bool SchurJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
const int num_rows = m_->num_rows();
CHECK_GT(num_rows, 0);
- // We need a dummy rhs vector and a dummy b vector since the Schur
- // eliminator combines the computation of the reduced camera matrix
- // with the computation of the right hand side of that linear
- // system.
- //
- // TODO(sameeragarwal): Perhaps its worth refactoring the
- // SchurEliminator::Eliminate function to allow NULL for the rhs. As
- // of now it does not seem to be worth the effort.
- Vector rhs = Vector::Zero(m_->num_rows());
- Vector b = Vector::Zero(A.num_rows());
-
// Compute a subset of the entries of the Schur complement.
- eliminator_->Eliminate(&A, b.data(), D, m_.get(), rhs.data());
+ eliminator_->Eliminate(
+ BlockSparseMatrixData(A), nullptr, D, m_.get(), nullptr);
m_->Invert();
return true;
}
@@ -107,9 +98,7 @@ void SchurJacobiPreconditioner::RightMultiply(const double* x,
m_->RightMultiply(x, y);
}
-int SchurJacobiPreconditioner::num_rows() const {
- return m_->num_rows();
-}
+int SchurJacobiPreconditioner::num_rows() const { return m_->num_rows(); }
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/schur_jacobi_preconditioner.h b/extern/ceres/internal/ceres/schur_jacobi_preconditioner.h
index 5398f3ff35d..372b790b82f 100644
--- a/extern/ceres/internal/ceres/schur_jacobi_preconditioner.h
+++ b/extern/ceres/internal/ceres/schur_jacobi_preconditioner.h
@@ -38,12 +38,11 @@
#ifndef CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_
#define CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_
+#include <memory>
#include <set>
-#include <vector>
#include <utility>
-#include "ceres/collections_port.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
+#include <vector>
+
#include "ceres/preconditioner.h"
namespace ceres {
@@ -83,21 +82,23 @@ class SchurJacobiPreconditioner : public BlockSparseMatrixPreconditioner {
// based solvers. Please see schur_eliminator.h for more details.
SchurJacobiPreconditioner(const CompressedRowBlockStructure& bs,
const Preconditioner::Options& options);
+ SchurJacobiPreconditioner(const SchurJacobiPreconditioner&) = delete;
+ void operator=(const SchurJacobiPreconditioner&) = delete;
+
virtual ~SchurJacobiPreconditioner();
// Preconditioner interface.
- virtual void RightMultiply(const double* x, double* y) const;
- virtual int num_rows() const;
+ void RightMultiply(const double* x, double* y) const final;
+ int num_rows() const final;
private:
void InitEliminator(const CompressedRowBlockStructure& bs);
- virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+ bool UpdateImpl(const BlockSparseMatrix& A, const double* D) final;
Preconditioner::Options options_;
- scoped_ptr<SchurEliminatorBase> eliminator_;
+ std::unique_ptr<SchurEliminatorBase> eliminator_;
// Preconditioner matrix.
- scoped_ptr<BlockRandomAccessDiagonalMatrix> m_;
- CERES_DISALLOW_COPY_AND_ASSIGN(SchurJacobiPreconditioner);
+ std::unique_ptr<BlockRandomAccessDiagonalMatrix> m_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/schur_templates.cc b/extern/ceres/internal/ceres/schur_templates.cc
new file mode 100644
index 00000000000..01528619b1b
--- /dev/null
+++ b/extern/ceres/internal/ceres/schur_templates.cc
@@ -0,0 +1,227 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// What template specializations are available.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_template_specializations.py.
+
+#include "ceres/internal/eigen.h"
+#include "ceres/schur_templates.h"
+
+namespace ceres {
+namespace internal {
+
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+ int* e_block_size,
+ int* f_block_size) {
+ LinearSolver::Options options;
+ options.row_block_size = *row_block_size;
+ options.e_block_size = *e_block_size;
+ options.f_block_size = *f_block_size;
+ *row_block_size = Eigen::Dynamic;
+ *e_block_size = Eigen::Dynamic;
+ *f_block_size = Eigen::Dynamic;
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 2)) {
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 2;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 3)) {
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2) &&
+ (options.f_block_size == 4)) {
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 2)) {
+ *row_block_size = 2;
+ *e_block_size = 2;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 4)) {
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 6)) {
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 6;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 9)) {
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = 9;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 3)) {
+ *row_block_size = 2;
+ *e_block_size = 3;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 6)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 6;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 8)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 8;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 9)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = 9;
+ return;
+ }
+ if ((options.row_block_size == 2) &&
+ (options.e_block_size == 4)) {
+ *row_block_size = 2;
+ *e_block_size = 4;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if (options.row_block_size == 2){
+ *row_block_size = 2;
+ *e_block_size = Eigen::Dynamic;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+ if ((options.row_block_size == 3) &&
+ (options.e_block_size == 3) &&
+ (options.f_block_size == 3)) {
+ *row_block_size = 3;
+ *e_block_size = 3;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 2)) {
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 2;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 3)) {
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 3;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4) &&
+ (options.f_block_size == 4)) {
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = 4;
+ return;
+ }
+ if ((options.row_block_size == 4) &&
+ (options.e_block_size == 4)) {
+ *row_block_size = 4;
+ *e_block_size = 4;
+ *f_block_size = Eigen::Dynamic;
+ return;
+ }
+
+#endif
+ return;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/schur_templates.h b/extern/ceres/internal/ceres/schur_templates.h
new file mode 100644
index 00000000000..90aee0a1afc
--- /dev/null
+++ b/extern/ceres/internal/ceres/schur_templates.h
@@ -0,0 +1,46 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+
+#ifndef CERES_INTERNAL_SCHUR_TEMPLATES_H_
+#define CERES_INTERNAL_SCHUR_TEMPLATES_H_
+
+#include "ceres/linear_solver.h"
+
+namespace ceres {
+namespace internal {
+
+void GetBestSchurTemplateSpecialization(int* row_block_size,
+ int* e_block_size,
+ int* f_block_size);
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCHUR_TEMPLATES_H_
diff --git a/extern/ceres/internal/ceres/scoped_thread_token.h b/extern/ceres/internal/ceres/scoped_thread_token.h
new file mode 100644
index 00000000000..c167397cce9
--- /dev/null
+++ b/extern/ceres/internal/ceres/scoped_thread_token.h
@@ -0,0 +1,61 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#ifndef CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
+#define CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
+
+#include "ceres/thread_token_provider.h"
+
+namespace ceres {
+namespace internal {
+
+// Helper class for ThreadTokenProvider. This object acquires a token in its
+// constructor and puts that token back with destruction.
+class ScopedThreadToken {
+ public:
+ ScopedThreadToken(ThreadTokenProvider* provider)
+ : provider_(provider), token_(provider->Acquire()) {}
+
+ ~ScopedThreadToken() { provider_->Release(token_); }
+
+ int token() const { return token_; }
+
+ private:
+ ThreadTokenProvider* provider_;
+ int token_;
+
+ ScopedThreadToken(ScopedThreadToken&);
+ ScopedThreadToken& operator=(ScopedThreadToken&);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SCOPED_THREAD_TOKEN_H_
diff --git a/extern/ceres/internal/ceres/scratch_evaluate_preparer.h b/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
index fa9ebd0e50e..c8d9b937b47 100644
--- a/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
+++ b/extern/ceres/internal/ceres/scratch_evaluate_preparer.h
@@ -35,7 +35,7 @@
#ifndef CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
#define CERES_INTERNAL_SCRATCH_EVALUATE_PREPARER_H_
-#include "ceres/internal/scoped_ptr.h"
+#include <memory>
namespace ceres {
namespace internal {
@@ -60,7 +60,7 @@ class ScratchEvaluatePreparer {
private:
// Scratch space for the jacobians; each jacobian is packed one after another.
// There is enough scratch to hold all the jacobians for the largest residual.
- scoped_array<double> jacobian_scratch_;
+ std::unique_ptr<double[]> jacobian_scratch_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/single_linkage_clustering.cc b/extern/ceres/internal/ceres/single_linkage_clustering.cc
new file mode 100644
index 00000000000..394492cdf23
--- /dev/null
+++ b/extern/ceres/internal/ceres/single_linkage_clustering.cc
@@ -0,0 +1,94 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/single_linkage_clustering.h"
+
+#include <unordered_set>
+#include <unordered_map>
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+
+namespace ceres {
+namespace internal {
+
+int ComputeSingleLinkageClustering(
+ const SingleLinkageClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ std::unordered_map<int, int>* membership) {
+ CHECK(membership != nullptr);
+ membership->clear();
+
+ // Initially each vertex is in its own cluster.
+ const std::unordered_set<int>& vertices = graph.vertices();
+ for (const int v : vertices) {
+ (*membership)[v] = v;
+ }
+
+ for (const int vertex1 : vertices) {
+ const std::unordered_set<int>& neighbors = graph.Neighbors(vertex1);
+ for (const int vertex2 : neighbors) {
+ // Since the graph is undirected, only pay attention to one side
+ // of the edge and ignore weak edges.
+ if ((vertex1 > vertex2) ||
+ (graph.EdgeWeight(vertex1, vertex2) < options.min_similarity)) {
+ continue;
+ }
+
+ // Use a union-find algorithm to keep track of the clusters.
+ const int c1 = FindConnectedComponent(vertex1, membership);
+ const int c2 = FindConnectedComponent(vertex2, membership);
+
+ if (c1 == c2) {
+ continue;
+ }
+
+ if (c1 < c2) {
+ (*membership)[c2] = c1;
+ } else {
+ (*membership)[c1] = c2;
+ }
+ }
+ }
+
+ // Make sure that every vertex is connected directly to the vertex
+ // identifying the cluster.
+ int num_clusters = 0;
+ for (auto& m : *membership) {
+ m.second = FindConnectedComponent(m.first, membership);
+ if (m.first == m.second) {
+ ++num_clusters;
+ }
+ }
+
+ return num_clusters;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/include/ceres/fpclassify.h b/extern/ceres/internal/ceres/single_linkage_clustering.h
index bc2dc90026c..ccd6f8ea37d 100644
--- a/extern/ceres/include/ceres/fpclassify.h
+++ b/extern/ceres/internal/ceres/single_linkage_clustering.h
@@ -26,45 +26,39 @@
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
-// Author: keir@google.com (Keir Mierle)
-//
-// Portable floating point classification. The names are picked such that they
-// do not collide with macros. For example, "isnan" in C99 is a macro and hence
-// does not respect namespaces.
-//
-// TODO(keir): Finish porting!
+// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_PUBLIC_FPCLASSIFY_H_
-#define CERES_PUBLIC_FPCLASSIFY_H_
+#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
+#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
-#if defined(_MSC_VER)
-#include <float.h>
-#endif
-
-#include <limits>
+#include <unordered_map>
+#include "ceres/graph.h"
namespace ceres {
+namespace internal {
-#if defined(_MSC_VER)
-
-inline bool IsFinite (double x) { return _finite(x) != 0; }
-inline bool IsInfinite(double x) { return _finite(x) == 0 && _isnan(x) == 0; }
-inline bool IsNaN (double x) { return _isnan(x) != 0; }
-inline bool IsNormal (double x) { // NOLINT
- const int classification = _fpclass(x);
- return (classification == _FPCLASS_NN || classification == _FPCLASS_PN);
-}
+struct SingleLinkageClusteringOptions {
+ // Graph edges with edge weight less than min_similarity are ignored
+ // during the clustering process.
+ double min_similarity = 0.99;
+};
-# else
-
-// These definitions are for the normal Unix suspects.
-inline bool IsFinite (double x) { return std::isfinite(x); }
-inline bool IsInfinite(double x) { return std::isinf(x); }
-inline bool IsNaN (double x) { return std::isnan(x); }
-inline bool IsNormal (double x) { return std::isnormal(x); }
-
-#endif
+// Compute a partitioning of the vertices of the graph using the
+// single linkage clustering algorithm. Edges with weight less than
+// SingleLinkageClusteringOptions::min_similarity will be ignored.
+//
+// membership upon return will contain a mapping from the vertices of
+// the graph to an integer indicating the identity of the cluster that
+// it belongs to.
+//
+// The return value of this function is the number of clusters
+// identified by the algorithm.
+int ComputeSingleLinkageClustering(
+ const SingleLinkageClusteringOptions& options,
+ const WeightedGraph<int>& graph,
+ std::unordered_map<int, int>* membership);
+} // namespace internal
} // namespace ceres
-#endif // CERES_PUBLIC_FPCLASSIFY_H_
+#endif // CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_
diff --git a/extern/ceres/internal/ceres/small_blas.h b/extern/ceres/internal/ceres/small_blas.h
index 264ac53047d..81c58722d5b 100644
--- a/extern/ceres/internal/ceres/small_blas.h
+++ b/extern/ceres/internal/ceres/small_blas.h
@@ -38,6 +38,7 @@
#include "ceres/internal/port.h"
#include "ceres/internal/eigen.h"
#include "glog/logging.h"
+#include "small_blas_generic.h"
namespace ceres {
namespace internal {
@@ -89,6 +90,26 @@ namespace internal {
B, num_row_b, num_col_b, \
C, start_row_c, start_col_c, row_stride_c, col_stride_c);
+#define CERES_GEMM_STORE_SINGLE(p, index, value) \
+ if (kOperation > 0) { \
+ p[index] += value; \
+ } else if (kOperation < 0) { \
+ p[index] -= value; \
+ } else { \
+ p[index] = value; \
+ }
+
+#define CERES_GEMM_STORE_PAIR(p, index, v1, v2) \
+ if (kOperation > 0) { \
+ p[index] += v1; \
+ p[index + 1] += v2; \
+ } else if (kOperation < 0) { \
+ p[index] -= v1; \
+ p[index + 1] -= v2; \
+ } else { \
+ p[index] = v1; \
+ p[index + 1] = v2; \
+ }
// For the matrix-matrix functions below, there are three variants for
// each functionality. Foo, FooNaive and FooEigen. Foo is the one to
@@ -160,24 +181,64 @@ CERES_GEMM_BEGIN(MatrixMatrixMultiplyNaive) {
const int NUM_COL_C = NUM_COL_B;
DCHECK_LE(start_row_c + NUM_ROW_C, row_stride_c);
DCHECK_LE(start_col_c + NUM_COL_C, col_stride_c);
+ const int span = 4;
+
+ // Calculate the remainder part first.
- for (int row = 0; row < NUM_ROW_C; ++row) {
- for (int col = 0; col < NUM_COL_C; ++col) {
+ // Process the last odd column if present.
+ if (NUM_COL_C & 1) {
+ int col = NUM_COL_C - 1;
+ const double* pa = &A[0];
+ for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
+ const double* pb = &B[col];
double tmp = 0.0;
- for (int k = 0; k < NUM_COL_A; ++k) {
- tmp += A[row * NUM_COL_A + k] * B[k * NUM_COL_B + col];
+ for (int k = 0; k < NUM_COL_A; ++k, pb += NUM_COL_B) {
+ tmp += pa[k] * pb[0];
}
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
- if (kOperation > 0) {
- C[index] += tmp;
- } else if (kOperation < 0) {
- C[index] -= tmp;
- } else {
- C[index] = tmp;
+ CERES_GEMM_STORE_SINGLE(C, index, tmp);
+ }
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_C == 1) {
+ return;
+ }
+ }
+
+ // Process the couple columns in remainder if present.
+ if (NUM_COL_C & 2) {
+ int col = NUM_COL_C & (int)(~(span - 1)) ;
+ const double* pa = &A[0];
+ for (int row = 0; row < NUM_ROW_C; ++row, pa += NUM_COL_A) {
+ const double* pb = &B[col];
+ double tmp1 = 0.0, tmp2 = 0.0;
+ for (int k = 0; k < NUM_COL_A; ++k, pb += NUM_COL_B) {
+ double av = pa[k];
+ tmp1 += av * pb[0];
+ tmp2 += av * pb[1];
}
+
+ const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ CERES_GEMM_STORE_PAIR(C, index, tmp1, tmp2);
+ }
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_C < span) {
+ return;
}
}
+
+ // Calculate the main part with multiples of 4.
+ int col_m = NUM_COL_C & (int)(~(span - 1));
+ for (int col = 0; col < col_m; col += span) {
+ for (int row = 0; row < NUM_ROW_C; ++row) {
+ const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ MMM_mat1x4(NUM_COL_A, &A[row * NUM_COL_A],
+ &B[col], NUM_COL_B, &C[index], kOperation);
+ }
+ }
+
}
CERES_GEMM_BEGIN(MatrixMatrixMultiply) {
@@ -220,24 +281,68 @@ CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiplyNaive) {
const int NUM_COL_C = NUM_COL_B;
DCHECK_LE(start_row_c + NUM_ROW_C, row_stride_c);
DCHECK_LE(start_col_c + NUM_COL_C, col_stride_c);
+ const int span = 4;
- for (int row = 0; row < NUM_ROW_C; ++row) {
- for (int col = 0; col < NUM_COL_C; ++col) {
+ // Process the remainder part first.
+
+ // Process the last odd column if present.
+ if (NUM_COL_C & 1) {
+ int col = NUM_COL_C - 1;
+ for (int row = 0; row < NUM_ROW_C; ++row) {
+ const double* pa = &A[row];
+ const double* pb = &B[col];
double tmp = 0.0;
for (int k = 0; k < NUM_ROW_A; ++k) {
- tmp += A[k * NUM_COL_A + row] * B[k * NUM_COL_B + col];
+ tmp += pa[0] * pb[0];
+ pa += NUM_COL_A;
+ pb += NUM_COL_B;
}
const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
- if (kOperation > 0) {
- C[index]+= tmp;
- } else if (kOperation < 0) {
- C[index]-= tmp;
- } else {
- C[index]= tmp;
+ CERES_GEMM_STORE_SINGLE(C, index, tmp);
+ }
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_C == 1) {
+ return;
+ }
+ }
+
+ // Process the couple columns in remainder if present.
+ if (NUM_COL_C & 2) {
+ int col = NUM_COL_C & (int)(~(span - 1)) ;
+ for (int row = 0; row < NUM_ROW_C; ++row) {
+ const double* pa = &A[row];
+ const double* pb = &B[col];
+ double tmp1 = 0.0, tmp2 = 0.0;
+ for (int k = 0; k < NUM_ROW_A; ++k) {
+ double av = *pa;
+ tmp1 += av * pb[0];
+ tmp2 += av * pb[1];
+ pa += NUM_COL_A;
+ pb += NUM_COL_B;
}
+
+ const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ CERES_GEMM_STORE_PAIR(C, index, tmp1, tmp2);
+ }
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_C < span) {
+ return;
}
}
+
+ // Process the main part with multiples of 4.
+ int col_m = NUM_COL_C & (int)(~(span - 1));
+ for (int col = 0; col < col_m; col += span) {
+ for (int row = 0; row < NUM_ROW_C; ++row) {
+ const int index = (row + start_row_c) * col_stride_c + start_col_c + col;
+ MTM_mat1x4(NUM_ROW_A, &A[row], NUM_COL_A,
+ &B[col], NUM_COL_B, &C[index], kOperation);
+ }
+ }
+
}
CERES_GEMM_BEGIN(MatrixTransposeMatrixMultiply) {
@@ -301,21 +406,54 @@ inline void MatrixVectorMultiply(const double* A,
const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);
const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);
+ const int span = 4;
- for (int row = 0; row < NUM_ROW_A; ++row) {
+ // Calculate the remainder part first.
+
+ // Process the last odd row if present.
+ if (NUM_ROW_A & 1) {
+ int row = NUM_ROW_A - 1;
+ const double* pa = &A[row * NUM_COL_A];
+ const double* pb = &b[0];
double tmp = 0.0;
for (int col = 0; col < NUM_COL_A; ++col) {
- tmp += A[row * NUM_COL_A + col] * b[col];
+ tmp += (*pa++) * (*pb++);
+ }
+ CERES_GEMM_STORE_SINGLE(c, row, tmp);
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_ROW_A == 1) {
+ return;
+ }
+ }
+
+ // Process the couple rows in remainder if present.
+ if (NUM_ROW_A & 2) {
+ int row = NUM_ROW_A & (int)(~(span - 1));
+ const double* pa1 = &A[row * NUM_COL_A];
+ const double* pa2 = pa1 + NUM_COL_A;
+ const double* pb = &b[0];
+ double tmp1 = 0.0, tmp2 = 0.0;
+ for (int col = 0; col < NUM_COL_A; ++col) {
+ double bv = *pb++;
+ tmp1 += *(pa1++) * bv;
+ tmp2 += *(pa2++) * bv;
}
+ CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
- if (kOperation > 0) {
- c[row] += tmp;
- } else if (kOperation < 0) {
- c[row] -= tmp;
- } else {
- c[row] = tmp;
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_ROW_A < span) {
+ return;
}
}
+
+ // Calculate the main part with multiples of 4.
+ int row_m = NUM_ROW_A & (int)(~(span - 1));
+ for (int row = 0; row < row_m; row += span) {
+ MVM_mat4x1(NUM_COL_A, &A[row * NUM_COL_A], NUM_COL_A,
+ &b[0], &c[row], kOperation);
+ }
+
#endif // CERES_NO_CUSTOM_BLAS
}
@@ -352,21 +490,55 @@ inline void MatrixTransposeVectorMultiply(const double* A,
const int NUM_ROW_A = (kRowA != Eigen::Dynamic ? kRowA : num_row_a);
const int NUM_COL_A = (kColA != Eigen::Dynamic ? kColA : num_col_a);
+ const int span = 4;
+
+ // Calculate the remainder part first.
- for (int row = 0; row < NUM_COL_A; ++row) {
+ // Process the last odd column if present.
+ if (NUM_COL_A & 1) {
+ int row = NUM_COL_A - 1;
+ const double* pa = &A[row];
+ const double* pb = &b[0];
double tmp = 0.0;
for (int col = 0; col < NUM_ROW_A; ++col) {
- tmp += A[col * NUM_COL_A + row] * b[col];
+ tmp += *pa * (*pb++);
+ pa += NUM_COL_A;
}
+ CERES_GEMM_STORE_SINGLE(c, row, tmp);
- if (kOperation > 0) {
- c[row] += tmp;
- } else if (kOperation < 0) {
- c[row] -= tmp;
- } else {
- c[row] = tmp;
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_A == 1) {
+ return;
}
}
+
+ // Process the couple columns in remainder if present.
+ if (NUM_COL_A & 2) {
+ int row = NUM_COL_A & (int)(~(span - 1));
+ const double* pa = &A[row];
+ const double* pb = &b[0];
+ double tmp1 = 0.0, tmp2 = 0.0;
+ for (int col = 0; col < NUM_ROW_A; ++col) {
+ double bv = *pb++;
+ tmp1 += *(pa ) * bv;
+ tmp2 += *(pa + 1) * bv;
+ pa += NUM_COL_A;
+ }
+ CERES_GEMM_STORE_PAIR(c, row, tmp1, tmp2);
+
+ // Return directly for efficiency of extremely small matrix multiply.
+ if (NUM_COL_A < span) {
+ return;
+ }
+ }
+
+ // Calculate the main part with multiples of 4.
+ int row_m = NUM_COL_A & (int)(~(span - 1));
+ for (int row = 0; row < row_m; row += span) {
+ MTV_mat4x1(NUM_ROW_A, &A[row], NUM_COL_A,
+ &b[0], &c[row], kOperation);
+ }
+
#endif // CERES_NO_CUSTOM_BLAS
}
@@ -374,6 +546,8 @@ inline void MatrixTransposeVectorMultiply(const double* A,
#undef CERES_GEMM_EIGEN_HEADER
#undef CERES_GEMM_NAIVE_HEADER
#undef CERES_CALL_GEMM
+#undef CERES_GEMM_STORE_SINGLE
+#undef CERES_GEMM_STORE_PAIR
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/small_blas_generic.h b/extern/ceres/internal/ceres/small_blas_generic.h
new file mode 100644
index 00000000000..978c5d56a84
--- /dev/null
+++ b/extern/ceres/internal/ceres/small_blas_generic.h
@@ -0,0 +1,315 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yangfan34@lenovo.com (Lenovo Research Device+ Lab - Shanghai)
+//
+// Optimization for simple blas functions used in the Schur Eliminator.
+// These are fairly basic implementations which already yield a significant
+// speedup in the eliminator performance.
+
+#ifndef CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
+#define CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
+
+namespace ceres {
+namespace internal {
+
+// The following macros are used to share code
+#define CERES_GEMM_OPT_NAIVE_HEADER \
+ double c0 = 0.0; \
+ double c1 = 0.0; \
+ double c2 = 0.0; \
+ double c3 = 0.0; \
+ const double* pa = a; \
+ const double* pb = b; \
+ const int span = 4; \
+ int col_r = col_a & (span - 1); \
+ int col_m = col_a - col_r;
+
+#define CERES_GEMM_OPT_STORE_MAT1X4 \
+ if (kOperation > 0) { \
+ *c++ += c0; \
+ *c++ += c1; \
+ *c++ += c2; \
+ *c++ += c3; \
+ } else if (kOperation < 0) { \
+ *c++ -= c0; \
+ *c++ -= c1; \
+ *c++ -= c2; \
+ *c++ -= c3; \
+ } else { \
+ *c++ = c0; \
+ *c++ = c1; \
+ *c++ = c2; \
+ *c++ = c3; \
+ }
+
+// Matrix-Matrix Multiplication
+// Figure out 1x4 of Matrix C in one batch
+//
+// c op a * B;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+// Matrix C Matrix A Matrix B
+//
+// C0, C1, C2, C3 op A0, A1, A2, A3, ... * B0, B1, B2, B3
+// B4, B5, B6, B7
+// B8, B9, Ba, Bb
+// Bc, Bd, Be, Bf
+// . , . , . , .
+// . , . , . , .
+// . , . , . , .
+//
+// unroll for loops
+// utilize the data resided in cache
+// NOTE: col_a means the columns of A
+static inline void MMM_mat1x4(const int col_a,
+ const double* a,
+ const double* b,
+ const int col_stride_b,
+ double* c,
+ const int kOperation) {
+ CERES_GEMM_OPT_NAIVE_HEADER
+ double av = 0.0;
+ int bi = 0;
+
+#define CERES_GEMM_OPT_MMM_MAT1X4_MUL \
+ av = pa[k]; \
+ pb = b + bi; \
+ c0 += av * *pb++; \
+ c1 += av * *pb++; \
+ c2 += av * *pb++; \
+ c3 += av * *pb++; \
+ bi += col_stride_b; \
+ k++;
+
+ for (int k = 0; k < col_m;) {
+ CERES_GEMM_OPT_MMM_MAT1X4_MUL
+ CERES_GEMM_OPT_MMM_MAT1X4_MUL
+ CERES_GEMM_OPT_MMM_MAT1X4_MUL
+ CERES_GEMM_OPT_MMM_MAT1X4_MUL
+ }
+
+ for (int k = col_m; k < col_a;) {
+ CERES_GEMM_OPT_MMM_MAT1X4_MUL
+ }
+
+ CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MMM_MAT1X4_MUL
+}
+
+// Matrix Transpose-Matrix multiplication
+// Figure out 1x4 of Matrix C in one batch
+//
+// c op a' * B;
+// where op can be +=, -=, or = indicated by kOperation.
+//
+// Matrix A
+//
+// A0
+// A1
+// A2
+// A3
+// .
+// .
+// .
+//
+// Matrix C Matrix A' Matrix B
+//
+// C0, C1, C2, C3 op A0, A1, A2, A3, ... * B0, B1, B2, B3
+// B4, B5, B6, B7
+// B8, B9, Ba, Bb
+// Bc, Bd, Be, Bf
+// . , . , . , .
+// . , . , . , .
+// . , . , . , .
+//
+// unroll for loops
+// utilize the data resided in cache
+// NOTE: col_a means the columns of A'
+static inline void MTM_mat1x4(const int col_a,
+ const double* a,
+ const int col_stride_a,
+ const double* b,
+ const int col_stride_b,
+ double* c,
+ const int kOperation) {
+ CERES_GEMM_OPT_NAIVE_HEADER
+ double av = 0.0;
+ int ai = 0;
+ int bi = 0;
+
+#define CERES_GEMM_OPT_MTM_MAT1X4_MUL \
+ av = pa[ai]; \
+ pb = b + bi; \
+ c0 += av * *pb++; \
+ c1 += av * *pb++; \
+ c2 += av * *pb++; \
+ c3 += av * *pb++; \
+ ai += col_stride_a; \
+ bi += col_stride_b;
+
+ for (int k = 0; k < col_m; k += span) {
+ CERES_GEMM_OPT_MTM_MAT1X4_MUL
+ CERES_GEMM_OPT_MTM_MAT1X4_MUL
+ CERES_GEMM_OPT_MTM_MAT1X4_MUL
+ CERES_GEMM_OPT_MTM_MAT1X4_MUL
+ }
+
+ for (int k = col_m; k < col_a; k++) {
+ CERES_GEMM_OPT_MTM_MAT1X4_MUL
+ }
+
+ CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MTM_MAT1X4_MUL
+}
+
+// Matrix-Vector Multiplication
+// Figure out 4x1 of vector c in one batch
+//
+// c op A * b;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+// Vector c Matrix A Vector b
+//
+// C0 op A0, A1, A2, A3, ... * B0
+// C1 A4, A5, A6, A7, ... B1
+// C2 A8, A9, Aa, Ab, ... B2
+// C3 Ac, Ad, Ae, Af, ... B3
+// .
+// .
+// .
+//
+// unroll for loops
+// utilize the data resided in cache
+// NOTE: col_a means the columns of A
+static inline void MVM_mat4x1(const int col_a,
+ const double* a,
+ const int col_stride_a,
+ const double* b,
+ double* c,
+ const int kOperation) {
+ CERES_GEMM_OPT_NAIVE_HEADER
+ double bv = 0.0;
+
+#define CERES_GEMM_OPT_MVM_MAT4X1_MUL \
+ bv = *pb; \
+ c0 += *(pa ) * bv; \
+ c1 += *(pa + col_stride_a ) * bv; \
+ c2 += *(pa + col_stride_a * 2) * bv; \
+ c3 += *(pa + col_stride_a * 3) * bv; \
+ pa++; \
+ pb++;
+
+ for (int k = 0; k < col_m; k += span) {
+ CERES_GEMM_OPT_MVM_MAT4X1_MUL
+ CERES_GEMM_OPT_MVM_MAT4X1_MUL
+ CERES_GEMM_OPT_MVM_MAT4X1_MUL
+ CERES_GEMM_OPT_MVM_MAT4X1_MUL
+ }
+
+ for (int k = col_m; k < col_a; k++) {
+ CERES_GEMM_OPT_MVM_MAT4X1_MUL
+ }
+
+ CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MVM_MAT4X1_MUL
+}
+
+// Matrix Transpose-Vector multiplication
+// Figure out 4x1 of vector c in one batch
+//
+// c op A' * b;
+// where op can be +=, -=, or =, indicated by kOperation.
+//
+// Matrix A
+//
+// A0, A4, A8, Ac
+// A1, A5, A9, Ad
+// A2, A6, Aa, Ae
+// A3, A7, Ab, Af
+// . , . , . , .
+// . , . , . , .
+// . , . , . , .
+//
+// Vector c Matrix A' Vector b
+//
+// C0 op A0, A1, A2, A3, ... * B0
+// C1 A4, A5, A6, A7, ... B1
+// C2 A8, A9, Aa, Ab, ... B2
+// C3 Ac, Ad, Ae, Af, ... B3
+// .
+// .
+// .
+//
+// unroll for loops
+// utilize the data resided in cache
+// NOTE: col_a means the columns of A'
+static inline void MTV_mat4x1(const int col_a,
+ const double* a,
+ const int col_stride_a,
+ const double* b,
+ double* c,
+ const int kOperation) {
+ CERES_GEMM_OPT_NAIVE_HEADER
+ double bv = 0.0;
+
+#define CERES_GEMM_OPT_MTV_MAT4X1_MUL \
+ bv = *pb; \
+ c0 += *(pa ) * bv; \
+ c1 += *(pa + 1) * bv; \
+ c2 += *(pa + 2) * bv; \
+ c3 += *(pa + 3) * bv; \
+ pa += col_stride_a; \
+ pb++;
+
+ for (int k = 0; k < col_m; k += span) {
+ CERES_GEMM_OPT_MTV_MAT4X1_MUL
+ CERES_GEMM_OPT_MTV_MAT4X1_MUL
+ CERES_GEMM_OPT_MTV_MAT4X1_MUL
+ CERES_GEMM_OPT_MTV_MAT4X1_MUL
+ }
+
+ for (int k = col_m; k < col_a; k++) {
+ CERES_GEMM_OPT_MTV_MAT4X1_MUL
+ }
+
+ CERES_GEMM_OPT_STORE_MAT1X4
+
+#undef CERES_GEMM_OPT_MTV_MAT4X1_MUL
+}
+
+#undef CERES_GEMM_OPT_NAIVE_HEADER
+#undef CERES_GEMM_OPT_STORE_MAT1X4
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SMALL_BLAS_GENERIC_H_
diff --git a/extern/ceres/internal/ceres/solver.cc b/extern/ceres/internal/ceres/solver.cc
index 8411350986a..861d8d30485 100644
--- a/extern/ceres/internal/ceres/solver.cc
+++ b/extern/ceres/internal/ceres/solver.cc
@@ -32,8 +32,14 @@
#include "ceres/solver.h"
#include <algorithm>
-#include <sstream> // NOLINT
+#include <memory>
+#include <sstream> // NOLINT
#include <vector>
+
+#include "ceres/casts.h"
+#include "ceres/context.h"
+#include "ceres/context_impl.h"
+#include "ceres/detect_structure.h"
#include "ceres/gradient_checking_cost_function.h"
#include "ceres/internal/port.h"
#include "ceres/parameter_block_ordering.h"
@@ -41,6 +47,7 @@
#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
+#include "ceres/schur_templates.h"
#include "ceres/solver_utils.h"
#include "ceres/stringprintf.h"
#include "ceres/types.h"
@@ -52,6 +59,8 @@ namespace {
using std::map;
using std::string;
using std::vector;
+using internal::StringAppendF;
+using internal::StringPrintf;
#define OPTION_OP(x, y, OP) \
if (!(options.x OP y)) { \
@@ -91,7 +100,6 @@ bool CommonOptionsAreValid(const Solver::Options& options, string* error) {
OPTION_GE(gradient_tolerance, 0.0);
OPTION_GE(parameter_tolerance, 0.0);
OPTION_GT(num_threads, 0);
- OPTION_GT(num_linear_solver_threads, 0);
if (options.check_gradients) {
OPTION_GT(gradient_check_relative_precision, 0.0);
OPTION_GT(gradient_check_numeric_derivative_relative_step_size, 0.0);
@@ -132,101 +140,51 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
return false;
}
- if (options.preconditioner_type == CLUSTER_JACOBI &&
- options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
- *error = "CLUSTER_JACOBI requires "
- "Solver::Options::sparse_linear_algebra_library_type to be "
- "SUITE_SPARSE";
- return false;
- }
-
- if (options.preconditioner_type == CLUSTER_TRIDIAGONAL &&
- options.sparse_linear_algebra_library_type != SUITE_SPARSE) {
- *error = "CLUSTER_TRIDIAGONAL requires "
- "Solver::Options::sparse_linear_algebra_library_type to be "
- "SUITE_SPARSE";
- return false;
- }
-
-#ifdef CERES_NO_LAPACK
- if (options.dense_linear_algebra_library_type == LAPACK) {
- if (options.linear_solver_type == DENSE_NORMAL_CHOLESKY) {
- *error = "Can't use DENSE_NORMAL_CHOLESKY with LAPACK because "
- "LAPACK was not enabled when Ceres was built.";
- return false;
- } else if (options.linear_solver_type == DENSE_QR) {
- *error = "Can't use DENSE_QR with LAPACK because "
- "LAPACK was not enabled when Ceres was built.";
- return false;
- } else if (options.linear_solver_type == DENSE_SCHUR) {
- *error = "Can't use DENSE_SCHUR with LAPACK because "
- "LAPACK was not enabled when Ceres was built.";
- return false;
- }
- }
-#endif
-
-#ifdef CERES_NO_SUITESPARSE
- if (options.sparse_linear_algebra_library_type == SUITE_SPARSE) {
- if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
- *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
- "SuiteSparse was not enabled when Ceres was built.";
- return false;
- } else if (options.linear_solver_type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR with SUITESPARSE because "
- "SuiteSparse was not enabled when Ceres was built.";
- return false;
- } else if (options.preconditioner_type == CLUSTER_JACOBI) {
- *error = "CLUSTER_JACOBI preconditioner not supported. "
- "SuiteSparse was not enabled when Ceres was built.";
- return false;
- } else if (options.preconditioner_type == CLUSTER_TRIDIAGONAL) {
- *error = "CLUSTER_TRIDIAGONAL preconditioner not supported. "
- "SuiteSparse was not enabled when Ceres was built.";
+ if (options.dense_linear_algebra_library_type == LAPACK &&
+ !IsDenseLinearAlgebraLibraryTypeAvailable(LAPACK) &&
+ (options.linear_solver_type == DENSE_NORMAL_CHOLESKY ||
+ options.linear_solver_type == DENSE_QR ||
+ options.linear_solver_type == DENSE_SCHUR)) {
+ *error = StringPrintf(
+ "Can't use %s with "
+ "Solver::Options::dense_linear_algebra_library_type = LAPACK "
+ "because LAPACK was not enabled when Ceres was built.",
+ LinearSolverTypeToString(options.linear_solver_type));
return false;
- }
}
-#endif
-#ifdef CERES_NO_CXSPARSE
- if (options.sparse_linear_algebra_library_type == CX_SPARSE) {
- if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
- *error = "Can't use SPARSE_NORMAL_CHOLESKY with CX_SPARSE because "
- "CXSparse was not enabled when Ceres was built.";
- return false;
- } else if (options.linear_solver_type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR with CX_SPARSE because "
- "CXSparse was not enabled when Ceres was built.";
- return false;
+ {
+ const char* sparse_linear_algebra_library_name =
+ SparseLinearAlgebraLibraryTypeToString(
+ options.sparse_linear_algebra_library_type);
+ const char* name = nullptr;
+ if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY ||
+ options.linear_solver_type == SPARSE_SCHUR) {
+ name = LinearSolverTypeToString(options.linear_solver_type);
+ } else if ((options.linear_solver_type == ITERATIVE_SCHUR &&
+ (options.preconditioner_type == CLUSTER_JACOBI ||
+ options.preconditioner_type == CLUSTER_TRIDIAGONAL)) ||
+ (options.linear_solver_type == CGNR &&
+ options.preconditioner_type == SUBSET)) {
+ name = PreconditionerTypeToString(options.preconditioner_type);
}
- }
-#endif
-#ifndef CERES_USE_EIGEN_SPARSE
- if (options.sparse_linear_algebra_library_type == EIGEN_SPARSE) {
- if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
- *error = "Can't use SPARSE_NORMAL_CHOLESKY with EIGEN_SPARSE because "
- "Eigen's sparse linear algebra was not enabled when Ceres was "
- "built.";
- return false;
- } else if (options.linear_solver_type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR with EIGEN_SPARSE because "
- "Eigen's sparse linear algebra was not enabled when Ceres was "
- "built.";
- return false;
- }
- }
-#endif
-
- if (options.sparse_linear_algebra_library_type == NO_SPARSE) {
- if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
- *error = "Can't use SPARSE_NORMAL_CHOLESKY as "
- "sparse_linear_algebra_library_type is NO_SPARSE.";
- return false;
- } else if (options.linear_solver_type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR as "
- "sparse_linear_algebra_library_type is NO_SPARSE.";
- return false;
+ if (name) {
+ if (options.sparse_linear_algebra_library_type == NO_SPARSE) {
+ *error = StringPrintf(
+ "Can't use %s with "
+ "Solver::Options::sparse_linear_algebra_library_type = %s.",
+ name, sparse_linear_algebra_library_name);
+ return false;
+ } else if (!IsSparseLinearAlgebraLibraryTypeAvailable(
+ options.sparse_linear_algebra_library_type)) {
+ *error = StringPrintf(
+ "Can't use %s with "
+ "Solver::Options::sparse_linear_algebra_library_type = %s, "
+ "because support was not enabled when Ceres Solver was built.",
+ name, sparse_linear_algebra_library_name);
+ return false;
+ }
}
}
@@ -240,16 +198,32 @@ bool TrustRegionOptionsAreValid(const Solver::Options& options, string* error) {
}
}
- if (options.trust_region_minimizer_iterations_to_dump.size() > 0 &&
+ if (!options.trust_region_minimizer_iterations_to_dump.empty() &&
options.trust_region_problem_dump_format_type != CONSOLE &&
options.trust_region_problem_dump_directory.empty()) {
*error = "Solver::Options::trust_region_problem_dump_directory is empty.";
return false;
}
- if (options.dynamic_sparsity &&
- options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
- *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+ if (options.dynamic_sparsity) {
+ if (options.linear_solver_type != SPARSE_NORMAL_CHOLESKY) {
+ *error = "Dynamic sparsity is only supported with SPARSE_NORMAL_CHOLESKY.";
+ return false;
+ }
+ if (options.sparse_linear_algebra_library_type == ACCELERATE_SPARSE) {
+ *error = "ACCELERATE_SPARSE is not currently supported with dynamic "
+ "sparsity.";
+ return false;
+ }
+ }
+
+ if (options.linear_solver_type == CGNR &&
+ options.preconditioner_type == SUBSET &&
+ options.residual_blocks_for_subset_preconditioner.empty()) {
+ *error =
+ "When using SUBSET preconditioner, "
+ "Solver::Options::residual_blocks_for_subset_preconditioner cannot be "
+ "empty";
return false;
}
@@ -264,7 +238,8 @@ bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
OPTION_LT_OPTION(max_line_search_step_contraction,
min_line_search_step_contraction);
OPTION_LE(min_line_search_step_contraction, 1.0);
- OPTION_GT(max_num_line_search_step_size_iterations, 0);
+ OPTION_GE(max_num_line_search_step_size_iterations,
+ (options.minimizer_type == ceres::TRUST_REGION ? 0 : 1));
OPTION_GT(line_search_sufficient_function_decrease, 0.0);
OPTION_LT_OPTION(line_search_sufficient_function_decrease,
line_search_sufficient_curvature_decrease);
@@ -310,13 +285,13 @@ bool LineSearchOptionsAreValid(const Solver::Options& options, string* error) {
#undef OPTION_LT_OPTION
void StringifyOrdering(const vector<int>& ordering, string* report) {
- if (ordering.size() == 0) {
+ if (ordering.empty()) {
internal::StringAppendF(report, "AUTOMATIC");
return;
}
for (int i = 0; i < ordering.size() - 1; ++i) {
- internal::StringAppendF(report, "%d, ", ordering[i]);
+ internal::StringAppendF(report, "%d,", ordering[i]);
}
internal::StringAppendF(report, "%d", ordering.back());
}
@@ -364,7 +339,6 @@ void PreSolveSummarize(const Solver::Options& options,
summary->max_lbfgs_rank = options.max_lbfgs_rank;
summary->minimizer_type = options.minimizer_type;
summary->nonlinear_conjugate_gradient_type = options.nonlinear_conjugate_gradient_type; // NOLINT
- summary->num_linear_solver_threads_given = options.num_linear_solver_threads; // NOLINT
summary->num_threads_given = options.num_threads;
summary->preconditioner_type_given = options.preconditioner_type;
summary->sparse_linear_algebra_library_type = options.sparse_linear_algebra_library_type; // NOLINT
@@ -380,10 +354,9 @@ void PostSolveSummarize(const internal::PreprocessedProblem& pp,
&(summary->inner_iteration_ordering_used));
summary->inner_iterations_used = pp.inner_iteration_minimizer.get() != NULL; // NOLINT
- summary->linear_solver_type_used = pp.options.linear_solver_type;
- summary->num_linear_solver_threads_used = pp.options.num_linear_solver_threads; // NOLINT
+ summary->linear_solver_type_used = pp.linear_solver_options.type;
summary->num_threads_used = pp.options.num_threads;
- summary->preconditioner_type_used = pp.options.preconditioner_type; // NOLINT
+ summary->preconditioner_type_used = pp.options.preconditioner_type;
internal::SetSummaryFinalCost(summary);
@@ -391,36 +364,47 @@ void PostSolveSummarize(const internal::PreprocessedProblem& pp,
SummarizeReducedProgram(*pp.reduced_program, summary);
}
+ using internal::CallStatistics;
+
// It is possible that no evaluator was created. This would be the
// case if the preprocessor failed, or if the reduced problem did
// not contain any parameter blocks. Thus, only extract the
// evaluator statistics if one exists.
if (pp.evaluator.get() != NULL) {
- const map<string, double>& evaluator_time_statistics =
- pp.evaluator->TimeStatistics();
- summary->residual_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0);
- summary->jacobian_evaluation_time_in_seconds =
- FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0);
+ const map<string, CallStatistics>& evaluator_statistics =
+ pp.evaluator->Statistics();
+ {
+ const CallStatistics& call_stats = FindWithDefault(
+ evaluator_statistics, "Evaluator::Residual", CallStatistics());
+
+ summary->residual_evaluation_time_in_seconds = call_stats.time;
+ summary->num_residual_evaluations = call_stats.calls;
+ }
+ {
+ const CallStatistics& call_stats = FindWithDefault(
+ evaluator_statistics, "Evaluator::Jacobian", CallStatistics());
+
+ summary->jacobian_evaluation_time_in_seconds = call_stats.time;
+ summary->num_jacobian_evaluations = call_stats.calls;
+ }
}
// Again, like the evaluator, there may or may not be a linear
// solver from which we can extract run time statistics. In
// particular the line search solver does not use a linear solver.
if (pp.linear_solver.get() != NULL) {
- const map<string, double>& linear_solver_time_statistics =
- pp.linear_solver->TimeStatistics();
- summary->linear_solver_time_in_seconds =
- FindWithDefault(linear_solver_time_statistics,
- "LinearSolver::Solve",
- 0.0);
+ const map<string, CallStatistics>& linear_solver_statistics =
+ pp.linear_solver->Statistics();
+ const CallStatistics& call_stats = FindWithDefault(
+ linear_solver_statistics, "LinearSolver::Solve", CallStatistics());
+ summary->num_linear_solves = call_stats.calls;
+ summary->linear_solver_time_in_seconds = call_stats.time;
}
}
void Minimize(internal::PreprocessedProblem* pp,
Solver::Summary* summary) {
using internal::Program;
- using internal::scoped_ptr;
using internal::Minimizer;
Program* program = pp->reduced_program.get();
@@ -434,16 +418,36 @@ void Minimize(internal::PreprocessedProblem* pp,
return;
}
- scoped_ptr<Minimizer> minimizer(
+ const Vector original_reduced_parameters = pp->reduced_parameters;
+ std::unique_ptr<Minimizer> minimizer(
Minimizer::Create(pp->options.minimizer_type));
minimizer->Minimize(pp->minimizer_options,
pp->reduced_parameters.data(),
summary);
- if (summary->IsSolutionUsable()) {
- program->StateVectorToParameterBlocks(pp->reduced_parameters.data());
- program->CopyParameterBlockStateToUserState();
- }
+ program->StateVectorToParameterBlocks(
+ summary->IsSolutionUsable()
+ ? pp->reduced_parameters.data()
+ : original_reduced_parameters.data());
+ program->CopyParameterBlockStateToUserState();
+}
+
+std::string SchurStructureToString(const int row_block_size,
+ const int e_block_size,
+ const int f_block_size) {
+ const std::string row =
+ (row_block_size == Eigen::Dynamic)
+ ? "d" : internal::StringPrintf("%d", row_block_size);
+
+ const std::string e =
+ (e_block_size == Eigen::Dynamic)
+ ? "d" : internal::StringPrintf("%d", e_block_size);
+
+ const std::string f =
+ (f_block_size == Eigen::Dynamic)
+ ? "d" : internal::StringPrintf("%d", f_block_size);
+
+ return internal::StringPrintf("%s,%s,%s", row.c_str(), e.c_str(), f.c_str());
}
} // namespace
@@ -475,11 +479,10 @@ void Solver::Solve(const Solver::Options& options,
using internal::Preprocessor;
using internal::ProblemImpl;
using internal::Program;
- using internal::scoped_ptr;
using internal::WallTimeInSeconds;
- CHECK_NOTNULL(problem);
- CHECK_NOTNULL(summary);
+ CHECK(problem != nullptr);
+ CHECK(summary != nullptr);
double start_time = WallTimeInSeconds();
*summary = Summary();
@@ -488,18 +491,14 @@ void Solver::Solve(const Solver::Options& options,
return;
}
- ProblemImpl* problem_impl = problem->problem_impl_.get();
+ ProblemImpl* problem_impl = problem->impl_.get();
Program* program = problem_impl->mutable_program();
PreSolveSummarize(options, problem_impl, summary);
- // Make sure that all the parameter blocks states are set to the
- // values provided by the user.
- program->SetParameterBlockStatePtrsToUserStatePtrs();
-
// If gradient_checking is enabled, wrap all cost functions in a
// gradient checker and install a callback that terminates if any gradient
// error is detected.
- scoped_ptr<internal::ProblemImpl> gradient_checking_problem;
+ std::unique_ptr<internal::ProblemImpl> gradient_checking_problem;
internal::GradientCheckingIterationCallback gradient_checking_callback;
Solver::Options modified_options = options;
if (options.check_gradients) {
@@ -514,10 +513,46 @@ void Solver::Solve(const Solver::Options& options,
program = problem_impl->mutable_program();
}
- scoped_ptr<Preprocessor> preprocessor(
+ // Make sure that all the parameter blocks states are set to the
+ // values provided by the user.
+ program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // The main thread also does work so we only need to launch num_threads - 1.
+ problem_impl->context()->EnsureMinimumThreads(options.num_threads - 1);
+
+ std::unique_ptr<Preprocessor> preprocessor(
Preprocessor::Create(modified_options.minimizer_type));
PreprocessedProblem pp;
+
const bool status = preprocessor->Preprocess(modified_options, problem_impl, &pp);
+
+ // We check the linear_solver_options.type rather than
+ // modified_options.linear_solver_type because, depending on the
+ // lack of a Schur structure, the preprocessor may change the linear
+ // solver type.
+ if (IsSchurType(pp.linear_solver_options.type)) {
+ // TODO(sameeragarwal): We can likely eliminate the duplicate call
+ // to DetectStructure here and inside the linear solver, by
+ // calling this in the preprocessor.
+ int row_block_size;
+ int e_block_size;
+ int f_block_size;
+ DetectStructure(*static_cast<internal::BlockSparseMatrix*>(
+ pp.minimizer_options.jacobian.get())
+ ->block_structure(),
+ pp.linear_solver_options.elimination_groups[0],
+ &row_block_size,
+ &e_block_size,
+ &f_block_size);
+ summary->schur_structure_given =
+ SchurStructureToString(row_block_size, e_block_size, f_block_size);
+ internal::GetBestSchurTemplateSpecialization(&row_block_size,
+ &e_block_size,
+ &f_block_size);
+ summary->schur_structure_used =
+ SchurStructureToString(row_block_size, e_block_size, f_block_size);
+ }
+
summary->fixed_cost = pp.fixed_cost;
summary->preprocessor_time_in_seconds = WallTimeInSeconds() - start_time;
@@ -531,7 +566,7 @@ void Solver::Solve(const Solver::Options& options,
}
const double postprocessor_start_time = WallTimeInSeconds();
- problem_impl = problem->problem_impl_.get();
+ problem_impl = problem->impl_.get();
program = problem_impl->mutable_program();
// On exit, ensure that the parameter blocks again point at the user
// provided values and the parameter blocks are numbered according
@@ -559,66 +594,6 @@ void Solve(const Solver::Options& options,
solver.Solve(options, problem, summary);
}
-Solver::Summary::Summary()
- // Invalid values for most fields, to ensure that we are not
- // accidentally reporting default values.
- : minimizer_type(TRUST_REGION),
- termination_type(FAILURE),
- message("ceres::Solve was not called."),
- initial_cost(-1.0),
- final_cost(-1.0),
- fixed_cost(-1.0),
- num_successful_steps(-1),
- num_unsuccessful_steps(-1),
- num_inner_iteration_steps(-1),
- num_line_search_steps(-1),
- preprocessor_time_in_seconds(-1.0),
- minimizer_time_in_seconds(-1.0),
- postprocessor_time_in_seconds(-1.0),
- total_time_in_seconds(-1.0),
- linear_solver_time_in_seconds(-1.0),
- residual_evaluation_time_in_seconds(-1.0),
- jacobian_evaluation_time_in_seconds(-1.0),
- inner_iteration_time_in_seconds(-1.0),
- line_search_cost_evaluation_time_in_seconds(-1.0),
- line_search_gradient_evaluation_time_in_seconds(-1.0),
- line_search_polynomial_minimization_time_in_seconds(-1.0),
- line_search_total_time_in_seconds(-1.0),
- num_parameter_blocks(-1),
- num_parameters(-1),
- num_effective_parameters(-1),
- num_residual_blocks(-1),
- num_residuals(-1),
- num_parameter_blocks_reduced(-1),
- num_parameters_reduced(-1),
- num_effective_parameters_reduced(-1),
- num_residual_blocks_reduced(-1),
- num_residuals_reduced(-1),
- is_constrained(false),
- num_threads_given(-1),
- num_threads_used(-1),
- num_linear_solver_threads_given(-1),
- num_linear_solver_threads_used(-1),
- linear_solver_type_given(SPARSE_NORMAL_CHOLESKY),
- linear_solver_type_used(SPARSE_NORMAL_CHOLESKY),
- inner_iterations_given(false),
- inner_iterations_used(false),
- preconditioner_type_given(IDENTITY),
- preconditioner_type_used(IDENTITY),
- visibility_clustering_type(CANONICAL_VIEWS),
- trust_region_strategy_type(LEVENBERG_MARQUARDT),
- dense_linear_algebra_library_type(EIGEN),
- sparse_linear_algebra_library_type(SUITE_SPARSE),
- line_search_direction_type(LBFGS),
- line_search_type(ARMIJO),
- line_search_interpolation_type(BISECTION),
- nonlinear_conjugate_gradient_type(FLETCHER_REEVES),
- max_lbfgs_rank(-1) {
-}
-
-using internal::StringAppendF;
-using internal::StringPrintf;
-
string Solver::Summary::BriefReport() const {
return StringPrintf("Ceres Solver Report: "
"Iterations: %d, "
@@ -647,7 +622,7 @@ string Solver::Summary::FullReport() const {
}
StringAppendF(&report, "Residual blocks % 25d% 25d\n",
num_residual_blocks, num_residual_blocks_reduced);
- StringAppendF(&report, "Residual % 25d% 25d\n",
+ StringAppendF(&report, "Residuals % 25d% 25d\n",
num_residuals, num_residuals_reduced);
if (minimizer_type == TRUST_REGION) {
@@ -708,9 +683,6 @@ string Solver::Summary::FullReport() const {
}
StringAppendF(&report, "Threads % 25d% 25d\n",
num_threads_given, num_threads_used);
- StringAppendF(&report, "Linear solver threads % 23d% 25d\n",
- num_linear_solver_threads_given,
- num_linear_solver_threads_used);
string given;
StringifyOrdering(linear_solver_ordering_given, &given);
@@ -720,6 +692,12 @@ string Solver::Summary::FullReport() const {
"Linear solver ordering %22s %24s\n",
given.c_str(),
used.c_str());
+ if (IsSchurType(linear_solver_type_used)) {
+ StringAppendF(&report,
+ "Schur structure %22s %24s\n",
+ schur_structure_given.c_str(),
+ schur_structure_used.c_str());
+ }
if (inner_iterations_given) {
StringAppendF(&report,
@@ -808,44 +786,44 @@ string Solver::Summary::FullReport() const {
}
StringAppendF(&report, "\nTime (in seconds):\n");
- StringAppendF(&report, "Preprocessor %25.4f\n",
+ StringAppendF(&report, "Preprocessor %25.6f\n",
preprocessor_time_in_seconds);
- StringAppendF(&report, "\n Residual evaluation %23.4f\n",
- residual_evaluation_time_in_seconds);
+ StringAppendF(&report, "\n Residual only evaluation %18.6f (%d)\n",
+ residual_evaluation_time_in_seconds, num_residual_evaluations);
if (line_search_used) {
- StringAppendF(&report, " Line search cost evaluation %10.4f\n",
+ StringAppendF(&report, " Line search cost evaluation %10.6f\n",
line_search_cost_evaluation_time_in_seconds);
}
- StringAppendF(&report, " Jacobian evaluation %23.4f\n",
- jacobian_evaluation_time_in_seconds);
+ StringAppendF(&report, " Jacobian & residual evaluation %12.6f (%d)\n",
+ jacobian_evaluation_time_in_seconds, num_jacobian_evaluations);
if (line_search_used) {
- StringAppendF(&report, " Line search gradient evaluation %6.4f\n",
+ StringAppendF(&report, " Line search gradient evaluation %6.6f\n",
line_search_gradient_evaluation_time_in_seconds);
}
if (minimizer_type == TRUST_REGION) {
- StringAppendF(&report, " Linear solver %23.4f\n",
- linear_solver_time_in_seconds);
+ StringAppendF(&report, " Linear solver %23.6f (%d)\n",
+ linear_solver_time_in_seconds, num_linear_solves);
}
if (inner_iterations_used) {
- StringAppendF(&report, " Inner iterations %23.4f\n",
+ StringAppendF(&report, " Inner iterations %23.6f\n",
inner_iteration_time_in_seconds);
}
if (line_search_used) {
- StringAppendF(&report, " Line search polynomial minimization %.4f\n",
+ StringAppendF(&report, " Line search polynomial minimization %.6f\n",
line_search_polynomial_minimization_time_in_seconds);
}
- StringAppendF(&report, "Minimizer %25.4f\n\n",
+ StringAppendF(&report, "Minimizer %25.6f\n\n",
minimizer_time_in_seconds);
- StringAppendF(&report, "Postprocessor %24.4f\n",
+ StringAppendF(&report, "Postprocessor %24.6f\n",
postprocessor_time_in_seconds);
- StringAppendF(&report, "Total %25.4f\n\n",
+ StringAppendF(&report, "Total %25.6f\n\n",
total_time_in_seconds);
StringAppendF(&report, "Termination: %25s (%s)\n",
diff --git a/extern/ceres/internal/ceres/solver_utils.cc b/extern/ceres/internal/ceres/solver_utils.cc
index 7f4ff7eb940..177a928e090 100644
--- a/extern/ceres/internal/ceres/solver_utils.cc
+++ b/extern/ceres/internal/ceres/solver_utils.cc
@@ -30,6 +30,8 @@
#include <string>
+#include "ceres/internal/config.h"
+
#include "Eigen/Core"
#include "ceres/internal/port.h"
#include "ceres/solver_utils.h"
@@ -61,6 +63,10 @@ std::string VersionString() {
value += "-cxsparse-(" + std::string(CERES_CXSPARSE_VERSION) + ")";
#endif
+#ifndef CERES_NO_ACCELERATE_SPARSE
+ value += "-acceleratesparse";
+#endif
+
#ifdef CERES_USE_EIGEN_SPARSE
value += "-eigensparse";
#endif
diff --git a/extern/ceres/internal/ceres/sparse_cholesky.cc b/extern/ceres/internal/ceres/sparse_cholesky.cc
new file mode 100644
index 00000000000..d9d2100d3f9
--- /dev/null
+++ b/extern/ceres/internal/ceres/sparse_cholesky.cc
@@ -0,0 +1,163 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/sparse_cholesky.h"
+
+#include "ceres/accelerate_sparse.h"
+#include "ceres/cxsparse.h"
+#include "ceres/eigensparse.h"
+#include "ceres/float_cxsparse.h"
+#include "ceres/float_suitesparse.h"
+#include "ceres/iterative_refiner.h"
+#include "ceres/suitesparse.h"
+
+namespace ceres {
+namespace internal {
+
+std::unique_ptr<SparseCholesky> SparseCholesky::Create(
+ const LinearSolver::Options& options) {
+ const OrderingType ordering_type = options.use_postordering ? AMD : NATURAL;
+ std::unique_ptr<SparseCholesky> sparse_cholesky;
+
+ switch (options.sparse_linear_algebra_library_type) {
+ case SUITE_SPARSE:
+#ifndef CERES_NO_SUITESPARSE
+ if (options.use_mixed_precision_solves) {
+ sparse_cholesky = FloatSuiteSparseCholesky::Create(ordering_type);
+ } else {
+ sparse_cholesky = SuiteSparseCholesky::Create(ordering_type);
+ }
+ break;
+#else
+ (void)ordering_type;
+ LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
+#endif
+
+ case EIGEN_SPARSE:
+#ifdef CERES_USE_EIGEN_SPARSE
+ if (options.use_mixed_precision_solves) {
+ sparse_cholesky = FloatEigenSparseCholesky::Create(ordering_type);
+ } else {
+ sparse_cholesky = EigenSparseCholesky::Create(ordering_type);
+ }
+ break;
+#else
+ LOG(FATAL) << "Ceres was compiled without support for "
+ << "Eigen's sparse Cholesky factorization routines.";
+#endif
+
+ case CX_SPARSE:
+#ifndef CERES_NO_CXSPARSE
+ if (options.use_mixed_precision_solves) {
+ sparse_cholesky = FloatCXSparseCholesky::Create(ordering_type);
+ } else {
+ sparse_cholesky = CXSparseCholesky::Create(ordering_type);
+ }
+ break;
+#else
+ LOG(FATAL) << "Ceres was compiled without support for CXSparse.";
+#endif
+
+ case ACCELERATE_SPARSE:
+#ifndef CERES_NO_ACCELERATE_SPARSE
+ if (options.use_mixed_precision_solves) {
+ sparse_cholesky = AppleAccelerateCholesky<float>::Create(ordering_type);
+ } else {
+ sparse_cholesky = AppleAccelerateCholesky<double>::Create(ordering_type);
+ }
+ break;
+#else
+ LOG(FATAL) << "Ceres was compiled without support for Apple's Accelerate "
+ << "framework solvers.";
+#endif
+
+ default:
+ LOG(FATAL) << "Unknown sparse linear algebra library type : "
+ << SparseLinearAlgebraLibraryTypeToString(
+ options.sparse_linear_algebra_library_type);
+ }
+
+ if (options.max_num_refinement_iterations > 0) {
+ std::unique_ptr<IterativeRefiner> refiner(
+ new IterativeRefiner(options.max_num_refinement_iterations));
+ sparse_cholesky = std::unique_ptr<SparseCholesky>(new RefinedSparseCholesky(
+ std::move(sparse_cholesky), std::move(refiner)));
+ }
+ return sparse_cholesky;
+}
+
+SparseCholesky::~SparseCholesky() {}
+
+LinearSolverTerminationType SparseCholesky::FactorAndSolve(
+ CompressedRowSparseMatrix* lhs,
+ const double* rhs,
+ double* solution,
+ std::string* message) {
+ LinearSolverTerminationType termination_type = Factorize(lhs, message);
+ if (termination_type == LINEAR_SOLVER_SUCCESS) {
+ termination_type = Solve(rhs, solution, message);
+ }
+ return termination_type;
+}
+
+RefinedSparseCholesky::RefinedSparseCholesky(
+ std::unique_ptr<SparseCholesky> sparse_cholesky,
+ std::unique_ptr<IterativeRefiner> iterative_refiner)
+ : sparse_cholesky_(std::move(sparse_cholesky)),
+ iterative_refiner_(std::move(iterative_refiner)) {}
+
+RefinedSparseCholesky::~RefinedSparseCholesky() {}
+
+CompressedRowSparseMatrix::StorageType RefinedSparseCholesky::StorageType()
+ const {
+ return sparse_cholesky_->StorageType();
+}
+
+LinearSolverTerminationType RefinedSparseCholesky::Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) {
+ lhs_ = lhs;
+ return sparse_cholesky_->Factorize(lhs, message);
+}
+
+LinearSolverTerminationType RefinedSparseCholesky::Solve(const double* rhs,
+ double* solution,
+ std::string* message) {
+ CHECK(lhs_ != nullptr);
+ auto termination_type = sparse_cholesky_->Solve(rhs, solution, message);
+ if (termination_type != LINEAR_SOLVER_SUCCESS) {
+ return termination_type;
+ }
+
+ iterative_refiner_->Refine(*lhs_, rhs, sparse_cholesky_.get(), solution);
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/sparse_cholesky.h b/extern/ceres/internal/ceres/sparse_cholesky.h
new file mode 100644
index 00000000000..bbe42370505
--- /dev/null
+++ b/extern/ceres/internal/ceres/sparse_cholesky.h
@@ -0,0 +1,138 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SPARSE_CHOLESKY_H_
+#define CERES_INTERNAL_SPARSE_CHOLESKY_H_
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#include <memory>
+#include "ceres/linear_solver.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+// An interface that abstracts away the internal details of various
+// sparse linear algebra libraries and offers a simple API for solving
+// symmetric positive definite linear systems using a sparse Cholesky
+// factorization.
+//
+// Instances of SparseCholesky are expected to cache the symbolic
+// factorization of the linear system. They do this on the first call
+// to Factorize or FactorAndSolve. Subsequent calls to Factorize and
+// FactorAndSolve are expected to have the same sparsity structure.
+//
+// Example usage:
+//
+// std::unique_ptr<SparseCholesky>
+// sparse_cholesky(SparseCholesky::Create(SUITE_SPARSE, AMD));
+//
+// CompressedRowSparseMatrix lhs = ...;
+// std::string message;
+// CHECK_EQ(sparse_cholesky->Factorize(&lhs, &message), LINEAR_SOLVER_SUCCESS);
+// Vector rhs = ...;
+// Vector solution = ...;
+// CHECK_EQ(sparse_cholesky->Solve(rhs.data(), solution.data(), &message),
+// LINEAR_SOLVER_SUCCESS);
+
+class SparseCholesky {
+ public:
+ static std::unique_ptr<SparseCholesky> Create(
+ const LinearSolver::Options& options);
+
+ virtual ~SparseCholesky();
+
+ // Due to the symmetry of the linear system, sparse linear algebra
+ // libraries only use one half of the input matrix. Whether it is
+ // the upper or the lower triangular part of the matrix depends on
+ // the library and the re-ordering strategy being used. This
+ // function tells the user the storage type expected of the input
+ // matrix for the sparse linear algebra library and reordering
+ // strategy used.
+ virtual CompressedRowSparseMatrix::StorageType StorageType() const = 0;
+
+ // Computes the numeric factorization of the given matrix. If this
+ // is the first call to Factorize, first the symbolic factorization
+ // will be computed and cached and the numeric factorization will be
+ // computed based on that.
+ //
+ // Subsequent calls to Factorize will use that symbolic
+ // factorization assuming that the sparsity of the matrix has
+ // remained constant.
+ virtual LinearSolverTerminationType Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) = 0;
+
+ // Computes the solution to the equation
+ //
+ // lhs * solution = rhs
+ virtual LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) = 0;
+
+ // Convenience method which combines a call to Factorize and
+ // Solve. Solve is only called if Factorize returns
+ // LINEAR_SOLVER_SUCCESS.
+ virtual LinearSolverTerminationType FactorAndSolve(
+ CompressedRowSparseMatrix* lhs,
+ const double* rhs,
+ double* solution,
+ std::string* message);
+
+};
+
+class IterativeRefiner;
+
+// Computes an initial solution using the given instance of
+// SparseCholesky, and then refines it using the IterativeRefiner.
+class RefinedSparseCholesky : public SparseCholesky {
+ public:
+ RefinedSparseCholesky(std::unique_ptr<SparseCholesky> sparse_cholesky,
+ std::unique_ptr<IterativeRefiner> iterative_refiner);
+ virtual ~RefinedSparseCholesky();
+
+ virtual CompressedRowSparseMatrix::StorageType StorageType() const;
+ virtual LinearSolverTerminationType Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message);
+ virtual LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message);
+
+ private:
+ std::unique_ptr<SparseCholesky> sparse_cholesky_;
+ std::unique_ptr<IterativeRefiner> iterative_refiner_;
+ CompressedRowSparseMatrix* lhs_ = nullptr;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SPARSE_CHOLESKY_H_
diff --git a/extern/ceres/internal/ceres/sparse_matrix.h b/extern/ceres/internal/ceres/sparse_matrix.h
index b3af1d06440..074d847807e 100644
--- a/extern/ceres/internal/ceres/sparse_matrix.h
+++ b/extern/ceres/internal/ceres/sparse_matrix.h
@@ -90,7 +90,7 @@ class SparseMatrix : public LinearOperator {
virtual void ToTextFile(FILE* file) const = 0;
// Accessors for the values array that stores the entries of the
- // sparse matrix. The exact interpreptation of the values of this
+ // sparse matrix. The exact interpretation of the values of this
// array depends on the particular kind of SparseMatrix being
// accessed.
virtual double* mutable_values() = 0;
diff --git a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.cc b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
index a4c2c766ddc..0f2e589d041 100644
--- a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -33,461 +33,82 @@
#include <algorithm>
#include <cstring>
#include <ctime>
-#include <sstream>
+#include <memory>
-#include "ceres/compressed_row_sparse_matrix.h"
-#include "ceres/cxsparse.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/iterative_refiner.h"
#include "ceres/linear_solver.h"
-#include "ceres/suitesparse.h"
+#include "ceres/sparse_cholesky.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/wall_time.h"
-#include "Eigen/SparseCore"
-
-#ifdef CERES_USE_EIGEN_SPARSE
-#include "Eigen/SparseCholesky"
-#endif
namespace ceres {
namespace internal {
-namespace {
-
-#ifdef CERES_USE_EIGEN_SPARSE
-// A templated factorized and solve function, which allows us to use
-// the same code independent of whether a AMD or a Natural ordering is
-// used.
-template <typename SimplicialCholeskySolver, typename SparseMatrixType>
-LinearSolver::Summary SimplicialLDLTSolve(
- const SparseMatrixType& lhs,
- const bool do_symbolic_analysis,
- SimplicialCholeskySolver* solver,
- double* rhs_and_solution,
- EventLogger* event_logger) {
- LinearSolver::Summary summary;
- summary.num_iterations = 1;
- summary.termination_type = LINEAR_SOLVER_SUCCESS;
- summary.message = "Success.";
-
- if (do_symbolic_analysis) {
- solver->analyzePattern(lhs);
- if (VLOG_IS_ON(2)) {
- std::stringstream ss;
- solver->dumpMemory(ss);
- VLOG(2) << "Symbolic Analysis\n"
- << ss.str();
- }
- event_logger->AddEvent("Analyze");
- if (solver->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "Eigen failure. Unable to find symbolic factorization.";
- return summary;
- }
- }
-
- solver->factorize(lhs);
- event_logger->AddEvent("Factorize");
- if (solver->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "Eigen failure. Unable to find numeric factorization.";
- return summary;
- }
-
- const Vector rhs = VectorRef(rhs_and_solution, lhs.cols());
-
- VectorRef(rhs_and_solution, lhs.cols()) = solver->solve(rhs);
- event_logger->AddEvent("Solve");
- if (solver->info() != Eigen::Success) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "Eigen failure. Unable to do triangular solve.";
- return summary;
- }
-
- return summary;
-}
-
-#endif // CERES_USE_EIGEN_SPARSE
-
-#ifndef CERES_NO_CXSPARSE
-LinearSolver::Summary ComputeNormalEquationsAndSolveUsingCXSparse(
- CompressedRowSparseMatrix* A,
- double * rhs_and_solution,
- EventLogger* event_logger) {
- LinearSolver::Summary summary;
- summary.num_iterations = 1;
- summary.termination_type = LINEAR_SOLVER_SUCCESS;
- summary.message = "Success.";
-
- CXSparse cxsparse;
-
- // Wrap the augmented Jacobian in a compressed sparse column matrix.
- cs_di a_transpose = cxsparse.CreateSparseMatrixTransposeView(A);
-
- // Compute the normal equations. J'J delta = J'f and solve them
- // using a sparse Cholesky factorization. Notice that when compared
- // to SuiteSparse we have to explicitly compute the transpose of Jt,
- // and then the normal equations before they can be
- // factorized. CHOLMOD/SuiteSparse on the other hand can just work
- // off of Jt to compute the Cholesky factorization of the normal
- // equations.
- cs_di* a = cxsparse.TransposeMatrix(&a_transpose);
- cs_di* lhs = cxsparse.MatrixMatrixMultiply(&a_transpose, a);
- cxsparse.Free(a);
- event_logger->AddEvent("NormalEquations");
-
- cs_dis* factor = cxsparse.AnalyzeCholesky(lhs);
- event_logger->AddEvent("Analysis");
-
- if (factor == NULL) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message = "CXSparse::AnalyzeCholesky failed.";
- } else if (!cxsparse.SolveCholesky(lhs, factor, rhs_and_solution)) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "CXSparse::SolveCholesky failed.";
- }
- event_logger->AddEvent("Solve");
-
- cxsparse.Free(lhs);
- cxsparse.Free(factor);
- event_logger->AddEvent("TearDown");
- return summary;
-}
-
-#endif // CERES_NO_CXSPARSE
-
-} // namespace
SparseNormalCholeskySolver::SparseNormalCholeskySolver(
const LinearSolver::Options& options)
- : factor_(NULL),
- cxsparse_factor_(NULL),
- options_(options) {
+ : options_(options) {
+ sparse_cholesky_ = SparseCholesky::Create(options);
}
-void SparseNormalCholeskySolver::FreeFactorization() {
- if (factor_ != NULL) {
- ss_.Free(factor_);
- factor_ = NULL;
- }
-
- if (cxsparse_factor_ != NULL) {
- cxsparse_.Free(cxsparse_factor_);
- cxsparse_factor_ = NULL;
- }
-}
-
-SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
- FreeFactorization();
-}
+SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {}
LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
- CompressedRowSparseMatrix* A,
+ BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
- double * x) {
-
- const int num_cols = A->num_cols();
- VectorRef(x, num_cols).setZero();
- A->LeftMultiply(b, x);
-
- if (per_solve_options.D != NULL) {
- // Temporarily append a diagonal block to the A matrix, but undo
- // it before returning the matrix to the user.
- scoped_ptr<CompressedRowSparseMatrix> regularizer;
- if (A->col_blocks().size() > 0) {
- regularizer.reset(CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
- per_solve_options.D, A->col_blocks()));
- } else {
- regularizer.reset(new CompressedRowSparseMatrix(
- per_solve_options.D, num_cols));
- }
- A->AppendRows(*regularizer);
- }
-
- LinearSolver::Summary summary;
- switch (options_.sparse_linear_algebra_library_type) {
- case SUITE_SPARSE:
- summary = SolveImplUsingSuiteSparse(A, x);
- break;
- case CX_SPARSE:
- summary = SolveImplUsingCXSparse(A, x);
- break;
- case EIGEN_SPARSE:
- summary = SolveImplUsingEigen(A, x);
- break;
- default:
- LOG(FATAL) << "Unknown sparse linear algebra library : "
- << options_.sparse_linear_algebra_library_type;
- }
-
- if (per_solve_options.D != NULL) {
- A->DeleteRows(num_cols);
- }
-
- return summary;
-}
-
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingEigen(
- CompressedRowSparseMatrix* A,
- double * rhs_and_solution) {
-#ifndef CERES_USE_EIGEN_SPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "SPARSE_NORMAL_CHOLESKY cannot be used with EIGEN_SPARSE "
- "because Ceres was not built with support for "
- "Eigen's SimplicialLDLT decomposition. "
- "This requires enabling building with -DEIGENSPARSE=ON.";
- return summary;
-
-#else
-
- EventLogger event_logger("SparseNormalCholeskySolver::Eigen::Solve");
- // Compute the normal equations. J'J delta = J'f and solve them
- // using a sparse Cholesky factorization. Notice that when compared
- // to SuiteSparse we have to explicitly compute the normal equations
- // before they can be factorized. CHOLMOD/SuiteSparse on the other
- // hand can just work off of Jt to compute the Cholesky
- // factorization of the normal equations.
-
- if (options_.dynamic_sparsity) {
- // In the case where the problem has dynamic sparsity, it is not
- // worth using the ComputeOuterProduct routine, as the setup cost
- // is not amortized over multiple calls to Solve.
- Eigen::MappedSparseMatrix<double, Eigen::RowMajor> a(
- A->num_rows(),
- A->num_cols(),
- A->num_nonzeros(),
- A->mutable_rows(),
- A->mutable_cols(),
- A->mutable_values());
-
- Eigen::SparseMatrix<double> lhs = a.transpose() * a;
- Eigen::SimplicialLDLT<Eigen::SparseMatrix<double> > solver;
- return SimplicialLDLTSolve(lhs,
- true,
- &solver,
- rhs_and_solution,
- &event_logger);
- }
-
- if (outer_product_.get() == NULL) {
- outer_product_.reset(
- CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- *A, &pattern_));
- }
-
- CompressedRowSparseMatrix::ComputeOuterProduct(
- *A, pattern_, outer_product_.get());
-
- // Map to an upper triangular column major matrix.
- //
- // outer_product_ is a compressed row sparse matrix and in lower
- // triangular form, when mapped to a compressed column sparse
- // matrix, it becomes an upper triangular matrix.
- Eigen::MappedSparseMatrix<double, Eigen::ColMajor> lhs(
- outer_product_->num_rows(),
- outer_product_->num_rows(),
- outer_product_->num_nonzeros(),
- outer_product_->mutable_rows(),
- outer_product_->mutable_cols(),
- outer_product_->mutable_values());
-
- bool do_symbolic_analysis = false;
-
- // If using post ordering or an old version of Eigen, we cannot
- // depend on a preordered jacobian, so we work with a SimplicialLDLT
- // decomposition with AMD ordering.
- if (options_.use_postordering ||
- !EIGEN_VERSION_AT_LEAST(3, 2, 2)) {
- if (amd_ldlt_.get() == NULL) {
- amd_ldlt_.reset(new SimplicialLDLTWithAMDOrdering);
- do_symbolic_analysis = true;
- }
-
- return SimplicialLDLTSolve(lhs,
- do_symbolic_analysis,
- amd_ldlt_.get(),
- rhs_and_solution,
- &event_logger);
- }
-
-#if EIGEN_VERSION_AT_LEAST(3,2,2)
- // The common case
- if (natural_ldlt_.get() == NULL) {
- natural_ldlt_.reset(new SimplicialLDLTWithNaturalOrdering);
- do_symbolic_analysis = true;
- }
-
- return SimplicialLDLTSolve(lhs,
- do_symbolic_analysis,
- natural_ldlt_.get(),
- rhs_and_solution,
- &event_logger);
-#endif
-
-#endif // EIGEN_USE_EIGEN_SPARSE
-}
-
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
- CompressedRowSparseMatrix* A,
- double * rhs_and_solution) {
-#ifdef CERES_NO_CXSPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "SPARSE_NORMAL_CHOLESKY cannot be used with CX_SPARSE "
- "because Ceres was not built with support for CXSparse. "
- "This requires enabling building with -DCXSPARSE=ON.";
-
- return summary;
-
-#else
-
- EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve");
- if (options_.dynamic_sparsity) {
- return ComputeNormalEquationsAndSolveUsingCXSparse(A,
- rhs_and_solution,
- &event_logger);
- }
-
+ double* x) {
+ EventLogger event_logger("SparseNormalCholeskySolver::Solve");
LinearSolver::Summary summary;
summary.num_iterations = 1;
summary.termination_type = LINEAR_SOLVER_SUCCESS;
summary.message = "Success.";
- // Compute the normal equations. J'J delta = J'f and solve them
- // using a sparse Cholesky factorization. Notice that when compared
- // to SuiteSparse we have to explicitly compute the normal equations
- // before they can be factorized. CHOLMOD/SuiteSparse on the other
- // hand can just work off of Jt to compute the Cholesky
- // factorization of the normal equations.
- if (outer_product_.get() == NULL) {
- outer_product_.reset(
- CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- *A, &pattern_));
- }
-
- CompressedRowSparseMatrix::ComputeOuterProduct(
- *A, pattern_, outer_product_.get());
- cs_di lhs =
- cxsparse_.CreateSparseMatrixTransposeView(outer_product_.get());
-
- event_logger.AddEvent("Setup");
-
- // Compute symbolic factorization if not available.
- if (cxsparse_factor_ == NULL) {
- if (options_.use_postordering) {
- cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(&lhs,
- A->col_blocks(),
- A->col_blocks());
- } else {
- cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(&lhs);
- }
- }
- event_logger.AddEvent("Analysis");
-
- if (cxsparse_factor_ == NULL) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "CXSparse failure. Unable to find symbolic factorization.";
- } else if (!cxsparse_.SolveCholesky(&lhs,
- cxsparse_factor_,
- rhs_and_solution)) {
- summary.termination_type = LINEAR_SOLVER_FAILURE;
- summary.message = "CXSparse::SolveCholesky failed.";
- }
- event_logger.AddEvent("Solve");
-
- return summary;
-#endif
-}
-
-LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
- CompressedRowSparseMatrix* A,
- double * rhs_and_solution) {
-#ifdef CERES_NO_SUITESPARSE
-
- LinearSolver::Summary summary;
- summary.num_iterations = 0;
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- summary.message =
- "SPARSE_NORMAL_CHOLESKY cannot be used with SUITE_SPARSE "
- "because Ceres was not built with support for SuiteSparse. "
- "This requires enabling building with -DSUITESPARSE=ON.";
- return summary;
-
-#else
-
- EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve");
- LinearSolver::Summary summary;
- summary.termination_type = LINEAR_SOLVER_SUCCESS;
- summary.num_iterations = 1;
- summary.message = "Success.";
-
const int num_cols = A->num_cols();
- cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A);
- event_logger.AddEvent("Setup");
-
- if (options_.dynamic_sparsity) {
- FreeFactorization();
- }
+ VectorRef xref(x, num_cols);
+ xref.setZero();
+ rhs_.resize(num_cols);
+ rhs_.setZero();
+ A->LeftMultiply(b, rhs_.data());
+ event_logger.AddEvent("Compute RHS");
- if (factor_ == NULL) {
- if (options_.use_postordering) {
- factor_ = ss_.BlockAnalyzeCholesky(&lhs,
- A->col_blocks(),
- A->row_blocks(),
- &summary.message);
- } else {
- if (options_.dynamic_sparsity) {
- factor_ = ss_.AnalyzeCholesky(&lhs, &summary.message);
- } else {
- factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs,
- &summary.message);
- }
- }
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ std::unique_ptr<BlockSparseMatrix> regularizer;
+ regularizer.reset(BlockSparseMatrix::CreateDiagonalMatrix(
+ per_solve_options.D, A->block_structure()->cols));
+ event_logger.AddEvent("Diagonal");
+ A->AppendRows(*regularizer);
+ event_logger.AddEvent("Append");
}
- event_logger.AddEvent("Analysis");
+ event_logger.AddEvent("Append Rows");
- if (factor_ == NULL) {
- summary.termination_type = LINEAR_SOLVER_FATAL_ERROR;
- // No need to set message as it has already been set by the
- // symbolic analysis routines above.
- return summary;
- }
+ if (inner_product_computer_.get() == NULL) {
+ inner_product_computer_.reset(
+ InnerProductComputer::Create(*A, sparse_cholesky_->StorageType()));
- summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message);
- if (summary.termination_type != LINEAR_SOLVER_SUCCESS) {
- return summary;
+ event_logger.AddEvent("InnerProductComputer::Create");
}
- cholmod_dense* rhs = ss_.CreateDenseVector(rhs_and_solution,
- num_cols,
- num_cols);
- cholmod_dense* solution = ss_.Solve(factor_, rhs, &summary.message);
- event_logger.AddEvent("Solve");
+ inner_product_computer_->Compute();
+ event_logger.AddEvent("InnerProductComputer::Compute");
- ss_.Free(rhs);
- if (solution != NULL) {
- memcpy(rhs_and_solution, solution->x, num_cols * sizeof(*rhs_and_solution));
- ss_.Free(solution);
- } else {
- // No need to set message as it has already been set by the
- // numeric factorization routine above.
- summary.termination_type = LINEAR_SOLVER_FAILURE;
+ if (per_solve_options.D != NULL) {
+ A->DeleteRowBlocks(A->block_structure()->cols.size());
}
- event_logger.AddEvent("Teardown");
+ summary.termination_type = sparse_cholesky_->FactorAndSolve(
+ inner_product_computer_->mutable_result(),
+ rhs_.data(),
+ x,
+ &summary.message);
+ event_logger.AddEvent("SparseCholesky::FactorAndSolve");
return summary;
-#endif
}
-} // namespace internal
-} // namespace ceres
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
index 2a93bc56d29..cbff2bdb3f6 100644
--- a/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/extern/ceres/internal/ceres/sparse_normal_cholesky_solver.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -34,91 +34,40 @@
#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
-#include <vector>
-
// This include must come before any #ifndef check on Ceres compile options.
#include "ceres/internal/port.h"
-#include "ceres/internal/macros.h"
+#include <vector>
#include "ceres/linear_solver.h"
-#include "ceres/suitesparse.h"
-#include "ceres/cxsparse.h"
-
-#ifdef CERES_USE_EIGEN_SPARSE
-#include "Eigen/SparseCholesky"
-#endif
namespace ceres {
namespace internal {
class CompressedRowSparseMatrix;
+class InnerProductComputer;
+class SparseCholesky;
-// Solves the normal equations (A'A + D'D) x = A'b, using the CHOLMOD sparse
-// cholesky solver.
-class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
+// Solves the normal equations (A'A + D'D) x = A'b, using the sparse
+// linear algebra library of the user's choice.
+class SparseNormalCholeskySolver : public BlockSparseMatrixSolver {
public:
explicit SparseNormalCholeskySolver(const LinearSolver::Options& options);
+ SparseNormalCholeskySolver(const SparseNormalCholeskySolver&) = delete;
+ void operator=(const SparseNormalCholeskySolver&) = delete;
+
virtual ~SparseNormalCholeskySolver();
private:
- virtual LinearSolver::Summary SolveImpl(
- CompressedRowSparseMatrix* A,
+ LinearSolver::Summary SolveImpl(
+ BlockSparseMatrix* A,
const double* b,
const LinearSolver::PerSolveOptions& options,
- double* x);
-
- LinearSolver::Summary SolveImplUsingSuiteSparse(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
-
-
- LinearSolver::Summary SolveImplUsingCXSparse(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
-
- LinearSolver::Summary SolveImplUsingEigen(
- CompressedRowSparseMatrix* A,
- double* rhs_and_solution);
-
- void FreeFactorization();
-
- SuiteSparse ss_;
- // Cached factorization
- cholmod_factor* factor_;
-
- CXSparse cxsparse_;
- // Cached factorization
- cs_dis* cxsparse_factor_;
-
-#ifdef CERES_USE_EIGEN_SPARSE
-
- // The preprocessor gymnastics here are dealing with the fact that
- // before version 3.2.2, Eigen did not support a third template
- // parameter to specify the ordering.
-#if EIGEN_VERSION_AT_LEAST(3,2,2)
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper,
- Eigen::NaturalOrdering<int> >
- SimplicialLDLTWithNaturalOrdering;
- scoped_ptr<SimplicialLDLTWithNaturalOrdering> natural_ldlt_;
-
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper,
- Eigen::AMDOrdering<int> >
- SimplicialLDLTWithAMDOrdering;
- scoped_ptr<SimplicialLDLTWithAMDOrdering> amd_ldlt_;
-
-#else
- typedef Eigen::SimplicialLDLT<Eigen::SparseMatrix<double>, Eigen::Upper>
- SimplicialLDLTWithAMDOrdering;
-
- scoped_ptr<SimplicialLDLTWithAMDOrdering> amd_ldlt_;
-#endif
-
-#endif
+ double* x) final;
- scoped_ptr<CompressedRowSparseMatrix> outer_product_;
- std::vector<int> pattern_;
const LinearSolver::Options options_;
- CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
+ Vector rhs_;
+ std::unique_ptr<SparseCholesky> sparse_cholesky_;
+ std::unique_ptr<InnerProductComputer> inner_product_computer_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/split.cc b/extern/ceres/internal/ceres/split.cc
index 296c09a6440..3a09e866839 100644
--- a/extern/ceres/internal/ceres/split.cc
+++ b/extern/ceres/internal/ceres/split.cc
@@ -115,7 +115,7 @@ void SplitStringUsing(const string& full,
const char* delim,
vector<string>* result) {
result->reserve(result->size() + CalculateReserveForVector(full, delim));
- std::back_insert_iterator<vector<string> > it(*result);
+ std::back_insert_iterator<vector<string>> it(*result);
SplitStringToIteratorUsing(full, delim, it);
}
diff --git a/extern/ceres/internal/ceres/stringprintf.cc b/extern/ceres/internal/ceres/stringprintf.cc
index b3b7474d8f8..7a21f0e2118 100644
--- a/extern/ceres/internal/ceres/stringprintf.cc
+++ b/extern/ceres/internal/ceres/stringprintf.cc
@@ -43,28 +43,6 @@ namespace internal {
using std::string;
-// va_copy() was defined in the C99 standard. However, it did not appear in the
-// C++ standard until C++11. This means that if Ceres is being compiled with a
-// strict pre-C++11 standard (e.g. -std=c++03), va_copy() will NOT be defined,
-// as we are using the C++ compiler (it would however be defined if we were
-// using the C compiler). Note however that both GCC & Clang will in fact
-// define va_copy() when compiling for C++ if the C++ standard is not explicitly
-// specified (i.e. no -std=c++<XX> arg), even though it should not strictly be
-// defined unless -std=c++11 (or greater) was passed.
-#if !defined(va_copy)
-#if defined (__GNUC__)
-// On GCC/Clang, if va_copy() is not defined (C++ standard < C++11 explicitly
-// specified), use the internal __va_copy() version, which should be present
-// in even very old GCC versions.
-#define va_copy(d, s) __va_copy(d, s)
-#else
-// Some older versions of MSVC do not have va_copy(), in which case define it.
-// Although this is required for older MSVC versions, it should also work for
-// other non-GCC/Clang compilers which also do not defined va_copy().
-#define va_copy(d, s) ((d) = (s))
-#endif // defined (__GNUC__)
-#endif // !defined(va_copy)
-
void StringAppendV(string* dst, const char* format, va_list ap) {
// First try with a small fixed size buffer
char space[1024];
diff --git a/extern/ceres/internal/ceres/subset_preconditioner.cc b/extern/ceres/internal/ceres/subset_preconditioner.cc
new file mode 100644
index 00000000000..7c24ae9f288
--- /dev/null
+++ b/extern/ceres/internal/ceres/subset_preconditioner.cc
@@ -0,0 +1,117 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/subset_preconditioner.h"
+
+#include <memory>
+#include <string>
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/inner_product_computer.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+SubsetPreconditioner::SubsetPreconditioner(
+ const Preconditioner::Options& options, const BlockSparseMatrix& A)
+ : options_(options), num_cols_(A.num_cols()) {
+ CHECK_GE(options_.subset_preconditioner_start_row_block, 0)
+ << "Congratulations, you found a bug in Ceres. Please report it.";
+
+ LinearSolver::Options sparse_cholesky_options;
+ sparse_cholesky_options.sparse_linear_algebra_library_type =
+ options_.sparse_linear_algebra_library_type;
+ sparse_cholesky_options.use_postordering =
+ options_.use_postordering;
+ sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
+}
+
+SubsetPreconditioner::~SubsetPreconditioner() {}
+
+void SubsetPreconditioner::RightMultiply(const double* x, double* y) const {
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
+ std::string message;
+ sparse_cholesky_->Solve(x, y, &message);
+}
+
+bool SubsetPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+ const double* D) {
+ BlockSparseMatrix* m = const_cast<BlockSparseMatrix*>(&A);
+ const CompressedRowBlockStructure* bs = m->block_structure();
+
+ // A = [P]
+ // [Q]
+
+ // Now add D to A if needed.
+ if (D != NULL) {
+ // A = [P]
+ // [Q]
+ // [D]
+ std::unique_ptr<BlockSparseMatrix> regularizer(
+ BlockSparseMatrix::CreateDiagonalMatrix(D, bs->cols));
+ m->AppendRows(*regularizer);
+ }
+
+ if (inner_product_computer_.get() == NULL) {
+ inner_product_computer_.reset(InnerProductComputer::Create(
+ *m,
+ options_.subset_preconditioner_start_row_block,
+ bs->rows.size(),
+ sparse_cholesky_->StorageType()));
+ }
+
+ // Compute inner_product = [Q'*Q + D'*D]
+ inner_product_computer_->Compute();
+
+ // Unappend D if needed.
+ if (D != NULL) {
+ // A = [P]
+ // [Q]
+ m->DeleteRowBlocks(bs->cols.size());
+ }
+
+ std::string message;
+ // Compute L. s.t., LL' = Q'*Q + D'*D
+ const LinearSolverTerminationType termination_type =
+ sparse_cholesky_->Factorize(inner_product_computer_->mutable_result(),
+ &message);
+ if (termination_type != LINEAR_SOLVER_SUCCESS) {
+ LOG(ERROR) << "Preconditioner factorization failed: " << message;
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/subset_preconditioner.h b/extern/ceres/internal/ceres/subset_preconditioner.h
new file mode 100644
index 00000000000..6f3c9ecd052
--- /dev/null
+++ b/extern/ceres/internal/ceres/subset_preconditioner.h
@@ -0,0 +1,91 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
+#define CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
+
+#include <memory>
+#include "ceres/preconditioner.h"
+
+namespace ceres {
+namespace internal {
+
+class BlockSparseMatrix;
+class SparseCholesky;
+class InnerProductComputer;
+
+// Subset preconditioning, uses a subset of the rows of the Jacobian
+// to construct a preconditioner for the normal equations.
+//
+// To keep the interface simple, we assume that the matrix A has
+// already been re-ordered that the user wishes to some subset of the
+// bottom row blocks of the matrix as the preconditioner. This is
+// controlled by
+// Preconditioner::Options::subset_preconditioner_start_row_block.
+//
+// When using the subset preconditioner, all row blocks starting
+// from this row block are used to construct the preconditioner.
+//
+// More precisely the matrix A is horizontally partitioned as
+//
+// A = [P]
+// [Q]
+//
+// where P has subset_preconditioner_start_row_block row blocks,
+// and the preconditioner is the inverse of the matrix Q'Q.
+//
+// Obviously, the smaller this number, the more accurate and
+// computationally expensive this preconditioner will be.
+//
+// See the tests for example usage.
+class SubsetPreconditioner : public BlockSparseMatrixPreconditioner {
+ public:
+ SubsetPreconditioner(const Preconditioner::Options& options,
+ const BlockSparseMatrix& A);
+ virtual ~SubsetPreconditioner();
+
+ // Preconditioner interface
+ void RightMultiply(const double* x, double* y) const final;
+ int num_rows() const final { return num_cols_; }
+ int num_cols() const final { return num_cols_; }
+
+ private:
+ bool UpdateImpl(const BlockSparseMatrix& A, const double* D) final;
+
+ const Preconditioner::Options options_;
+ const int num_cols_;
+ std::unique_ptr<SparseCholesky> sparse_cholesky_;
+ std::unique_ptr<InnerProductComputer> inner_product_computer_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SUBSET_PRECONDITIONER_H_
diff --git a/extern/ceres/internal/ceres/suitesparse.cc b/extern/ceres/internal/ceres/suitesparse.cc
new file mode 100644
index 00000000000..190d1755add
--- /dev/null
+++ b/extern/ceres/internal/ceres/suitesparse.cc
@@ -0,0 +1,430 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifndef CERES_NO_SUITESPARSE
+#include "ceres/suitesparse.h"
+
+#include <vector>
+
+#include "ceres/compressed_col_sparse_matrix_utils.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "cholmod.h"
+
+namespace ceres {
+namespace internal {
+
+using std::string;
+using std::vector;
+
+SuiteSparse::SuiteSparse() { cholmod_start(&cc_); }
+
+SuiteSparse::~SuiteSparse() { cholmod_finish(&cc_); }
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrix(TripletSparseMatrix* A) {
+ cholmod_triplet triplet;
+
+ triplet.nrow = A->num_rows();
+ triplet.ncol = A->num_cols();
+ triplet.nzmax = A->max_num_nonzeros();
+ triplet.nnz = A->num_nonzeros();
+ triplet.i = reinterpret_cast<void*>(A->mutable_rows());
+ triplet.j = reinterpret_cast<void*>(A->mutable_cols());
+ triplet.x = reinterpret_cast<void*>(A->mutable_values());
+ triplet.stype = 0; // Matrix is not symmetric.
+ triplet.itype = CHOLMOD_INT;
+ triplet.xtype = CHOLMOD_REAL;
+ triplet.dtype = CHOLMOD_DOUBLE;
+
+ return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+cholmod_sparse* SuiteSparse::CreateSparseMatrixTranspose(
+ TripletSparseMatrix* A) {
+ cholmod_triplet triplet;
+
+ triplet.ncol = A->num_rows(); // swap row and columns
+ triplet.nrow = A->num_cols();
+ triplet.nzmax = A->max_num_nonzeros();
+ triplet.nnz = A->num_nonzeros();
+
+ // swap rows and columns
+ triplet.j = reinterpret_cast<void*>(A->mutable_rows());
+ triplet.i = reinterpret_cast<void*>(A->mutable_cols());
+ triplet.x = reinterpret_cast<void*>(A->mutable_values());
+ triplet.stype = 0; // Matrix is not symmetric.
+ triplet.itype = CHOLMOD_INT;
+ triplet.xtype = CHOLMOD_REAL;
+ triplet.dtype = CHOLMOD_DOUBLE;
+
+ return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
+}
+
+cholmod_sparse SuiteSparse::CreateSparseMatrixTransposeView(
+ CompressedRowSparseMatrix* A) {
+ cholmod_sparse m;
+ m.nrow = A->num_cols();
+ m.ncol = A->num_rows();
+ m.nzmax = A->num_nonzeros();
+ m.nz = nullptr;
+ m.p = reinterpret_cast<void*>(A->mutable_rows());
+ m.i = reinterpret_cast<void*>(A->mutable_cols());
+ m.x = reinterpret_cast<void*>(A->mutable_values());
+ m.z = nullptr;
+
+ if (A->storage_type() == CompressedRowSparseMatrix::LOWER_TRIANGULAR) {
+ m.stype = 1;
+ } else if (A->storage_type() == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+ m.stype = -1;
+ } else {
+ m.stype = 0;
+ }
+
+ m.itype = CHOLMOD_INT;
+ m.xtype = CHOLMOD_REAL;
+ m.dtype = CHOLMOD_DOUBLE;
+ m.sorted = 1;
+ m.packed = 1;
+
+ return m;
+}
+
+cholmod_dense SuiteSparse::CreateDenseVectorView(const double* x, int size) {
+ cholmod_dense v;
+ v.nrow = size;
+ v.ncol = 1;
+ v.nzmax = size;
+ v.d = size;
+ v.x = const_cast<void*>(reinterpret_cast<const void*>(x));
+ v.xtype = CHOLMOD_REAL;
+ v.dtype = CHOLMOD_DOUBLE;
+ return v;
+}
+
+cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
+ int in_size,
+ int out_size) {
+ CHECK_LE(in_size, out_size);
+ cholmod_dense* v = cholmod_zeros(out_size, 1, CHOLMOD_REAL, &cc_);
+ if (x != nullptr) {
+ memcpy(v->x, x, in_size * sizeof(*x));
+ }
+ return v;
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A,
+ string* message) {
+ // Cholmod can try multiple re-ordering strategies to find a fill
+ // reducing ordering. Here we just tell it use AMD with automatic
+ // matrix dependence choice of supernodal versus simplicial
+ // factorization.
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_AMD;
+ cc_.supernodal = CHOLMOD_AUTO;
+
+ cholmod_factor* factor = cholmod_analyze(A, &cc_);
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+ }
+
+ if (cc_.status != CHOLMOD_OK) {
+ *message =
+ StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+ return nullptr;
+ }
+
+ CHECK(factor != nullptr);
+ return factor;
+}
+
+cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ string* message) {
+ vector<int> ordering;
+ if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
+ return nullptr;
+ }
+ return AnalyzeCholeskyWithUserOrdering(A, ordering, message);
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
+ cholmod_sparse* A, const vector<int>& ordering, string* message) {
+ CHECK_EQ(ordering.size(), A->nrow);
+
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_GIVEN;
+
+ cholmod_factor* factor =
+ cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), nullptr, 0, &cc_);
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+ }
+ if (cc_.status != CHOLMOD_OK) {
+ *message =
+ StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+ return nullptr;
+ }
+
+ CHECK(factor != nullptr);
+ return factor;
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithNaturalOrdering(
+ cholmod_sparse* A, string* message) {
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_NATURAL;
+ cc_.postorder = 0;
+
+ cholmod_factor* factor = cholmod_analyze(A, &cc_);
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_);
+ }
+ if (cc_.status != CHOLMOD_OK) {
+ *message =
+ StringPrintf("cholmod_analyze failed. error code: %d", cc_.status);
+ return nullptr;
+ }
+
+ CHECK(factor != nullptr);
+ return factor;
+}
+
+bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* ordering) {
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ // Arrays storing the compressed column structure of the matrix
+  // encoding the block sparsity of A.
+ vector<int> block_cols;
+ vector<int> block_rows;
+
+ CompressedColumnScalarMatrixToBlockMatrix(reinterpret_cast<const int*>(A->i),
+ reinterpret_cast<const int*>(A->p),
+ row_blocks,
+ col_blocks,
+ &block_rows,
+ &block_cols);
+ cholmod_sparse_struct block_matrix;
+ block_matrix.nrow = num_row_blocks;
+ block_matrix.ncol = num_col_blocks;
+ block_matrix.nzmax = block_rows.size();
+ block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
+ block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
+ block_matrix.x = nullptr;
+ block_matrix.stype = A->stype;
+ block_matrix.itype = CHOLMOD_INT;
+ block_matrix.xtype = CHOLMOD_PATTERN;
+ block_matrix.dtype = CHOLMOD_DOUBLE;
+ block_matrix.sorted = 1;
+ block_matrix.packed = 1;
+
+ vector<int> block_ordering(num_row_blocks);
+ if (!cholmod_amd(&block_matrix, nullptr, 0, &block_ordering[0], &cc_)) {
+ return false;
+ }
+
+ BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
+ return true;
+}
+
+LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A,
+ cholmod_factor* L,
+ string* message) {
+ CHECK(A != nullptr);
+ CHECK(L != nullptr);
+
+ // Save the current print level and silence CHOLMOD, otherwise
+ // CHOLMOD is prone to dumping stuff to stderr, which can be
+ // distracting when the error (matrix is indefinite) is not a fatal
+ // failure.
+ const int old_print_level = cc_.print;
+ cc_.print = 0;
+
+ cc_.quick_return_if_not_posdef = 1;
+ int cholmod_status = cholmod_factorize(A, L, &cc_);
+ cc_.print = old_print_level;
+
+ switch (cc_.status) {
+ case CHOLMOD_NOT_INSTALLED:
+ *message = "CHOLMOD failure: Method not installed.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ case CHOLMOD_OUT_OF_MEMORY:
+ *message = "CHOLMOD failure: Out of memory.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ case CHOLMOD_TOO_LARGE:
+ *message = "CHOLMOD failure: Integer overflow occurred.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ case CHOLMOD_INVALID:
+ *message = "CHOLMOD failure: Invalid input.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ case CHOLMOD_NOT_POSDEF:
+ *message = "CHOLMOD warning: Matrix not positive definite.";
+ return LINEAR_SOLVER_FAILURE;
+ case CHOLMOD_DSMALL:
+ *message =
+ "CHOLMOD warning: D for LDL' or diag(L) or "
+ "LL' has tiny absolute value.";
+ return LINEAR_SOLVER_FAILURE;
+ case CHOLMOD_OK:
+ if (cholmod_status != 0) {
+ return LINEAR_SOLVER_SUCCESS;
+ }
+
+ *message =
+ "CHOLMOD failure: cholmod_factorize returned false "
+ "but cholmod_common::status is CHOLMOD_OK."
+ "Please report this to ceres-solver@googlegroups.com.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ default:
+ *message = StringPrintf(
+ "Unknown cholmod return code: %d. "
+ "Please report this to ceres-solver@googlegroups.com.",
+ cc_.status);
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ return LINEAR_SOLVER_FATAL_ERROR;
+}
+
+cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
+ cholmod_dense* b,
+ string* message) {
+ if (cc_.status != CHOLMOD_OK) {
+ *message = "cholmod_solve failed. CHOLMOD status is not CHOLMOD_OK";
+ return nullptr;
+ }
+
+ return cholmod_solve(CHOLMOD_A, L, b, &cc_);
+}
+
+bool SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix,
+ int* ordering) {
+ return cholmod_amd(matrix, nullptr, 0, ordering, &cc_);
+}
+
+bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering(
+ cholmod_sparse* matrix, int* constraints, int* ordering) {
+#ifndef CERES_NO_CAMD
+ return cholmod_camd(matrix, nullptr, 0, constraints, ordering, &cc_);
+#else
+ LOG(FATAL) << "Congratulations you have found a bug in Ceres."
+ << "Ceres Solver was compiled with SuiteSparse "
+ << "version 4.1.0 or less. Calling this function "
+ << "in that case is a bug. Please contact the"
+ << "the Ceres Solver developers.";
+ return false;
+#endif
+}
+
+std::unique_ptr<SparseCholesky> SuiteSparseCholesky::Create(
+ const OrderingType ordering_type) {
+ return std::unique_ptr<SparseCholesky>(new SuiteSparseCholesky(ordering_type));
+}
+
+SuiteSparseCholesky::SuiteSparseCholesky(const OrderingType ordering_type)
+ : ordering_type_(ordering_type), factor_(nullptr) {}
+
+SuiteSparseCholesky::~SuiteSparseCholesky() {
+ if (factor_ != nullptr) {
+ ss_.Free(factor_);
+ }
+}
+
+LinearSolverTerminationType SuiteSparseCholesky::Factorize(
+ CompressedRowSparseMatrix* lhs, string* message) {
+ if (lhs == nullptr) {
+ *message = "Failure: Input lhs is NULL.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ cholmod_sparse cholmod_lhs = ss_.CreateSparseMatrixTransposeView(lhs);
+
+ if (factor_ == nullptr) {
+ if (ordering_type_ == NATURAL) {
+ factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&cholmod_lhs, message);
+ } else {
+ if (!lhs->col_blocks().empty() && !(lhs->row_blocks().empty())) {
+ factor_ = ss_.BlockAnalyzeCholesky(
+ &cholmod_lhs, lhs->col_blocks(), lhs->row_blocks(), message);
+ } else {
+ factor_ = ss_.AnalyzeCholesky(&cholmod_lhs, message);
+ }
+ }
+
+ if (factor_ == nullptr) {
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+ }
+
+ return ss_.Cholesky(&cholmod_lhs, factor_, message);
+}
+
+CompressedRowSparseMatrix::StorageType SuiteSparseCholesky::StorageType()
+ const {
+ return ((ordering_type_ == NATURAL)
+ ? CompressedRowSparseMatrix::UPPER_TRIANGULAR
+ : CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+}
+
+LinearSolverTerminationType SuiteSparseCholesky::Solve(const double* rhs,
+ double* solution,
+ string* message) {
+ // Error checking
+ if (factor_ == nullptr) {
+ *message = "Solve called without a call to Factorize first.";
+ return LINEAR_SOLVER_FATAL_ERROR;
+ }
+
+ const int num_cols = factor_->n;
+ cholmod_dense cholmod_rhs = ss_.CreateDenseVectorView(rhs, num_cols);
+ cholmod_dense* cholmod_dense_solution =
+ ss_.Solve(factor_, &cholmod_rhs, message);
+
+ if (cholmod_dense_solution == nullptr) {
+ return LINEAR_SOLVER_FAILURE;
+ }
+
+ memcpy(solution, cholmod_dense_solution->x, num_cols * sizeof(*solution));
+ ss_.Free(cholmod_dense_solution);
+ return LINEAR_SOLVER_SUCCESS;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_SUITESPARSE
diff --git a/extern/ceres/internal/ceres/suitesparse.h b/extern/ceres/internal/ceres/suitesparse.h
index 380d76e003a..b77b296a66e 100644
--- a/extern/ceres/internal/ceres/suitesparse.h
+++ b/extern/ceres/internal/ceres/suitesparse.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -41,11 +41,11 @@
#include <cstring>
#include <string>
#include <vector>
-
+#include "SuiteSparseQR.hpp"
#include "ceres/linear_solver.h"
+#include "ceres/sparse_cholesky.h"
#include "cholmod.h"
#include "glog/logging.h"
-#include "SuiteSparseQR.hpp"
// Before SuiteSparse version 4.2.0, cholmod_camd was only enabled
// if SuiteSparse was compiled with Metis support. This makes
@@ -99,6 +99,11 @@ class SuiteSparse {
// use the SuiteSparse machinery to allocate memory.
cholmod_sparse CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+ // Create a cholmod_dense vector around the contents of the array x.
+ // This is a shallow object, which refers to the contents of x and
+ // does not use the SuiteSparse machinery to allocate memory.
+ cholmod_dense CreateDenseVectorView(const double* x, int size);
+
// Given a vector x, build a cholmod_dense vector of size out_size
// with the first in_size entries copied from x. If x is NULL, then
// an all zeros vector is returned. Caller owns the result.
@@ -197,12 +202,12 @@ class SuiteSparse {
// doing sparse direct factorization of these matrices the
// fill-reducing ordering algorithms (in particular AMD) can either
// be run on the block or the scalar form of these matrices. The two
- // SuiteSparse::AnalyzeCholesky methods allows the the client to
+ // SuiteSparse::AnalyzeCholesky methods allows the client to
// compute the symbolic factorization of a matrix by either using
// AMD on the matrix or a user provided ordering of the rows.
//
// But since the underlying matrices are block oriented, it is worth
- // running AMD on just the block structre of these matrices and then
+ // running AMD on just the block structure of these matrices and then
// lifting these block orderings to a full scalar ordering. This
// preserves the block structure of the permuted matrix, and exposes
// more of the super-nodal structure of the matrix to the numerical
@@ -278,6 +283,27 @@ class SuiteSparse {
cholmod_common cc_;
};
+class SuiteSparseCholesky : public SparseCholesky {
+ public:
+ static std::unique_ptr<SparseCholesky> Create(
+ OrderingType ordering_type);
+
+ // SparseCholesky interface.
+ virtual ~SuiteSparseCholesky();
+ CompressedRowSparseMatrix::StorageType StorageType() const final;
+ LinearSolverTerminationType Factorize(
+ CompressedRowSparseMatrix* lhs, std::string* message) final;
+ LinearSolverTerminationType Solve(const double* rhs,
+ double* solution,
+ std::string* message) final;
+ private:
+ SuiteSparseCholesky(const OrderingType ordering_type);
+
+ const OrderingType ordering_type_;
+ SuiteSparse ss_;
+ cholmod_factor* factor_;
+};
+
} // namespace internal
} // namespace ceres
@@ -285,6 +311,9 @@ class SuiteSparse {
typedef void cholmod_factor;
+namespace ceres {
+namespace internal {
+
class SuiteSparse {
public:
// Defining this static function even when SuiteSparse is not
@@ -292,7 +321,7 @@ class SuiteSparse {
// without checking for the absence of the CERES_NO_CAMD symbol.
//
// This is safer because the symbol maybe missing due to a user
- // accidently not including suitesparse.h in their code when
+ // accidentally not including suitesparse.h in their code when
// checking for the symbol.
static bool IsConstrainedApproximateMinimumDegreeOrderingAvailable() {
return false;
@@ -301,6 +330,9 @@ class SuiteSparse {
void Free(void* arg) {}
};
+} // namespace internal
+} // namespace ceres
+
#endif // CERES_NO_SUITESPARSE
#endif // CERES_INTERNAL_SUITESPARSE_H_
diff --git a/extern/ceres/internal/ceres/thread_pool.cc b/extern/ceres/internal/ceres/thread_pool.cc
new file mode 100644
index 00000000000..5a52c9d06a6
--- /dev/null
+++ b/extern/ceres/internal/ceres/thread_pool.cc
@@ -0,0 +1,116 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+// This include must come before any #ifndef check on Ceres compile options.
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX_THREADS
+
+#include "ceres/thread_pool.h"
+
+#include <cmath>
+#include <limits>
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Constrain the total number of threads to the amount the hardware can support.
+int GetNumAllowedThreads(int requested_num_threads) {
+ return std::min(requested_num_threads, ThreadPool::MaxNumThreadsAvailable());
+}
+
+} // namespace
+
+int ThreadPool::MaxNumThreadsAvailable() {
+ const int num_hardware_threads = std::thread::hardware_concurrency();
+ // hardware_concurrency() can return 0 if the value is not well defined or not
+ // computable.
+ return num_hardware_threads == 0
+ ? std::numeric_limits<int>::max()
+ : num_hardware_threads;
+}
+
+ThreadPool::ThreadPool() { }
+
+ThreadPool::ThreadPool(int num_threads) {
+ Resize(num_threads);
+}
+
+ThreadPool::~ThreadPool() {
+ std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+ // Signal the thread workers to stop and wait for them to finish all scheduled
+ // tasks.
+ Stop();
+ for (std::thread& thread : thread_pool_) {
+ thread.join();
+ }
+}
+
+void ThreadPool::Resize(int num_threads) {
+ std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+
+ const int num_current_threads = thread_pool_.size();
+ if (num_current_threads >= num_threads) {
+ return;
+ }
+
+ const int create_num_threads =
+ GetNumAllowedThreads(num_threads) - num_current_threads;
+
+ for (int i = 0; i < create_num_threads; ++i) {
+ thread_pool_.push_back(std::thread(&ThreadPool::ThreadMainLoop, this));
+ }
+}
+
+void ThreadPool::AddTask(const std::function<void()>& func) {
+ task_queue_.Push(func);
+}
+
+int ThreadPool::Size() {
+ std::lock_guard<std::mutex> lock(thread_pool_mutex_);
+ return thread_pool_.size();
+}
+
+void ThreadPool::ThreadMainLoop() {
+ std::function<void()> task;
+ while (task_queue_.Wait(&task)) {
+ task();
+ }
+}
+
+void ThreadPool::Stop() {
+ task_queue_.StopWaiters();
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_USE_CXX_THREADS
diff --git a/extern/ceres/internal/ceres/thread_pool.h b/extern/ceres/internal/ceres/thread_pool.h
new file mode 100644
index 00000000000..1ebb52eb6b4
--- /dev/null
+++ b/extern/ceres/internal/ceres/thread_pool.h
@@ -0,0 +1,120 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2018 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vitus@google.com (Michael Vitus)
+
+#ifndef CERES_INTERNAL_THREAD_POOL_H_
+#define CERES_INTERNAL_THREAD_POOL_H_
+
+#include <functional>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+#include "ceres/concurrent_queue.h"
+
+namespace ceres {
+namespace internal {
+
+// A thread-safe thread pool with an unbounded task queue and a resizable number
+// of workers. The size of the thread pool can be increased but never decreased
+// in order to support the largest number of threads requested. The ThreadPool
+// has three states:
+//
+// (1) The thread pool size is zero. Tasks may be added to the thread pool via
+// AddTask but they will not be executed until the thread pool is resized.
+//
+// (2) The thread pool size is greater than zero. Tasks may be added to the
+// thread pool and will be executed as soon as a worker is available. The
+// thread pool may be resized while the thread pool is running.
+//
+// (3) The thread pool is destructing. The thread pool will signal all the
+// workers to stop. The workers will finish all of the tasks that have already
+// been added to the thread pool.
+//
+class ThreadPool {
+ public:
+ // Returns the maximum number of hardware threads.
+ static int MaxNumThreadsAvailable();
+
+ // Default constructor with no active threads. We allow instantiating a
+ // thread pool with no threads to support the use case of single threaded
+ // Ceres where everything will be executed on the main thread. For single
+ // threaded execution this has two benefits: avoid any overhead as threads
+ // are expensive to create, and no unused threads shown in the debugger.
+ ThreadPool();
+
+ // Instantiates a thread pool with min(MaxNumThreadsAvailable, num_threads)
+ // number of threads.
+ explicit ThreadPool(int num_threads);
+
+ // Signals the workers to stop and waits for them to finish any tasks that
+ // have been scheduled.
+ ~ThreadPool();
+
+ // Resizes the thread pool if it is currently less than the requested number
+ // of threads. The thread pool will be resized to min(MaxNumThreadsAvailable,
+ // num_threads) number of threads. Resize does not support reducing the
+ // thread pool size. If a smaller number of threads is requested, the thread
+ // pool remains the same size. The thread pool is reused within Ceres with
+ // different number of threads, and we need to ensure we can support the
+ // largest number of threads requested. It is safe to resize the thread pool
+ // while the workers are executing tasks, and the resizing is guaranteed to
+ // complete upon return.
+ void Resize(int num_threads);
+
+ // Adds a task to the queue and wakes up a blocked thread. If the thread pool
+ // size is greater than zero, then the task will be executed by a currently
+ // idle thread or when a thread becomes available. If the thread pool has no
+ // threads, then the task will never be executed and the user should use
+ // Resize() to create a non-empty thread pool.
+ void AddTask(const std::function<void()>& func);
+
+ // Returns the current size of the thread pool.
+ int Size();
+
+ private:
+ // Main loop for the threads which blocks on the task queue until work becomes
+ // available. It will return if and only if Stop has been called.
+ void ThreadMainLoop();
+
+ // Signal all the threads to stop. It does not block until the threads are
+ // finished.
+ void Stop();
+
+ // The queue that stores the units of work available for the thread pool. The
+ // task queue maintains its own thread safety.
+ ConcurrentQueue<std::function<void()>> task_queue_;
+ std::vector<std::thread> thread_pool_;
+ std::mutex thread_pool_mutex_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_THREAD_POOL_H_
diff --git a/extern/ceres/internal/ceres/thread_token_provider.cc b/extern/ceres/internal/ceres/thread_token_provider.cc
new file mode 100644
index 00000000000..b04cf844488
--- /dev/null
+++ b/extern/ceres/internal/ceres/thread_token_provider.cc
@@ -0,0 +1,76 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#include "ceres/thread_token_provider.h"
+
+#ifdef CERES_USE_OPENMP
+#include <omp.h>
+#endif
+
+namespace ceres {
+namespace internal {
+
+ThreadTokenProvider::ThreadTokenProvider(int num_threads) {
+ (void)num_threads;
+#ifdef CERES_USE_CXX_THREADS
+ for (int i = 0; i < num_threads; i++) {
+ pool_.Push(i);
+ }
+#endif
+
+}
+
+int ThreadTokenProvider::Acquire() {
+#ifdef CERES_USE_OPENMP
+ return omp_get_thread_num();
+#endif
+
+#ifdef CERES_NO_THREADS
+ return 0;
+#endif
+
+#ifdef CERES_USE_CXX_THREADS
+ int thread_id;
+ CHECK(pool_.Wait(&thread_id));
+ return thread_id;
+#endif
+
+}
+
+void ThreadTokenProvider::Release(int thread_id) {
+ (void)thread_id;
+#ifdef CERES_USE_CXX_THREADS
+ pool_.Push(thread_id);
+#endif
+
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/thread_token_provider.h b/extern/ceres/internal/ceres/thread_token_provider.h
new file mode 100644
index 00000000000..06dc0438572
--- /dev/null
+++ b/extern/ceres/internal/ceres/thread_token_provider.h
@@ -0,0 +1,97 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2017 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: yp@photonscore.de (Yury Prokazov)
+
+#ifndef CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
+#define CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
+
+#include "ceres/internal/config.h"
+#include "ceres/internal/port.h"
+
+#ifdef CERES_USE_CXX_THREADS
+#include "ceres/concurrent_queue.h"
+#endif
+
+namespace ceres {
+namespace internal {
+
+// Helper for C++ thread number identification that is similar to
+// omp_get_thread_num() behaviour. This is necessary to support C++
+// threading with a sequential thread id. This is used to access preallocated
+// resources in the parallelized code parts. The sequence of tokens varies from
+// 0 to num_threads - 1 that can be acquired to identify the thread in a thread
+// pool.
+//
+// If CERES_NO_THREADS is defined, Acquire() always returns 0 and Release()
+// takes no action.
+//
+// If CERES_USE_OPENMP, omp_get_thread_num() is used to Acquire() with no action
+// in Release()
+//
+//
+// Example usage pseudocode:
+//
+// ThreadTokenProvider ttp(N); // allocate N tokens
+// Spawn N threads {
+// int token = ttp.Acquire(); // get unique token
+// ...
+// ... use token to access resources bound to the thread
+// ...
+// ttp.Release(token); // return token to the pool
+// }
+//
+class ThreadTokenProvider {
+ public:
+ ThreadTokenProvider(int num_threads);
+
+ // Returns the first token from the queue. The acquired value must be
+ // given back by Release().
+ int Acquire();
+
+ // Makes previously acquired token available for other threads.
+ void Release(int thread_id);
+
+ private:
+#ifdef CERES_USE_CXX_THREADS
+  // This queue initially holds the sequence 0..num_threads-1. On every
+  // Acquire() call the first number is removed from it. When the token is not
+ // needed anymore it shall be given back with corresponding Release()
+ // call. This concurrent queue is more expensive than TBB's version, so you
+ // should not acquire the thread ID on every for loop iteration.
+ ConcurrentQueue<int> pool_;
+#endif
+
+ ThreadTokenProvider(ThreadTokenProvider&);
+ ThreadTokenProvider& operator=(ThreadTokenProvider&);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_THREAD_TOKEN_PROVIDER_H_
diff --git a/extern/ceres/internal/ceres/triplet_sparse_matrix.cc b/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
index 8df405ca115..54b588ba466 100644
--- a/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/triplet_sparse_matrix.cc
@@ -32,9 +32,10 @@
#include <algorithm>
#include <cstddef>
+
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/random.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -45,10 +46,8 @@ TripletSparseMatrix::TripletSparseMatrix()
: num_rows_(0),
num_cols_(0),
max_num_nonzeros_(0),
- num_nonzeros_(0),
- rows_(NULL),
- cols_(NULL),
- values_(NULL) {}
+ num_nonzeros_(0) {}
+
TripletSparseMatrix::~TripletSparseMatrix() {}
@@ -58,10 +57,7 @@ TripletSparseMatrix::TripletSparseMatrix(int num_rows,
: num_rows_(num_rows),
num_cols_(num_cols),
max_num_nonzeros_(max_num_nonzeros),
- num_nonzeros_(0),
- rows_(NULL),
- cols_(NULL),
- values_(NULL) {
+ num_nonzeros_(0) {
// All the sizes should at least be zero
CHECK_GE(num_rows, 0);
CHECK_GE(num_cols, 0);
@@ -69,21 +65,41 @@ TripletSparseMatrix::TripletSparseMatrix(int num_rows,
AllocateMemory();
}
+TripletSparseMatrix::TripletSparseMatrix(const int num_rows,
+ const int num_cols,
+ const std::vector<int>& rows,
+ const std::vector<int>& cols,
+ const std::vector<double>& values)
+ : num_rows_(num_rows),
+ num_cols_(num_cols),
+ max_num_nonzeros_(values.size()),
+ num_nonzeros_(values.size()) {
+ // All the sizes should at least be zero
+ CHECK_GE(num_rows, 0);
+ CHECK_GE(num_cols, 0);
+ CHECK_EQ(rows.size(), cols.size());
+ CHECK_EQ(rows.size(), values.size());
+ AllocateMemory();
+ std::copy(rows.begin(), rows.end(), rows_.get());
+ std::copy(cols.begin(), cols.end(), cols_.get());
+ std::copy(values.begin(), values.end(), values_.get());
+}
+
TripletSparseMatrix::TripletSparseMatrix(const TripletSparseMatrix& orig)
: SparseMatrix(),
num_rows_(orig.num_rows_),
num_cols_(orig.num_cols_),
max_num_nonzeros_(orig.max_num_nonzeros_),
- num_nonzeros_(orig.num_nonzeros_),
- rows_(NULL),
- cols_(NULL),
- values_(NULL) {
+ num_nonzeros_(orig.num_nonzeros_) {
AllocateMemory();
CopyData(orig);
}
TripletSparseMatrix& TripletSparseMatrix::operator=(
const TripletSparseMatrix& rhs) {
+ if (this == &rhs) {
+ return *this;
+ }
num_rows_ = rhs.num_rows_;
num_cols_ = rhs.num_cols_;
num_nonzeros_ = rhs.num_nonzeros_;
@@ -165,7 +181,7 @@ void TripletSparseMatrix::LeftMultiply(const double* x, double* y) const {
}
void TripletSparseMatrix::SquaredColumnNorm(double* x) const {
- CHECK_NOTNULL(x);
+ CHECK(x != nullptr);
VectorRef(x, num_cols_).setZero();
for (int i = 0; i < num_nonzeros_; ++i) {
x[cols_[i]] += values_[i] * values_[i];
@@ -173,7 +189,7 @@ void TripletSparseMatrix::SquaredColumnNorm(double* x) const {
}
void TripletSparseMatrix::ScaleColumns(const double* scale) {
- CHECK_NOTNULL(scale);
+ CHECK(scale != nullptr);
for (int i = 0; i < num_nonzeros_; ++i) {
values_[i] = values_[i] * scale[cols_[i]];
}
@@ -254,11 +270,40 @@ TripletSparseMatrix* TripletSparseMatrix::CreateSparseDiagonalMatrix(
}
void TripletSparseMatrix::ToTextFile(FILE* file) const {
- CHECK_NOTNULL(file);
+ CHECK(file != nullptr);
for (int i = 0; i < num_nonzeros_; ++i) {
fprintf(file, "% 10d % 10d %17f\n", rows_[i], cols_[i], values_[i]);
}
}
+TripletSparseMatrix* TripletSparseMatrix::CreateRandomMatrix(
+ const TripletSparseMatrix::RandomMatrixOptions& options) {
+ CHECK_GT(options.num_rows, 0);
+ CHECK_GT(options.num_cols, 0);
+ CHECK_GT(options.density, 0.0);
+ CHECK_LE(options.density, 1.0);
+
+ std::vector<int> rows;
+ std::vector<int> cols;
+ std::vector<double> values;
+ while (rows.empty()) {
+ rows.clear();
+ cols.clear();
+ values.clear();
+ for (int r = 0; r < options.num_rows; ++r) {
+ for (int c = 0; c < options.num_cols; ++c) {
+ if (RandDouble() <= options.density) {
+ rows.push_back(r);
+ cols.push_back(c);
+ values.push_back(RandNormal());
+ }
+ }
+ }
+ }
+
+ return new TripletSparseMatrix(
+ options.num_rows, options.num_cols, rows, cols, values);
+}
+
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/triplet_sparse_matrix.h b/extern/ceres/internal/ceres/triplet_sparse_matrix.h
index f3f5370df6f..2ee0fa992b5 100644
--- a/extern/ceres/internal/ceres/triplet_sparse_matrix.h
+++ b/extern/ceres/internal/ceres/triplet_sparse_matrix.h
@@ -31,9 +31,10 @@
#ifndef CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
#define CERES_INTERNAL_TRIPLET_SPARSE_MATRIX_H_
+#include <memory>
+#include <vector>
#include "ceres/sparse_matrix.h"
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
namespace ceres {
@@ -47,26 +48,32 @@ class TripletSparseMatrix : public SparseMatrix {
public:
TripletSparseMatrix();
TripletSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
+ TripletSparseMatrix(int num_rows,
+ int num_cols,
+ const std::vector<int>& rows,
+ const std::vector<int>& cols,
+ const std::vector<double>& values);
+
explicit TripletSparseMatrix(const TripletSparseMatrix& orig);
TripletSparseMatrix& operator=(const TripletSparseMatrix& rhs);
- ~TripletSparseMatrix();
+ virtual ~TripletSparseMatrix();
// Implementation of the SparseMatrix interface.
- virtual void SetZero();
- virtual void RightMultiply(const double* x, double* y) const;
- virtual void LeftMultiply(const double* x, double* y) const;
- virtual void SquaredColumnNorm(double* x) const;
- virtual void ScaleColumns(const double* scale);
- virtual void ToDenseMatrix(Matrix* dense_matrix) const;
- virtual void ToTextFile(FILE* file) const;
- virtual int num_rows() const { return num_rows_; }
- virtual int num_cols() const { return num_cols_; }
- virtual int num_nonzeros() const { return num_nonzeros_; }
- virtual const double* values() const { return values_.get(); }
- virtual double* mutable_values() { return values_.get(); }
- virtual void set_num_nonzeros(int num_nonzeros);
+ void SetZero() final;
+ void RightMultiply(const double* x, double* y) const final;
+ void LeftMultiply(const double* x, double* y) const final;
+ void SquaredColumnNorm(double* x) const final;
+ void ScaleColumns(const double* scale) final;
+ void ToDenseMatrix(Matrix* dense_matrix) const final;
+ void ToTextFile(FILE* file) const final;
+ int num_rows() const final { return num_rows_; }
+ int num_cols() const final { return num_cols_; }
+ int num_nonzeros() const final { return num_nonzeros_; }
+ const double* values() const final { return values_.get(); }
+ double* mutable_values() final { return values_.get(); }
+ void set_num_nonzeros(int num_nonzeros);
// Increase max_num_nonzeros and correspondingly increase the size
// of rows_, cols_ and values_. If new_max_num_nonzeros is smaller
@@ -105,6 +112,25 @@ class TripletSparseMatrix : public SparseMatrix {
static TripletSparseMatrix* CreateSparseDiagonalMatrix(const double* values,
int num_rows);
+ // Options struct to control the generation of random
+ // TripletSparseMatrix objects.
+ struct RandomMatrixOptions {
+ int num_rows;
+ int num_cols;
+ // 0 < density <= 1 is the probability of an entry being
+ // structurally non-zero. A given random matrix will not have
+ // precisely this density.
+ double density;
+ };
+
+ // Create a random CompressedRowSparseMatrix whose entries are
+ // normally distributed and whose structure is determined by
+ // RandomMatrixOptions.
+ //
+ // Caller owns the result.
+ static TripletSparseMatrix* CreateRandomMatrix(
+ const TripletSparseMatrix::RandomMatrixOptions& options);
+
private:
void AllocateMemory();
void CopyData(const TripletSparseMatrix& orig);
@@ -118,9 +144,9 @@ class TripletSparseMatrix : public SparseMatrix {
// stored at the location (rows_[i], cols_[i]). If the there are
// multiple entries with the same (rows_[i], cols_[i]), the values_
// entries corresponding to them are summed up.
- scoped_array<int> rows_;
- scoped_array<int> cols_;
- scoped_array<double> values_;
+ std::unique_ptr<int[]> rows_;
+ std::unique_ptr<int[]> cols_;
+ std::unique_ptr<double[]> values_;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/trust_region_minimizer.cc b/extern/ceres/internal/ceres/trust_region_minimizer.cc
index d809906ab54..7065977ad77 100644
--- a/extern/ceres/internal/ceres/trust_region_minimizer.cc
+++ b/extern/ceres/internal/ceres/trust_region_minimizer.cc
@@ -35,6 +35,7 @@
#include <cstdlib>
#include <cstring>
#include <limits>
+#include <memory>
#include <string>
#include <vector>
@@ -92,7 +93,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
continue;
}
- if (options_.is_constrained) {
+ if (options_.is_constrained &&
+ options_.max_num_line_search_step_size_iterations > 0) {
// Use a projected line search to enforce the bounds constraints
// and improve the quality of the step.
DoLineSearch(x_, gradient_, x_cost_, &delta_);
@@ -135,13 +137,16 @@ void TrustRegionMinimizer::Init(const Minimizer::Options& options,
solver_summary_->num_unsuccessful_steps = 0;
solver_summary_->is_constrained = options.is_constrained;
- evaluator_ = CHECK_NOTNULL(options_.evaluator.get());
- jacobian_ = CHECK_NOTNULL(options_.jacobian.get());
- strategy_ = CHECK_NOTNULL(options_.trust_region_strategy.get());
+ CHECK(options_.evaluator != nullptr);
+ CHECK(options_.jacobian != nullptr);
+ CHECK(options_.trust_region_strategy != nullptr);
+ evaluator_ = options_.evaluator.get();
+ jacobian_ = options_.jacobian.get();
+ strategy_ = options_.trust_region_strategy.get();
is_not_silent_ = !options.is_silent;
inner_iterations_are_enabled_ =
- options.inner_iteration_minimizer.get() != NULL;
+ options.inner_iteration_minimizer.get() != nullptr;
inner_iterations_were_useful_ = false;
num_parameters_ = evaluator_->NumParameters();
@@ -201,7 +206,7 @@ bool TrustRegionMinimizer::IterationZero() {
x_norm_ = x_.norm();
}
- if (!EvaluateGradientAndJacobian()) {
+ if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/true)) {
return false;
}
@@ -223,8 +228,12 @@ bool TrustRegionMinimizer::IterationZero() {
// Returns true if all computations could be performed
// successfully. Any failures are considered fatal and the
// Solver::Summary is updated to indicate this.
-bool TrustRegionMinimizer::EvaluateGradientAndJacobian() {
- if (!evaluator_->Evaluate(x_.data(),
+bool TrustRegionMinimizer::EvaluateGradientAndJacobian(
+ bool new_evaluation_point) {
+ Evaluator::EvaluateOptions evaluate_options;
+ evaluate_options.new_evaluation_point = new_evaluation_point;
+ if (!evaluator_->Evaluate(evaluate_options,
+ x_.data(),
&x_cost_,
residuals_.data(),
gradient_.data(),
@@ -482,8 +491,11 @@ void TrustRegionMinimizer::DoInnerIterationsIfNeeded() {
options_.inner_iteration_minimizer->Minimize(
options_, inner_iteration_x_.data(), &inner_iteration_summary);
double inner_iteration_cost;
- if (!evaluator_->Evaluate(
- inner_iteration_x_.data(), &inner_iteration_cost, NULL, NULL, NULL)) {
+ if (!evaluator_->Evaluate(inner_iteration_x_.data(),
+ &inner_iteration_cost,
+ nullptr,
+ nullptr,
+ nullptr)) {
VLOG_IF(2, is_not_silent_) << "Inner iteration failed.";
return;
}
@@ -569,8 +581,8 @@ void TrustRegionMinimizer::DoLineSearch(const Vector& x,
line_search_options.function = &line_search_function;
std::string message;
- scoped_ptr<LineSearch> line_search(CHECK_NOTNULL(
- LineSearch::Create(ceres::ARMIJO, line_search_options, &message)));
+ std::unique_ptr<LineSearch> line_search(
+ LineSearch::Create(ceres::ARMIJO, line_search_options, &message));
LineSearch::Summary line_search_summary;
line_search_function.Init(x, *delta);
line_search->Search(1.0, cost, gradient.dot(*delta), &line_search_summary);
@@ -586,7 +598,7 @@ void TrustRegionMinimizer::DoLineSearch(const Vector& x,
line_search_summary.total_time_in_seconds;
if (line_search_summary.success) {
- *delta *= line_search_summary.optimal_step_size;
+ *delta *= line_search_summary.optimal_point.x;
}
}
@@ -601,10 +613,11 @@ bool TrustRegionMinimizer::MaxSolverTimeReached() {
return false;
}
- solver_summary_->message = StringPrintf("Maximum solver time reached. "
- "Total solver time: %e >= %e.",
- total_solver_time,
- options_.max_solver_time_in_seconds);
+ solver_summary_->message = StringPrintf(
+ "Maximum solver time reached. "
+ "Total solver time: %e >= %e.",
+ total_solver_time,
+ options_.max_solver_time_in_seconds);
solver_summary_->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
return true;
@@ -618,10 +631,10 @@ bool TrustRegionMinimizer::MaxSolverIterationsReached() {
return false;
}
- solver_summary_->message =
- StringPrintf("Maximum number of iterations reached. "
- "Number of iterations: %d.",
- iteration_summary_.iteration);
+ solver_summary_->message = StringPrintf(
+ "Maximum number of iterations reached. "
+ "Number of iterations: %d.",
+ iteration_summary_.iteration);
solver_summary_->termination_type = NO_CONVERGENCE;
VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
@@ -653,11 +666,11 @@ bool TrustRegionMinimizer::MinTrustRegionRadiusReached() {
return false;
}
- solver_summary_->message =
- StringPrintf("Minimum trust region radius reached. "
- "Trust region radius: %e <= %e",
- iteration_summary_.trust_region_radius,
- options_.min_trust_region_radius);
+ solver_summary_->message = StringPrintf(
+ "Minimum trust region radius reached. "
+ "Trust region radius: %e <= %e",
+ iteration_summary_.trust_region_radius,
+ options_.min_trust_region_radius);
solver_summary_->termination_type = CONVERGENCE;
VLOG_IF(1, is_not_silent_) << "Terminating: " << solver_summary_->message;
return true;
@@ -725,7 +738,7 @@ void TrustRegionMinimizer::ComputeCandidatePointAndEvaluateCost() {
}
if (!evaluator_->Evaluate(
- candidate_x_.data(), &candidate_cost_, NULL, NULL, NULL)) {
+ candidate_x_.data(), &candidate_cost_, nullptr, nullptr, nullptr)) {
LOG_IF(WARNING, is_not_silent_)
<< "Step failed to evaluate. "
<< "Treating it as a step with infinite cost";
@@ -746,7 +759,7 @@ bool TrustRegionMinimizer::IsStepSuccessful() {
// small.
//
// This can cause the trust region loop to reject this step. To
- // get around this, we expicitly check if the inner iterations
+ // get around this, we explicitly check if the inner iterations
// led to a net decrease in the objective function value. If
// they did, we accept the step even if the trust region ratio
// is small.
@@ -768,7 +781,9 @@ bool TrustRegionMinimizer::HandleSuccessfulStep() {
x_ = candidate_x_;
x_norm_ = x_.norm();
- if (!EvaluateGradientAndJacobian()) {
+ // Since the step was successful, this point has already had the residual
+ // evaluated (but not the jacobian). So indicate that to the evaluator.
+ if (!EvaluateGradientAndJacobian(/*new_evaluation_point=*/false)) {
return false;
}
diff --git a/extern/ceres/internal/ceres/trust_region_minimizer.h b/extern/ceres/internal/ceres/trust_region_minimizer.h
index 43141da58a1..b5c41225ddc 100644
--- a/extern/ceres/internal/ceres/trust_region_minimizer.h
+++ b/extern/ceres/internal/ceres/trust_region_minimizer.h
@@ -31,8 +31,8 @@
#ifndef CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
#define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+#include <memory>
#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/minimizer.h"
#include "ceres/solver.h"
#include "ceres/sparse_matrix.h"
@@ -51,9 +51,9 @@ class TrustRegionMinimizer : public Minimizer {
~TrustRegionMinimizer();
// This method is not thread safe.
- virtual void Minimize(const Minimizer::Options& options,
- double* parameters,
- Solver::Summary* solver_summary);
+ void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* solver_summary) override;
private:
void Init(const Minimizer::Options& options,
@@ -63,7 +63,7 @@ class TrustRegionMinimizer : public Minimizer {
bool FinalizeIterationAndCheckIfMinimizerCanContinue();
bool ComputeTrustRegionStep();
- bool EvaluateGradientAndJacobian();
+ bool EvaluateGradientAndJacobian(bool new_evaluation_point);
void ComputeCandidatePointAndEvaluateCost();
void DoLineSearch(const Vector& x,
@@ -94,7 +94,7 @@ class TrustRegionMinimizer : public Minimizer {
SparseMatrix* jacobian_;
TrustRegionStrategy* strategy_;
- scoped_ptr<TrustRegionStepEvaluator> step_evaluator_;
+ std::unique_ptr<TrustRegionStepEvaluator> step_evaluator_;
bool is_not_silent_;
bool inner_iterations_are_enabled_;
diff --git a/extern/ceres/internal/ceres/trust_region_preprocessor.cc b/extern/ceres/internal/ceres/trust_region_preprocessor.cc
index 4020e4ca115..b8c6b49d1ca 100644
--- a/extern/ceres/internal/ceres/trust_region_preprocessor.cc
+++ b/extern/ceres/internal/ceres/trust_region_preprocessor.cc
@@ -32,7 +32,9 @@
#include <numeric>
#include <string>
+
#include "ceres/callbacks.h"
+#include "ceres/context_impl.h"
#include "ceres/evaluator.h"
#include "ceres/linear_solver.h"
#include "ceres/minimizer.h"
@@ -56,8 +58,7 @@ namespace {
ParameterBlockOrdering* CreateDefaultLinearSolverOrdering(
const Program& program) {
ParameterBlockOrdering* ordering = new ParameterBlockOrdering;
- const vector<ParameterBlock*>& parameter_blocks =
- program.parameter_blocks();
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
for (int i = 0; i < parameter_blocks.size(); ++i) {
ordering->AddElementToGroup(
const_cast<double*>(parameter_blocks[i]->user_state()), 0);
@@ -68,8 +69,7 @@ ParameterBlockOrdering* CreateDefaultLinearSolverOrdering(
// Check if all the user supplied values in the parameter blocks are
// sane or not, and if the program is feasible or not.
bool IsProgramValid(const Program& program, std::string* error) {
- return (program.ParameterBlocksAreFinite(error) &&
- program.IsFeasible(error));
+ return (program.ParameterBlocksAreFinite(error) && program.IsFeasible(error));
}
void AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
@@ -81,36 +81,33 @@ void AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
const LinearSolverType linear_solver_type_given = options->linear_solver_type;
const PreconditionerType preconditioner_type_given =
options->preconditioner_type;
- options->linear_solver_type = LinearSolver::LinearSolverForZeroEBlocks(
- linear_solver_type_given);
+ options->linear_solver_type =
+ LinearSolver::LinearSolverForZeroEBlocks(linear_solver_type_given);
std::string message;
if (linear_solver_type_given == ITERATIVE_SCHUR) {
- options->preconditioner_type = Preconditioner::PreconditionerForZeroEBlocks(
- preconditioner_type_given);
+ options->preconditioner_type =
+ Preconditioner::PreconditionerForZeroEBlocks(preconditioner_type_given);
message =
- StringPrintf(
- "No E blocks. Switching from %s(%s) to %s(%s).",
- LinearSolverTypeToString(linear_solver_type_given),
- PreconditionerTypeToString(preconditioner_type_given),
- LinearSolverTypeToString(options->linear_solver_type),
- PreconditionerTypeToString(options->preconditioner_type));
+ StringPrintf("No E blocks. Switching from %s(%s) to %s(%s).",
+ LinearSolverTypeToString(linear_solver_type_given),
+ PreconditionerTypeToString(preconditioner_type_given),
+ LinearSolverTypeToString(options->linear_solver_type),
+ PreconditionerTypeToString(options->preconditioner_type));
} else {
message =
- StringPrintf(
- "No E blocks. Switching from %s to %s.",
- LinearSolverTypeToString(linear_solver_type_given),
- LinearSolverTypeToString(options->linear_solver_type));
+ StringPrintf("No E blocks. Switching from %s to %s.",
+ LinearSolverTypeToString(linear_solver_type_given),
+ LinearSolverTypeToString(options->linear_solver_type));
}
VLOG_IF(1, options->logging_type != SILENT) << message;
}
-// For Schur type and SPARSE_NORMAL_CHOLESKY linear solvers, reorder
-// the program to reduce fill-in and increase cache coherency.
+// Reorder the program to reduce fill-in and increase cache coherency.
bool ReorderProgram(PreprocessedProblem* pp) {
- Solver::Options& options = pp->options;
+ const Solver::Options& options = pp->options;
if (IsSchurType(options.linear_solver_type)) {
return ReorderProgramForSchurTypeLinearSolver(
options.linear_solver_type,
@@ -123,9 +120,25 @@ bool ReorderProgram(PreprocessedProblem* pp) {
if (options.linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
!options.dynamic_sparsity) {
- return ReorderProgramForSparseNormalCholesky(
+ return ReorderProgramForSparseCholesky(
options.sparse_linear_algebra_library_type,
*options.linear_solver_ordering,
+ 0, /* use all the rows of the jacobian */
+ pp->reduced_program.get(),
+ &pp->error);
+ }
+
+ if (options.linear_solver_type == CGNR &&
+ options.preconditioner_type == SUBSET) {
+ pp->linear_solver_options.subset_preconditioner_start_row_block =
+ ReorderResidualBlocksByPartition(
+ options.residual_blocks_for_subset_preconditioner,
+ pp->reduced_program.get());
+
+ return ReorderProgramForSparseCholesky(
+ options.sparse_linear_algebra_library_type,
+ *options.linear_solver_ordering,
+ pp->linear_solver_options.subset_preconditioner_start_row_block,
pp->reduced_program.get(),
&pp->error);
}
@@ -139,7 +152,9 @@ bool ReorderProgram(PreprocessedProblem* pp) {
// too.
bool SetupLinearSolver(PreprocessedProblem* pp) {
Solver::Options& options = pp->options;
- if (options.linear_solver_ordering.get() == NULL) {
+ pp->linear_solver_options = LinearSolver::Options();
+
+ if (!options.linear_solver_ordering) {
// If the user has not supplied a linear solver ordering, then we
// assume that they are giving all the freedom to us in choosing
// the best possible ordering. This intent can be indicated by
@@ -163,8 +178,7 @@ bool SetupLinearSolver(PreprocessedProblem* pp) {
ordering->Remove(pp->removed_parameter_blocks);
if (IsSchurType(options.linear_solver_type) &&
min_group_id != ordering->MinNonZeroGroup()) {
- AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(
- &options);
+ AlternateLinearSolverAndPreconditionerForSchurTypeLinearSolver(&options);
}
}
@@ -175,7 +189,6 @@ bool SetupLinearSolver(PreprocessedProblem* pp) {
}
// Configure the linear solver.
- pp->linear_solver_options = LinearSolver::Options();
pp->linear_solver_options.min_num_iterations =
options.min_linear_solver_iterations;
pp->linear_solver_options.max_num_iterations =
@@ -191,33 +204,50 @@ bool SetupLinearSolver(PreprocessedProblem* pp) {
pp->linear_solver_options.use_explicit_schur_complement =
options.use_explicit_schur_complement;
pp->linear_solver_options.dynamic_sparsity = options.dynamic_sparsity;
- pp->linear_solver_options.num_threads = options.num_linear_solver_threads;
-
- // Ignore user's postordering preferences and force it to be true if
- // cholmod_camd is not available. This ensures that the linear
- // solver does not assume that a fill-reducing pre-ordering has been
- // done.
+ pp->linear_solver_options.use_mixed_precision_solves =
+ options.use_mixed_precision_solves;
+ pp->linear_solver_options.max_num_refinement_iterations =
+ options.max_num_refinement_iterations;
+ pp->linear_solver_options.num_threads = options.num_threads;
pp->linear_solver_options.use_postordering = options.use_postordering;
- if (options.linear_solver_type == SPARSE_SCHUR &&
- options.sparse_linear_algebra_library_type == SUITE_SPARSE &&
- !SuiteSparse::IsConstrainedApproximateMinimumDegreeOrderingAvailable()) {
- pp->linear_solver_options.use_postordering = true;
- }
-
- OrderingToGroupSizes(options.linear_solver_ordering.get(),
- &pp->linear_solver_options.elimination_groups);
+ pp->linear_solver_options.context = pp->problem->context();
+
+ if (IsSchurType(pp->linear_solver_options.type)) {
+ OrderingToGroupSizes(options.linear_solver_ordering.get(),
+ &pp->linear_solver_options.elimination_groups);
+
+ // Schur type solvers expect at least two elimination groups. If
+ // there is only one elimination group, then it is guaranteed that
+ // this group only contains e_blocks. Thus we add a dummy
+ // elimination group with zero blocks in it.
+ if (pp->linear_solver_options.elimination_groups.size() == 1) {
+ pp->linear_solver_options.elimination_groups.push_back(0);
+ }
- // Schur type solvers expect at least two elimination groups. If
- // there is only one elimination group, then it is guaranteed that
- // this group only contains e_blocks. Thus we add a dummy
- // elimination group with zero blocks in it.
- if (IsSchurType(pp->linear_solver_options.type) &&
- pp->linear_solver_options.elimination_groups.size() == 1) {
- pp->linear_solver_options.elimination_groups.push_back(0);
+ if (options.linear_solver_type == SPARSE_SCHUR) {
+ // When using SPARSE_SCHUR, we ignore the user's postordering
+ // preferences in certain cases.
+ //
+ // 1. SUITE_SPARSE is the sparse linear algebra library requested
+ // but cholmod_camd is not available.
+ // 2. CX_SPARSE is the sparse linear algebra library requested.
+ //
+ // This ensures that the linear solver does not assume that a
+ // fill-reducing pre-ordering has been done.
+ //
+ // TODO(sameeragarwal): Implement the reordering of parameter
+ // blocks for CX_SPARSE.
+ if ((options.sparse_linear_algebra_library_type == SUITE_SPARSE &&
+ !SuiteSparse::
+ IsConstrainedApproximateMinimumDegreeOrderingAvailable()) ||
+ (options.sparse_linear_algebra_library_type == CX_SPARSE)) {
+ pp->linear_solver_options.use_postordering = true;
+ }
+ }
}
pp->linear_solver.reset(LinearSolver::Create(pp->linear_solver_options));
- return (pp->linear_solver.get() != NULL);
+ return (pp->linear_solver != nullptr);
}
// Configure and create the evaluator.
@@ -228,19 +258,20 @@ bool SetupEvaluator(PreprocessedProblem* pp) {
pp->evaluator_options.num_eliminate_blocks = 0;
if (IsSchurType(options.linear_solver_type)) {
pp->evaluator_options.num_eliminate_blocks =
- options
- .linear_solver_ordering
- ->group_to_elements().begin()
- ->second.size();
+ options.linear_solver_ordering->group_to_elements()
+ .begin()
+ ->second.size();
}
pp->evaluator_options.num_threads = options.num_threads;
pp->evaluator_options.dynamic_sparsity = options.dynamic_sparsity;
- pp->evaluator.reset(Evaluator::Create(pp->evaluator_options,
- pp->reduced_program.get(),
- &pp->error));
+ pp->evaluator_options.context = pp->problem->context();
+ pp->evaluator_options.evaluation_callback =
+ pp->reduced_program->mutable_evaluation_callback();
+ pp->evaluator.reset(Evaluator::Create(
+ pp->evaluator_options, pp->reduced_program.get(), &pp->error));
- return (pp->evaluator.get() != NULL);
+ return (pp->evaluator != nullptr);
}
// If the user requested inner iterations, then find an inner
@@ -252,6 +283,11 @@ bool SetupInnerIterationMinimizer(PreprocessedProblem* pp) {
return true;
}
+ if (pp->reduced_program->mutable_evaluation_callback()) {
+ pp->error = "Inner iterations cannot be used with EvaluationCallbacks";
+ return false;
+ }
+
// With just one parameter block, the outer iteration of the trust
// region method and inner iterations are doing exactly the same
// thing, and thus inner iterations are not needed.
@@ -261,7 +297,7 @@ bool SetupInnerIterationMinimizer(PreprocessedProblem* pp) {
return true;
}
- if (options.inner_iteration_ordering.get() != NULL) {
+ if (options.inner_iteration_ordering != nullptr) {
// If the user supplied an ordering, then remove the set of
// inactive parameter blocks from it
options.inner_iteration_ordering->Remove(pp->removed_parameter_blocks);
@@ -283,7 +319,8 @@ bool SetupInnerIterationMinimizer(PreprocessedProblem* pp) {
CoordinateDescentMinimizer::CreateOrdering(*pp->reduced_program));
}
- pp->inner_iteration_minimizer.reset(new CoordinateDescentMinimizer);
+ pp->inner_iteration_minimizer.reset(
+ new CoordinateDescentMinimizer(pp->problem->context()));
return pp->inner_iteration_minimizer->Init(*pp->reduced_program,
pp->problem->parameter_map(),
*options.inner_iteration_ordering,
@@ -303,8 +340,7 @@ void SetupMinimizerOptions(PreprocessedProblem* pp) {
TrustRegionStrategy::Options strategy_options;
strategy_options.linear_solver = pp->linear_solver.get();
- strategy_options.initial_radius =
- options.initial_trust_region_radius;
+ strategy_options.initial_radius = options.initial_trust_region_radius;
strategy_options.max_radius = options.max_trust_region_radius;
strategy_options.min_lm_diagonal = options.min_lm_diagonal;
strategy_options.max_lm_diagonal = options.max_lm_diagonal;
@@ -312,18 +348,18 @@ void SetupMinimizerOptions(PreprocessedProblem* pp) {
options.trust_region_strategy_type;
strategy_options.dogleg_type = options.dogleg_type;
pp->minimizer_options.trust_region_strategy.reset(
- CHECK_NOTNULL(TrustRegionStrategy::Create(strategy_options)));
+ TrustRegionStrategy::Create(strategy_options));
+ CHECK(pp->minimizer_options.trust_region_strategy != nullptr);
}
} // namespace
-TrustRegionPreprocessor::~TrustRegionPreprocessor() {
-}
+TrustRegionPreprocessor::~TrustRegionPreprocessor() {}
bool TrustRegionPreprocessor::Preprocess(const Solver::Options& options,
ProblemImpl* problem,
PreprocessedProblem* pp) {
- CHECK_NOTNULL(pp);
+ CHECK(pp != nullptr);
pp->options = options;
ChangeNumThreadsIfNeeded(&pp->options);
@@ -333,10 +369,8 @@ bool TrustRegionPreprocessor::Preprocess(const Solver::Options& options,
return false;
}
- pp->reduced_program.reset(
- program->CreateReducedProgram(&pp->removed_parameter_blocks,
- &pp->fixed_cost,
- &pp->error));
+ pp->reduced_program.reset(program->CreateReducedProgram(
+ &pp->removed_parameter_blocks, &pp->fixed_cost, &pp->error));
if (pp->reduced_program.get() == NULL) {
return false;
@@ -348,8 +382,7 @@ bool TrustRegionPreprocessor::Preprocess(const Solver::Options& options,
return true;
}
- if (!SetupLinearSolver(pp) ||
- !SetupEvaluator(pp) ||
+ if (!SetupLinearSolver(pp) || !SetupEvaluator(pp) ||
!SetupInnerIterationMinimizer(pp)) {
return false;
}
diff --git a/extern/ceres/internal/ceres/trust_region_preprocessor.h b/extern/ceres/internal/ceres/trust_region_preprocessor.h
index a6631ab3d40..9597905ae9a 100644
--- a/extern/ceres/internal/ceres/trust_region_preprocessor.h
+++ b/extern/ceres/internal/ceres/trust_region_preprocessor.h
@@ -39,9 +39,9 @@ namespace internal {
class TrustRegionPreprocessor : public Preprocessor {
public:
virtual ~TrustRegionPreprocessor();
- virtual bool Preprocess(const Solver::Options& options,
- ProblemImpl* problem,
- PreprocessedProblem* preprocessed_problem);
+ bool Preprocess(const Solver::Options& options,
+ ProblemImpl* problem,
+ PreprocessedProblem* preprocessed_problem) override;
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/trust_region_step_evaluator.cc b/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
index c9167e623ef..33b0c41ead6 100644
--- a/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
+++ b/extern/ceres/internal/ceres/trust_region_step_evaluator.cc
@@ -29,6 +29,7 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#include <algorithm>
+#include <limits>
#include "ceres/trust_region_step_evaluator.h"
#include "glog/logging.h"
@@ -51,6 +52,15 @@ TrustRegionStepEvaluator::TrustRegionStepEvaluator(
double TrustRegionStepEvaluator::StepQuality(
const double cost,
const double model_cost_change) const {
+ // If the function evaluation for this step was a failure, in which
+ // case the TrustRegionMinimizer would have set the cost to
+ // std::numeric_limits<double>::max(). In this case, the division by
+ // model_cost_change can result in an overflow. To prevent that from
+ // happening, we will deal with this case explicitly.
+ if (cost >= std::numeric_limits<double>::max()) {
+ return std::numeric_limits<double>::lowest();
+ }
+
const double relative_decrease = (current_cost_ - cost) / model_cost_change;
const double historical_relative_decrease =
(reference_cost_ - cost) /
diff --git a/extern/ceres/internal/ceres/trust_region_step_evaluator.h b/extern/ceres/internal/ceres/trust_region_step_evaluator.h
index 06df102a308..03c00362dac 100644
--- a/extern/ceres/internal/ceres/trust_region_step_evaluator.h
+++ b/extern/ceres/internal/ceres/trust_region_step_evaluator.h
@@ -56,7 +56,7 @@ namespace internal {
// The parameter max_consecutive_nonmonotonic_steps controls the
// window size used by the step selection algorithm to accept
// non-monotonic steps. Setting this parameter to zero, recovers the
-// classic montonic descent algorithm.
+// classic monotonic descent algorithm.
//
// Based on algorithm 10.1.2 (page 357) of "Trust Region
// Methods" by Conn Gould & Toint, or equations 33-40 of
@@ -82,7 +82,7 @@ class TrustRegionStepEvaluator {
// max_consecutive_nonmonotonic_steps controls the window size used
// by the step selection algorithm to accept non-monotonic
// steps. Setting this parameter to zero, recovers the classic
- // montonic descent algorithm.
+ // monotonic descent algorithm.
TrustRegionStepEvaluator(double initial_cost,
int max_consecutive_nonmonotonic_steps);
diff --git a/extern/ceres/internal/ceres/trust_region_strategy.h b/extern/ceres/internal/ceres/trust_region_strategy.h
index 36e8e981cc0..5751691e5ec 100644
--- a/extern/ceres/internal/ceres/trust_region_strategy.h
+++ b/extern/ceres/internal/ceres/trust_region_strategy.h
@@ -56,43 +56,34 @@ class SparseMatrix;
class TrustRegionStrategy {
public:
struct Options {
- Options()
- : trust_region_strategy_type(LEVENBERG_MARQUARDT),
- initial_radius(1e4),
- max_radius(1e32),
- min_lm_diagonal(1e-6),
- max_lm_diagonal(1e32),
- dogleg_type(TRADITIONAL_DOGLEG) {
- }
-
- TrustRegionStrategyType trust_region_strategy_type;
+ TrustRegionStrategyType trust_region_strategy_type = LEVENBERG_MARQUARDT;
// Linear solver used for actually solving the trust region step.
- LinearSolver* linear_solver;
- double initial_radius;
- double max_radius;
+ LinearSolver* linear_solver = nullptr;
+ double initial_radius = 1e4;
+ double max_radius = 1e32;
// Minimum and maximum values of the diagonal damping matrix used
// by LevenbergMarquardtStrategy. The DoglegStrategy also uses
// these bounds to construct a regularizing diagonal to ensure
// that the Gauss-Newton step computation is of full rank.
- double min_lm_diagonal;
- double max_lm_diagonal;
+ double min_lm_diagonal = 1e-6;
+ double max_lm_diagonal = 1e32;
// Further specify which dogleg method to use
- DoglegType dogleg_type;
+ DoglegType dogleg_type = TRADITIONAL_DOGLEG;
};
+ // Factory.
+ static TrustRegionStrategy* Create(const Options& options);
+
+ virtual ~TrustRegionStrategy();
+
// Per solve options.
struct PerSolveOptions {
- PerSolveOptions()
- : eta(0),
- dump_format_type(TEXTFILE) {
- }
-
// Forcing sequence for inexact solves.
- double eta;
+ double eta = 1e-1;
- DumpFormatType dump_format_type;
+ DumpFormatType dump_format_type = TEXTFILE;
// If non-empty and dump_format_type is not CONSOLE, the trust
// regions strategy will write the linear system to file(s) with
@@ -103,12 +94,6 @@ class TrustRegionStrategy {
};
struct Summary {
- Summary()
- : residual_norm(0.0),
- num_iterations(-1),
- termination_type(LINEAR_SOLVER_FAILURE) {
- }
-
// If the trust region problem is,
//
// 1/2 x'Ax + b'x + c,
@@ -116,19 +101,17 @@ class TrustRegionStrategy {
// then
//
// residual_norm = |Ax -b|
- double residual_norm;
+ double residual_norm = -1;
// Number of iterations used by the linear solver. If a linear
// solver was not called (e.g., DogLegStrategy after an
// unsuccessful step), then this would be zero.
- int num_iterations;
+ int num_iterations = -1;
// Status of the linear solver used to solve the Newton system.
- LinearSolverTerminationType termination_type;
+ LinearSolverTerminationType termination_type = LINEAR_SOLVER_FAILURE;
};
- virtual ~TrustRegionStrategy();
-
// Use the current radius to solve for the trust region step.
virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
SparseMatrix* jacobian,
@@ -153,9 +136,6 @@ class TrustRegionStrategy {
// Current trust region radius.
virtual double Radius() const = 0;
-
- // Factory.
- static TrustRegionStrategy* Create(const Options& options);
};
} // namespace internal
diff --git a/extern/ceres/internal/ceres/types.cc b/extern/ceres/internal/ceres/types.cc
index f86fb78eb8c..93c4cfcb027 100644
--- a/extern/ceres/internal/ceres/types.cc
+++ b/extern/ceres/internal/ceres/types.cc
@@ -78,6 +78,7 @@ const char* PreconditionerTypeToString(PreconditionerType type) {
CASESTR(SCHUR_JACOBI);
CASESTR(CLUSTER_JACOBI);
CASESTR(CLUSTER_TRIDIAGONAL);
+ CASESTR(SUBSET);
default:
return "UNKNOWN";
}
@@ -90,6 +91,7 @@ bool StringToPreconditionerType(string value, PreconditionerType* type) {
STRENUM(SCHUR_JACOBI);
STRENUM(CLUSTER_JACOBI);
STRENUM(CLUSTER_TRIDIAGONAL);
+ STRENUM(SUBSET);
return false;
}
@@ -99,6 +101,7 @@ const char* SparseLinearAlgebraLibraryTypeToString(
CASESTR(SUITE_SPARSE);
CASESTR(CX_SPARSE);
CASESTR(EIGEN_SPARSE);
+ CASESTR(ACCELERATE_SPARSE);
CASESTR(NO_SPARSE);
default:
return "UNKNOWN";
@@ -112,6 +115,7 @@ bool StringToSparseLinearAlgebraLibraryType(
STRENUM(SUITE_SPARSE);
STRENUM(CX_SPARSE);
STRENUM(EIGEN_SPARSE);
+ STRENUM(ACCELERATE_SPARSE);
STRENUM(NO_SPARSE);
return false;
}
@@ -267,8 +271,7 @@ const char* CovarianceAlgorithmTypeToString(
CovarianceAlgorithmType type) {
switch (type) {
CASESTR(DENSE_SVD);
- CASESTR(EIGEN_SPARSE_QR);
- CASESTR(SUITE_SPARSE_QR);
+ CASESTR(SPARSE_QR);
default:
return "UNKNOWN";
}
@@ -279,8 +282,7 @@ bool StringToCovarianceAlgorithmType(
CovarianceAlgorithmType* type) {
UpperCase(&value);
STRENUM(DENSE_SVD);
- STRENUM(EIGEN_SPARSE_QR);
- STRENUM(SUITE_SPARSE_QR);
+ STRENUM(SPARSE_QR);
return false;
}
@@ -336,6 +338,39 @@ const char* TerminationTypeToString(TerminationType type) {
}
}
+const char* LoggingTypeToString(LoggingType type) {
+ switch (type) {
+ CASESTR(SILENT);
+ CASESTR(PER_MINIMIZER_ITERATION);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringtoLoggingType(std::string value, LoggingType* type) {
+ UpperCase(&value);
+ STRENUM(SILENT);
+ STRENUM(PER_MINIMIZER_ITERATION);
+ return false;
+}
+
+
+const char* DumpFormatTypeToString(DumpFormatType type) {
+ switch (type) {
+ CASESTR(CONSOLE);
+ CASESTR(TEXTFILE);
+ default:
+ return "UNKNOWN";
+ }
+}
+
+bool StringtoDumpFormatType(std::string value, DumpFormatType* type) {
+ UpperCase(&value);
+ STRENUM(CONSOLE);
+ STRENUM(TEXTFILE);
+ return false;
+}
+
#undef CASESTR
#undef STRENUM
@@ -363,6 +398,14 @@ bool IsSparseLinearAlgebraLibraryTypeAvailable(
#endif
}
+ if (type == ACCELERATE_SPARSE) {
+#ifdef CERES_NO_ACCELERATE_SPARSE
+ return false;
+#else
+ return true;
+#endif
+ }
+
if (type == EIGEN_SPARSE) {
#ifdef CERES_USE_EIGEN_SPARSE
return true;
diff --git a/extern/ceres/internal/ceres/visibility.cc b/extern/ceres/internal/ceres/visibility.cc
new file mode 100644
index 00000000000..0981eeddcbf
--- /dev/null
+++ b/extern/ceres/internal/ceres/visibility.cc
@@ -0,0 +1,152 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+
+#include "ceres/visibility.h"
+
+#include <cmath>
+#include <ctime>
+#include <algorithm>
+#include <set>
+#include <vector>
+#include <unordered_map>
+#include <utility>
+#include "ceres/block_structure.h"
+#include "ceres/graph.h"
+#include "ceres/pair_hash.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::max;
+using std::pair;
+using std::set;
+using std::vector;
+
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+ const int num_eliminate_blocks,
+ vector<set<int>>* visibility) {
+ CHECK(visibility != nullptr);
+
+ // Clear the visibility vector and resize it to hold a
+ // vector for each camera.
+ visibility->resize(0);
+ visibility->resize(block_structure.cols.size() - num_eliminate_blocks);
+
+ for (int i = 0; i < block_structure.rows.size(); ++i) {
+ const vector<Cell>& cells = block_structure.rows[i].cells;
+ int block_id = cells[0].block_id;
+ // If the first block is not an e_block, then skip this row block.
+ if (block_id >= num_eliminate_blocks) {
+ continue;
+ }
+
+ for (int j = 1; j < cells.size(); ++j) {
+ int camera_block_id = cells[j].block_id - num_eliminate_blocks;
+ DCHECK_GE(camera_block_id, 0);
+ DCHECK_LT(camera_block_id, visibility->size());
+ (*visibility)[camera_block_id].insert(block_id);
+ }
+ }
+}
+
+WeightedGraph<int>* CreateSchurComplementGraph(
+ const vector<set<int>>& visibility) {
+ const time_t start_time = time(NULL);
+ // Compute the number of e_blocks/point blocks. Since the visibility
+ // set for each e_block/camera contains the set of e_blocks/points
+ // visible to it, we find the maximum across all visibility sets.
+ int num_points = 0;
+ for (int i = 0; i < visibility.size(); i++) {
+ if (visibility[i].size() > 0) {
+ num_points = max(num_points, (*visibility[i].rbegin()) + 1);
+ }
+ }
+
+ // Invert the visibility. The input is a camera->point mapping,
+ // which tells us which points are visible in which
+ // cameras. However, to compute the sparsity structure of the Schur
+ // Complement efficiently, its better to have the point->camera
+ // mapping.
+ vector<set<int>> inverse_visibility(num_points);
+ for (int i = 0; i < visibility.size(); i++) {
+ const set<int>& visibility_set = visibility[i];
+ for (const int v : visibility_set) {
+ inverse_visibility[v].insert(i);
+ }
+ }
+
+ // Map from camera pairs to number of points visible to both cameras
+ // in the pair.
+ std::unordered_map<pair<int, int>, int, pair_hash> camera_pairs;
+
+ // Count the number of points visible to each camera/f_block pair.
+ for (const auto& inverse_visibility_set : inverse_visibility) {
+ for (set<int>::const_iterator camera1 = inverse_visibility_set.begin();
+ camera1 != inverse_visibility_set.end();
+ ++camera1) {
+ set<int>::const_iterator camera2 = camera1;
+ for (++camera2; camera2 != inverse_visibility_set.end(); ++camera2) {
+ ++(camera_pairs[make_pair(*camera1, *camera2)]);
+ }
+ }
+ }
+
+ WeightedGraph<int>* graph = new WeightedGraph<int>;
+
+ // Add vertices and initialize the pairs for self edges so that self
+ // edges are guaranteed. This is needed for the Canonical views
+ // algorithm to work correctly.
+ static constexpr double kSelfEdgeWeight = 1.0;
+ for (int i = 0; i < visibility.size(); ++i) {
+ graph->AddVertex(i);
+ graph->AddEdge(i, i, kSelfEdgeWeight);
+ }
+
+ // Add an edge for each camera pair.
+ for (const auto& camera_pair_count : camera_pairs) {
+ const int camera1 = camera_pair_count.first.first;
+ const int camera2 = camera_pair_count.first.second;
+ const int count = camera_pair_count.second;
+ DCHECK_NE(camera1, camera2);
+ // Static cast necessary for Windows.
+ const double weight = static_cast<double>(count) /
+ (sqrt(static_cast<double>(
+ visibility[camera1].size() * visibility[camera2].size())));
+ graph->AddEdge(camera1, camera2, weight);
+ }
+
+ VLOG(2) << "Schur complement graph time: " << (time(NULL) - start_time);
+ return graph;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/visibility.h b/extern/ceres/internal/ceres/visibility.h
new file mode 100644
index 00000000000..115d45f7cf6
--- /dev/null
+++ b/extern/ceres/internal/ceres/visibility.h
@@ -0,0 +1,78 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: kushalav@google.com (Avanish Kushal)
+// sameeragarwal@google.com (Sameer Agarwal)
+//
+// Functions to manipulate visibility information from the block
+// structure of sparse matrices.
+
+#ifndef CERES_INTERNAL_VISIBILITY_H_
+#define CERES_INTERNAL_VISIBILITY_H_
+
+#include <set>
+#include <vector>
+#include "ceres/graph.h"
+
+namespace ceres {
+namespace internal {
+
+struct CompressedRowBlockStructure;
+
+// Given a compressed row block structure, computes the set of
+// e_blocks "visible" to each f_block. If an e_block co-occurs with an
+// f_block in a residual block, it is visible to the f_block. The
+// first num_eliminate_blocks columns blocks are e_blocks and the rest
+// f_blocks.
+//
+// In a structure from motion problem, e_blocks correspond to 3D
+// points and f_blocks correspond to cameras.
+void ComputeVisibility(const CompressedRowBlockStructure& block_structure,
+ int num_eliminate_blocks,
+ std::vector<std::set<int>>* visibility);
+
+// Given f_block visibility as computed by the ComputeVisibility
+// function above, construct and return a graph whose vertices are
+// f_blocks and an edge connects two vertices if they have at least one
+// e_block in common. The weight of this edge is normalized dot
+// product between the visibility vectors of the two
+// vertices/f_blocks.
+//
+// This graph reflects the sparsity structure of reduced camera
+// matrix/Schur complement matrix obtained by eliminating the e_blocks
+// from the normal equations.
+//
+// Caller acquires ownership of the returned WeightedGraph pointer
+// (heap-allocated).
+WeightedGraph<int>* CreateSchurComplementGraph(
+ const std::vector<std::set<int>>& visibility);
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_VISIBILITY_H_
diff --git a/extern/ceres/internal/ceres/visibility_based_preconditioner.cc b/extern/ceres/internal/ceres/visibility_based_preconditioner.cc
new file mode 100644
index 00000000000..3372e82d1e1
--- /dev/null
+++ b/extern/ceres/internal/ceres/visibility_based_preconditioner.cc
@@ -0,0 +1,585 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2015 Google Inc. All rights reserved.
+// http://ceres-solver.org/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/visibility_based_preconditioner.h"
+
+#include <algorithm>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <set>
+#include <utility>
+#include <vector>
+
+#include "Eigen/Dense"
+#include "ceres/block_random_access_sparse_matrix.h"
+#include "ceres/block_sparse_matrix.h"
+#include "ceres/canonical_views_clustering.h"
+#include "ceres/graph.h"
+#include "ceres/graph_algorithms.h"
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/single_linkage_clustering.h"
+#include "ceres/visibility.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+using std::make_pair;
+using std::pair;
+using std::set;
+using std::swap;
+using std::vector;
+
+// TODO(sameeragarwal): Currently these are magic weights for the
+// preconditioner construction. Move these higher up into the Options
+// struct and provide some guidelines for choosing them.
+//
+// This will require some more work on the clustering algorithm and
+// possibly some more refactoring of the code.
+static constexpr double kCanonicalViewsSizePenaltyWeight = 3.0;
+static constexpr double kCanonicalViewsSimilarityPenaltyWeight = 0.0;
+static constexpr double kSingleLinkageMinSimilarity = 0.9;
+
+VisibilityBasedPreconditioner::VisibilityBasedPreconditioner(
+ const CompressedRowBlockStructure& bs,
+ const Preconditioner::Options& options)
+ : options_(options), num_blocks_(0), num_clusters_(0) {
+ CHECK_GT(options_.elimination_groups.size(), 1);
+ CHECK_GT(options_.elimination_groups[0], 0);
+ CHECK(options_.type == CLUSTER_JACOBI || options_.type == CLUSTER_TRIDIAGONAL)
+ << "Unknown preconditioner type: " << options_.type;
+ num_blocks_ = bs.cols.size() - options_.elimination_groups[0];
+ CHECK_GT(num_blocks_, 0) << "Jacobian should have at least 1 f_block for "
+ << "visibility based preconditioning.";
+ CHECK(options_.context != NULL);
+
+ // Vector of camera block sizes
+ block_size_.resize(num_blocks_);
+ for (int i = 0; i < num_blocks_; ++i) {
+ block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size;
+ }
+
+ const time_t start_time = time(NULL);
+ switch (options_.type) {
+ case CLUSTER_JACOBI:
+ ComputeClusterJacobiSparsity(bs);
+ break;
+ case CLUSTER_TRIDIAGONAL:
+ ComputeClusterTridiagonalSparsity(bs);
+ break;
+ default:
+ LOG(FATAL) << "Unknown preconditioner type";
+ }
+ const time_t structure_time = time(NULL);
+ InitStorage(bs);
+ const time_t storage_time = time(NULL);
+ InitEliminator(bs);
+ const time_t eliminator_time = time(NULL);
+
+ LinearSolver::Options sparse_cholesky_options;
+ sparse_cholesky_options.sparse_linear_algebra_library_type =
+ options_.sparse_linear_algebra_library_type;
+
+ // The preconditioner's sparsity is not available in the
+ // preprocessor, so the columns of the Jacobian have not been
+ // reordered to minimize fill in when computing its sparse Cholesky
+ // factorization. So we must tell the SparseCholesky object to
+ // perform approximate minimum-degree reordering, which is done by
+ // setting use_postordering to true.
+ sparse_cholesky_options.use_postordering = true;
+ sparse_cholesky_ = SparseCholesky::Create(sparse_cholesky_options);
+
+ const time_t init_time = time(NULL);
+ VLOG(2) << "init time: " << init_time - start_time
+ << " structure time: " << structure_time - start_time
+ << " storage time:" << storage_time - structure_time
+ << " eliminator time: " << eliminator_time - storage_time;
+}
+
+VisibilityBasedPreconditioner::~VisibilityBasedPreconditioner() {}
+
+// Determine the sparsity structure of the CLUSTER_JACOBI
+// preconditioner. It clusters cameras using their scene
+// visibility. The clusters form the diagonal blocks of the
+// preconditioner matrix.
+void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity(
+ const CompressedRowBlockStructure& bs) {
+ vector<set<int>> visibility;
+ ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+ CHECK_EQ(num_blocks_, visibility.size());
+ ClusterCameras(visibility);
+ cluster_pairs_.clear();
+ for (int i = 0; i < num_clusters_; ++i) {
+ cluster_pairs_.insert(make_pair(i, i));
+ }
+}
+
+// Determine the sparsity structure of the CLUSTER_TRIDIAGONAL
+// preconditioner. It clusters cameras using using the scene
+// visibility and then finds the strongly interacting pairs of
+// clusters by constructing another graph with the clusters as
+// vertices and approximating it with a degree-2 maximum spanning
+// forest. The set of edges in this forest are the cluster pairs.
+void VisibilityBasedPreconditioner::ComputeClusterTridiagonalSparsity(
+ const CompressedRowBlockStructure& bs) {
+ vector<set<int>> visibility;
+ ComputeVisibility(bs, options_.elimination_groups[0], &visibility);
+ CHECK_EQ(num_blocks_, visibility.size());
+ ClusterCameras(visibility);
+
+ // Construct a weighted graph on the set of clusters, where the
+ // edges are the number of 3D points/e_blocks visible in both the
+ // clusters at the ends of the edge. Return an approximate degree-2
+ // maximum spanning forest of this graph.
+ vector<set<int>> cluster_visibility;
+ ComputeClusterVisibility(visibility, &cluster_visibility);
+ std::unique_ptr<WeightedGraph<int>> cluster_graph(
+ CreateClusterGraph(cluster_visibility));
+ CHECK(cluster_graph != nullptr);
+ std::unique_ptr<WeightedGraph<int>> forest(
+ Degree2MaximumSpanningForest(*cluster_graph));
+ CHECK(forest != nullptr);
+ ForestToClusterPairs(*forest, &cluster_pairs_);
+}
+
+// Allocate storage for the preconditioner matrix.
+void VisibilityBasedPreconditioner::InitStorage(
+ const CompressedRowBlockStructure& bs) {
+ ComputeBlockPairsInPreconditioner(bs);
+ m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs_));
+}
+
+// Call the canonical views algorithm and cluster the cameras based on
+// their visibility sets. The visibility set of a camera is the set of
+// e_blocks/3D points in the scene that are seen by it.
+//
+// The cluster_membership_ vector is updated to indicate cluster
+// memberships for each camera block.
+void VisibilityBasedPreconditioner::ClusterCameras(
+ const vector<set<int>>& visibility) {
+ std::unique_ptr<WeightedGraph<int>> schur_complement_graph(
+ CreateSchurComplementGraph(visibility));
+ CHECK(schur_complement_graph != nullptr);
+
+ std::unordered_map<int, int> membership;
+
+ if (options_.visibility_clustering_type == CANONICAL_VIEWS) {
+ vector<int> centers;
+ CanonicalViewsClusteringOptions clustering_options;
+ clustering_options.size_penalty_weight = kCanonicalViewsSizePenaltyWeight;
+ clustering_options.similarity_penalty_weight =
+ kCanonicalViewsSimilarityPenaltyWeight;
+ ComputeCanonicalViewsClustering(
+ clustering_options, *schur_complement_graph, &centers, &membership);
+ num_clusters_ = centers.size();
+ } else if (options_.visibility_clustering_type == SINGLE_LINKAGE) {
+ SingleLinkageClusteringOptions clustering_options;
+ clustering_options.min_similarity = kSingleLinkageMinSimilarity;
+ num_clusters_ = ComputeSingleLinkageClustering(
+ clustering_options, *schur_complement_graph, &membership);
+ } else {
+ LOG(FATAL) << "Unknown visibility clustering algorithm.";
+ }
+
+ CHECK_GT(num_clusters_, 0);
+ VLOG(2) << "num_clusters: " << num_clusters_;
+ FlattenMembershipMap(membership, &cluster_membership_);
+}
+
+// Compute the block sparsity structure of the Schur complement
+// matrix. For each pair of cameras contributing a non-zero cell to
+// the schur complement, determine if that cell is present in the
+// preconditioner or not.
+//
+// A pair of cameras contribute a cell to the preconditioner if they
+// are part of the same cluster or if the two clusters that they
+// belong have an edge connecting them in the degree-2 maximum
+// spanning forest.
+//
+// For example, a camera pair (i,j) where i belongs to cluster1 and
+// j belongs to cluster2 (assume that cluster1 < cluster2).
+//
+// The cell corresponding to (i,j) is present in the preconditioner
+// if cluster1 == cluster2 or the pair (cluster1, cluster2) were
+// connected by an edge in the degree-2 maximum spanning forest.
+//
+// Since we have already expanded the forest into a set of camera
+// pairs/edges, including self edges, the check can be reduced to
+// checking membership of (cluster1, cluster2) in cluster_pairs_.
+void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
+ const CompressedRowBlockStructure& bs) {
+ block_pairs_.clear();
+ for (int i = 0; i < num_blocks_; ++i) {
+ block_pairs_.insert(make_pair(i, i));
+ }
+
+ int r = 0;
+ const int num_row_blocks = bs.rows.size();
+ const int num_eliminate_blocks = options_.elimination_groups[0];
+
+ // Iterate over each row of the matrix. The block structure of the
+ // matrix is assumed to be sorted in order of the e_blocks/point
+ // blocks. Thus all row blocks containing an e_block/point occur
+ // contiguously. Further, if present, an e_block is always the first
+ // parameter block in each row block. These structural assumptions
+ // are common to all Schur complement based solvers in Ceres.
+ //
+ // For each e_block/point block we identify the set of cameras
+ // seeing it. The cross product of this set with itself is the set
+ // of non-zero cells contributed by this e_block.
+ //
+ // The time complexity of this is O(nm^2) where, n is the number of
+ // 3d points and m is the maximum number of cameras seeing any
+ // point, which for most scenes is a fairly small number.
+ while (r < num_row_blocks) {
+ int e_block_id = bs.rows[r].cells.front().block_id;
+ if (e_block_id >= num_eliminate_blocks) {
+ // Skip the rows whose first block is an f_block.
+ break;
+ }
+
+ set<int> f_blocks;
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs.rows[r];
+ if (row.cells.front().block_id != e_block_id) {
+ break;
+ }
+
+ // Iterate over the blocks in the row, ignoring the first block
+ // since it is the one to be eliminated and adding the rest to
+ // the list of f_blocks associated with this e_block.
+ for (int c = 1; c < row.cells.size(); ++c) {
+ const Cell& cell = row.cells[c];
+ const int f_block_id = cell.block_id - num_eliminate_blocks;
+ CHECK_GE(f_block_id, 0);
+ f_blocks.insert(f_block_id);
+ }
+ }
+
+ for (set<int>::const_iterator block1 = f_blocks.begin();
+ block1 != f_blocks.end();
+ ++block1) {
+ set<int>::const_iterator block2 = block1;
+ ++block2;
+ for (; block2 != f_blocks.end(); ++block2) {
+ if (IsBlockPairInPreconditioner(*block1, *block2)) {
+ block_pairs_.insert(make_pair(*block1, *block2));
+ }
+ }
+ }
+ }
+
+ // The remaining rows which do not contain any e_blocks.
+ for (; r < num_row_blocks; ++r) {
+ const CompressedRow& row = bs.rows[r];
+ CHECK_GE(row.cells.front().block_id, num_eliminate_blocks);
+ for (int i = 0; i < row.cells.size(); ++i) {
+ const int block1 = row.cells[i].block_id - num_eliminate_blocks;
+ for (int j = 0; j < row.cells.size(); ++j) {
+ const int block2 = row.cells[j].block_id - num_eliminate_blocks;
+ if (block1 <= block2) {
+ if (IsBlockPairInPreconditioner(block1, block2)) {
+ block_pairs_.insert(make_pair(block1, block2));
+ }
+ }
+ }
+ }
+ }
+
+ VLOG(1) << "Block pair stats: " << block_pairs_.size();
+}
+
+// Initialize the SchurEliminator.
+void VisibilityBasedPreconditioner::InitEliminator(
+ const CompressedRowBlockStructure& bs) {
+ LinearSolver::Options eliminator_options;
+ eliminator_options.elimination_groups = options_.elimination_groups;
+ eliminator_options.num_threads = options_.num_threads;
+ eliminator_options.e_block_size = options_.e_block_size;
+ eliminator_options.f_block_size = options_.f_block_size;
+ eliminator_options.row_block_size = options_.row_block_size;
+ eliminator_options.context = options_.context;
+ eliminator_.reset(SchurEliminatorBase::Create(eliminator_options));
+ const bool kFullRankETE = true;
+ eliminator_->Init(
+ eliminator_options.elimination_groups[0], kFullRankETE, &bs);
+}
+
+// Update the values of the preconditioner matrix and factorize it.
+bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A,
+ const double* D) {
+ const time_t start_time = time(NULL);
+ const int num_rows = m_->num_rows();
+ CHECK_GT(num_rows, 0);
+
+ // Compute a subset of the entries of the Schur complement.
+ eliminator_->Eliminate(
+ BlockSparseMatrixData(A), nullptr, D, m_.get(), nullptr);
+
+ // Try factorizing the matrix. For CLUSTER_JACOBI, this should
+ // always succeed modulo some numerical/conditioning problems. For
+ // CLUSTER_TRIDIAGONAL, in general the preconditioner matrix as
+ // constructed is not positive definite. However, we will go ahead
+ // and try factorizing it. If it works, great, otherwise we scale
+ // all the cells in the preconditioner corresponding to the edges in
+ // the degree-2 forest and that guarantees positive
+ // definiteness. The proof of this fact can be found in Lemma 1 in
+ // "Visibility Based Preconditioning for Bundle Adjustment".
+ //
+ // Doing the factorization like this saves us matrix mass when
+ // scaling is not needed, which is quite often in our experience.
+ LinearSolverTerminationType status = Factorize();
+
+ if (status == LINEAR_SOLVER_FATAL_ERROR) {
+ return false;
+ }
+
+ // The scaling only affects the tri-diagonal case, since
+ // ScaleOffDiagonalBlocks only pays attention to the cells that
+ // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI
+ // case, the preconditioner is guaranteed to be positive
+ // semidefinite.
+ if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) {
+ VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal "
+ << "scaling";
+ ScaleOffDiagonalCells();
+ status = Factorize();
+ }
+
+ VLOG(2) << "Compute time: " << time(NULL) - start_time;
+ return (status == LINEAR_SOLVER_SUCCESS);
+}
+
+// Consider the preconditioner matrix as meta-block matrix, whose
+// blocks correspond to the clusters. Then cluster pairs corresponding
+// to edges in the degree-2 forest are off diagonal entries of this
+// matrix. Scaling these off-diagonal entries by 1/2 forces this
+// matrix to be positive definite.
+void VisibilityBasedPreconditioner::ScaleOffDiagonalCells() {
+ for (const auto& block_pair : block_pairs_) {
+ const int block1 = block_pair.first;
+ const int block2 = block_pair.second;
+ if (!IsBlockPairOffDiagonal(block1, block2)) {
+ continue;
+ }
+
+ int r, c, row_stride, col_stride;
+ CellInfo* cell_info =
+ m_->GetCell(block1, block2, &r, &c, &row_stride, &col_stride);
+ CHECK(cell_info != NULL)
+ << "Cell missing for block pair (" << block1 << "," << block2 << ")"
+ << " cluster pair (" << cluster_membership_[block1] << " "
+ << cluster_membership_[block2] << ")";
+
+ // Ah the magic of tri-diagonal matrices and diagonal
+ // dominance. See Lemma 1 in "Visibility Based Preconditioning
+ // For Bundle Adjustment".
+ MatrixRef m(cell_info->values, row_stride, col_stride);
+ m.block(r, c, block_size_[block1], block_size_[block2]) *= 0.5;
+ }
+}
+
+// Compute the sparse Cholesky factorization of the preconditioner
+// matrix.
+LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() {
+ // Extract the TripletSparseMatrix that is used for actually storing
+ // S and convert it into a CompressedRowSparseMatrix.
+ const TripletSparseMatrix* tsm =
+ down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->mutable_matrix();
+
+ std::unique_ptr<CompressedRowSparseMatrix> lhs;
+ const CompressedRowSparseMatrix::StorageType storage_type =
+ sparse_cholesky_->StorageType();
+ if (storage_type == CompressedRowSparseMatrix::UPPER_TRIANGULAR) {
+ lhs.reset(CompressedRowSparseMatrix::FromTripletSparseMatrix(*tsm));
+ lhs->set_storage_type(CompressedRowSparseMatrix::UPPER_TRIANGULAR);
+ } else {
+ lhs.reset(
+ CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(*tsm));
+ lhs->set_storage_type(CompressedRowSparseMatrix::LOWER_TRIANGULAR);
+ }
+
+ std::string message;
+ return sparse_cholesky_->Factorize(lhs.get(), &message);
+}
+
+void VisibilityBasedPreconditioner::RightMultiply(const double* x,
+ double* y) const {
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
+ CHECK(sparse_cholesky_ != nullptr);
+ std::string message;
+ sparse_cholesky_->Solve(x, y, &message);
+}
+
+int VisibilityBasedPreconditioner::num_rows() const { return m_->num_rows(); }
+
+// Classify camera/f_block pairs as in and out of the preconditioner,
+// based on whether the cluster pair that they belong to is in the
+// preconditioner or not.
+bool VisibilityBasedPreconditioner::IsBlockPairInPreconditioner(
+ const int block1, const int block2) const {
+ int cluster1 = cluster_membership_[block1];
+ int cluster2 = cluster_membership_[block2];
+ if (cluster1 > cluster2) {
+ swap(cluster1, cluster2);
+ }
+ return (cluster_pairs_.count(make_pair(cluster1, cluster2)) > 0);
+}
+
+bool VisibilityBasedPreconditioner::IsBlockPairOffDiagonal(
+ const int block1, const int block2) const {
+ return (cluster_membership_[block1] != cluster_membership_[block2]);
+}
+
+// Convert a graph into a list of edges that includes self edges for
+// each vertex.
+void VisibilityBasedPreconditioner::ForestToClusterPairs(
+ const WeightedGraph<int>& forest,
+ std::unordered_set<pair<int, int>, pair_hash>* cluster_pairs) const {
+ CHECK(cluster_pairs != nullptr);
+ cluster_pairs->clear();
+ const std::unordered_set<int>& vertices = forest.vertices();
+ CHECK_EQ(vertices.size(), num_clusters_);
+
+ // Add all the cluster pairs corresponding to the edges in the
+ // forest.
+ for (const int cluster1 : vertices) {
+ cluster_pairs->insert(make_pair(cluster1, cluster1));
+ const std::unordered_set<int>& neighbors = forest.Neighbors(cluster1);
+ for (const int cluster2 : neighbors) {
+ if (cluster1 < cluster2) {
+ cluster_pairs->insert(make_pair(cluster1, cluster2));
+ }
+ }
+ }
+}
+
+// The visibility set of a cluster is the union of the visibility sets
+// of all its cameras. In other words, the set of points visible to
+// any camera in the cluster.
+void VisibilityBasedPreconditioner::ComputeClusterVisibility(
+ const vector<set<int>>& visibility,
+ vector<set<int>>* cluster_visibility) const {
+ CHECK(cluster_visibility != nullptr);
+ cluster_visibility->resize(0);
+ cluster_visibility->resize(num_clusters_);
+ for (int i = 0; i < num_blocks_; ++i) {
+ const int cluster_id = cluster_membership_[i];
+ (*cluster_visibility)[cluster_id].insert(visibility[i].begin(),
+ visibility[i].end());
+ }
+}
+
+// Construct a graph whose vertices are the clusters, and the edge
+// weights are the number of 3D points visible to cameras in both the
+// vertices.
+WeightedGraph<int>* VisibilityBasedPreconditioner::CreateClusterGraph(
+ const vector<set<int>>& cluster_visibility) const {
+ WeightedGraph<int>* cluster_graph = new WeightedGraph<int>;
+
+ for (int i = 0; i < num_clusters_; ++i) {
+ cluster_graph->AddVertex(i);
+ }
+
+ for (int i = 0; i < num_clusters_; ++i) {
+ const set<int>& cluster_i = cluster_visibility[i];
+ for (int j = i + 1; j < num_clusters_; ++j) {
+ vector<int> intersection;
+ const set<int>& cluster_j = cluster_visibility[j];
+ set_intersection(cluster_i.begin(),
+ cluster_i.end(),
+ cluster_j.begin(),
+ cluster_j.end(),
+ back_inserter(intersection));
+
+ if (intersection.size() > 0) {
+ // Clusters interact strongly when they share a large number
+ // of 3D points. The degree-2 maximum spanning forest
+ // algorithm, iterates on the edges in decreasing order of
+ // their weight, which is the number of points shared by the
+ // two cameras that it connects.
+ cluster_graph->AddEdge(i, j, intersection.size());
+ }
+ }
+ }
+ return cluster_graph;
+}
+
+// Canonical views clustering returns a std::unordered_map from vertices to
+// cluster ids. Convert this into a flat array for quick lookup. It is
+// possible that some of the vertices may not be associated with any
+// cluster. In that case, randomly assign them to one of the clusters.
+//
+// The cluster ids can be non-contiguous integers. So as we flatten
+// the membership_map, we also map the cluster ids to a contiguous set
+// of integers so that the cluster ids are in [0, num_clusters_).
+void VisibilityBasedPreconditioner::FlattenMembershipMap(
+ const std::unordered_map<int, int>& membership_map,
+ vector<int>* membership_vector) const {
+ CHECK(membership_vector != nullptr);
+ membership_vector->resize(0);
+ membership_vector->resize(num_blocks_, -1);
+
+ std::unordered_map<int, int> cluster_id_to_index;
+ // Iterate over the cluster membership map and update the
+ // cluster_membership_ vector assigning arbitrary cluster ids to
+ // the few cameras that have not been clustered.
+ for (const auto& m : membership_map) {
+ const int camera_id = m.first;
+ int cluster_id = m.second;
+
+ // If the view was not clustered, randomly assign it to one of the
+ // clusters. This preserves the mathematical correctness of the
+ // preconditioner. If there are too many views which are not
+ // clustered, it may lead to some quality degradation though.
+ //
+ // TODO(sameeragarwal): Check if a large number of views have not
+ // been clustered and deal with it?
+ if (cluster_id == -1) {
+ cluster_id = camera_id % num_clusters_;
+ }
+
+ const int index = FindWithDefault(
+ cluster_id_to_index, cluster_id, cluster_id_to_index.size());
+
+ if (index == cluster_id_to_index.size()) {
+ cluster_id_to_index[cluster_id] = index;
+ }
+
+ CHECK_LT(index, num_clusters_);
+ membership_vector->at(camera_id) = index;
+ }
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/ceres/internal/ceres/visibility_based_preconditioner.h b/extern/ceres/internal/ceres/visibility_based_preconditioner.h
index a627c13523c..aa582d5e7ef 100644
--- a/extern/ceres/internal/ceres/visibility_based_preconditioner.h
+++ b/extern/ceres/internal/ceres/visibility_based_preconditioner.h
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -48,16 +48,18 @@
#ifndef CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
#define CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_
+#include <memory>
#include <set>
-#include <vector>
+#include <unordered_map>
+#include <unordered_set>
#include <utility>
-#include "ceres/collections_port.h"
+#include <vector>
+
#include "ceres/graph.h"
-#include "ceres/internal/macros.h"
-#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
+#include "ceres/pair_hash.h"
#include "ceres/preconditioner.h"
-#include "ceres/suitesparse.h"
+#include "ceres/sparse_cholesky.h"
namespace ceres {
namespace internal {
@@ -122,8 +124,6 @@ class SchurEliminatorBase;
// *A.block_structure(), options);
// preconditioner.Update(A, NULL);
// preconditioner.RightMultiply(x, y);
-//
-#ifndef CERES_NO_SUITESPARSE
class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
public:
// Initialize the symbolic structure of the preconditioner. bs is
@@ -134,16 +134,19 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
// based solvers. Please see schur_eliminator.h for more details.
VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
const Preconditioner::Options& options);
+ VisibilityBasedPreconditioner(const VisibilityBasedPreconditioner&) = delete;
+ void operator=(const VisibilityBasedPreconditioner&) = delete;
+
virtual ~VisibilityBasedPreconditioner();
// Preconditioner interface
- virtual void RightMultiply(const double* x, double* y) const;
- virtual int num_rows() const;
+ void RightMultiply(const double* x, double* y) const final;
+ int num_rows() const final;
friend class VisibilityBasedPreconditionerTest;
private:
- virtual bool UpdateImpl(const BlockSparseMatrix& A, const double* D);
+ bool UpdateImpl(const BlockSparseMatrix& A, const double* D) final;
void ComputeClusterJacobiSparsity(const CompressedRowBlockStructure& bs);
void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs);
void InitStorage(const CompressedRowBlockStructure& bs);
@@ -151,16 +154,16 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
LinearSolverTerminationType Factorize();
void ScaleOffDiagonalCells();
- void ClusterCameras(const std::vector<std::set<int> >& visibility);
- void FlattenMembershipMap(const HashMap<int, int>& membership_map,
+ void ClusterCameras(const std::vector<std::set<int>>& visibility);
+ void FlattenMembershipMap(const std::unordered_map<int, int>& membership_map,
std::vector<int>* membership_vector) const;
void ComputeClusterVisibility(
- const std::vector<std::set<int> >& visibility,
- std::vector<std::set<int> >* cluster_visibility) const;
+ const std::vector<std::set<int>>& visibility,
+ std::vector<std::set<int>>* cluster_visibility) const;
WeightedGraph<int>* CreateClusterGraph(
- const std::vector<std::set<int> >& visibility) const;
+ const std::vector<std::set<int>>& visibility) const;
void ForestToClusterPairs(const WeightedGraph<int>& forest,
- HashSet<std::pair<int, int> >* cluster_pairs) const;
+ std::unordered_set<std::pair<int, int>, pair_hash>* cluster_pairs) const;
void ComputeBlockPairsInPreconditioner(const CompressedRowBlockStructure& bs);
bool IsBlockPairInPreconditioner(int block1, int block2) const;
bool IsBlockPairOffDiagonal(int block1, int block2) const;
@@ -180,52 +183,17 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
// Non-zero camera pairs from the schur complement matrix that are
// present in the preconditioner, sorted by row (first element of
// each pair), then column (second).
- std::set<std::pair<int, int> > block_pairs_;
+ std::set<std::pair<int, int>> block_pairs_;
// Set of cluster pairs (including self pairs (i,i)) in the
// preconditioner.
- HashSet<std::pair<int, int> > cluster_pairs_;
- scoped_ptr<SchurEliminatorBase> eliminator_;
+ std::unordered_set<std::pair<int, int>, pair_hash> cluster_pairs_;
+ std::unique_ptr<SchurEliminatorBase> eliminator_;
// Preconditioner matrix.
- scoped_ptr<BlockRandomAccessSparseMatrix> m_;
-
- // RightMultiply is a const method for LinearOperators. It is
- // implemented using CHOLMOD's sparse triangular matrix solve
- // function. This however requires non-const access to the
- // SuiteSparse context object, even though it does not result in any
- // of the state of the preconditioner being modified.
- SuiteSparse ss_;
-
- // Symbolic and numeric factorization of the preconditioner.
- cholmod_factor* factor_;
-
- // Temporary vector used by RightMultiply.
- cholmod_dense* tmp_rhs_;
- CERES_DISALLOW_COPY_AND_ASSIGN(VisibilityBasedPreconditioner);
-};
-#else // SuiteSparse
-// If SuiteSparse is not compiled in, the preconditioner is not
-// available.
-class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner {
- public:
- VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs,
- const Preconditioner::Options& options) {
- LOG(FATAL) << "Visibility based preconditioning is not available. Please "
- "build Ceres with SuiteSparse.";
- }
- virtual ~VisibilityBasedPreconditioner() {}
- virtual void RightMultiply(const double* x, double* y) const {}
- virtual void LeftMultiply(const double* x, double* y) const {}
- virtual int num_rows() const { return -1; }
- virtual int num_cols() const { return -1; }
-
- private:
- bool UpdateImpl(const BlockSparseMatrix& A, const double* D) {
- return false;
- }
+ std::unique_ptr<BlockRandomAccessSparseMatrix> m_;
+ std::unique_ptr<SparseCholesky> sparse_cholesky_;
};
-#endif // CERES_NO_SUITESPARSE
} // namespace internal
} // namespace ceres
diff --git a/extern/ceres/internal/ceres/wall_time.cc b/extern/ceres/internal/ceres/wall_time.cc
index c353973cc3e..716392741e9 100644
--- a/extern/ceres/internal/ceres/wall_time.cc
+++ b/extern/ceres/internal/ceres/wall_time.cc
@@ -50,7 +50,12 @@ double WallTimeInSeconds() {
return omp_get_wtime();
#else
#ifdef _WIN32
- return static_cast<double>(std::time(NULL));
+ LARGE_INTEGER count;
+ LARGE_INTEGER frequency;
+ QueryPerformanceCounter(&count);
+ QueryPerformanceFrequency(&frequency);
+ return static_cast<double>(count.QuadPart) /
+ static_cast<double>(frequency.QuadPart);
#else
timeval time_val;
gettimeofday(&time_val, NULL);
@@ -59,20 +64,24 @@ double WallTimeInSeconds() {
#endif
}
-EventLogger::EventLogger(const std::string& logger_name)
- : start_time_(WallTimeInSeconds()),
- last_event_time_(start_time_),
- events_("") {
- StringAppendF(&events_,
- "\n%s\n Delta Cumulative\n",
- logger_name.c_str());
+EventLogger::EventLogger(const std::string& logger_name) {
+ if (!VLOG_IS_ON(3)) {
+ return;
+ }
+
+ start_time_ = WallTimeInSeconds();
+ last_event_time_ = start_time_;
+ events_ = StringPrintf(
+ "\n%s\n Delta Cumulative\n",
+ logger_name.c_str());
}
EventLogger::~EventLogger() {
- if (VLOG_IS_ON(3)) {
- AddEvent("Total");
- VLOG(2) << "\n" << events_ << "\n";
+ if (!VLOG_IS_ON(3)) {
+ return;
}
+ AddEvent("Total");
+ VLOG(3) << "\n" << events_ << "\n";
}
void EventLogger::AddEvent(const std::string& event_name) {
diff --git a/extern/ceres/internal/ceres/wall_time.h b/extern/ceres/internal/ceres/wall_time.h
index 966aa67cab6..ed0610f27da 100644
--- a/extern/ceres/internal/ceres/wall_time.h
+++ b/extern/ceres/internal/ceres/wall_time.h
@@ -77,7 +77,7 @@ class EventLogger {
void AddEvent(const std::string& event_name);
private:
- const double start_time_;
+ double start_time_;
double last_event_time_;
std::string events_;
};
diff --git a/extern/ceres/patches/series b/extern/ceres/patches/series
index e69de29bb2d..7fa3673acac 100644
--- a/extern/ceres/patches/series
+++ b/extern/ceres/patches/series
@@ -0,0 +1,2 @@
+unused_parameter.patch
+unused_variable.patch
diff --git a/extern/ceres/patches/unused_parameter.patch b/extern/ceres/patches/unused_parameter.patch
new file mode 100644
index 00000000000..14969d6a19f
--- /dev/null
+++ b/extern/ceres/patches/unused_parameter.patch
@@ -0,0 +1,13 @@
+diff --git a/extern/ceres/include/ceres/internal/autodiff.h b/extern/ceres/include/ceres/internal/autodiff.h
+index 71b7bae4757..cb7b1aca5b9 100644
+--- a/include/ceres/internal/autodiff.h
++++ b/include/ceres/internal/autodiff.h
+@@ -198,7 +198,7 @@ struct Make1stOrderPerturbation {
+ template <int N, int Offset, typename T, typename JetT>
+ struct Make1stOrderPerturbation<N, N, Offset, T, JetT> {
+ public:
+- static void Apply(const T* src, JetT* dst) {}
++ static void Apply(const T* /*src*/, JetT* /*dst*/) {}
+ };
+
+ // Calls Make1stOrderPerturbation for every parameter block.
diff --git a/extern/ceres/patches/unused_variable.patch b/extern/ceres/patches/unused_variable.patch
new file mode 100644
index 00000000000..24a4f392962
--- /dev/null
+++ b/extern/ceres/patches/unused_variable.patch
@@ -0,0 +1,12 @@
+diff --git a/extern/ceres/internal/ceres/sparse_cholesky.cc b/extern/ceres/internal/ceres/sparse_cholesky.cc
+index 0639ea90664..d9d2100d3f9 100644
+--- a/internal/ceres/sparse_cholesky.cc
++++ b/internal/ceres/sparse_cholesky.cc
+@@ -56,6 +56,7 @@ std::unique_ptr<SparseCholesky> SparseCholesky::Create(
+ }
+ break;
+ #else
++ (void)ordering_type;
+ LOG(FATAL) << "Ceres was compiled without support for SuiteSparse.";
+ #endif
+