diff options
author | Sergey Sharybin <sergey.vfx@gmail.com> | 2013-11-28 17:06:35 +0400 |
---|---|---|
committer | Sergey Sharybin <sergey.vfx@gmail.com> | 2013-11-28 17:46:23 +0400 |
commit | de6c1c9287849a6d3688ac4d67c8cc21f0c4cd31 (patch) | |
tree | 353570de412d500eb4b792d548f161798da95dbb /extern | |
parent | 918ad1719f5ece6cd1da9cc3c0933620b83a2526 (diff) |
Update Ceres to latest upstream version
- A richer Problem API.
- DynamicNumericDiffCostFunction.
- Faster ITERATIVE_SCHUR solver.
- Faster SCHUR_JACOBI preconditioner.
- Faster Jacobian evaluation.
- Faster visibility based preconditioning using single linkage clustering.
Also re-wrote the build rules for detecting unordered collections;
they should now work on all platforms and compilers :)
Diffstat (limited to 'extern')
101 files changed, 4862 insertions, 2096 deletions
diff --git a/extern/libmv/third_party/ceres/CMakeLists.txt b/extern/libmv/third_party/ceres/CMakeLists.txt index 56fd4c25473..a05c424df49 100644 --- a/extern/libmv/third_party/ceres/CMakeLists.txt +++ b/extern/libmv/third_party/ceres/CMakeLists.txt @@ -46,6 +46,7 @@ set(SRC internal/ceres/block_jacobi_preconditioner.cc internal/ceres/block_random_access_crs_matrix.cc internal/ceres/block_random_access_dense_matrix.cc + internal/ceres/block_random_access_diagonal_matrix.cc internal/ceres/block_random_access_matrix.cc internal/ceres/block_random_access_sparse_matrix.cc internal/ceres/block_sparse_matrix.cc @@ -70,6 +71,7 @@ set(SRC internal/ceres/dogleg_strategy.cc internal/ceres/evaluator.cc internal/ceres/file.cc + internal/ceres/generated/partitioned_matrix_view_d_d_d.cc internal/ceres/generated/schur_eliminator_d_d_d.cc internal/ceres/gradient_checking_cost_function.cc internal/ceres/implicit_schur_complement.cc @@ -97,11 +99,11 @@ set(SRC internal/ceres/program.cc internal/ceres/residual_block.cc internal/ceres/residual_block_utils.cc - internal/ceres/runtime_numeric_diff_cost_function.cc internal/ceres/schur_complement_solver.cc internal/ceres/schur_eliminator.cc internal/ceres/schur_jacobi_preconditioner.cc internal/ceres/scratch_evaluate_preparer.cc + internal/ceres/single_linkage_clustering.cc internal/ceres/solver.cc internal/ceres/solver_impl.cc internal/ceres/sparse_matrix.cc @@ -127,6 +129,7 @@ set(SRC include/ceres/covariance.h include/ceres/crs_matrix.h include/ceres/dynamic_autodiff_cost_function.h + include/ceres/dynamic_numeric_diff_cost_function.h include/ceres/fpclassify.h include/ceres/gradient_checker.h include/ceres/internal/autodiff.h @@ -158,6 +161,7 @@ set(SRC internal/ceres/block_jacobi_preconditioner.h internal/ceres/block_random_access_crs_matrix.h internal/ceres/block_random_access_dense_matrix.h + internal/ceres/block_random_access_diagonal_matrix.h internal/ceres/block_random_access_matrix.h 
internal/ceres/block_random_access_sparse_matrix.h internal/ceres/block_sparse_matrix.h @@ -206,6 +210,7 @@ set(SRC internal/ceres/parameter_block.h internal/ceres/parameter_block_ordering.h internal/ceres/partitioned_matrix_view.h + internal/ceres/partitioned_matrix_view_impl.h internal/ceres/polynomial.h internal/ceres/preconditioner.h internal/ceres/problem_impl.h @@ -214,12 +219,12 @@ set(SRC internal/ceres/random.h internal/ceres/residual_block.h internal/ceres/residual_block_utils.h - internal/ceres/runtime_numeric_diff_cost_function.h internal/ceres/schur_complement_solver.h internal/ceres/schur_eliminator.h internal/ceres/schur_eliminator_impl.h internal/ceres/schur_jacobi_preconditioner.h internal/ceres/scratch_evaluate_preparer.h + internal/ceres/single_linkage_clustering.h internal/ceres/small_blas.h internal/ceres/solver_impl.h internal/ceres/sparse_matrix.h @@ -238,6 +243,22 @@ set(SRC #if(FALSE) # list(APPEND SRC +# internal/ceres/generated/partitioned_matrix_view_2_2_2.cc +# internal/ceres/generated/partitioned_matrix_view_2_2_3.cc +# internal/ceres/generated/partitioned_matrix_view_2_2_4.cc +# internal/ceres/generated/partitioned_matrix_view_2_2_d.cc +# internal/ceres/generated/partitioned_matrix_view_2_3_3.cc +# internal/ceres/generated/partitioned_matrix_view_2_3_4.cc +# internal/ceres/generated/partitioned_matrix_view_2_3_9.cc +# internal/ceres/generated/partitioned_matrix_view_2_3_d.cc +# internal/ceres/generated/partitioned_matrix_view_2_4_3.cc +# internal/ceres/generated/partitioned_matrix_view_2_4_4.cc +# internal/ceres/generated/partitioned_matrix_view_2_4_d.cc +# internal/ceres/generated/partitioned_matrix_view_2_d_d.cc +# internal/ceres/generated/partitioned_matrix_view_4_4_2.cc +# internal/ceres/generated/partitioned_matrix_view_4_4_3.cc +# internal/ceres/generated/partitioned_matrix_view_4_4_4.cc +# internal/ceres/generated/partitioned_matrix_view_4_4_d.cc # internal/ceres/generated/schur_eliminator_2_2_2.cc # 
internal/ceres/generated/schur_eliminator_2_2_3.cc # internal/ceres/generated/schur_eliminator_2_2_4.cc @@ -249,6 +270,7 @@ set(SRC # internal/ceres/generated/schur_eliminator_2_4_3.cc # internal/ceres/generated/schur_eliminator_2_4_4.cc # internal/ceres/generated/schur_eliminator_2_4_d.cc +# internal/ceres/generated/schur_eliminator_2_d_d.cc # internal/ceres/generated/schur_eliminator_4_4_2.cc # internal/ceres/generated/schur_eliminator_4_4_3.cc # internal/ceres/generated/schur_eliminator_4_4_4.cc @@ -287,23 +309,18 @@ if(WITH_OPENMP) ) endif() -if(MSVC10) - add_definitions( - -D"CERES_HASH_NAMESPACE_START=namespace std {" - -D"CERES_HASH_NAMESPACE_END=}" - ) +include(CheckIncludeFileCXX) +CHECK_INCLUDE_FILE_CXX(unordered_map UNORDERED_MAP_IN_STD_NAMESPACE) +if(UNORDERED_MAP_IN_STD_NAMESPACE) + ADD_DEFINITIONS(-DCERES_STD_UNORDERED_MAP) else() - add_definitions( - -D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {" - -D"CERES_HASH_NAMESPACE_END=}}" - ) -endif() - -if(APPLE) - if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5") - add_definitions( - -DCERES_NO_TR1 - ) + CHECK_INCLUDE_FILE_CXX("tr1/unordered_map" UNORDERED_MAP_IN_TR1_NAMESPACE) + if(UNORDERED_MAP_IN_TR1_NAMESPACE) + ADD_DEFINITIONS(-DCERES_TR1_UNORDERED_MAP) + else() + MESSAGE("-- Unable to find <unordered_map> or <tr1/unordered_map>. 
") + MESSAGE("-- Replacing unordered_map/set with map/set (warning: slower!)") + ADD_DEFINITIONS(-DCERES_NO_UNORDERED_MAP) endif() endif() diff --git a/extern/libmv/third_party/ceres/ChangeLog b/extern/libmv/third_party/ceres/ChangeLog index 6bb33068b2a..2bd4206cf47 100644 --- a/extern/libmv/third_party/ceres/ChangeLog +++ b/extern/libmv/third_party/ceres/ChangeLog @@ -1,638 +1,684 @@ -commit 682cd3c27864ba6d67ca81890760a5f697f21d63 -Author: Keir Mierle <mierle@gmail.com> -Date: Tue Sep 3 14:28:32 2013 -0700 - - Update version history with shared libs changes - - Change-Id: Iafd55087bc5eef4c15c3b544222147aa99df7690 - -commit 340d7c1415f144ca335ec1e87832c3f41d5d515b -Author: Keir Mierle <mierle@gmail.com> -Date: Tue Sep 3 13:50:03 2013 -0700 - - Update version history with miniglog fix - - Change-Id: Ic69f4994259e05fa88548b957146a1aac73b7af7 - -commit ac061c0f2334868e671f26d24e34a14c77fac716 -Author: Keir Mierle <mierle@gmail.com> -Date: Tue Sep 3 13:03:28 2013 -0700 +commit 33e01b9c5e1416fe29c55ac0332cdca21c053c83 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Wed Nov 27 10:24:03 2013 -0800 - Cleanups in logging.h + Rename LinearSolverTerminationType enums. - Thanks to Scott Ettinger for the patch this is based off of, - which restores the NDK build. + This increases clarity, drops redundant enums and makes things + cleaner all around. - Change-Id: I8036dc1388438a4940e6f4ae297162902afd8d3a + Change-Id: I761f195ddf17ea6bd8e4e55bf5a72863660c4c3b -commit 0338f9a8e69582a550ef6d128e447779536d623c +commit 068437eb89d495d905465544ccd442efef457b04 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Sep 2 22:28:40 2013 -0700 +Date: Wed Nov 27 07:05:57 2013 -0800 - ITERATIVE_SCHUR works with no f-blocks. - - When the Schur complement is of size zero, - i.e. none of the parameter blocks interact - with each other, the ITERATIVE_SCHUR linear - solver crashes due to some checks that are - triggered in the SCHUR_JACOBI preconditioner. 
+ Pipe minimizer termination messages to Solver::Summary. - This patch adds logic to detect this condition - and to deal with it and adds tests that verify - the fix. + All minimizer termination messages are now available as + Solver::Summary::error. - Thanks to Soohyun Bae for reporting this bug. + This is part of the ongoing refactoring or - Change-Id: If29ddf32463cbb1960414fff0e29bbf0d2ee7989 + Change-Id: I4514c3c042645bbd1471bcde9bd3dbf81d9ee8b0 -commit 263de47419167786c9ab6d93fa2f3e32e8e75fe1 -Author: Taylor Braun-Jones <taylor@braun-jones.org> -Date: Thu Aug 29 10:33:29 2013 -0400 +commit 89a592f410fb6f80c03dea84b6b9f1a10bea36c1 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Tue Nov 26 11:35:49 2013 -0800 - Incorporate RHEL build fixes from Brian Pitts + LinearSolver::Summary::status -> LinearSolver::Summary::message. - CMake build fixed so that versioned shared libraries are installed - (along with .so symlinks) + And a bunch of minor lint cleanups as they showed up. - Change-Id: Ibbaea9d37d17754cb8c3cd36fc17d015ca7d2a57 + Change-Id: I430a6b05710923c72daf6a5df4dfcd16fbf44b3a -commit 6b4131993ec0db6c850bb2ae07ba8793dbab3e39 +commit b16e118b96c55451c0d8556f3c5b52ad36b69cac Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 26 00:02:50 2013 -0700 +Date: Mon Nov 25 05:47:43 2013 -0800 - Update spec file + Better error checking and reporting for linear solvers. - Change-Id: Id6426d7cad41cde2cbab411964ac013d724a066c - -commit c24a4ec6fb6202d1f6a576f211b99fbe9c9906ef -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 23 06:49:22 2013 -0700 - - Cmake refactoring + A lot of error checking cruft has accumulated over the years + in the various linear solvers. This change makes the error reporting + more robust and consistent across the various solvers. - 1. Use CMake FindLAPACK and FindBLAS Modules. - 2. Remove SEARCH_HEADERS and SEARCH_LIBS and replace them with - CMAKE variables. 
This leads to simplification of the FIND_LIBRARY - and FIND_PATH calls. - 3. Make miniglog a fallback when glog is not present and the - user indicates MINIGLOG=OFF. - 4. Add time.h to miniglog. - 5. Remove shared library building. + Preconditioners are not covered by this change and will be the + subject of a future change. - Change-Id: I8a97156d3d7cf645fbbfe8e571761bc16c89f43f + Change-Id: Ibeb2572a1e67758953dde8d12e3abc6d1df9052d -commit 48e9cd31db0bf7223beb83cdc90e3cd2b5aad054 +commit 5794d41be2d8d6a67dcdfe607e66050f0ac04c55 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 21 10:55:16 2013 -0700 +Date: Mon Nov 25 13:37:02 2013 -0800 - Add a test name + Remove overzealous checks in Summary::FullReport. + + Thanks to sebi.koch@gmail.com for reporting this. - Change-Id: I06dfc9cad2c54ef6078342766577eab92645283f + Change-Id: I1ba9b375e5cf66639e292ba37b34a90446f13162 -commit 126dfbe27df9c5b9f41cf7cc92b75c1219518283 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Aug 20 22:34:34 2013 -0700 +commit 40ef90304ac200bb948549e8e3748e487d27dc53 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Mon Nov 25 16:36:40 2013 +0000 - Fix how Ceres calls CAMD. + Adding VLOG output to line search. - CAMD requires that the id of the largest numbered elimination - group be less than the number of columns in the matrix. + - Previously line search was sparse in terms of debug orientated VLOG + output which made debugging failure cases difficult. - This patch ensures that this is the case. Without this, - in certain cases its possible for CAMD to silently fail - while doing out of bounds access and then causing Ceres to fail. - - Also add some logging about the problem size before and after - the reduced program has been created. 
- - Change-Id: I0ea3c6572a7c29cbbf09afec9ba5b4f4d4b21a9b + Change-Id: Idfabf74d2b3f7b8256f79dff8c6b7fcdc2fcf4d3 -commit 69af5d8b4d7c48b2efa3c61e51c86cfa1b380b8a +commit 1284a5141426597f3ca1e29ae8548c9b4c43c9c1 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Aug 20 13:58:59 2013 -0700 +Date: Sun Nov 24 15:09:43 2013 -0800 - Add comments to trust_region_minimizer.cc. + Use explicit formula to solve quadratic polynomials. + + polynomial.cc implements a companion matrix base method for solving + polynomials. This is both expensive and numerically sensitive. - trust_region_minimizer.cc now contains a comment that explains - the reasoning behind he inner iteration step acceptance change. + This change adds a quadratic equation solver. Instead of using the + usual quadratic formula, it uses the formula suggested by BKP Horn + for improved numerical stability. - Change-Id: I4eaa69d6bab92c543bba3f119c09f44625d393bd + Change-Id: I476933ce010d81db992f1c580d2fb23a4457eb3e -commit e45db9d05aaa26b1ddffa44c9190a1018aa2655f +commit a9334d67d7973c0f56e65f12ae897dd53504ef0d Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 19 23:13:29 2013 -0700 +Date: Wed Nov 20 10:12:23 2013 -0800 - Improve inner iteration step acceptance. - - Normally, in a trust region algorithm the quality of a trust region step - is measured by the ratio - - nonlinear_cost_change - r = --------------------- - model_cost_change - - All the change in the nonlinear objective is due to the trust region step - so this ratio is a good measure of the quality of the trust region radius. - - However, when inner iterations are being used, nonlinear_cost_change - includes the contribution of the inner iterations and its not fair to - credit it all to the trust region algorithm. 
So we change the ratio to be - - nonlinear_cost_change - r = ------------------------------------------------ - (model_cost_change + inner_iteration_cost_change) - - In most cases this is fine, but it can be the case that the - change in solution quality due to inner iterations is so large - and the trust region step is so bad, that this ratio can become - quite small. + Fix constant parameter handling in inner iterations. - This can cause the trust region loop to reject this step. + There was a bug in the way RemoveFixedBlocksFromProgram was working. + It only removed the constant parameter blocks from the + linear_solver_ordering, it was not even aware of the + inner_iteration_ordering. - This change, fixes this problem by looking at the inner_iteration_cost_change - explicitly and accepting a step if the inner iterations led to a net - decrease in the objective function value. + This change fixes this bug. The code for RemoveFixedBlocksFromProgram + is also cleaned up and made more readable and the test have been updated. - Along the way it also fixes the way model_cost_change is computed. - Changing to a more numerically robust way of computing it. + Thanks to Mikael Persson for reporting this. - The last and final change is to ensure that inner iterations and the - non-monotonic version of the trust region algorithm interact correctly. - - This addresses part 2 of - - https://code.google.com/p/ceres-solver/issues/detail?id=115 - - As an illustration of the change. - - Before this change + Change-Id: I454fa89f9b6f4f6320b02d5235e6f322cc15ff51 + +commit 331ff090dcae7096cea50144047b71cab2d3e819 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Mon Nov 25 13:44:53 2013 +0000 + + Downgrading log status of BFGS secant condition messages. 
- [master] build: ./bin/bundle_adjuster --input ~/Downloads/problem-245-198739-pre.txt -num_iterations 10 -translation_sigma 0.01 -rotation_sigma 0.001 -point_sigma 0.1 -inner_iterations -num_threads 4 - 0: f: 7.731660e+15 d: 0.00e+00 g: 3.51e+12 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 5.87e-01 tt: 9.37e+00 - 1: f: 7.731660e+15 d: 7.73e+15 g: 0.00e+00 h: 1.20e+10 rho: 2.43e-11 mu: 5.00e+03 li: 1 it: 1.41e+01 tt: 2.35e+01 - 2: f: 7.731660e+15 d: 7.73e+15 g: 0.00e+00 h: 1.25e+10 rho: 1.70e-07 mu: 1.25e+03 li: 1 it: 1.86e+01 tt: 4.22e+01 - 3: f: 7.731660e+15 d:-2.39e+40 g: 0.00e+00 h: 3.53e+10 rho:-2.63e-13 mu: 1.56e+02 li: 1 it: 3.35e+01 tt: 7.57e+01 - 4: f: 7.731660e+15 d:-1.66e+39 g: 0.00e+00 h: 1.21e+11 rho:-6.58e-15 mu: 9.77e+00 li: 1 it: 3.86e+01 tt: 1.14e+02 - 5: f: 7.731660e+15 d:-3.57e+55 g: 0.00e+00 h: 5.00e+12 rho:-1.89e-14 mu: 3.05e-01 li: 1 it: 3.84e+01 tt: 1.53e+02 - 6: f: 7.731660e+15 d:-2.26e+35 g: 0.00e+00 h: 3.82e+12 rho:-1.77e-20 mu: 4.77e-03 li: 1 it: 3.45e+01 tt: 1.87e+02 - 7: f: 7.731660e+15 d:-5.31e+19 g: 0.00e+00 h: 1.22e+11 rho:-9.96e-21 mu: 3.73e-05 li: 1 it: 2.77e+01 tt: 2.15e+02 - 8: f: 1.784990e+08 d: 7.73e+15 g: 4.13e+07 h: 1.20e+10 rho: 1.00e+00 mu: 1.12e-04 li: 1 it: 1.13e+01 tt: 2.26e+02 - 9: f: 1.524025e+08 d: 2.61e+07 g: 5.81e+10 h: 2.41e+08 rho: 1.00e+00 mu: 3.35e-04 li: 1 it: 1.13e+01 tt: 2.37e+02 - 10: f: 1.488524e+08 d: 3.55e+06 g: 2.79e+09 h: 5.01e+08 rho: 1.00e+00 mu: 1.01e-03 li: 1 it: 1.09e+01 tt: 2.48e+02 + - These messages were originally VLOG(2) and were mistakenly upgraded to + WARNINGs when the tolerances were reduced. - After this change + Change-Id: I89dee666a09bc82cfa89b793dc0907268662f95e + +commit 9697a08a2bf29531671526b49df73bfbc0d7d237 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Sat Nov 23 10:03:37 2013 +0000 + + Defining CERES_FOUND in addition to Ceres_FOUND in CeresConfig. 
- [inner] build: ./bin/bundle_adjuster --input ~/Downloads/problem-245-198739-pre.txt -num_iterations 10 -translation_sigma 0.01 -rotation_sigma 0.001 -point_sigma 0.1 -inner_iterations -num_threads 4 - 0: f: 7.731660e+15 d: 0.00e+00 g: 3.51e+12 h: 0.00e+00 rho: 0.00e+00 mu: 1.00e+04 li: 0 it: 5.66e-01 tt: 9.31e+00 - 1: f: 5.941477e+09 d: 7.73e+15 g: 1.20e+18 h: 1.20e+10 rho: 2.43e-11 mu: 5.00e+03 li: 1 it: 1.38e+01 tt: 2.32e+01 - 2: f: 3.341986e+08 d: 5.61e+09 g: 1.42e+14 h: 1.37e+09 rho: 9.38e-08 mu: 2.50e+03 li: 1 it: 1.30e+01 tt: 3.61e+01 - 3: f: 3.241492e+08 d: 1.00e+07 g: 3.64e+13 h: 8.26e+08 rho: 6.12e-08 mu: 1.25e+03 li: 1 it: 1.15e+01 tt: 4.77e+01 - 4: f: 3.152280e+08 d: 8.92e+06 g: 2.02e+13 h: 2.95e+08 rho: 1.56e-05 mu: 6.25e+02 li: 1 it: 1.11e+01 tt: 5.88e+01 - 5: f: 3.078535e+08 d: 7.37e+06 g: 9.72e+12 h: 4.57e+08 rho: 6.55e-09 mu: 3.13e+02 li: 1 it: 1.16e+01 tt: 7.04e+01 - 6: f: 3.025353e+08 d: 5.32e+06 g: 1.33e+13 h: 2.14e+08 rho: 7.21e-01 mu: 3.42e+02 li: 1 it: 1.14e+01 tt: 8.18e+01 - 7: f: 2.908298e+08 d: 1.17e+07 g: 5.97e+12 h: 7.25e+08 rho: 5.73e-01 mu: 3.43e+02 li: 1 it: 1.08e+01 tt: 9.26e+01 - 8: f: 2.803927e+08 d: 1.04e+07 g: 1.07e+12 h: 9.72e+07 rho: 5.27e-01 mu: 3.43e+02 li: 1 it: 1.03e+01 tt: 1.03e+02 - 9: f: 2.767074e+08 d: 3.69e+06 g: 2.10e+11 h: 7.35e+07 rho: 7.37e-01 mu: 3.84e+02 li: 1 it: 1.03e+01 tt: 1.13e+02 - 10: f: 2.744282e+08 d: 2.28e+06 g: 2.17e+11 h: 1.23e+08 rho: 3.11e-01 mu: 3.64e+02 li: 1 it: 9.61e+00 tt: 1.23e+02 + - Previously we relied on FindPackage() to define Ceres_FOUND when + find_package(Ceres) was called. + - This is fine, but users might legitimately expect the variable to be + CERES_FOUND given the form of CERES_INCLUDE_DIRS/LIBRARIES. + - As there is an inconsistency in the CMake recommended names when + FindPackage() is called in Module vs Config form, we now explicltly + define both. 
- Change-Id: I7c3b132f7ce62719795bfa489ec2276d0455cc97 + Change-Id: I54bce9aa112b684d26b60a9ae4d11eb7925a6ee5 -commit 3e6ef29be6f3cd672a73cefb52838832a49e5427 +commit 66e15b41d80b155f333f099a0278d50312cdaa15 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Aug 20 09:53:54 2013 -0700 +Date: Fri Nov 22 07:59:23 2013 -0800 - Update version history to reflect API changes + Lint cleanup from Jim Roseborough. - Change-Id: I5ce744d72b991abba17b5cf9c6a1e1f158693151 + Change-Id: I6ddbf5c3d66595d27f7967a309768e5f5dd7e1fd -commit 1918453aeeae629be1f02eb333e91c4f728ace12 +commit 79bde35f29291cf464b59f3dc2dd9f1fa88776a9 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 19 14:15:48 2013 -0700 +Date: Thu Nov 21 21:33:51 2013 -0800 - Fix build breakage on old SuiteSparse. - - Errant semi colon is to blame. + SuiteSparse errors do not cause a fatal crash. - Thanks to Timothy Langlois for reporting this. + 1. Move LinearSolverTerminationType to ceres::internal. + 2. Add FATAL_ERROR as a new enum to LinearSolverTerminationType. + 3. Pipe SuiteSparse errors via a LinearSolverTerminationType so + to distinguish between fatal and non-fatal errors. + 4. Update levenberg marquardt and dogleg strategies to deal + with FATAL_ERROR. + 5. Update trust_region_minimizer to terminate when FATAL_ERROR + is encountered. + 6. Remove SuiteSparse::SolveCholesky as it screws up the error + handling. + 7. Fix all clients calling SuiteSparse to handle the result of + SuiteSparse::Cholesky correctly. + 8. Remove fatal failures in SuiteSparse when symbolic factorization + fails. + 9. Fix all clients of SuiteSparse to deal with null symbolic factors. - Change-Id: I57bb1cd69d78ab1897ead3627539a0da11b97455 - -commit 8f33332c598d8209df73eb1c729e0abe2c890468 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sun Aug 18 23:25:00 2013 -0700 - - Documentation update for 1.7.0rc2 + This is a temporary fix to deal with some production problems. 
A more + extensive cleanup and testing regime will be put in place in a + subsequent CL. - Change-Id: I6b0c19bed57b51a0f6591c60a4ae0d849c62451b + Change-Id: I1f60d539799dd95db7ecc340911e261fa4824f92 -commit ad2819a1afa94990022999a96eb158add68419e0 +commit a674e0f8534ea6948f70a72fe9718e07b3d039ff Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sat Aug 17 23:44:09 2013 -0700 +Date: Thu Nov 21 22:12:15 2013 -0800 - Fix breakage on old versions of SuiteSparse. + Fix corrector_test.cc. - Thanks to Fisher Yu for reporting this. + Fix two death tests dealing with the sign of the gradient. - Change-Id: Iefa89816cbb60e3512338a7c2a65655c017877ac + Change-Id: Ic91d54a64cc509307c94fce6d1fca083078936e2 -commit 880cba0939b2caa2641a5752373ffd47b64edd0f -Author: Petter Strandmark <petter.strandmark@gmail.com> -Date: Fri Aug 16 20:05:30 2013 +0200 +commit a8006af3110e98d64fb369e958fc00ec88d771a3 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Wed Nov 20 19:56:06 2013 +0000 - Fix warning C4373 in Visual Studio + Adding threads libraries to exported dependencies if using OpenMP. - The warning occurs because an overridden function added a const - to one argument. + - Previously we were only adding the flags to the link flags for the + Ceres project, which resulted in them not being exported. Thus + projects importing Ceres (if using OpenMP) would have to manually + specify them in addition to CERES_LIBRARIES. - Change-Id: Idd24f7c6ab60064747104bfc75ae9bf112f61b3e + Change-Id: If0354cc07e84dbebfc870a8862e1a8ca64659791 -commit d61b68aaac3fa51b8fca8b1a268e83b0d5da01ea +commit 6c0d96424e2c27326757936a3738f9efc37c6c24 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 16 17:02:56 2013 -0700 +Date: Wed Nov 20 11:52:01 2013 -0800 - Lint cleanups from William Rucklidge + Minor documentation fix. - Change-Id: Ia4756ef97e65837d55838ee0b30806a234565bfd + Thanks to Satya Mallick. 
+ + Change-Id: I556f1c141bf16739d54450351b0f29fd4ea40014 -commit b22d063075ec545a59a25abd5d83e4642dc329c2 +commit 7747bb0e6b0e54366933ed75c1bcafe6a1109c3d Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 15 22:55:23 2013 -0700 +Date: Wed Nov 20 11:29:22 2013 -0800 - Reduce memory usage in covariance estimation. - - When using the SPARSE_QR algorithm, now a Q-less - factorization is used. This results in significantly - less memory usage. + Minor corrections to the documentation. - The inversion of the semi-normal equations is now - threaded using openmp. Indeed if one has SuiteSparse - compiled with TBB, then both the factorization - and the inversion are completely threaded. + Thanks to Satya Mallick for reporting these. - Change-Id: Ia07591e48e7958d427ef91ff9e67662f6e982c21 + Change-Id: Ia52e08a7e21d5247dc475cfbf10bf57265aa118f -commit f258e4624f5bd86105ea28b9b92dd70a3f4a3a44 -Author: Sergey Sharybin <sergey.vfx@gmail.com> -Date: Thu Aug 15 14:50:08 2013 +0600 +commit 3fca2c4b2fae9abcaa9611f2bd3885ce6b11963b +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Mon Nov 18 10:26:49 2013 +0000 - Move most of suitesparse/cxsparse ifdef code to their headers - - Main purpose of this is to make implementation files free from - endless ifdef blocks every time this libraries are needed to be - included. This would hopefully prevent compilation errors in - the future caused by missing ifdef around header include. + Decreasing update threshold for BFGS as per L-BFGS. - This also includes some stubs added to suitesparse/cxsparse - headers to make code even more free from ifdefs. + - Improves performance of BFGS on NIST, as per L-BFGS. + - Adding explanation of origin and purpose of Secant condition + tolerance check for Hessian update in (L)BFGS. 
- Change-Id: Ic8554e7df31d8c4751583fe004b99e71b3c9087b + Change-Id: If57b9957d31d8629c772c19a069e1e56e727b350 -commit dc60d9c4519b5eb5e2cff8741680fecf4d6eb2c5 +commit 54fcbf893852272ba2158d6a56572a2eb3ccc41f Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 15 10:13:45 2013 -0700 +Date: Tue Nov 19 10:12:05 2013 -0800 - Fix broken android build. + Relax the requirements on loss functiond derivatives. + + We now require that the first derivative of the loss function + be positive only if the second derivative is non-zero. This is + because when the second derivative is non-positive, we do not use + the second order correction suggested by BANS and instead use + a simpler first order strategy which does not use a division by + the gradient of the loss function. - Change-Id: I6f27e3ef9bd678f7393c9f573491064978e9c368 + Change-Id: I3d65713f152611998e196ff389a7081acfdfd8c1 -commit 367b65e17a541a9f29b9ea63682fe6f6b5b54074 +commit db98425b94c9eff9b125bf4a854545162e8c1aec Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 9 10:35:37 2013 -0700 +Date: Fri Nov 15 14:14:09 2013 -0800 - Multiple dense linear algebra backends. + Small bugfix to logging.h from Scott Ettinger. - 1. When a LAPACK implementation is present, then - DENSE_QR, DENSE_NORMAL_CHOLESKY and DENSE_SCHUR - can use it for doing dense linear algebra operations. - - 2. The user can switch dense linear algebra libraries - by setting Solver::Options::dense_linear_algebra_library_type. - - 3. Solver::Options::sparse_linear_algebra_library is now - Solver::Options::sparse_linear_algebra_library_type to be consistent - with all the other enums in Solver::Options. - - 4. Updated documentation as well as Solver::Summary::FullReport - to reflect these changes. 
- - Change-Id: I5ab930bc15e90906b648bc399b551e6bd5d6498f + Change-Id: Ie6d51e7883adf36c6fc7a78ff95afab6a78e488b -commit 080d1d04bdf722c3f602833c4c07ac1c5d26fcc0 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 12 16:28:37 2013 -0700 +commit 4d0e626b55f36ab8f44a4acc8157b85cfecd4673 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Fri Nov 15 13:53:44 2013 +0000 - Use more performant, less conservative Eigen solvers. + Fixing gflags HINTS variable names (adding missing “_DIR”). - colPivHouseholderQR -> householderQR - ldlt -> llt. + - The HINTS variables for gflags were incorrectly used as + GFLAGS_[INCLUDE/LIBRARY]_HINTS when they should have been + GFLAGS_[INCLUDE/LIBRARY]_DIR_HINTS as per the docs. + - Also removing a completed TODO in the main CMakeLists. + - Updating method of extracting current directory in CeresConfig.cmake + to avoid use of CMAKE_CURRENT_LIST_DIR, which was not present in + CMake =< v2.8.3. - The resulting performance differences are significant enough - to justify switching. + Change-Id: I42ae696e3b785febe48688d912f0f343e8947cb0 + +commit bf4c1b76e4926c738fc805e9ff4be0ed584d9eee +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Thu Nov 14 21:27:20 2013 +0000 + + Decreasing threshold at which L-BFGS Hessian is updated. - LAPACK's dgels routine used for solving linear least squares - problems does not use pivoting either. + - Decreasing threshold at which L-BFGS Hessian is updated from 1e-10 + to 1e-14 results in a very significant improvement in NIST scores + (43 -> 53 for CUBIC). + - Adding comment in FindPolynomialRoots() explaining why behaviour + is correct. - Similarly, we are not actually using the fact that the matrix - being factorized can be indefinite when using LDLT factorization, so - its not clear that the performance hit is worth it. 
+ Change-Id: If668e087e7a86d29659aa74e8528b192b604c841 + +commit 7124c3474cd201134c3a3350b46aca468f1edafa +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Thu Nov 7 16:10:02 2013 +0000 + + Fixes for some line search bugs & corner cases. + + - Increase precision of numeric values output in error messages to + allow for easier debugging. + - Ensure termination after Wolfe search bracketing phase if bracket + width has been shrunk to below tolerance. + - Cleaned up return value for BracketingPhase(), now false iff + optimisation should stop, true otherwise. + - Fix bug whereby we would mark a step size as satisfying the Wolfe + conditions when it did not due to numerical issues in the cost + function. + - Adding explanation of a subtlety in which a zoom could still be + acceptably invoked with bracket_low.f > bracket_high.f. + - Replacing hard check of a pre-condition of ZoomPhase() with a + conditional return if not satisfied to address issue whereby a + bracket could be incorrectly identified due to inconsistent values + & gradients returned from the cost function. + - Adding missing check for step size validity in line search minimizer. + - Adding ToDebugString() for FunctionSample. + + Change-Id: Iad98e635749877f80c079ebad126bf022d82232d + +commit 54fc9423673886ac9ed3fe329a80f07544aeea70 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Thu Nov 14 11:42:00 2013 +0000 + + Removing incorrect specialisation of install dirs on Windows. - These two changes result in Eigen being able to use blocking - algorithms, which for Cholesky factorization, brings the performance - closer to hardware optimized LAPACK. Similarly for dense QR - factorization, on intel there is a 2x speedup. + - Previously on Windows the leaf include & lib install directories + passed to CeresConfig.cmake.in when configured where capitalised on + Windows. 
+ - This capitalisation was incorrect, as the actual paths used are + specified in the install() statements and are always in the standard + lower-case form. + - This likely did not cause any issues previously as although NTFS is + case sensitive, the Win32 API is not, and most applications access + files through the Win32 API, and are thus not case-sensitive. - Change-Id: I4459ee0fc8eb87d58e2b299dfaa9e656d539dc5e + Change-Id: I335b6e2d10a1c64f320c2a1a68eeda1b22344e73 -commit fb465a03b83fad2dceaea091ee3763c3dc6e83d2 -Author: Sergey Sharybin <sergey.vfx@gmail.com> -Date: Mon Aug 5 22:35:14 2013 -0700 +commit fcbbb11e37386097b1427dc3aa89f264d6951ded +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Wed Nov 13 22:22:30 2013 +0000 - Fix compilation error caused by missing suitesparse headers - - Covariance implementation file used to unconditionally include - SuiteSparseQR.hpp which caused compilation error in cases you - don't have SuiteSuite installed to the system + Ensure build paths for dependencies are searched in FindPackage(Ceres) - Moved the include to #ifdef block. + - Append to hint locations used by FindPackage scripts for public + dependencies (glog & Eigen) the locations of the dependencies when + Ceres was built. + - This means that the user should not have to supply them again when + using find_package(Ceres) even if they are installed in a + non-standard location. - Change-Id: I3a52c0f81711b2b70ae625fe80b758ecb0817cc6 + Change-Id: I9550de91025ba47f01f1ea3c3fefe80fe38d14ff -commit 2460bf0733b4070e52d68a4a85046c1b20913e2c -Author: Steven Lovegrove <stevenlovegrove@gmail.com> -Date: Sun Jul 21 13:13:11 2013 -0400 +commit 7899e45d378f589a67ad8e042bf6a7cb7e15df00 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Wed Nov 13 21:08:27 2013 +0000 - Check GCC Version before adding -fast compiler option on OSX. - - -fast compiler option is only supported using Apple's GCC packaged with XCode. - Other GCC versions will fail when this flag is enabled. 
This commit checks the - GCC version on OSX and only enables this flag when < 4.3. Apple's GCC is - currently 4.2.1 and a user is unlikely to install a non-apple version this old - on OSX. + Fixing a documentation typo, DIRS -> DIR in HINTS variables. - Change-Id: Ifca9149625c065cd16559d7e30c218a322cf79aa + Change-Id: I42b75a5e0b8a451c3a43ab29d0c14856e4b86ab8 -commit c5bcfc01af37b4f667be075c3c58dc024f3c7f06 +commit 1a041c35b780e60c3b497eb096b72ad20f47960e Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Jul 19 15:50:27 2013 -0700 +Date: Tue Nov 12 14:17:52 2013 -0800 - Lint fixes from Jim Roseborough. + Update to 1.8.0. - Change-Id: If93e1972041b36410225a509e3c8c7c818f92124 + Change-Id: Id42e594f03e3575d06e18c1ef66df64f43d86839 -commit 16924168ce0b3e29d9b1e16a08d2b3d2930e017a +commit 36b26139296060511718b3ef0da03a52706db481 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Thu Nov 7 16:57:36 2013 +0000 + + Fix ordering of ParseCommandLineFlags() & InitGoogleTest() for Windows. + + - On Windows gtest passes additional non-gflags command line flags + for death-tests, to avoid gflags invoking an error for these flags + InitGoogleTest() must be called before ParseCommandLineFlags() to + handle and remove them before gflags parses the remaining flags. + + Change-Id: I0c705ecd3aa029b70a2589b592e6a2c192745c0e + +commit 8c155d51fab099ee7bf64f4bdbfeda82881925a5 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Jul 18 12:52:35 2013 -0700 +Date: Fri Nov 8 08:04:44 2013 -0800 - Update version from 1.6.0 -> 1.7.0rc1. + Speed up the application of robust loss functions. + + Since we added special handling for the case for rho[2] < 0, + the bulk of CorrectJacobian is pointless in the common case. + So add a simple one dimensional loop which rescales the Jacobian. + This speeds up this method immensely. + + The robustification of a Jacobian gets speeded up by > 50%. 
- Change-Id: I420a8907142bffad0e3aa6c7196541ca2309c099 + Change-Id: I97c4e897ccbb5521c053e1fb931c5d0d32f542c7 -commit 588228bdadcc0a1ffc55442a0672998241e53e09 +commit 58792dc8ee0e4b56331f33f753f1b1932c5c2960 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Jul 18 11:29:19 2013 -0700 +Date: Wed Nov 6 09:42:46 2013 -0800 - Add the ability to turn shared library compilation on and off + Update to 1.8.0rc2. - Change-Id: Ib9eacfbc894bb2b66aafff3b930c63e2ad8a555e + Change-Id: Ifbf5312377bf1791a29aefd3edc3a765999c5824 -commit 6d93450cb563dc992cbc29ca069c886bf24bb458 +commit af04d7f18740faf452e9171af530aa1bdead44bb Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Jul 18 11:08:07 2013 -0700 +Date: Tue Nov 5 13:47:30 2013 -0800 - Fix build breakage on old versions of SuiteSparse. + Remove DCHECK_GE checks from fixed_array.h - SuiteSparse_long is only defined in recent versions of SuiteSparse - as the index variable type for large matrices. In older versions - UF_long was used. Ubuntu still ships with an older version of - SuiteSparse, so an ifdef is needed to fix the build. + This triggers -Wtype-limits warnings on comparisons + which are always true, since the test being done is + n >= 0, where n is of type size_t, which is always + true. - This patch has been tested on mac and on linux with older and - newer versions of SuiteSparse. + This causes problems when compiling Ceres on linux + with miniglog. - Change-Id: I4ada86d7973784a79bde4afec13ce3ca4e8dc225 + Change-Id: Ia1d1d1483e03469c71fde029b62ca6d84e9b27e0 -commit 42be9cafe6203745fb09d611773305433c117396 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Jul 18 08:02:08 2013 -0700 +commit b5be6b9c065a02158337ee7eacfdb8be811dec7f +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Tue Nov 5 13:10:27 2013 +0000 - Update documentation for Covariance + Cleaning up messages output when SuiteSparse is not found. 
+ + - Automatically generated failure message now provides more + information as to which sub-modules are missing. - Change-Id: Ia4a7347ef8267b7107698d85fcbfc986111958dc + Change-Id: I6eed94af49263540b8f87917b75c41b8f49658a0 -commit 5a974716e111e4aa87a4840902b957060bd644fc +commit 9ba0b352a282f08b1b6368a5690434407d7c81af Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Jun 7 22:38:30 2013 -0700 +Date: Tue Nov 5 13:04:56 2013 -0800 - Covariance estimation using SuiteSparseQR. + Lint and other cleanups from William Rucklidge - Change-Id: I70d1686e3288fdde5f9723e832e15ffb857d6d85 + Change-Id: I7fb23c2db85f0f121204560b79f1966f3d584431 -commit 719889b8b7a3ef6712516d169a4ce3a33d272fda -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Jul 17 11:31:08 2013 -0700 +commit 69bd65ff4368ce2841519f00ff48c5284c1743a3 +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Mon Nov 4 23:01:14 2013 +0000 - Minor fixes + Downgrading warning messages when optional deps are not found. - 1. Typo in c_api.h - 2. The stream operator for FunctionSample is now in the ceres::internal namespace. + - Now when find_package() is called for a dependency without the + REQUIRED or QUIET qualifiers, we emit no priority (above STATUS, but + below WARNING) messages and continue. - Change-Id: Id927a7a49c47d8903505535749ecca78cd2e83b3 + Change-Id: I8cdeda7a8f6c91d45fb7f24fb366244c6c9b66e1 -commit 12cc164f79bb8a31e0eb3946e6f4898ac3c21c55 +commit b0a8731fcdde31e6c37a54e8c1e1c00f853c0d5c Author: Alex Stewart <alexs.mac@gmail.com> -Date: Wed Jul 17 12:08:33 2013 +0100 +Date: Mon Nov 4 20:32:40 2013 +0000 - Minor fix to reject a line search config with negative L-BFGS rank. + Removing duplicate SuiteSparse found message. + + - Also flipping ordering of variables in + find_package_handle_standard_args() so that the automatically + generated message prints the include directories, not TRUE. 
- Change-Id: Iad4c678efe574ef6696c34bd2a0ce61a504c7344 + Change-Id: I2bf62eacd5c96f27152e9542b9a74651243a584e -commit 9aa0e3cf7243a2e837bbfa22d4677010463f6a4e +commit 6fed9fe0de9d1737095c24e19ad8df9735b7e572 Author: Alex Stewart <alexs.mac@gmail.com> -Date: Fri Jul 5 20:22:37 2013 +0100 +Date: Mon Nov 4 18:33:05 2013 +0000 - Adding Wolfe line search algorithm and full BFGS search direction options. + Fix FindPackage scripts to emit warnings, not errors if not found. - Change-Id: I9d3fb117805bdfa5bc33613368f45ae8f10e0d79 + - Previously we used message priority: SEND_ERROR when a package was + not found and find_package() was called without QUIET or REQUIRED, + which emits an error message, and prevents generation, but continues + configuration. + - The fact SEND_ERROR induces an error message was confusing for users + as it implies that something bad has happened and they cannot + continue, when in fact we were disabling the option in question + and were thus able to continue, all they had to do was re-configure. + + - This commit also reorders the search lists for includes/libraries + so that we always search user installed locations (e.g. /usr/local) + before system installed locations. Thus we will now always prefer + a user install to a system install if both are available, which is + likely to be the users desired intention. + + Change-Id: Ide84919f27d3373f31282f70c685720cd77a6723 -commit 51c772c843ccecca006c706a9f64b8cbaf5416f9 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Jul 16 16:42:52 2013 -0700 +commit cada337149cbc4b9e6f2bae14593b87ecf8f1a5c +Author: Alex Stewart <alexs.mac@gmail.com> +Date: Mon Nov 4 18:08:24 2013 +0000 - householderQR -> colPivHouseholderQR. + Fixing CXSparse include directories statement. + + - Reported as issue #135: + https://code.google.com/p/ceres-solver/issues/detail?id=135. 
+ - CXSPARSE_INCLUDE was the legacy include directory variable, since + the buildsystem updates we now use the CMake standard: + CXSPARSE_INCLUDE_DIRS. - Change-Id: Ida623e853711f665e7a9d3b140a93e861591f96d + Change-Id: Iab0c2de14d524bb9e9da230bc574b5e6f09e1f31 -commit c2c6411d16db95cde0cc3a7a80bac87266234bb7 +commit c71085ed326239dc2d318d848ded9a99e4e3c107 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sat Jul 13 18:47:49 2013 -0700 +Date: Thu Oct 31 13:56:38 2013 -0700 - DENSE_QR solver now uses non pivoting QR decomposition. + Update to 1.8.0rc1. - Change-Id: I9099221448ccf71d0de20b9f652405009a6c24c5 + Change-Id: Iaa10fd5a20be2ef84aca0119306c44669d87cc5d -commit 3c2ad4018c8d2271434b9ff2bd05437b96f4927c -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Jul 15 08:09:38 2013 -0700 +commit 88a703f44ff0d6d5d4601584fa77f5ce853025f4 +Author: Petter Strandmark <petter.strandmark@gmail.com> +Date: Thu Oct 31 21:13:48 2013 +0100 - Speed up Automatic differentiation by 7%. + Fix compilation in Visual C++ 2013. - 1. Templatize Make1stOrderPerturbation. - 2. Convert a hard CHECK into DCHECK. + I had to fix the following things to make Ceres compile in 2013: + * Not link to 'm' (GNU math library). + * Excplicitly convert an std::ostream to bool. + * Include <algorithm> for std::max. - Change-Id: I02cd67f2b87bc5722f1a090057d55f23e98d2c3b + Change-Id: I3ff65413baf8711364360d46dd71fd553fa63e72 -commit 0a07fbf8731adcdce98c8e73127d379199341132 +commit f06b9face5bfbbc2b338aa2460bee2298a3865c5 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Jul 10 11:57:35 2013 -0700 +Date: Sun Oct 27 21:38:13 2013 -0700 - Use ATLAS as the example BLAS in building.rst + Add support for multiple visibility clustering algorithms. + + The original visibility based preconditioning paper and + implementation only used the canonical views algorithm. + + This algorithm for large dense graphs can be particularly + expensive. 
As its worst case complexity is cubic in size + of the graph. + + Further, for many uses the SCHUR_JACOBI preconditioner + was both effective enough while being cheap. It however + suffers from a fatal flaw. If the camera parameter blocks + are split between two or more parameter blocks, e.g, + extrinsics and intrinsics. The preconditioner because + it is block diagonal will not capture the interactions + between them. - OpenBLAS has subtle issues releated to threading. It - conflicts with the use of threads in the other parts of - the application. + Using CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL will fix + this problem but as mentioned above this can be quite + expensive depending on the problem. - Careful users can still use it by disabling threads via - an environment variable, but by default we want to use - a BLAS/LAPACK that does not suffer from these problems. + This change extends the visibility based preconditioner + to allow for multiple clustering algorithms. And adds + a simple thresholded single linkage clustering algorithm + which allows you to construct versions of CLUSTER_JACOBI + and CLUSTER_TRIDIAGONAL preconditioners that are cheap + to construct and are more effective than SCHUR_JACOBI. - Change-Id: I8c1c0ed0b526453564c5f9ea69b646fac32fe027 + Currently the constants controlling the threshold above + which edges are considered in the single linkage algorithm + are not exposed. This would be done in a future change. + + Change-Id: I7ddc36790943f24b19c7f08b10694ae9a822f5c9 -commit aee5597acf9c2c064977e937f52689254ebd1a39 +commit 5a161a2b9653489ee9040f054b24df971e6b9bbc Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Jul 9 23:30:07 2013 -0700 +Date: Tue Oct 29 22:08:15 2013 -0700 - Minor fix to curve_fitting.c + Template specializations for PartitionedMatrixView. + + This speeds up the matrix vector products in the + IterativeSchurSolver by upto 40%. 
- Change-Id: Ib3669a5c4c73178b088dc1e80141f844f807b179 + Change-Id: Ib5e8d77c7269cf5ffdd2d161893734bb6d38215d -commit bd82f82c3afeb3c57fa03f61fdbb0388f9ed8b02 +commit e5ce1170bc9993085c81a788e16eb48f1b2fdb97 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Jul 9 23:19:09 2013 -0700 +Date: Tue Oct 29 07:40:51 2013 -0700 - More CMake file cleanup. + Minor bug fix to autodiff.h - Reduce the verbosity of the Cmake file. All the "Checking for" - messages have been removed since we log both success and failures. + Change-Id: Ib41050a2f2ba1898c71ff19d74f8eca2496212c0 + +commit 9e9a7d6ca0e75727293f94452d602f02b56d10ba +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Tue Oct 29 06:54:44 2013 -0700 + + Documentation update. - Further, UFConfig is only searched for if SuiteSparse_config cannot - be found. + Add documentation for the new methods added to Problem. + Fix a bunch of ReST bugs. - Change-Id: I601a6ffc808e566ff78ce232c86519ef413f0b33 + Change-Id: I8a79a84040cfa8a679cc5355baccbe6d69bc9e70 -commit 9f4552b6475616df7e60681e60cd5afebb45a4ea +commit c6bafdd02c33ec0ccb705578d83e4f601ddeedea Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Jul 9 00:10:08 2013 -0700 +Date: Mon Oct 28 19:38:08 2013 -0700 - Stop CMake from trying to detect OpenMP when using Clang. + Comments from Jim Roseborough. + + 1. Fix the tolerance on the rotation matrix conversion test. + 2. Fix some out of date comments. - Change-Id: Ie14c6466475b401ba35dbf13adc2e8701999c969 + Change-Id: I65e80da1f96d7b4d9ac0630ad8cb708c41739840 -commit 6e8bd501b25dc308df7b1a5eed16edfd8442002e +commit fda69b52130955479591e8f03f97b1cfceca369f Author: Keir Mierle <mierle@gmail.com> -Date: Thu May 23 01:49:08 2013 -0700 +Date: Thu Oct 10 00:25:24 2013 -0700 - Extend the C API to support loss functions + Export the structure of a problem to the public API + + This adds three new public methods to ceres::Problem: - This extends the C API to support loss functions. 
Both - user-supplied cost functions as well as the stock Ceres cost - functions (Cauchy, Huber, etc) are supported. In addition, this - adds a simple unit test for the C API. + Problem::GetResidualBlocks() + Problem::GetParameterBlocksForResidualBlock() + Problem::GetResidualBlocksForParameterBlock() - Supporting loss functions required changing the signature of the - ceres_add_residual_block() function to also take a thunk for the - loss function. + These permit access to the underlying graph structure of the problem. - Change-Id: Iefa58cf709adbb8f24588e5eb6aed9aef46b6d73 + Change-Id: I55a4c7f0e5f325f140cb4830e7a7070554594650 -commit 1ab7fde626c3d3ac02664183f21fedd397785bea +commit 63bcdffa7d188b8d8c5309a62c255ba33f061764 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Jul 8 10:03:49 2013 -0700 +Date: Sun Oct 27 21:34:13 2013 -0700 - Update gmock and gtest to the latest svn versions. + Add the 2_d_d SchurEliminator specialization. - This fixes a variety of mac/clang/c++11 issues. + This occurs far too often in bundle adjustment problems to be ignored. - Change-Id: I52e76d733cd53c9bb2fda125e51a6b58a90e41b3 + Change-Id: Ib137f1566acf5fffa63e50a55fe8e78ea9eb1c14 -commit eeedd2e191f5ce404453c735061ad13bd45b939b +commit 602096c91363a0b9384f887a15c82e2dac1fb923 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sun Jul 7 23:04:31 2013 -0700 +Date: Sun Oct 27 05:09:38 2013 -0700 - Rationalize some of the variable names in Solver::Options. + Move CERES_HASH_NAMESPACE macros to collections_port.h - lm_max_diagonal -> max_lm_diagonal - lm_min_diagonal -> min_lm_diagonal - linear_solver_max_num_iterations -> max_linear_solver_iterations - linear_solver_min_num_iterations -> min_linear_solver_iterations + Now that we have a clearer understanding of the naming rules + there is no need for these macro definitions to be done in + the cmake file. 
- This follows the pattern for the other parameters in Solver::Options - where, the max/min is the first word followed by the name of the - parameter. + This cleans up the compilation command line. - Change-Id: I0893610fceb6b7983fdb458a65522ba7079596a7 + Change-Id: Idc8fc7a7c9376e021dc4790af66e599105351917 -commit 7a8f79792467e56012d43b5f9aa7aefce14d5ee9 +commit f6b67df54ad6daa7036f5b6619243f722d678892 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Jul 3 09:03:55 2013 -0700 +Date: Fri Oct 25 06:24:19 2013 -0700 - Lint fixes + Fix handling of unordered_map/unordered_set on OSX 10.9.0. - Change-Id: Ic453597488ef92723a81a224e7443e8f454b25da - -commit 67ccb7379e7eab709480e227323ea48ea91e7ccc -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Jul 3 06:28:34 2013 -0700 - - Fix broken build. + Depending on the compiler + standard library combination, + unordered_map/set may or may not be available. If available + they maybe in the std or the std::tr1 namespaces. - Change-Id: Ieb122bb96d5776f962fff6d6e9345dfc855bfed7 - -commit 4f010b2db02f22cee8243ed83a49e63a305dbb76 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Jul 1 08:01:01 2013 -0700 - - Improve Summary::FullReport when line search is used. + Apple switched to using libc++ with 10.9.0 which places + unordered_map in std, breaking our assumptions about the + platform. - Disable reporting of preconditioner when direct factorization - is being used. + This change refactors our logic for dealing with the namespace + switching, making it a three state thing rather than two. There + are three defines now, CERES_NO_UNORDERED_MAP, CERES_STD_UNORDERED_MAP + and CERES_TR1_UNORDERED_MAP. Earlier the first two were conflated + into one, leading to the breakage. 
- Change-Id: Id264d2292c5cab608724a6a8fab5d588db950468 + Change-Id: I904fe8c49529169bdefa9f2ee6d629e7eab0b855 -commit 09244015e304b0ebfb2f2399edd2d97e3b9dcd8f +commit 21d6a99fe68e99fa51db32d55f587b42ef9a476c Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sun Jun 30 14:33:23 2013 -0700 +Date: Fri Oct 25 10:20:24 2013 -0700 - Expose line search parameters in Solver::Options. + Fix AngleAxisToRotationMatrix near zero. + + The Taylor series approximation had its sign flipped and the + tests did not catch it since we were switching exactly at zero, + which was not getting triggered. - Change-Id: Ifc52980976e7bac73c8164d80518a5a19db1b79d + This changes modifies the tolerance, adds a test that triggers + and fixes the bug. + + Thanks to Michael Samples for reporting this. + + Change-Id: I6f92f6348e5d4421ffe194fba92c04285449484c -commit 1c70ae9aa626e591cda987a970c240dd40d23a69 +commit 0e2743e24d013b25109396cfa0d8d0f1e8e84964 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sun Jun 30 12:50:43 2013 -0700 +Date: Wed Oct 23 14:51:07 2013 -0700 - Fix Solver::Summary when line search is used. + Add BlockRandomAccessDiagonalMatrix. - Also enable line search in bundle_adjuster. + This class is used in the SchurJacobiPreconditioner for + storing the preconditioner matrix. Using it speeds up + the computation of the preconditioner by ~15% due to + the elimination of a hash table lookup. - Change-Id: Ic4343a4334b9f5a6fdeab38d4e3e1f6932bbc601 + Change-Id: Iba2b34aad0d9eb9bcb7f6e6fad16aa416aac0d2a -commit 70b06c89c7491d7749957c8454769bfcb0108a97 +commit 6a2bcaa1d55d38bc10d043f1458657caac2be7a7 Author: Alex Stewart <alexs.mac@gmail.com> -Date: Sun Jun 30 18:49:56 2013 +0100 +Date: Wed Oct 23 14:06:44 2013 +0100 - Fix update of L-BFGS history buffers after they become full. 
- - Previously there was an assignment dimension mismatch in the - history update; thus, over time, the history would contain - (only) replicated copies of the (max_num_corrections_ -1)-th - update and the most recent update. + Adding explicit link to libm for pure-C curve fitting example. - Change-Id: I26203acf689686d41a5029c675ebbe001fe05d90 - -commit a427c877f968d951b3cdcb5f5298deaf84647830 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Jun 24 17:50:56 2013 -0700 - - Lint cleanup. + - Any pure-C program #including <math.h> will need to link against + libm, some compilers will let an indirect link slide (via Ceres in + this case) but some won't. - Change-Id: Ie489f1ff182d99251ed8c0728cc6ea8e1c262ce0 + Change-Id: I6890702fa0d2c3fbb747f0f81fc3fa3631839de4 diff --git a/extern/libmv/third_party/ceres/SConscript b/extern/libmv/third_party/ceres/SConscript index 164aedfe415..e61e979efd2 100644 --- a/extern/libmv/third_party/ceres/SConscript +++ b/extern/libmv/third_party/ceres/SConscript @@ -14,11 +14,10 @@ defs = [] src += env.Glob('internal/ceres/*.cc') src += env.Glob('internal/ceres/generated/schur_eliminator_d_d_d.cc') +src += env.Glob('internal/ceres/generated/partitioned_matrix_view_d_d_d.cc') #src += env.Glob('internal/ceres/generated/*.cc') defs.append('CERES_HAVE_PTHREAD') -defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {') -defs.append('CERES_HASH_NAMESPACE_END=}}') defs.append('CERES_NO_SUITESPARSE') defs.append('CERES_NO_CXSPARSE') defs.append('CERES_NO_LAPACK') @@ -28,8 +27,17 @@ defs.append('CERES_HAVE_RWLOCK') if env['WITH_BF_OPENMP']: defs.append('CERES_USE_OPENMP') -if 'Mac OS X 10.5' in env['MACOSX_SDK']: - defs.append('CERES_NO_TR1') +conf = Configure(env) +if conf.CheckCXXHeader("unordered_map"): + defs.append('CERES_STD_UNORDERED_MAP') +elif conf.CheckCXXHeader("tr1/unordered_map"): + defs.append('CERES_TR1_UNORDERED_MAP') +else: + print("-- Unable to find <unordered_map> or <tr1/unordered_map>. 
") + print("-- Replacing unordered_map/set with map/set (warning: slower!)") + defs.append('CERES_NO_UNORDERED_MAP') + +env = conf.Finish() incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags' diff --git a/extern/libmv/third_party/ceres/bundle.sh b/extern/libmv/third_party/ceres/bundle.sh index a6f040b6d29..10c70ec51f7 100755 --- a/extern/libmv/third_party/ceres/bundle.sh +++ b/extern/libmv/third_party/ceres/bundle.sh @@ -43,8 +43,11 @@ done rm -rf $tmp -sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/' | grep -v -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d` -generated_sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//#\t\t/' | grep -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d` +sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/' | \ + grep -v -E 'schur_eliminator_[0-9]_[0-9d]_[0-9d].cc' | \ + grep -v -E 'partitioned_matrix_view_[0-9]_[0-9d]_[0-9d].cc' | sort -d` +generated_sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//#\t\t/' | \ + grep -E 'schur_eliminator_[0-9]_[0-9d]_[0-9d].cc|partitioned_matrix_view_[0-9]_[0-9d]_[0-9d].cc' | sort -d` headers=`find ./include ./internal -type f -iname '*.h' | sed -r 's/^\.\//\t/' | sort -d` src_dir=`find ./internal -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t/' | sort -d | uniq` @@ -171,23 +174,18 @@ if(WITH_OPENMP) ) endif() -if(MSVC10) - add_definitions( - -D"CERES_HASH_NAMESPACE_START=namespace std {" - -D"CERES_HASH_NAMESPACE_END=}" - ) +include(CheckIncludeFileCXX) +CHECK_INCLUDE_FILE_CXX(unordered_map UNORDERED_MAP_IN_STD_NAMESPACE) +if(UNORDERED_MAP_IN_STD_NAMESPACE) + ADD_DEFINITIONS(-DCERES_STD_UNORDERED_MAP) else() - add_definitions( - 
-D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {" - -D"CERES_HASH_NAMESPACE_END=}}" - ) -endif() - -if(APPLE) - if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5") - add_definitions( - -DCERES_NO_TR1 - ) + CHECK_INCLUDE_FILE_CXX("tr1/unordered_map" UNORDERED_MAP_IN_TR1_NAMESPACE) + if(UNORDERED_MAP_IN_TR1_NAMESPACE) + ADD_DEFINITIONS(-DCERES_TR1_UNORDERED_MAP) + else() + MESSAGE("-- Unable to find <unordered_map> or <tr1/unordered_map>. ") + MESSAGE("-- Replacing unordered_map/set with map/set (warning: slower!)") + ADD_DEFINITIONS(-DCERES_NO_UNORDERED_MAP) endif() endif() @@ -211,11 +209,10 @@ defs = [] $src src += env.Glob('internal/ceres/generated/schur_eliminator_d_d_d.cc') +src += env.Glob('internal/ceres/generated/partitioned_matrix_view_d_d_d.cc') #src += env.Glob('internal/ceres/generated/*.cc') defs.append('CERES_HAVE_PTHREAD') -defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {') -defs.append('CERES_HASH_NAMESPACE_END=}}') defs.append('CERES_NO_SUITESPARSE') defs.append('CERES_NO_CXSPARSE') defs.append('CERES_NO_LAPACK') @@ -225,8 +222,15 @@ defs.append('CERES_HAVE_RWLOCK') if env['WITH_BF_OPENMP']: defs.append('CERES_USE_OPENMP') -if 'Mac OS X 10.5' in env['MACOSX_SDK']: - defs.append('CERES_NO_TR1') +conf = Configure(env) +if conf.CheckCXXHeader("unordered_map"): + defs.append('CERES_STD_UNORDERED_MAP') +elif conf.CheckCXXHeader("tr1/unordered_map"): + defs.append('CERES_TR1_UNORDERED_MAP') +else: + print("-- Unable to find <unordered_map> or <tr1/unordered_map>. ") + print("-- Replacing unordered_map/set with map/set (warning: slower!)") + defs.append('CERES_NO_UNORDERED_MAP') incs = '. 
../../ ../../../Eigen3 ./include ./internal ../gflags' diff --git a/extern/libmv/third_party/ceres/files.txt b/extern/libmv/third_party/ceres/files.txt index 071ccda655c..0ec7fc5354e 100644 --- a/extern/libmv/third_party/ceres/files.txt +++ b/extern/libmv/third_party/ceres/files.txt @@ -8,6 +8,7 @@ include/ceres/cost_function_to_functor.h include/ceres/covariance.h include/ceres/crs_matrix.h include/ceres/dynamic_autodiff_cost_function.h +include/ceres/dynamic_numeric_diff_cost_function.h include/ceres/fpclassify.h include/ceres/gradient_checker.h include/ceres/internal/autodiff.h @@ -46,6 +47,8 @@ internal/ceres/block_random_access_crs_matrix.cc internal/ceres/block_random_access_crs_matrix.h internal/ceres/block_random_access_dense_matrix.cc internal/ceres/block_random_access_dense_matrix.h +internal/ceres/block_random_access_diagonal_matrix.cc +internal/ceres/block_random_access_diagonal_matrix.h internal/ceres/block_random_access_matrix.cc internal/ceres/block_random_access_matrix.h internal/ceres/block_random_access_sparse_matrix.cc @@ -96,6 +99,23 @@ internal/ceres/evaluator.h internal/ceres/execution_summary.h internal/ceres/file.cc internal/ceres/file.h +internal/ceres/generated/partitioned_matrix_view_2_2_2.cc +internal/ceres/generated/partitioned_matrix_view_2_2_3.cc +internal/ceres/generated/partitioned_matrix_view_2_2_4.cc +internal/ceres/generated/partitioned_matrix_view_2_2_d.cc +internal/ceres/generated/partitioned_matrix_view_2_3_3.cc +internal/ceres/generated/partitioned_matrix_view_2_3_4.cc +internal/ceres/generated/partitioned_matrix_view_2_3_9.cc +internal/ceres/generated/partitioned_matrix_view_2_3_d.cc +internal/ceres/generated/partitioned_matrix_view_2_4_3.cc +internal/ceres/generated/partitioned_matrix_view_2_4_4.cc +internal/ceres/generated/partitioned_matrix_view_2_4_d.cc +internal/ceres/generated/partitioned_matrix_view_2_d_d.cc +internal/ceres/generated/partitioned_matrix_view_4_4_2.cc 
+internal/ceres/generated/partitioned_matrix_view_4_4_3.cc +internal/ceres/generated/partitioned_matrix_view_4_4_4.cc +internal/ceres/generated/partitioned_matrix_view_4_4_d.cc +internal/ceres/generated/partitioned_matrix_view_d_d_d.cc internal/ceres/generated/schur_eliminator_2_2_2.cc internal/ceres/generated/schur_eliminator_2_2_3.cc internal/ceres/generated/schur_eliminator_2_2_4.cc @@ -107,12 +127,14 @@ internal/ceres/generated/schur_eliminator_2_3_d.cc internal/ceres/generated/schur_eliminator_2_4_3.cc internal/ceres/generated/schur_eliminator_2_4_4.cc internal/ceres/generated/schur_eliminator_2_4_d.cc +internal/ceres/generated/schur_eliminator_2_d_d.cc internal/ceres/generated/schur_eliminator_4_4_2.cc internal/ceres/generated/schur_eliminator_4_4_3.cc internal/ceres/generated/schur_eliminator_4_4_4.cc internal/ceres/generated/schur_eliminator_4_4_d.cc internal/ceres/generated/schur_eliminator_d_d_d.cc internal/ceres/generate_eliminator_specialization.py +internal/ceres/generate_partitioned_matrix_view_specializations.py internal/ceres/gradient_checking_cost_function.cc internal/ceres/gradient_checking_cost_function.h internal/ceres/graph_algorithms.h @@ -154,6 +176,7 @@ internal/ceres/parameter_block_ordering.cc internal/ceres/parameter_block_ordering.h internal/ceres/partitioned_matrix_view.cc internal/ceres/partitioned_matrix_view.h +internal/ceres/partitioned_matrix_view_impl.h internal/ceres/polynomial.cc internal/ceres/polynomial.h internal/ceres/preconditioner.cc @@ -169,8 +192,6 @@ internal/ceres/residual_block.cc internal/ceres/residual_block.h internal/ceres/residual_block_utils.cc internal/ceres/residual_block_utils.h -internal/ceres/runtime_numeric_diff_cost_function.cc -internal/ceres/runtime_numeric_diff_cost_function.h internal/ceres/schur_complement_solver.cc internal/ceres/schur_complement_solver.h internal/ceres/schur_eliminator.cc @@ -180,6 +201,8 @@ internal/ceres/schur_jacobi_preconditioner.cc internal/ceres/schur_jacobi_preconditioner.h 
internal/ceres/scratch_evaluate_preparer.cc internal/ceres/scratch_evaluate_preparer.h +internal/ceres/single_linkage_clustering.cc +internal/ceres/single_linkage_clustering.h internal/ceres/small_blas.h internal/ceres/solver.cc internal/ceres/solver_impl.cc diff --git a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h index 371a11f71ec..7c0fa79ad0b 100644 --- a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h @@ -96,7 +96,7 @@ // "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a // 1-dimensional output from two arguments, both 2-dimensional. // -// The autodiff cost function also supports cost functions with a +// AutoDiffCostFunction also supports cost functions with a // runtime-determined number of residuals. For example: // // CostFunction* cost_function @@ -110,8 +110,9 @@ // Dimension of x ------------------------------------+ | // Dimension of y ---------------------------------------+ // -// The framework can currently accommodate cost functions of up to 6 independent -// variables, and there is no limit on the dimensionality of each of them. +// The framework can currently accommodate cost functions of up to 10 +// independent variables, and there is no limit on the dimensionality +// of each of them. // // WARNING #1: Since the functor will get instantiated with different types for // T, you must to convert from other numeric types to T before mixing @@ -145,13 +146,13 @@ namespace ceres { // // The constructors take ownership of the cost functor. // -// If the number of residuals (argument "M" below) is ceres::DYNAMIC, then the -// two-argument constructor must be used. The second constructor takes a number -// of residuals (in addition to the templated number of residuals). 
This allows -// for varying the number of residuals for a single autodiff cost function at -// runtime. +// If the number of residuals (argument kNumResiduals below) is +// ceres::DYNAMIC, then the two-argument constructor must be used. The +// second constructor takes a number of residuals (in addition to the +// templated number of residuals). This allows for varying the number +// of residuals for a single autodiff cost function at runtime. template <typename CostFunctor, - int M, // Number of residuals, or ceres::DYNAMIC. + int kNumResiduals, // Number of residuals, or ceres::DYNAMIC. int N0, // Number of parameters in block 0. int N1 = 0, // Number of parameters in block 1. int N2 = 0, // Number of parameters in block 2. @@ -162,28 +163,32 @@ template <typename CostFunctor, int N7 = 0, // Number of parameters in block 7. int N8 = 0, // Number of parameters in block 8. int N9 = 0> // Number of parameters in block 9. -class AutoDiffCostFunction : public SizedCostFunction<M, +class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> { public: // Takes ownership of functor. Uses the template-provided value for the - // number of residuals ("M"). + // number of residuals ("kNumResiduals"). explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) { - CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the " - << "number of residuals is set to ceres::DYNAMIC."; + CHECK_NE(kNumResiduals, DYNAMIC) + << "Can't run the fixed-size constructor if the " + << "number of residuals is set to ceres::DYNAMIC."; } - // Takes ownership of functor. Ignores the template-provided number of - // residuals ("M") in favor of the "num_residuals" argument provided. + // Takes ownership of functor. Ignores the template-provided + // kNumResiduals in favor of the "num_residuals" argument provided. // // This allows for having autodiff cost functions which return varying // numbers of residuals at runtime. 
AutoDiffCostFunction(CostFunctor* functor, int num_residuals) : functor_(functor) { - CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the " - << "number of residuals is not ceres::DYNAMIC."; - SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> + CHECK_EQ(kNumResiduals, DYNAMIC) + << "Can't run the dynamic-size constructor if the " + << "number of residuals is not ceres::DYNAMIC."; + SizedCostFunction<kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9> ::set_num_residuals(num_residuals); } @@ -206,8 +211,9 @@ class AutoDiffCostFunction : public SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate( *functor_, parameters, - SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> - ::num_residuals(), + SizedCostFunction<kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9>::num_residuals(), residuals, jacobians); } diff --git a/extern/libmv/third_party/ceres/include/ceres/ceres.h b/extern/libmv/third_party/ceres/include/ceres/ceres.h index 61b8b94dcaa..fca4907d384 100644 --- a/extern/libmv/third_party/ceres/include/ceres/ceres.h +++ b/extern/libmv/third_party/ceres/include/ceres/ceres.h @@ -34,8 +34,8 @@ #ifndef CERES_PUBLIC_CERES_H_ #define CERES_PUBLIC_CERES_H_ -#define CERES_VERSION 1.7.0 -#define CERES_ABI_VERSION 1.7.0 +#define CERES_VERSION 1.8.0 +#define CERES_ABI_VERSION 1.8.0 #include "ceres/autodiff_cost_function.h" #include "ceres/autodiff_local_parameterization.h" @@ -43,6 +43,8 @@ #include "ceres/cost_function_to_functor.h" #include "ceres/covariance.h" #include "ceres/crs_matrix.h" +#include "ceres/dynamic_autodiff_cost_function.h" +#include "ceres/dynamic_numeric_diff_cost_function.h" #include "ceres/iteration_callback.h" #include "ceres/jet.h" #include "ceres/local_parameterization.h" diff --git a/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h index 5d8f188e5a7..f9342cdbab9 
100644 --- a/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2012 Google Inc. All rights reserved. +// Copyright 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -26,18 +26,17 @@ // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // -// Author: mierle@gmail.com (Keir Mierle) -// sameeragarwal@google.com (Sameer Agarwal) -// thadh@gmail.com (Thad Hughes) +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) // // This autodiff implementation differs from the one found in -// autodiff_cost_function.h by supporting autodiff on cost functions with -// variable numbers of parameters with variable sizes. With the other -// implementation, all the sizes (both the number of parameter blocks and the -// size of each block) must be fixed at compile time. +// autodiff_cost_function.h by supporting autodiff on cost functions +// with variable numbers of parameters with variable sizes. With the +// other implementation, all the sizes (both the number of parameter +// blocks and the size of each block) must be fixed at compile time. // -// The functor API differs slightly from the API for fixed size autodiff; the -// expected interface for the cost functors is: +// The functor API differs slightly from the API for fixed size +// autodiff; the expected interface for the cost functors is: // // struct MyCostFunctor { // template<typename T> @@ -46,8 +45,9 @@ // } // } // -// Since the sizing of the parameters is done at runtime, you must also specify -// the sizes after creating the dynamic autodiff cost function. 
For example: +// Since the sizing of the parameters is done at runtime, you must +// also specify the sizes after creating the dynamic autodiff cost +// function. For example: // // DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function( // new MyCostFunctor()); @@ -55,10 +55,11 @@ // cost_function.AddParameterBlock(10); // cost_function.SetNumResiduals(21); // -// Under the hood, the implementation evaluates the cost function multiple -// times, computing a small set of the derivatives (four by default, controlled -// by the Stride template parameter) with each pass. There is a tradeoff with -// the size of the passes; you may want to experiment with the stride. +// Under the hood, the implementation evaluates the cost function +// multiple times, computing a small set of the derivatives (four by +// default, controlled by the Stride template parameter) with each +// pass. There is a tradeoff with the size of the passes; you may want +// to experiment with the stride. #ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ #define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/dynamic_numeric_diff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/dynamic_numeric_diff_cost_function.h new file mode 100644 index 00000000000..c2bfb3223cb --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/dynamic_numeric_diff_cost_function.h @@ -0,0 +1,265 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) +// thadh@gmail.com (Thad Hughes) +// +// This numeric diff implementation differs from the one found in +// numeric_diff_cost_function.h by supporting numericdiff on cost +// functions with variable numbers of parameters with variable +// sizes. With the other implementation, all the sizes (both the +// number of parameter blocks and the size of each block) must be +// fixed at compile time. 
+// +// The functor API differs slightly from the API for fixed size +// numeric diff; the expected interface for the cost functors is: +// +// struct MyCostFunctor { +// template<typename T> +// bool operator()(double const* const* parameters, double* residuals) const { +// // Use parameters[i] to access the i'th parameter block. +// } +// } +// +// Since the sizing of the parameters is done at runtime, you must +// also specify the sizes after creating the +// DynamicNumericDiffCostFunction. For example: +// +// DynamicAutoDiffCostFunction<MyCostFunctor, CENTRAL> cost_function( +// new MyCostFunctor()); +// cost_function.AddParameterBlock(5); +// cost_function.AddParameterBlock(10); +// cost_function.SetNumResiduals(21); + +#ifndef CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_DYNAMIC_NUMERIC_DIFF_COST_FUNCTION_H_ + +#include <cmath> +#include <numeric> +#include <vector> + +#include "ceres/cost_function.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/numeric_diff.h" +#include "glog/logging.h" + +namespace ceres { + +template <typename CostFunctor, NumericDiffMethod method = CENTRAL> +class DynamicNumericDiffCostFunction : public CostFunction { + public: + explicit DynamicNumericDiffCostFunction(const CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + double relative_step_size = 1e-6) + : functor_(functor), + ownership_(ownership), + relative_step_size_(relative_step_size) { + } + + virtual ~DynamicNumericDiffCostFunction() { + if (ownership_ != TAKE_OWNERSHIP) { + functor_.release(); + } + } + + void AddParameterBlock(int size) { + mutable_parameter_block_sizes()->push_back(size); + } + + void SetNumResiduals(int num_residuals) { + set_num_residuals(num_residuals); + } + + virtual bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const { + CHECK_GT(num_residuals(), 0) + << "You must call 
DynamicNumericDiffCostFunction::SetNumResiduals() " + << "before DynamicNumericDiffCostFunction::Evaluate()."; + + const vector<int16>& block_sizes = parameter_block_sizes(); + CHECK(!block_sizes.empty()) + << "You must call DynamicNumericDiffCostFunction::AddParameterBlock() " + << "before DynamicNumericDiffCostFunction::Evaluate()."; + + const bool status = EvaluateCostFunctor(parameters, residuals); + if (jacobians == NULL || !status) { + return status; + } + + // Create local space for a copy of the parameters which will get mutated. + int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0); + vector<double> parameters_copy(parameters_size); + vector<double*> parameters_references_copy(block_sizes.size()); + parameters_references_copy[0] = ¶meters_copy[0]; + for (int block = 1; block < block_sizes.size(); ++block) { + parameters_references_copy[block] = parameters_references_copy[block - 1] + + block_sizes[block - 1]; + } + + // Copy the parameters into the local temp space. 
+ for (int block = 0; block < block_sizes.size(); ++block) { + memcpy(parameters_references_copy[block], + parameters[block], + block_sizes[block] * sizeof(*parameters[block])); + } + + for (int block = 0; block < block_sizes.size(); ++block) { + if (jacobians[block] != NULL && + !EvaluateJacobianForParameterBlock(block_sizes[block], + block, + relative_step_size_, + residuals, + ¶meters_references_copy[0], + jacobians)) { + return false; + } + } + return true; + } + + private: + bool EvaluateJacobianForParameterBlock(const int parameter_block_size, + const int parameter_block, + const double relative_step_size, + double const* residuals_at_eval_point, + double** parameters, + double** jacobians) const { + using Eigen::Map; + using Eigen::Matrix; + using Eigen::Dynamic; + using Eigen::RowMajor; + + typedef Matrix<double, Dynamic, 1> ResidualVector; + typedef Matrix<double, Dynamic, 1> ParameterVector; + typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix; + + int num_residuals = this->num_residuals(); + + Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block], + num_residuals, + parameter_block_size); + + // Mutate one element at a time and then restore. + Map<ParameterVector> x_plus_delta(parameters[parameter_block], + parameter_block_size); + ParameterVector x(x_plus_delta); + ParameterVector step_size = x.array().abs() * relative_step_size; + + // To handle cases where a paremeter is exactly zero, instead use + // the mean step_size for the other dimensions. + double fallback_step_size = step_size.sum() / step_size.rows(); + if (fallback_step_size == 0.0) { + // If all the parameters are zero, there's no good answer. Use the given + // relative step_size as absolute step_size and hope for the best. + fallback_step_size = relative_step_size; + } + + // For each parameter in the parameter block, use finite + // differences to compute the derivative for that parameter. 
+ for (int j = 0; j < parameter_block_size; ++j) { + if (step_size(j) == 0.0) { + // The parameter is exactly zero, so compromise and use the + // mean step_size from the other parameters. This can break in + // many cases, but it's hard to pick a good number without + // problem specific knowledge. + step_size(j) = fallback_step_size; + } + x_plus_delta(j) = x(j) + step_size(j); + + ResidualVector residuals(num_residuals); + if (!EvaluateCostFunctor(parameters, &residuals[0])) { + // Something went wrong; bail. + return false; + } + + // Compute this column of the jacobian in 3 steps: + // 1. Store residuals for the forward part. + // 2. Subtract residuals for the backward (or 0) part. + // 3. Divide out the run. + parameter_jacobian.col(j).matrix() = residuals; + + double one_over_h = 1 / step_size(j); + if (method == CENTRAL) { + // Compute the function on the other side of x(j). + x_plus_delta(j) = x(j) - step_size(j); + + if (!EvaluateCostFunctor(parameters, &residuals[0])) { + // Something went wrong; bail. + return false; + } + + parameter_jacobian.col(j) -= residuals; + one_over_h /= 2; + } else { + // Forward difference only; reuse existing residuals evaluation. + parameter_jacobian.col(j) -= + Map<const ResidualVector>(residuals_at_eval_point, num_residuals); + } + x_plus_delta(j) = x(j); // Restore x_plus_delta. + + // Divide out the run to get slope. + parameter_jacobian.col(j) *= one_over_h; + } + return true; + } + + bool EvaluateCostFunctor(double const* const* parameters, + double* residuals) const { + return EvaluateCostFunctorImpl(functor_.get(), + parameters, + residuals, + functor_.get()); + } + + // Helper templates to allow evaluation of a functor or a + // CostFunction. 
+ bool EvaluateCostFunctorImpl(const CostFunctor* functor, + double const* const* parameters, + double* residuals, + const void* /* NOT USED */) const { + return (*functor)(parameters, residuals); + } + + bool EvaluateCostFunctorImpl(const CostFunctor* functor, + double const* const* parameters, + double* residuals, + const CostFunction* /* NOT USED */) const { + return functor->Evaluate(parameters, residuals, NULL); + } + + internal::scoped_ptr<const CostFunctor> functor_; + Ownership ownership_; + const double relative_step_size_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h b/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h index cf21d7a5001..3a96625e3fd 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h @@ -172,7 +172,7 @@ inline void Make1stOrderPerturbation(int offset, const T* src, JetT* dst) { for (int j = 0; j < N; ++j) { dst[j].a = src[j]; dst[j].v.setZero(); - dst[j].v[offset + j] = 1.0; + dst[j].v[offset + j] = T(1.0); } } diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h index ee264d1619d..694070b228c 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h @@ -113,7 +113,6 @@ class FixedArray { // REQUIRES: 0 <= i < size() // Returns a reference to the "i"th element. inline T& operator[](size_type i) { - DCHECK_GE(i, 0); DCHECK_LT(i, size_); return array_[i].element; } @@ -121,7 +120,6 @@ class FixedArray { // REQUIRES: 0 <= i < size() // Returns a reference to the "i"th element. 
inline const T& operator[](size_type i) const { - DCHECK_GE(i, 0); DCHECK_LT(i, size_); return array_[i].element; } @@ -168,8 +166,6 @@ inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n) array_((n <= kInlineElements ? reinterpret_cast<InnerContainer*>(inline_space_) : new InnerContainer[n])) { - DCHECK_GE(n, size_t(0)); - // Construct only the elements actually used. if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) { for (size_t i = 0; i != size_; ++i) { diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h index 388cf30fe70..1ed55be6e03 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h @@ -145,12 +145,11 @@ char (&ArraySizeHelper(const T (&array)[N]))[N]; // // Sprocket* AllocateSprocket() MUST_USE_RESULT; // -#undef MUST_USE_RESULT #if (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) \ && !defined(COMPILER_ICC) -#define MUST_USE_RESULT __attribute__ ((warn_unused_result)) +#define CERES_MUST_USE_RESULT __attribute__ ((warn_unused_result)) #else -#define MUST_USE_RESULT +#define CERES_MUST_USE_RESULT #endif // Platform independent macros to get aligned memory allocations. 
diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h b/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h index 4058366c4a1..5048348564a 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h @@ -90,6 +90,7 @@ struct NumericDiff { const CostFunctor* functor, double const* residuals_at_eval_point, const double relative_step_size, + int num_residuals, double **parameters, double *jacobian) { using Eigen::Map; @@ -97,15 +98,21 @@ struct NumericDiff { using Eigen::RowMajor; using Eigen::ColMajor; + const int NUM_RESIDUALS = + (kNumResiduals != ceres::DYNAMIC ? kNumResiduals : num_residuals); + typedef Matrix<double, kNumResiduals, 1> ResidualVector; typedef Matrix<double, kParameterBlockSize, 1> ParameterVector; - typedef Matrix<double, kNumResiduals, kParameterBlockSize, + typedef Matrix<double, + kNumResiduals, + kParameterBlockSize, (kParameterBlockSize == 1 && - kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix; + kNumResiduals > 1) ? ColMajor : RowMajor> + JacobianMatrix; Map<JacobianMatrix> parameter_jacobian(jacobian, - kNumResiduals, + NUM_RESIDUALS, kParameterBlockSize); // Mutate 1 element at a time and then restore. @@ -125,16 +132,16 @@ struct NumericDiff { // For each parameter in the parameter block, use finite differences to // compute the derivative for that parameter. + + ResidualVector residuals(NUM_RESIDUALS); for (int j = 0; j < kParameterBlockSize; ++j) { const double delta = (step_size(j) == 0.0) ? fallback_step_size : step_size(j); x_plus_delta(j) = x(j) + delta; - double residuals[kNumResiduals]; // NOLINT - if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( - functor, parameters, residuals, functor)) { + functor, parameters, residuals.data(), functor)) { return false; } @@ -142,8 +149,7 @@ struct NumericDiff { // 1. Store residuals for the forward part. // 2. 
Subtract residuals for the backward (or 0) part. // 3. Divide out the run. - parameter_jacobian.col(j) = - Map<const ResidualVector>(residuals, kNumResiduals); + parameter_jacobian.col(j) = residuals; double one_over_delta = 1.0 / delta; if (kMethod == CENTRAL) { @@ -151,17 +157,16 @@ struct NumericDiff { x_plus_delta(j) = x(j) - delta; if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( - functor, parameters, residuals, functor)) { + functor, parameters, residuals.data(), functor)) { return false; } - parameter_jacobian.col(j) -= - Map<ResidualVector>(residuals, kNumResiduals, 1); + parameter_jacobian.col(j) -= residuals; one_over_delta /= 2; } else { // Forward difference only; reuse existing residuals evaluation. parameter_jacobian.col(j) -= - Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals); + Map<const ResidualVector>(residuals_at_eval_point, NUM_RESIDUALS); } x_plus_delta(j) = x(j); // Restore x_plus_delta. @@ -186,6 +191,7 @@ struct NumericDiff<CostFunctor, kMethod, kNumResiduals, const CostFunctor* functor, double const* residuals_at_eval_point, const double relative_step_size, + const int num_residuals, double **parameters, double *jacobian) { LOG(FATAL) << "Control should never reach here."; diff --git a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h index 987c2d91f79..56892562556 100644 --- a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h +++ b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h @@ -50,6 +50,7 @@ struct IterationSummary { cost(0.0), cost_change(0.0), gradient_max_norm(0.0), + gradient_norm(0.0), step_norm(0.0), eta(0.0), step_size(0.0), @@ -100,6 +101,9 @@ struct IterationSummary { // Infinity norm of the gradient vector. double gradient_max_norm; + // 2-norm of the gradient vector. + double gradient_norm; + // 2-norm of the size of the step computed by the optimization // algorithm. 
double step_norm; diff --git a/extern/libmv/third_party/ceres/include/ceres/jet.h b/extern/libmv/third_party/ceres/include/ceres/jet.h index 4d2a857dc3d..55caa05dbac 100644 --- a/extern/libmv/third_party/ceres/include/ceres/jet.h +++ b/extern/libmv/third_party/ceres/include/ceres/jet.h @@ -106,8 +106,8 @@ // Jet<double, 2> y(1); // Pick the 1st dual number for y. // Jet<double, 2> z = f(x, y); // -// LG << "df/dx = " << z.a[0] -// << "df/dy = " << z.a[1]; +// LOG(INFO) << "df/dx = " << z.a[0] +// << "df/dy = " << z.a[1]; // // Most users should not use Jet objects directly; a wrapper around Jet objects, // which makes computing the derivative, gradient, or jacobian of templated @@ -192,6 +192,17 @@ struct Jet { v[k] = T(1.0); } + // Constructor from scalar and vector part + // The use of Eigen::DenseBase allows Eigen expressions + // to be passed in without being fully evaluated until + // they are assigned to v + template<typename Derived> + Jet(const T& value, const Eigen::DenseBase<Derived> &vIn) + : a(value), + v(vIn) + { + } + // Compound operators Jet<T, N>& operator+=(const Jet<T, N> &y) { *this = *this + y; @@ -246,101 +257,70 @@ Jet<T, N> const& operator+(const Jet<T, N>& f) { // Unary - template<typename T, int N> inline Jet<T, N> operator-(const Jet<T, N>&f) { - Jet<T, N> g; - g.a = -f.a; - g.v = -f.v; - return g; + return Jet<T, N>(-f.a, -f.v); } // Binary + template<typename T, int N> inline Jet<T, N> operator+(const Jet<T, N>& f, const Jet<T, N>& g) { - Jet<T, N> h; - h.a = f.a + g.a; - h.v = f.v + g.v; - return h; + return Jet<T, N>(f.a + g.a, f.v + g.v); } // Binary + with a scalar: x + s template<typename T, int N> inline Jet<T, N> operator+(const Jet<T, N>& f, T s) { - Jet<T, N> h; - h.a = f.a + s; - h.v = f.v; - return h; + return Jet<T, N>(f.a + s, f.v); } // Binary + with a scalar: s + x template<typename T, int N> inline Jet<T, N> operator+(T s, const Jet<T, N>& f) { - Jet<T, N> h; - h.a = f.a + s; - h.v = f.v; - return h; + return Jet<T, 
N>(f.a + s, f.v); } // Binary - template<typename T, int N> inline Jet<T, N> operator-(const Jet<T, N>& f, const Jet<T, N>& g) { - Jet<T, N> h; - h.a = f.a - g.a; - h.v = f.v - g.v; - return h; + return Jet<T, N>(f.a - g.a, f.v - g.v); } // Binary - with a scalar: x - s template<typename T, int N> inline Jet<T, N> operator-(const Jet<T, N>& f, T s) { - Jet<T, N> h; - h.a = f.a - s; - h.v = f.v; - return h; + return Jet<T, N>(f.a - s, f.v); } // Binary - with a scalar: s - x template<typename T, int N> inline Jet<T, N> operator-(T s, const Jet<T, N>& f) { - Jet<T, N> h; - h.a = s - f.a; - h.v = -f.v; - return h; + return Jet<T, N>(s - f.a, -f.v); } // Binary * template<typename T, int N> inline Jet<T, N> operator*(const Jet<T, N>& f, const Jet<T, N>& g) { - Jet<T, N> h; - h.a = f.a * g.a; - h.v = f.a * g.v + f.v * g.a; - return h; + return Jet<T, N>(f.a * g.a, f.a * g.v + f.v * g.a); } // Binary * with a scalar: x * s template<typename T, int N> inline Jet<T, N> operator*(const Jet<T, N>& f, T s) { - Jet<T, N> h; - h.a = f.a * s; - h.v = f.v * s; - return h; + return Jet<T, N>(f.a * s, f.v * s); } // Binary * with a scalar: s * x template<typename T, int N> inline Jet<T, N> operator*(T s, const Jet<T, N>& f) { - Jet<T, N> h; - h.a = f.a * s; - h.v = f.v * s; - return h; + return Jet<T, N>(f.a * s, f.v * s); } // Binary / template<typename T, int N> inline Jet<T, N> operator/(const Jet<T, N>& f, const Jet<T, N>& g) { - Jet<T, N> h; // This uses: // // a + u (a + u)(b - v) (a + u)(b - v) @@ -349,32 +329,22 @@ Jet<T, N> operator/(const Jet<T, N>& f, // // which holds because v*v = 0. 
const T g_a_inverse = T(1.0) / g.a; - h.a = f.a * g_a_inverse; const T f_a_by_g_a = f.a * g_a_inverse; - for (int i = 0; i < N; ++i) { - h.v[i] = (f.v[i] - f_a_by_g_a * g.v[i]) * g_a_inverse; - } - return h; + return Jet<T, N>(f.a * g_a_inverse, (f.v - f_a_by_g_a * g.v) * g_a_inverse); } // Binary / with a scalar: s / x template<typename T, int N> inline Jet<T, N> operator/(T s, const Jet<T, N>& g) { - Jet<T, N> h; - h.a = s / g.a; const T minus_s_g_a_inverse2 = -s / (g.a * g.a); - h.v = g.v * minus_s_g_a_inverse2; - return h; + return Jet<T, N>(s / g.a, g.v * minus_s_g_a_inverse2); } // Binary / with a scalar: x / s template<typename T, int N> inline Jet<T, N> operator/(const Jet<T, N>& f, T s) { - Jet<T, N> h; const T s_inverse = 1.0 / s; - h.a = f.a * s_inverse; - h.v = f.v * s_inverse; - return h; + return Jet<T, N>(f.a * s_inverse, f.v * s_inverse); } // Binary comparison operators for both scalars and jets. @@ -433,122 +403,84 @@ Jet<T, N> abs(const Jet<T, N>& f) { // log(a + h) ~= log(a) + h / a template <typename T, int N> inline Jet<T, N> log(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = log(f.a); const T a_inverse = T(1.0) / f.a; - g.v = f.v * a_inverse; - return g; + return Jet<T, N>(log(f.a), f.v * a_inverse); } // exp(a + h) ~= exp(a) + exp(a) h template <typename T, int N> inline Jet<T, N> exp(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = exp(f.a); - g.v = g.a * f.v; - return g; + const T tmp = exp(f.a); + return Jet<T, N>(tmp, tmp * f.v); } // sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a)) template <typename T, int N> inline Jet<T, N> sqrt(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = sqrt(f.a); - const T two_a_inverse = T(1.0) / (T(2.0) * g.a); - g.v = f.v * two_a_inverse; - return g; + const T tmp = sqrt(f.a); + const T two_a_inverse = T(1.0) / (T(2.0) * tmp); + return Jet<T, N>(tmp, f.v * two_a_inverse); } // cos(a + h) ~= cos(a) - sin(a) h template <typename T, int N> inline Jet<T, N> cos(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = cos(f.a); - const T 
sin_a = sin(f.a); - g.v = - sin_a * f.v; - return g; + return Jet<T, N>(cos(f.a), - sin(f.a) * f.v); } // acos(a + h) ~= acos(a) - 1 / sqrt(1 - a^2) h template <typename T, int N> inline Jet<T, N> acos(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = acos(f.a); const T tmp = - T(1.0) / sqrt(T(1.0) - f.a * f.a); - g.v = tmp * f.v; - return g; + return Jet<T, N>(acos(f.a), tmp * f.v); } // sin(a + h) ~= sin(a) + cos(a) h template <typename T, int N> inline Jet<T, N> sin(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = sin(f.a); - const T cos_a = cos(f.a); - g.v = cos_a * f.v; - return g; + return Jet<T, N>(sin(f.a), cos(f.a) * f.v); } // asin(a + h) ~= asin(a) + 1 / sqrt(1 - a^2) h template <typename T, int N> inline Jet<T, N> asin(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = asin(f.a); const T tmp = T(1.0) / sqrt(T(1.0) - f.a * f.a); - g.v = tmp * f.v; - return g; + return Jet<T, N>(asin(f.a), tmp * f.v); } // tan(a + h) ~= tan(a) + (1 + tan(a)^2) h template <typename T, int N> inline Jet<T, N> tan(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = tan(f.a); - double tan_a = tan(f.a); + const T tan_a = tan(f.a); const T tmp = T(1.0) + tan_a * tan_a; - g.v = tmp * f.v; - return g; + return Jet<T, N>(tan_a, tmp * f.v); } // atan(a + h) ~= atan(a) + 1 / (1 + a^2) h template <typename T, int N> inline Jet<T, N> atan(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = atan(f.a); const T tmp = T(1.0) / (T(1.0) + f.a * f.a); - g.v = tmp * f.v; - return g; + return Jet<T, N>(atan(f.a), tmp * f.v); } // sinh(a + h) ~= sinh(a) + cosh(a) h template <typename T, int N> inline Jet<T, N> sinh(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = sinh(f.a); - const T cosh_a = cosh(f.a); - g.v = cosh_a * f.v; - return g; + return Jet<T, N>(sinh(f.a), cosh(f.a) * f.v); } // cosh(a + h) ~= cosh(a) + sinh(a) h template <typename T, int N> inline Jet<T, N> cosh(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = cosh(f.a); - const T sinh_a = sinh(f.a); - g.v = sinh_a * f.v; - return g; + return Jet<T, N>(cosh(f.a), 
sinh(f.a) * f.v); } // tanh(a + h) ~= tanh(a) + (1 - tanh(a)^2) h template <typename T, int N> inline Jet<T, N> tanh(const Jet<T, N>& f) { - Jet<T, N> g; - g.a = tanh(f.a); - double tanh_a = tanh(f.a); + const T tanh_a = tanh(f.a); const T tmp = T(1.0) - tanh_a * tanh_a; - g.v = tmp * f.v; - return g; + return Jet<T, N>(tanh_a, tmp * f.v); } // Jet Classification. It is not clear what the appropriate semantics are for @@ -628,36 +560,25 @@ Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) { // f = a + da // g = b + db - Jet<T, N> out; - - out.a = atan2(g.a, f.a); - - T const temp = T(1.0) / (f.a * f.a + g.a * g.a); - out.v = temp * (- g.a * f.v + f.a * g.v); - return out; + T const tmp = T(1.0) / (f.a * f.a + g.a * g.a); + return Jet<T, N>(atan2(g.a, f.a), tmp * (- g.a * f.v + f.a * g.v)); } -// pow -- base is a differentiatble function, exponent is a constant. +// pow -- base is a differentiable function, exponent is a constant. // (a+da)^p ~= a^p + p*a^(p-1) da template <typename T, int N> inline Jet<T, N> pow(const Jet<T, N>& f, double g) { - Jet<T, N> out; - out.a = pow(f.a, g); - T const temp = g * pow(f.a, g - T(1.0)); - out.v = temp * f.v; - return out; + T const tmp = g * pow(f.a, g - T(1.0)); + return Jet<T, N>(pow(f.a, g), tmp * f.v); } // pow -- base is a constant, exponent is a differentiable function. 
// (a)^(p+dp) ~= a^p + a^p log(a) dp template <typename T, int N> inline Jet<T, N> pow(double f, const Jet<T, N>& g) { - Jet<T, N> out; - out.a = pow(f, g.a); - T const temp = log(f) * out.a; - out.v = temp * g.v; - return out; + T const tmp = pow(f, g.a); + return Jet<T, N>(tmp, log(f) * tmp * g.v); } @@ -665,15 +586,11 @@ Jet<T, N> pow(double f, const Jet<T, N>& g) { // (a+da)^(b+db) ~= a^b + b * a^(b-1) da + a^b log(a) * db template <typename T, int N> inline Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) { - Jet<T, N> out; - - T const temp1 = pow(f.a, g.a); - T const temp2 = g.a * pow(f.a, g.a - T(1.0)); - T const temp3 = temp1 * log(f.a); + T const tmp1 = pow(f.a, g.a); + T const tmp2 = g.a * pow(f.a, g.a - T(1.0)); + T const tmp3 = tmp1 * log(f.a); - out.a = temp1; - out.v = temp2 * f.v + temp3 * g.v; - return out; + return Jet<T, N>(tmp1, tmp2 * f.v + tmp3 * g.v); } // Define the helper functions Eigen needs to embed Jet types. @@ -740,7 +657,8 @@ struct NumTraits<ceres::Jet<T, N> > { AddCost = 1, // For Jet types, multiplication is more expensive than addition. MulCost = 3, - HasFloatingPoint = 1 + HasFloatingPoint = 1, + RequireInitialization = 1 }; }; diff --git a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h index a47a66d9672..de6b74ad552 100644 --- a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h @@ -95,6 +95,21 @@ // "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing // a 1-dimensional output from two arguments, both 2-dimensional. // +// NumericDiffCostFunction also supports cost functions with a +// runtime-determined number of residuals. 
For example: +// +// CostFunction* cost_function +// = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, DYNAMIC, 2, 2>( +// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^ +// TAKE_OWNERSHIP, | | | +// runtime_number_of_residuals); <----+ | | | +// | | | | +// | | | | +// Actual number of residuals ------+ | | | +// Indicate dynamic number of residuals --------------------+ | | +// Dimension of x ------------------------------------------------+ | +// Dimension of y ---------------------------------------------------+ +// // The framework can currently accommodate cost functions of up to 10 // independent variables, and there is no limit on the dimensionality // of each of them. @@ -104,8 +119,6 @@ // central differences begin with, and only after that works, trying forward // difference to improve performance. // -// TODO(sameeragarwal): Add support for dynamic number of residuals. -// // WARNING #1: A common beginner's error when first using // NumericDiffCostFunction is to get the sizing wrong. 
In particular, // there is a tendency to set the template parameters to (dimension of @@ -177,17 +190,19 @@ class NumericDiffCostFunction N5, N6, N7, N8, N9> { public: NumericDiffCostFunction(CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + int num_residuals = kNumResiduals, const double relative_step_size = 1e-6) :functor_(functor), - ownership_(TAKE_OWNERSHIP), - relative_step_size_(relative_step_size) {} - - NumericDiffCostFunction(CostFunctor* functor, - Ownership ownership, - const double relative_step_size = 1e-6) - : functor_(functor), - ownership_(ownership), - relative_step_size_(relative_step_size) {} + ownership_(ownership), + relative_step_size_(relative_step_size) { + if (kNumResiduals == DYNAMIC) { + SizedCostFunction<kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9> + ::set_num_residuals(num_residuals); + } + } ~NumericDiffCostFunction() { if (ownership_ != TAKE_OWNERSHIP) { @@ -216,7 +231,7 @@ class NumericDiffCostFunction return false; } - if (!jacobians) { + if (jacobians == NULL) { return true; } @@ -264,6 +279,9 @@ class NumericDiffCostFunction functor_.get(), \ residuals, \ relative_step_size_, \ + SizedCostFunction<kNumResiduals, \ + N0, N1, N2, N3, N4, \ + N5, N6, N7, N8, N9>::num_residuals(), \ parameters_reference_copy.get(), \ jacobians[block])) { \ return false; \ diff --git a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h index 039e1a17aa7..a29eb97fa6e 100644 --- a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h +++ b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h @@ -124,6 +124,8 @@ class NumericDiffFunctor { kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>(new Functor, + TAKE_OWNERSHIP, + kNumResiduals, relative_step_size)) { } @@ -133,7 +135,10 @@ class NumericDiffFunctor { kNumResiduals, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( - functor, relative_step_size)) { + functor, + 
TAKE_OWNERSHIP, + kNumResiduals, + relative_step_size)) { } bool operator()(const double* x0, double* residuals) const { diff --git a/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h b/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h index e373d35b9d7..dff859d7b82 100644 --- a/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h +++ b/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h @@ -84,11 +84,8 @@ class OrderedGroups { element_to_group_.clear(); } - // Remove the element, no matter what group it is in. If the element - // is not a member of any group, calling this method will result in - // a crash. - // - // Return value indicates if the element was actually removed. + // Remove the element, no matter what group it is in. Return value + // indicates if the element was actually removed. bool Remove(const T element) { const int current_group = GroupId(element); if (current_group < 0) { diff --git a/extern/libmv/third_party/ceres/include/ceres/problem.h b/extern/libmv/third_party/ceres/include/ceres/problem.h index 663616ddb3b..cd433f9c5b2 100644 --- a/extern/libmv/third_party/ceres/include/ceres/problem.h +++ b/extern/libmv/third_party/ceres/include/ceres/problem.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// Copyright 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -341,6 +341,26 @@ class Problem { // parameter_block.size() == NumParameterBlocks. void GetParameterBlocks(vector<double*>* parameter_blocks) const; + // Fills the passed residual_blocks vector with pointers to the + // residual blocks currently in the problem. After this call, + // residual_blocks.size() == NumResidualBlocks. 
+ void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const; + + // Get all the parameter blocks that depend on the given residual block. + void GetParameterBlocksForResidualBlock( + const ResidualBlockId residual_block, + vector<double*>* parameter_blocks) const; + + // Get all the residual blocks that depend on the given parameter block. + // + // If Problem::Options::enable_fast_parameter_block_removal is true, then + // getting the residual blocks is fast and depends only on the number of + // residual blocks. Otherwise, getting the residual blocks for a parameter + // block will incur a scan of the entire Problem object. + void GetResidualBlocksForParameterBlock( + const double* values, + vector<ResidualBlockId>* residual_blocks) const; + // Options struct to control Problem::Evaluate. struct EvaluateOptions { EvaluateOptions() diff --git a/extern/libmv/third_party/ceres/include/ceres/rotation.h b/extern/libmv/third_party/ceres/include/ceres/rotation.h index ea0b76947eb..e3dbfe84a5a 100644 --- a/extern/libmv/third_party/ceres/include/ceres/rotation.h +++ b/extern/libmv/third_party/ceres/include/ceres/rotation.h @@ -395,7 +395,7 @@ void AngleAxisToRotationMatrix( const MatrixAdapter<T, row_stride, col_stride>& R) { static const T kOne = T(1.0); const T theta2 = DotProduct(angle_axis, angle_axis); - if (theta2 > 0.0) { + if (theta2 > T(std::numeric_limits<double>::epsilon())) { // We want to be careful to only evaluate the square root if the // norm of the angle_axis vector is greater than zero. Otherwise // we get a division by zero. @@ -417,15 +417,15 @@ void AngleAxisToRotationMatrix( R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta); R(2, 2) = costheta + wz*wz*(kOne - costheta); } else { - // At zero, we switch to using the first order Taylor expansion. + // Near zero, we switch to using the first order Taylor expansion. 
R(0, 0) = kOne; - R(1, 0) = -angle_axis[2]; - R(2, 0) = angle_axis[1]; - R(0, 1) = angle_axis[2]; + R(1, 0) = angle_axis[2]; + R(2, 0) = -angle_axis[1]; + R(0, 1) = -angle_axis[2]; R(1, 1) = kOne; - R(2, 1) = -angle_axis[0]; - R(0, 2) = -angle_axis[1]; - R(1, 2) = angle_axis[0]; + R(2, 1) = angle_axis[0]; + R(0, 2) = angle_axis[1]; + R(1, 2) = -angle_axis[0]; R(2, 2) = kOne; } } @@ -580,12 +580,8 @@ T DotProduct(const T x[3], const T y[3]) { template<typename T> inline void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) { - T w[3]; - T sintheta; - T costheta; - const T theta2 = DotProduct(angle_axis, angle_axis); - if (theta2 > 0.0) { + if (theta2 > T(std::numeric_limits<double>::epsilon())) { // Away from zero, use the rodriguez formula // // result = pt costheta + @@ -597,19 +593,25 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) { // we get a division by zero. // const T theta = sqrt(theta2); - w[0] = angle_axis[0] / theta; - w[1] = angle_axis[1] / theta; - w[2] = angle_axis[2] / theta; - costheta = cos(theta); - sintheta = sin(theta); - T w_cross_pt[3]; - CrossProduct(w, pt, w_cross_pt); - T w_dot_pt = DotProduct(w, pt); - for (int i = 0; i < 3; ++i) { - result[i] = pt[i] * costheta + - w_cross_pt[i] * sintheta + - w[i] * (T(1.0) - costheta) * w_dot_pt; - } + const T costheta = cos(theta); + const T sintheta = sin(theta); + const T theta_inverse = 1.0 / theta; + + const T w[3] = { angle_axis[0] * theta_inverse, + angle_axis[1] * theta_inverse, + angle_axis[2] * theta_inverse }; + + // Explicitly inlined evaluation of the cross product for + // performance reasons. 
+ const T w_cross_pt[3] = { w[1] * pt[2] - w[2] * pt[1], + w[2] * pt[0] - w[0] * pt[2], + w[0] * pt[1] - w[1] * pt[0] }; + const T tmp = + (w[0] * pt[0] + w[1] * pt[1] + w[2] * pt[2]) * (T(1.0) - costheta); + + result[0] = pt[0] * costheta + w_cross_pt[0] * sintheta + w[0] * tmp; + result[1] = pt[1] * costheta + w_cross_pt[1] * sintheta + w[1] * tmp; + result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp; } else { // Near zero, the first order Taylor approximation of the rotation // matrix R corresponding to a vector w and angle w is @@ -623,13 +625,18 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) { // and actually performing multiplication with the point pt, gives us // R * pt = pt + w x pt. // - // Switching to the Taylor expansion at zero helps avoid all sorts - // of numerical nastiness. - T w_cross_pt[3]; - CrossProduct(angle_axis, pt, w_cross_pt); - for (int i = 0; i < 3; ++i) { - result[i] = pt[i] + w_cross_pt[i]; - } + // Switching to the Taylor expansion near zero provides meaningful + // derivatives when evaluated using Jets. + // + // Explicitly inlined evaluation of the cross product for + // performance reasons. 
+ const T w_cross_pt[3] = { angle_axis[1] * pt[2] - angle_axis[2] * pt[1], + angle_axis[2] * pt[0] - angle_axis[0] * pt[2], + angle_axis[0] * pt[1] - angle_axis[1] * pt[0] }; + + result[0] = pt[0] + w_cross_pt[0]; + result[1] = pt[1] + w_cross_pt[1]; + result[2] = pt[2] + w_cross_pt[2]; } } diff --git a/extern/libmv/third_party/ceres/include/ceres/solver.h b/extern/libmv/third_party/ceres/include/ceres/solver.h index 25b762a7bd5..7776c470eba 100644 --- a/extern/libmv/third_party/ceres/include/ceres/solver.h +++ b/extern/libmv/third_party/ceres/include/ceres/solver.h @@ -98,7 +98,7 @@ class Solver { #endif preconditioner_type = JACOBI; - + visibility_clustering_type = CANONICAL_VIEWS; dense_linear_algebra_library_type = EIGEN; sparse_linear_algebra_library_type = SUITE_SPARSE; #if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE) @@ -385,6 +385,11 @@ class Solver { // Type of preconditioner to use with the iterative linear solvers. PreconditionerType preconditioner_type; + // Type of clustering algorithm to use for visibility based + // preconditioning. This option is used only when the + // preconditioner_type is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL. + VisibilityClusteringType visibility_clustering_type; + // Ceres supports using multiple dense linear algebra libraries // for dense matrix factorizations. Currently EIGEN and LAPACK are // the valid choices. EIGEN is always available, LAPACK refers to @@ -718,9 +723,12 @@ class Solver { // description of the error. string error; - // Cost of the problem before and after the optimization. See - // problem.h for definition of the cost of a problem. + // Cost of the problem (value of the objective function) before + // the optimization. double initial_cost; + + // Cost of the problem (value of the objective function) after the + // optimization. double final_cost; // The part of the total cost that comes from residual blocks that @@ -728,10 +736,21 @@ class Solver { // blocks that they depend on were fixed. 
double fixed_cost; + // IterationSummary for each minimizer iteration in order. vector<IterationSummary> iterations; + // Number of minimizer iterations in which the step was + // accepted. Unless use_non_monotonic_steps is true this is also + // the number of steps in which the objective function value/cost + // went down. int num_successful_steps; + + // Number of minimizer iterations in which the step was rejected + // either because it did not reduce the cost enough or the step + // was not numerically valid. int num_unsuccessful_steps; + + // Number of times inner iterations were performed. int num_inner_iteration_steps; // All times reported below are wall times. @@ -753,58 +772,160 @@ class Solver { // Some total of all time spent inside Ceres when Solve is called. double total_time_in_seconds; + // Time (in seconds) spent in the linear solver computing the + // trust region step. double linear_solver_time_in_seconds; + + // Time (in seconds) spent evaluating the residual vector. double residual_evaluation_time_in_seconds; + + // Time (in seconds) spent evaluating the jacobian matrix. double jacobian_evaluation_time_in_seconds; + + // Time (in seconds) spent doing inner iterations. double inner_iteration_time_in_seconds; - // Preprocessor summary. + // Number of parameter blocks in the problem. int num_parameter_blocks; + + // Number of parameters in the probem. int num_parameters; + + // Dimension of the tangent space of the problem (or the number of + // columns in the Jacobian for the problem). This is different + // from num_parameters if a parameter block is associated with a + // LocalParameterization int num_effective_parameters; + + // Number of residual blocks in the problem. int num_residual_blocks; + + // Number of residuals in the problem. int num_residuals; + // Number of parameter blocks in the problem after the inactive + // and constant parameter blocks have been removed. A parameter + // block is inactive if no residual block refers to it. 
int num_parameter_blocks_reduced; + + // Number of parameters in the reduced problem. int num_parameters_reduced; + + // Dimension of the tangent space of the reduced problem (or the + // number of columns in the Jacobian for the reduced + // problem). This is different from num_parameters_reduced if a + // parameter block in the reduced problem is associated with a + // LocalParameterization. int num_effective_parameters_reduced; + + // Number of residual blocks in the reduced problem. int num_residual_blocks_reduced; - int num_residuals_reduced; - int num_eliminate_blocks_given; - int num_eliminate_blocks_used; + // Number of residuals in the reduced problem. + int num_residuals_reduced; + // Number of threads specified by the user for Jacobian and + // residual evaluation. int num_threads_given; + + // Number of threads actually used by the solver for Jacobian and + // residual evaluation. This number is not equal to + // num_threads_given if OpenMP is not available. int num_threads_used; + // Number of threads specified by the user for solving the trust + // region problem. int num_linear_solver_threads_given; + + // Number of threads actually used by the solver for solving the + // trust region problem. This number is not equal to + // num_threads_given if OpenMP is not available. int num_linear_solver_threads_used; + // Type of the linear solver requested by the user. LinearSolverType linear_solver_type_given; + + // Type of the linear solver actually used. This may be different + // from linear_solver_type_given if Ceres determines that the + // problem structure is not compatible with the linear solver + // requested or if the linear solver requested by the user is not + // available, e.g. The user requested SPARSE_NORMAL_CHOLESKY but + // no sparse linear algebra library was available. LinearSolverType linear_solver_type_used; + // Size of the elimination groups given by the user as hints to + // the linear solver. 
vector<int> linear_solver_ordering_given; + + // Size of the parameter groups used by the solver when ordering + // the columns of the Jacobian. This maybe different from + // linear_solver_ordering_given if the user left + // linear_solver_ordering_given blank and asked for an automatic + // ordering, or if the problem contains some constant or inactive + // parameter blocks. vector<int> linear_solver_ordering_used; + // True if the user asked for inner iterations to be used as part + // of the optimization. bool inner_iterations_given; + + // True if the user asked for inner iterations to be used as part + // of the optimization and the problem structure was such that + // they were actually performed. e.g., in a problem with just one + // parameter block, inner iterations are not performed. bool inner_iterations_used; + // Size of the parameter groups given by the user for performing + // inner iterations. vector<int> inner_iteration_ordering_given; + + // Size of the parameter groups given used by the solver for + // performing inner iterations. This maybe different from + // inner_iteration_ordering_given if the user left + // inner_iteration_ordering_given blank and asked for an automatic + // ordering, or if the problem contains some constant or inactive + // parameter blocks. vector<int> inner_iteration_ordering_used; + // Type of preconditioner used for solving the trust region + // step. Only meaningful when an iterative linear solver is used. PreconditionerType preconditioner_type; + // Type of clustering algorithm used for visibility based + // preconditioning. Only meaningful when the preconditioner_type + // is CLUSTER_JACOBI or CLUSTER_TRIDIAGONAL. + VisibilityClusteringType visibility_clustering_type; + + // Type of trust region strategy. TrustRegionStrategyType trust_region_strategy_type; + + // Type of dogleg strategy used for solving the trust region + // problem. DoglegType dogleg_type; + // Type of the dense linear algebra library used. 
DenseLinearAlgebraLibraryType dense_linear_algebra_library_type; + + // Type of the sparse linear algebra library used. SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type; + // Type of line search direction used. LineSearchDirectionType line_search_direction_type; + + // Type of the line search algorithm used. LineSearchType line_search_type; + + // When performing line search, the degree of the polynomial used + // to approximate the objective function. LineSearchInterpolationType line_search_interpolation_type; + + // If the line search direction is NONLINEAR_CONJUGATE_GRADIENT, + // then this indicates the particular variant of non-linear + // conjugate gradient used. NonlinearConjugateGradientType nonlinear_conjugate_gradient_type; + // If the type of the line search direction is LBFGS, then this + // indicates the rank of the Hessian approximation. int max_lbfgs_rank; }; diff --git a/extern/libmv/third_party/ceres/include/ceres/types.h b/extern/libmv/third_party/ceres/include/ceres/types.h index ffa743a2d97..617bec0e1b6 100644 --- a/extern/libmv/third_party/ceres/include/ceres/types.h +++ b/extern/libmv/third_party/ceres/include/ceres/types.h @@ -102,21 +102,49 @@ enum PreconditionerType { // Block diagonal of the Gauss-Newton Hessian. JACOBI, + // Note: The following three preconditioners can only be used with + // the ITERATIVE_SCHUR solver. They are well suited for Structure + // from Motion problems. + // Block diagonal of the Schur complement. This preconditioner may // only be used with the ITERATIVE_SCHUR solver. SCHUR_JACOBI, // Visibility clustering based preconditioners. // - // These preconditioners are well suited for Structure from Motion - // problems, particularly problems arising from community photo - // collections. These preconditioners use the visibility structure - // of the scene to determine the sparsity structure of the - // preconditioner. Requires SuiteSparse/CHOLMOD. 
+ // The following two preconditioners use the visibility structure of + // the scene to determine the sparsity structure of the + // preconditioner. This is done using a clustering algorithm. The + // available visibility clustering algorithms are described below. + // + // Note: Requires SuiteSparse. CLUSTER_JACOBI, CLUSTER_TRIDIAGONAL }; +enum VisibilityClusteringType { + // Canonical views algorithm as described in + // + // "Scene Summarization for Online Image Collections", Ian Simon, Noah + // Snavely, Steven M. Seitz, ICCV 2007. + // + // This clustering algorithm can be quite slow, but gives high + // quality clusters. The original visibility based clustering paper + // used this algorithm. + CANONICAL_VIEWS, + + // The classic single linkage algorithm. It is extremely fast as + // compared to CANONICAL_VIEWS, but can give slightly poorer + // results. For problems with large number of cameras though, this + // is generally a pretty good option. + // + // If you are using SCHUR_JACOBI preconditioner and have SuiteSparse + // available, CLUSTER_JACOBI and CLUSTER_TRIDIAGONAL in combination + // with the SINGLE_LINKAGE algorithm will generally give better + // results. + SINGLE_LINKAGE +}; + enum SparseLinearAlgebraLibraryType { // High performance sparse Cholesky factorization and approximate // minimum degree ordering. @@ -131,26 +159,6 @@ enum DenseLinearAlgebraLibraryType { LAPACK }; -enum LinearSolverTerminationType { - // Termination criterion was met. For factorization based solvers - // the tolerance is assumed to be zero. Any user provided values are - // ignored. - TOLERANCE, - - // Solver ran for max_num_iterations and terminated before the - // termination tolerance could be satified. - MAX_ITERATIONS, - - // Solver is stuck and further iterations will not result in any - // measurable progress. - STAGNATION, - - // Solver failed. Solver was terminated due to numerical errors. 
The - // exact cause of failure depends on the particular solver being - // used. - FAILURE -}; - // Logging options // The options get progressively noisier. enum LoggingType { @@ -400,6 +408,10 @@ bool StringToLinearSolverType(string value, LinearSolverType* type); const char* PreconditionerTypeToString(PreconditionerType type); bool StringToPreconditionerType(string value, PreconditionerType* type); +const char* VisibilityClusteringTypeToString(VisibilityClusteringType type); +bool StringToVisibilityClusteringType(string value, + VisibilityClusteringType* type); + const char* SparseLinearAlgebraLibraryTypeToString( SparseLinearAlgebraLibraryType type); bool StringToSparseLinearAlgebraLibraryType( @@ -447,9 +459,6 @@ bool StringToCovarianceAlgorithmType( string value, CovarianceAlgorithmType* type); -const char* LinearSolverTerminationTypeToString( - LinearSolverTerminationType type); - const char* SolverTerminationTypeToString(SolverTerminationType type); bool IsSchurType(LinearSolverType type); diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc index 29974d45bc9..19b749bfc39 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc @@ -94,7 +94,9 @@ bool BlockJacobiPreconditioner::UpdateImpl(const BlockSparseMatrix& A, // // MatrixRef(blocks_[cells[c].block_id], // col_block_size, - // col_block_size).selfadjointView<Eigen::Upper>().rankUpdate(m); + // col_block_size) + // .selfadjointView<Eigen::Upper>() + // .rankUpdate(m); // } } diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.cc new file mode 100644 index 00000000000..d8bf4ef0cb5 --- /dev/null +++ 
b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.cc @@ -0,0 +1,120 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/block_random_access_diagonal_matrix.h" + +#include <algorithm> +#include <set> +#include <utility> +#include <vector> +#include "ceres/internal/port.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/triplet_sparse_matrix.h" +#include "ceres/types.h" +#include "ceres/stl_util.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +BlockRandomAccessDiagonalMatrix::BlockRandomAccessDiagonalMatrix( + const vector<int>& blocks) + : blocks_(blocks) { + // Build the row/column layout vector and count the number of scalar + // rows/columns. + int num_cols = 0; + int num_nonzeros = 0; + vector<int> col_layout; + for (int i = 0; i < blocks_.size(); ++i) { + col_layout.push_back(num_cols); + num_cols += blocks_[i]; + num_nonzeros += blocks_[i] * blocks_[i]; + } + + VLOG(1) << "Matrix Size [" << num_cols + << "," << num_cols + << "] " << num_nonzeros; + + tsm_.reset(new TripletSparseMatrix(num_cols, num_cols, num_nonzeros)); + tsm_->set_num_nonzeros(num_nonzeros); + int* rows = tsm_->mutable_rows(); + int* cols = tsm_->mutable_cols(); + double* values = tsm_->mutable_values(); + + int pos = 0; + for (int i = 0; i < blocks_.size(); ++i) { + const int block_size = blocks_[i]; + layout_.push_back(new CellInfo(values + pos)); + const int block_begin = col_layout[i]; + for (int r = 0; r < block_size; ++r) { + for (int c = 0; c < block_size; ++c, ++pos) { + rows[pos] = block_begin + r; + cols[pos] = block_begin + c; + } + } + } +} + +// Assume that the user does not hold any locks on any cell blocks +// when they are calling SetZero. 
+BlockRandomAccessDiagonalMatrix::~BlockRandomAccessDiagonalMatrix() { + STLDeleteContainerPointers(layout_.begin(), layout_.end()); +} + +CellInfo* BlockRandomAccessDiagonalMatrix::GetCell(int row_block_id, + int col_block_id, + int* row, + int* col, + int* row_stride, + int* col_stride) { + if (row_block_id != col_block_id) { + return NULL; + } + const int stride = blocks_[row_block_id]; + + // Each cell is stored contiguously as its own little dense matrix. + *row = 0; + *col = 0; + *row_stride = stride; + *col_stride = stride; + return layout_[row_block_id]; +} + +// Assume that the user does not hold any locks on any cell blocks +// when they are calling SetZero. +void BlockRandomAccessDiagonalMatrix::SetZero() { + if (tsm_->num_nonzeros()) { + VectorRef(tsm_->mutable_values(), + tsm_->num_nonzeros()).setZero(); + } +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.h new file mode 100644 index 00000000000..6b3cff2338f --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_diagonal_matrix.h @@ -0,0 +1,96 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_ +#define CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_ + +#include <set> +#include <vector> +#include <utility> +#include "ceres/mutex.h" +#include "ceres/block_random_access_matrix.h" +#include "ceres/collections_port.h" +#include "ceres/triplet_sparse_matrix.h" +#include "ceres/integral_types.h" +#include "ceres/internal/macros.h" +#include "ceres/internal/port.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/types.h" + +namespace ceres { +namespace internal { + +// A thread safe block diagonal matrix implementation of +// BlockRandomAccessMatrix. +class BlockRandomAccessDiagonalMatrix : public BlockRandomAccessMatrix { + public: + // blocks is an array of block sizes. + BlockRandomAccessDiagonalMatrix(const vector<int>& blocks); + + // The destructor is not thread safe. It assumes that no one is + // modifying any cells when the matrix is being destroyed. 
+ virtual ~BlockRandomAccessDiagonalMatrix(); + + // BlockRandomAccessMatrix Interface. + virtual CellInfo* GetCell(int row_block_id, + int col_block_id, + int* row, + int* col, + int* row_stride, + int* col_stride); + + // This is not a thread safe method, it assumes that no cell is + // locked. + virtual void SetZero(); + + // Since the matrix is square, num_rows() == num_cols(). + virtual int num_rows() const { return tsm_->num_rows(); } + virtual int num_cols() const { return tsm_->num_cols(); } + + // Access to the underlying matrix object. + const TripletSparseMatrix* matrix() const { return tsm_.get(); } + TripletSparseMatrix* mutable_matrix() { return tsm_.get(); } + + private: + // row/column block sizes. + const vector<int> blocks_; + vector<CellInfo*> layout_; + + // The underlying matrix object which actually stores the cells. + scoped_ptr<TripletSparseMatrix> tsm_; + + friend class BlockRandomAccessDiagonalMatrixTest; + CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDiagonalMatrix); +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_BLOCK_RANDOM_ACCESS_DIAGONAL_MATRIX_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h index a6b5f39a985..27b10296d6c 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h +++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h @@ -47,7 +47,7 @@ namespace ceres { namespace internal { -// A threaf safe square block sparse implementation of +// A thread safe square block sparse implementation of // BlockRandomAccessMatrix. Internally a TripletSparseMatrix is used // for doing the actual storage. This class augments this matrix with // an unordered_map that allows random read/write access. 
diff --git a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc index 653194571b1..044d4381b11 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc @@ -57,8 +57,8 @@ class CanonicalViewsClustering { // configuration of the clustering algorithm that some of the // vertices may not be assigned to any cluster. In this case they // are assigned to a cluster with id = kInvalidClusterId. - void ComputeClustering(const Graph<int>& graph, - const CanonicalViewsClusteringOptions& options, + void ComputeClustering(const CanonicalViewsClusteringOptions& options, + const Graph<int>& graph, vector<int>* centers, IntMap* membership); @@ -81,21 +81,21 @@ class CanonicalViewsClustering { }; void ComputeCanonicalViewsClustering( - const Graph<int>& graph, const CanonicalViewsClusteringOptions& options, + const Graph<int>& graph, vector<int>* centers, IntMap* membership) { time_t start_time = time(NULL); CanonicalViewsClustering cv; - cv.ComputeClustering(graph, options, centers, membership); + cv.ComputeClustering(options, graph, centers, membership); VLOG(2) << "Canonical views clustering time (secs): " << time(NULL) - start_time; } // Implementation of CanonicalViewsClustering void CanonicalViewsClustering::ComputeClustering( - const Graph<int>& graph, const CanonicalViewsClusteringOptions& options, + const Graph<int>& graph, vector<int>* centers, IntMap* membership) { options_ = options; diff --git a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h index 48d1ed210c9..06d80c89e92 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h +++ b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h @@ -47,9 +47,6 
@@ #include "ceres/collections_port.h" #include "ceres/graph.h" -#include "ceres/internal/macros.h" -#include "ceres/map_util.h" -#include "glog/logging.h" namespace ceres { namespace internal { @@ -100,8 +97,8 @@ struct CanonicalViewsClusteringOptions; // algorithm that some of the vertices may not be assigned to any // cluster. In this case they are assigned to a cluster with id = -1; void ComputeCanonicalViewsClustering( - const Graph<int>& graph, const CanonicalViewsClusteringOptions& options, + const Graph<int>& graph, vector<int>* centers, HashMap<int, int>* membership); diff --git a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc index 9b8f9808cc9..88e61d9ed1b 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc @@ -33,6 +33,7 @@ #include "ceres/block_jacobi_preconditioner.h" #include "ceres/cgnr_linear_operator.h" #include "ceres/conjugate_gradients_solver.h" +#include "ceres/internal/eigen.h" #include "ceres/linear_solver.h" #include "ceres/wall_time.h" #include "glog/logging.h" @@ -43,6 +44,10 @@ namespace internal { CgnrSolver::CgnrSolver(const LinearSolver::Options& options) : options_(options), preconditioner_(NULL) { + if (options_.preconditioner_type != JACOBI && + options_.preconditioner_type != IDENTITY) { + LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners."; + } } LinearSolver::Summary CgnrSolver::SolveImpl( @@ -53,9 +58,9 @@ LinearSolver::Summary CgnrSolver::SolveImpl( EventLogger event_logger("CgnrSolver::Solve"); // Form z = Atb. - scoped_array<double> z(new double[A->num_cols()]); - std::fill(z.get(), z.get() + A->num_cols(), 0.0); - A->LeftMultiply(b, z.get()); + Vector z(A->num_cols()); + z.setZero(); + A->LeftMultiply(b, z.data()); // Precondition if necessary. 
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options; @@ -65,20 +70,17 @@ LinearSolver::Summary CgnrSolver::SolveImpl( } preconditioner_->Update(*A, per_solve_options.D); cg_per_solve_options.preconditioner = preconditioner_.get(); - } else if (options_.preconditioner_type != IDENTITY) { - LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners."; } // Solve (AtA + DtD)x = z (= Atb). - std::fill(x, x + A->num_cols(), 0.0); + VectorRef(x, A->num_cols()).setZero(); CgnrLinearOperator lhs(*A, per_solve_options.D); event_logger.AddEvent("Setup"); ConjugateGradientsSolver conjugate_gradient_solver(options_); LinearSolver::Summary summary = - conjugate_gradient_solver.Solve(&lhs, z.get(), cg_per_solve_options, x); + conjugate_gradient_solver.Solve(&lhs, z.data(), cg_per_solve_options, x); event_logger.AddEvent("Solve"); - return summary; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h index 715c975e00e..8f345d4d70a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h +++ b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h @@ -33,26 +33,37 @@ #ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_ #define CERES_INTERNAL_COLLECTIONS_PORT_H_ -#if defined(CERES_NO_TR1) +#if defined(CERES_NO_UNORDERED_MAP) # include <map> # include <set> -#else -# if defined(_MSC_VER) -# include <unordered_map> -# include <unordered_set> -# else -# include <tr1/unordered_map> -# include <tr1/unordered_set> -# endif #endif + +#if defined(CERES_TR1_UNORDERED_MAP) +# include <tr1/unordered_map> +# include <tr1/unordered_set> +# define CERES_HASH_NAMESPACE_START namespace std { namespace tr1 { +# define CERES_HASH_NAMESPACE_END } } +#endif + +#if defined(CERES_STD_UNORDERED_MAP) +# include <unordered_map> +# include <unordered_set> +# define CERES_HASH_NAMESPACE_START namespace std { +# define CERES_HASH_NAMESPACE_END } +#endif + +#if 
!defined(CERES_NO_UNORDERED_MAP) && !defined(CERES_TR1_UNORDERED_MAP) && !defined(CERES_STD_UNORDERED_MAP) +#error One of: CERES_NO_UNORDERED_MAP, CERES_TR1_UNORDERED_MAP, CERES_STD_UNORDERED_MAP must be defined! +#endif + #include <utility> #include "ceres/integral_types.h" #include "ceres/internal/port.h" -// Some systems don't have access to TR1. In that case, substitute the hash -// map/set with normal map/set. The price to pay is slightly slower speed for -// some operations. -#if defined(CERES_NO_TR1) +// Some systems don't have access to unordered_map/unordered_set. In +// that case, substitute the hash map/set with normal map/set. The +// price to pay is slower speed for some operations. +#if defined(CERES_NO_UNORDERED_MAP) namespace ceres { namespace internal { @@ -71,11 +82,19 @@ struct HashSet : set<K> {}; namespace ceres { namespace internal { +#if defined(CERES_TR1_UNORDERED_MAP) template<typename K, typename V> struct HashMap : std::tr1::unordered_map<K, V> {}; - template<typename K> struct HashSet : std::tr1::unordered_set<K> {}; +#endif + +#if defined(CERES_STD_UNORDERED_MAP) +template<typename K, typename V> +struct HashMap : std::unordered_map<K, V> {}; +template<typename K> +struct HashSet : std::unordered_set<K> {}; +#endif #if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__) #define GG_LONGLONG(x) x##I64 @@ -162,6 +181,5 @@ struct hash<pair<T, T> > { CERES_HASH_NAMESPACE_END -#endif // CERES_NO_TR1 - +#endif // CERES_NO_UNORDERED_MAP #endif // CERES_INTERNAL_COLLECTIONS_PORT_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc index ae8e8774709..524cb8ad988 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc @@ -44,6 +44,7 @@ #include "ceres/fpclassify.h" #include "ceres/internal/eigen.h" 
#include "ceres/linear_operator.h" +#include "ceres/stringprintf.h" #include "ceres/types.h" #include "glog/logging.h" @@ -55,9 +56,6 @@ bool IsZeroOrInfinity(double x) { return ((x == 0.0) || (IsInfinite(x))); } -// Constant used in the MATLAB implementation ~ 2 * eps. -const double kEpsilon = 2.2204e-16; - } // namespace ConjugateGradientsSolver::ConjugateGradientsSolver( @@ -76,17 +74,19 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( CHECK_EQ(A->num_rows(), A->num_cols()); LinearSolver::Summary summary; - summary.termination_type = MAX_ITERATIONS; + summary.termination_type = LINEAR_SOLVER_NO_CONVERGENCE; + summary.message = "Maximum number of iterations reached."; summary.num_iterations = 0; - int num_cols = A->num_cols(); + const int num_cols = A->num_cols(); VectorRef xref(x, num_cols); ConstVectorRef bref(b, num_cols); - double norm_b = bref.norm(); + const double norm_b = bref.norm(); if (norm_b == 0.0) { xref.setZero(); - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Convergence. |b| = 0."; return summary; } @@ -95,15 +95,16 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( Vector z(num_cols); Vector tmp(num_cols); - double tol_r = per_solve_options.r_tolerance * norm_b; + const double tol_r = per_solve_options.r_tolerance * norm_b; tmp.setZero(); A->RightMultiply(x, tmp.data()); r = bref - tmp; double norm_r = r.norm(); - if (norm_r <= tol_r) { - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = + StringPrintf("Convergence. 
|r| = %e <= %e.", norm_r, tol_r); return summary; } @@ -115,8 +116,6 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( for (summary.num_iterations = 1; summary.num_iterations < options_.max_num_iterations; ++summary.num_iterations) { - VLOG(3) << "cg iteration " << summary.num_iterations; - // Apply preconditioner if (per_solve_options.preconditioner != NULL) { z.setZero(); @@ -127,10 +126,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( double last_rho = rho; rho = r.dot(z); - if (IsZeroOrInfinity(rho)) { - LOG(ERROR) << "Numerical failure. rho = " << rho; - summary.termination_type = FAILURE; + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = StringPrintf("Numerical failure. rho = r'z = %e.", rho); break; }; @@ -139,8 +137,9 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( } else { double beta = rho / last_rho; if (IsZeroOrInfinity(beta)) { - LOG(ERROR) << "Numerical failure. beta = " << beta; - summary.termination_type = FAILURE; + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = StringPrintf( + "Numerical failure. beta = rho_n / rho_{n-1} = %e.", beta); break; } p = z + beta * p; @@ -149,18 +148,18 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( Vector& q = z; q.setZero(); A->RightMultiply(p.data(), q.data()); - double pq = p.dot(q); - + const double pq = p.dot(q); if ((pq <= 0) || IsInfinite(pq)) { - LOG(ERROR) << "Numerical failure. pq = " << pq; - summary.termination_type = FAILURE; + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = StringPrintf("Numerical failure. p'q = %e.", pq); break; } - double alpha = rho / pq; + const double alpha = rho / pq; if (IsInfinite(alpha)) { - LOG(ERROR) << "Numerical failure. alpha " << alpha; - summary.termination_type = FAILURE; + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = + StringPrintf("Numerical failure. 
alpha = rho / pq = %e", alpha); break; } @@ -183,7 +182,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( // Quadratic model based termination. // Q1 = x'Ax - 2 * b' x. - double Q1 = -1.0 * xref.dot(bref + r); + const double Q1 = -1.0 * xref.dot(bref + r); // For PSD matrices A, let // @@ -207,21 +206,23 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve( // Journal of Computational and Applied Mathematics, // 124(1-2), 45-59, 2000. // - double zeta = summary.num_iterations * (Q1 - Q0) / Q1; - VLOG(3) << "Q termination: zeta " << zeta - << " " << per_solve_options.q_tolerance; + const double zeta = summary.num_iterations * (Q1 - Q0) / Q1; if (zeta < per_solve_options.q_tolerance) { - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = + StringPrintf("Convergence: zeta = %e < %e", + zeta, + per_solve_options.q_tolerance); break; } Q0 = Q1; // Residual based termination. norm_r = r. norm(); - VLOG(3) << "R termination: norm_r " << norm_r - << " " << tol_r; if (norm_r <= tol_r) { - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = + StringPrintf("Convergence. 
|r| = %e <= %e.", norm_r, tol_r); break; } } diff --git a/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc index c4da987919a..bfe93c49826 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc @@ -227,6 +227,7 @@ void CoordinateDescentMinimizer::Solve(Program* program, minimizer_options.evaluator = evaluator.get(); minimizer_options.jacobian = jacobian.get(); minimizer_options.trust_region_strategy = trust_region_strategy.get(); + minimizer_options.is_silent = true; TrustRegionMinimizer minimizer; minimizer.Minimize(minimizer_options, parameter, summary); diff --git a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc index 60269a6a4b9..581fc6d4fc0 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc @@ -32,14 +32,14 @@ #include <cstddef> #include <cmath> +#include "ceres/internal/eigen.h" #include "glog/logging.h" namespace ceres { namespace internal { -Corrector::Corrector(double sq_norm, const double rho[3]) { +Corrector::Corrector(const double sq_norm, const double rho[3]) { CHECK_GE(sq_norm, 0.0); - CHECK_GT(rho[1], 0.0); sqrt_rho1_ = sqrt(rho[1]); // If sq_norm = 0.0, the correction becomes trivial, the residual @@ -84,6 +84,14 @@ Corrector::Corrector(double sq_norm, const double rho[3]) { return; } + // We now require that the first derivative of the loss function be + // positive only if the second derivative is positive. This is + // because when the second derivative is non-positive, we do not use + // the second order correction suggested by BANS and instead use a + // simpler first order strategy which does not use a division by the + // gradient of the loss function. 
+ CHECK_GT(rho[1], 0.0); + // Calculate the smaller of the two solutions to the equation // // 0.5 * alpha^2 - alpha - rho'' / rho' * z'z = 0. @@ -101,20 +109,25 @@ Corrector::Corrector(double sq_norm, const double rho[3]) { alpha_sq_norm_ = alpha / sq_norm; } -void Corrector::CorrectResiduals(int num_rows, double* residuals) { +void Corrector::CorrectResiduals(const int num_rows, double* residuals) { DCHECK(residuals != NULL); // Equation 11 in BANS. - for (int r = 0; r < num_rows; ++r) { - residuals[r] *= residual_scaling_; - } + VectorRef(residuals, num_rows) *= residual_scaling_; } -void Corrector::CorrectJacobian(int num_rows, - int num_cols, +void Corrector::CorrectJacobian(const int num_rows, + const int num_cols, double* residuals, double* jacobian) { DCHECK(residuals != NULL); DCHECK(jacobian != NULL); + + // The common case (rho[2] <= 0). + if (alpha_sq_norm_ == 0.0) { + VectorRef(jacobian, num_rows * num_cols) *= sqrt_rho1_; + return; + } + // Equation 11 in BANS. // // J = sqrt(rho) * (J - alpha^2 r * r' J) diff --git a/extern/libmv/third_party/ceres/internal/ceres/covariance_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/covariance_impl.cc index 19d545cc2d3..91f0393d966 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/covariance_impl.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/covariance_impl.cc @@ -35,6 +35,7 @@ #endif #include <algorithm> +#include <cstdlib> #include <utility> #include <vector> #include "Eigen/SVD" @@ -164,9 +165,9 @@ bool CovarianceImpl::GetCovarianceBlock(const double* original_parameter_block1, } if (offset == row_size) { - LOG(WARNING) << "Unable to find covariance block for " - << original_parameter_block1 << " " - << original_parameter_block2; + LOG(ERROR) << "Unable to find covariance block for " + << original_parameter_block1 << " " + << original_parameter_block2; return false; } @@ -347,8 +348,8 @@ bool CovarianceImpl::ComputeCovarianceSparsity( // values of the parameter blocks. 
Thus iterating over the keys of // parameter_block_to_row_index_ corresponds to iterating over the // rows of the covariance matrix in order. - int i = 0; // index into covariance_blocks. - int cursor = 0; // index into the covariance matrix. + int i = 0; // index into covariance_blocks. + int cursor = 0; // index into the covariance matrix. for (map<const double*, int>::const_iterator it = parameter_block_to_row_index_.begin(); it != parameter_block_to_row_index_.end(); @@ -392,12 +393,12 @@ bool CovarianceImpl::ComputeCovarianceSparsity( bool CovarianceImpl::ComputeCovarianceValues() { switch (options_.algorithm_type) { - case (DENSE_SVD): + case DENSE_SVD: return ComputeCovarianceValuesUsingDenseSVD(); #ifndef CERES_NO_SUITESPARSE - case (SPARSE_CHOLESKY): + case SPARSE_CHOLESKY: return ComputeCovarianceValuesUsingSparseCholesky(); - case (SPARSE_QR): + case SPARSE_QR: return ComputeCovarianceValuesUsingSparseQR(); #endif default: @@ -440,27 +441,38 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSparseCholesky() { cholmod_jacobian_view.sorted = 1; cholmod_jacobian_view.packed = 1; - cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view); + string status; + cholmod_factor* factor = ss.AnalyzeCholesky(&cholmod_jacobian_view, &status); event_logger.AddEvent("Symbolic Factorization"); - bool factorization_succeeded = ss.Cholesky(&cholmod_jacobian_view, factor); - if (factorization_succeeded) { - const double reciprocal_condition_number = - cholmod_rcond(factor, ss.mutable_cc()); - if (reciprocal_condition_number < - options_.min_reciprocal_condition_number) { - LOG(WARNING) << "Cholesky factorization of J'J is not reliable. " - << "Reciprocal condition number: " - << reciprocal_condition_number << " " - << "min_reciprocal_condition_number : " - << options_.min_reciprocal_condition_number; - factorization_succeeded = false; - } + if (factor == NULL) { + LOG(ERROR) << "Covariance estimation failed. 
" + << "CHOLMOD symbolic cholesky factorization returned with: " + << status; + return false; } + LinearSolverTerminationType termination_type = + ss.Cholesky(&cholmod_jacobian_view, factor, &status); event_logger.AddEvent("Numeric Factorization"); - if (!factorization_succeeded) { + if (termination_type != LINEAR_SOLVER_SUCCESS) { + LOG(ERROR) << "Covariance estimation failed. " + << "CHOLMOD numeric cholesky factorization returned with: " + << status; + ss.Free(factor); + return false; + } + + const double reciprocal_condition_number = + cholmod_rcond(factor, ss.mutable_cc()); + + if (reciprocal_condition_number < + options_.min_reciprocal_condition_number) { + LOG(ERROR) << "Cholesky factorization of J'J is not reliable. " + << "Reciprocal condition number: " + << reciprocal_condition_number << " " + << "min_reciprocal_condition_number : " + << options_.min_reciprocal_condition_number; ss.Free(factor); - LOG(WARNING) << "Cholesky factorization failed."; return false; } @@ -681,10 +693,10 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() { CHECK_NOTNULL(R); if (rank < cholmod_jacobian.ncol) { - LOG(WARNING) << "Jacobian matrix is rank deficient." - << "Number of columns: " << cholmod_jacobian.ncol - << " rank: " << rank; - delete []permutation; + LOG(ERROR) << "Jacobian matrix is rank deficient. " + << "Number of columns: " << cholmod_jacobian.ncol + << " rank: " << rank; + free(permutation); cholmod_l_free_sparse(&R, &cc); cholmod_l_finish(&cc); return false; @@ -739,7 +751,7 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingSparseQR() { } } - delete []permutation; + free(permutation); cholmod_l_free_sparse(&R, &cc); cholmod_l_finish(&cc); event_logger.AddEvent("Inversion"); @@ -807,11 +819,11 @@ bool CovarianceImpl::ComputeCovarianceValuesUsingDenseSVD() { if (automatic_truncation) { break; } else { - LOG(WARNING) << "Cholesky factorization of J'J is not reliable. 
" - << "Reciprocal condition number: " - << singular_value_ratio * singular_value_ratio << " " - << "min_reciprocal_condition_number : " - << options_.min_reciprocal_condition_number; + LOG(ERROR) << "Cholesky factorization of J'J is not reliable. " + << "Reciprocal condition number: " + << singular_value_ratio * singular_value_ratio << " " + << "min_reciprocal_condition_number : " + << options_.min_reciprocal_condition_number; return false; } } diff --git a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc index c6d77439653..7145f73a2ba 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc @@ -175,8 +175,8 @@ cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) { cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) { cs_di_sparse tsm_wrapper; - tsm_wrapper.nzmax = tsm->num_nonzeros();; - tsm_wrapper.nz = tsm->num_nonzeros();; + tsm_wrapper.nzmax = tsm->num_nonzeros(); + tsm_wrapper.nz = tsm->num_nonzeros(); tsm_wrapper.m = tsm->num_rows(); tsm_wrapper.n = tsm->num_cols(); tsm_wrapper.p = tsm->mutable_cols(); diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc index fbf3cbec9d2..f44d6da824c 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc @@ -95,9 +95,19 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingEigen( LinearSolver::Summary summary; summary.num_iterations = 1; - summary.termination_type = TOLERANCE; - VectorRef(x, num_cols) = - lhs.selfadjointView<Eigen::Upper>().llt().solve(rhs); + summary.termination_type = LINEAR_SOLVER_SUCCESS; + Eigen::LLT<Matrix, Eigen::Upper> llt = + lhs.selfadjointView<Eigen::Upper>().llt(); + + if 
(llt.info() != Eigen::Success) { + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = "Eigen LLT decomposition failed."; + } else { + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; + } + + VectorRef(x, num_cols) = llt.solve(rhs); event_logger.AddEvent("Solve"); return summary; } @@ -142,14 +152,14 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveUsingLAPACK( A->matrix().transpose() * ConstVectorRef(b, A->num_rows()); event_logger.AddEvent("Product"); - const int info = LAPACK::SolveInPlaceUsingCholesky(num_cols, lhs.data(), x); - event_logger.AddEvent("Solve"); - LinearSolver::Summary summary; summary.num_iterations = 1; - summary.termination_type = info == 0 ? TOLERANCE : FAILURE; - - event_logger.AddEvent("TearDown"); + summary.termination_type = + LAPACK::SolveInPlaceUsingCholesky(num_cols, + lhs.data(), + x, + &summary.message); + event_logger.AddEvent("Solve"); return summary; } } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc index d76d58b51b5..4388357bd2d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc @@ -60,6 +60,7 @@ LinearSolver::Summary DenseQRSolver::SolveImpl( return SolveUsingLAPACK(A, b, per_solve_options, x); } } + LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK( DenseSparseMatrix* A, const double* b, @@ -100,21 +101,18 @@ LinearSolver::Summary DenseQRSolver::SolveUsingLAPACK( work_.resize(work_size); } - const int info = LAPACK::SolveUsingQR(lhs_.rows(), - lhs_.cols(), - lhs_.data(), - work_.rows(), - work_.data(), - rhs_.data()); - event_logger.AddEvent("Solve"); - LinearSolver::Summary summary; summary.num_iterations = 1; - if (info == 0) { + summary.termination_type = LAPACK::SolveInPlaceUsingQR(lhs_.rows(), + lhs_.cols(), + lhs_.data(), + work_.rows(), + 
work_.data(), + rhs_.data(), + &summary.message); + event_logger.AddEvent("Solve"); + if (summary.termination_type == LINEAR_SOLVER_SUCCESS) { VectorRef(x, num_cols) = rhs_.head(num_cols); - summary.termination_type = TOLERANCE; - } else { - summary.termination_type = FAILURE; } event_logger.AddEvent("TearDown"); @@ -161,7 +159,8 @@ LinearSolver::Summary DenseQRSolver::SolveUsingEigen( // is good enough or not. LinearSolver::Summary summary; summary.num_iterations = 1; - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; event_logger.AddEvent("TearDown"); return summary; diff --git a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc index c85c8e5cbf5..f29376db793 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc @@ -99,7 +99,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep( } TrustRegionStrategy::Summary summary; summary.num_iterations = 0; - summary.termination_type = TOLERANCE; + summary.termination_type = LINEAR_SOLVER_SUCCESS; return summary; } @@ -135,7 +135,11 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep( summary.num_iterations = linear_solver_summary.num_iterations; summary.termination_type = linear_solver_summary.termination_type; - if (linear_solver_summary.termination_type != FAILURE) { + if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) { + return summary; + } + + if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) { switch (dogleg_type_) { // Interpolate the Cauchy point and the Gauss-Newton step. case TRADITIONAL_DOGLEG: @@ -146,7 +150,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep( // Cauchy point and the (Gauss-)Newton step. 
case SUBSPACE_DOGLEG: if (!ComputeSubspaceModel(jacobian)) { - summary.termination_type = FAILURE; + summary.termination_type = LINEAR_SOLVER_FAILURE; break; } ComputeSubspaceDoglegStep(step); @@ -513,7 +517,7 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep( const double* residuals) { const int n = jacobian->num_cols(); LinearSolver::Summary linear_solver_summary; - linear_solver_summary.termination_type = FAILURE; + linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE; // The Jacobian matrix is often quite poorly conditioned. Thus it is // necessary to add a diagonal matrix at the bottom to prevent the @@ -526,7 +530,7 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep( // If the solve fails, the multiplier to the diagonal is increased // up to max_mu_ by a factor of mu_increase_factor_ every time. If // the linear solver is still not successful, the strategy returns - // with FAILURE. + // with LINEAR_SOLVER_FAILURE. // // Next time when a new Gauss-Newton step is requested, the // multiplier starts out from the last successful solve. 
@@ -579,17 +583,21 @@ LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep( } } - if (linear_solver_summary.termination_type == FAILURE || + if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) { + return linear_solver_summary; + } + + if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE || !IsArrayValid(n, gauss_newton_step_.data())) { mu_ *= mu_increase_factor_; VLOG(2) << "Increasing mu " << mu_; - linear_solver_summary.termination_type = FAILURE; + linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE; continue; } break; } - if (linear_solver_summary.termination_type != FAILURE) { + if (linear_solver_summary.termination_type != LINEAR_SOLVER_FAILURE) { // The scaled Gauss-Newton step is D * GN: // // - (D^-1 J^T J D^-1)^-1 (D^-1 g) diff --git a/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py b/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py index caeca69fb80..78f779f2b70 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py +++ b/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py @@ -60,6 +60,7 @@ SPECIALIZATIONS = [(2, 2, 2), (2, 4, 3), (2, 4, 4), (2, 4, "Eigen::Dynamic"), + (2, "Eigen::Dynamic", "Eigen::Dynamic"), (4, 4, 2), (4, 4, 3), (4, 4, 4), diff --git a/extern/libmv/third_party/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py b/extern/libmv/third_party/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py new file mode 100644 index 00000000000..c6ab573fa4b --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generate_partitioned_matrix_view_specializations.py @@ -0,0 +1,226 @@ +# Ceres Solver - A fast non-linear least squares minimizer +# Copyright 2013 Google Inc. All rights reserved. 
+# http://code.google.com/p/ceres-solver/ +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# * Neither the name of Google Inc. nor the names of its contributors may be +# used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# Author: sameeragarwal@google.com (Sameer Agarwal) +# +# Script for explicitly generating template specialization of the +# PartitionedMatrixView class. Explicitly generating these +# instantiations in separate .cc files breaks the compilation into +# separate compilation unit rather than one large cc file. +# +# This script creates two sets of files. +# +# 1. partitioned_matrix_view_x_x_x.cc +# where the x indicates the template parameters and +# +# 2. 
partitioned_matrix_view.cc +# +# that contains a factory function for instantiating these classes +# based on runtime parameters. +# +# The list of tuples, specializations indicates the set of +# specializations that is generated. + +# Set of template specializations to generate +SPECIALIZATIONS = [(2, 2, 2), + (2, 2, 3), + (2, 2, 4), + (2, 2, "Eigen::Dynamic"), + (2, 3, 3), + (2, 3, 4), + (2, 3, 9), + (2, 3, "Eigen::Dynamic"), + (2, 4, 3), + (2, 4, 4), + (2, 4, "Eigen::Dynamic"), + (2, "Eigen::Dynamic", "Eigen::Dynamic"), + (4, 4, 2), + (4, 4, 3), + (4, 4, 4), + (4, 4, "Eigen::Dynamic"), + ("Eigen::Dynamic", "Eigen::Dynamic", "Eigen::Dynamic")] +HEADER = """// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. 
+""" + +DYNAMIC_FILE = """ + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<%s, %s, %s>; + +} // namespace internal +} // namespace ceres +""" + +SPECIALIZATION_FILE = """ +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<%s, %s, %s>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION +""" + +FACTORY_FILE_HEADER = """ +#include "ceres/linear_solver.h" +#include "ceres/partitioned_matrix_view.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +PartitionedMatrixViewBase* +PartitionedMatrixViewBase::Create(const LinearSolver::Options& options, + const BlockSparseMatrix& matrix) { +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION +""" + +FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) && + (options.e_block_size == %s) && + (options.f_block_size == %s)) { + return new PartitionedMatrixView<%s, %s, %s>( + matrix, options.elimination_groups[0]); + } +""" + +FACTORY_FOOTER = """ +#endif + VLOG(1) << "Template specializations not found for <" + << options.row_block_size << "," + << options.e_block_size << "," + << options.f_block_size << ">"; + return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); +}; + +} // namespace internal +} // namespace ceres +""" + + +def SuffixForSize(size): + if size == "Eigen::Dynamic": + return "d" + return str(size) + + +def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size): + return "_".join([prefix] + map(SuffixForSize, (row_block_size, + e_block_size, + f_block_size))) + + +def Specialize(): + """ + Generate specialization code and the conditionals to instantiate it. 
+ """ + f = open("partitioned_matrix_view.cc", "w") + f.write(HEADER) + f.write(FACTORY_FILE_HEADER) + + for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS: + output = SpecializationFilename("generated/partitioned_matrix_view", + row_block_size, + e_block_size, + f_block_size) + ".cc" + fptr = open(output, "w") + fptr.write(HEADER) + + template = SPECIALIZATION_FILE + if (row_block_size == "Eigen::Dynamic" and + e_block_size == "Eigen::Dynamic" and + f_block_size == "Eigen::Dynamic"): + template = DYNAMIC_FILE + + fptr.write(template % (row_block_size, e_block_size, f_block_size)) + fptr.close() + + f.write(FACTORY_CONDITIONAL % (row_block_size, + e_block_size, + f_block_size, + row_block_size, + e_block_size, + f_block_size)) + f.write(FACTORY_FOOTER) + f.close() + + +if __name__ == "__main__": + Specialize() diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc new file mode 100644 index 00000000000..3079cff83f6 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_2.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. 
+ +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 2, 2>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc new file mode 100644 index 00000000000..d2ea113fa75 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_3.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 2, 3>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc new file mode 100644 index 00000000000..4e59910f7fc --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_4.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 2, 4>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc new file mode 100644 index 00000000000..82392958590 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_2_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 2, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc new file mode 100644 index 00000000000..b408ca5d301 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_3.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 3, 3>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc new file mode 100644 index 00000000000..fc468bf6264 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_4.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 3, 4>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc new file mode 100644 index 00000000000..3633a1c6827 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_9.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 3, 9>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc new file mode 100644 index 00000000000..83147276ab8 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_3_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 3, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc new file mode 100644 index 00000000000..04ebe93de77 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_3.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 4, 3>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc new file mode 100644 index 00000000000..5374554673d --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_4.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 4, 4>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc new file mode 100644 index 00000000000..69eccf934dd --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_4_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, 4, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc new file mode 100644 index 00000000000..32388121824 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_2_d_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc new file mode 100644 index 00000000000..1a223ffbde8 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_2.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<4, 4, 2>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc new file mode 100644 index 00000000000..d50c18d70e1 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_3.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<4, 4, 3>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc new file mode 100644 index 00000000000..adf7783c3ed --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_4.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<4, 4, 4>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc new file mode 100644 index 00000000000..06f75eaea1c --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_4_4_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<4, 4, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc new file mode 100644 index 00000000000..b392fd50917 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/partitioned_matrix_view_d_d_d.cc @@ -0,0 +1,53 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. 
+//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. + + +#include "ceres/partitioned_matrix_view_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc b/extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc new file mode 100644 index 00000000000..4b420c371a1 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/generated/schur_eliminator_2_d_d.cc @@ -0,0 +1,56 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of SchurEliminator. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. 
+ +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + +#include "ceres/schur_eliminator_impl.h" +#include "ceres/internal/eigen.h" + +namespace ceres { +namespace internal { + +template class SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>; + +} // namespace internal +} // namespace ceres + +#endif // CERES_RESTRICT_SCHUR_SPECIALIZATION diff --git a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc index 3edf95da6e0..550301359ad 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc @@ -44,7 +44,7 @@ #include "ceres/problem_impl.h" #include "ceres/program.h" #include "ceres/residual_block.h" -#include "ceres/runtime_numeric_diff_cost_function.h" +#include "ceres/dynamic_numeric_diff_cost_function.h" #include "ceres/stringprintf.h" #include "ceres/types.h" #include "glog/logging.h" @@ -84,14 +84,24 @@ class GradientCheckingCostFunction : public CostFunction { double relative_precision, const string& extra_info) : function_(function), - finite_diff_cost_function_( - CreateRuntimeNumericDiffCostFunction(function, - CENTRAL, - relative_step_size)), relative_precision_(relative_precision), extra_info_(extra_info) { - *mutable_parameter_block_sizes() = function->parameter_block_sizes(); + DynamicNumericDiffCostFunction<CostFunction, CENTRAL>* + finite_diff_cost_function = + new DynamicNumericDiffCostFunction<CostFunction, CENTRAL>( + function, + DO_NOT_TAKE_OWNERSHIP, + relative_step_size); + + const vector<int16>& parameter_block_sizes = + function->parameter_block_sizes(); + for (int i = 0; i < parameter_block_sizes.size(); ++i) { + finite_diff_cost_function->AddParameterBlock(parameter_block_sizes[i]); + } + *mutable_parameter_block_sizes() = parameter_block_sizes; set_num_residuals(function->num_residuals()); + 
finite_diff_cost_function->SetNumResiduals(num_residuals()); + finite_diff_cost_function_.reset(finite_diff_cost_function); } virtual ~GradientCheckingCostFunction() { } diff --git a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc index 32722bb6e8f..2da6235f513 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc @@ -35,21 +35,18 @@ #include "ceres/block_structure.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/linear_solver.h" #include "ceres/types.h" #include "glog/logging.h" namespace ceres { namespace internal { -ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks, - bool preconditioner) - : num_eliminate_blocks_(num_eliminate_blocks), - preconditioner_(preconditioner), - A_(NULL), +ImplicitSchurComplement::ImplicitSchurComplement( + const LinearSolver::Options& options) + : options_(options), D_(NULL), - b_(NULL), - block_diagonal_EtE_inverse_(NULL), - block_diagonal_FtF_inverse_(NULL) { + b_(NULL) { } ImplicitSchurComplement::~ImplicitSchurComplement() { @@ -61,7 +58,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A, // Since initialization is reasonably heavy, perhaps we can save on // constructing a new object everytime. if (A_ == NULL) { - A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_)); + A_.reset(PartitionedMatrixViewBase::Create(options_, A)); } D_ = D; @@ -71,7 +68,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A, // E'E and F'E. 
if (block_diagonal_EtE_inverse_ == NULL) { block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE()); - if (preconditioner_) { + if (options_.preconditioner_type == JACOBI) { block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF()); } rhs_.resize(A_->num_cols_f()); @@ -82,7 +79,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A, tmp_f_cols_.resize(A_->num_cols_f()); } else { A_->UpdateBlockDiagonalEtE(block_diagonal_EtE_inverse_.get()); - if (preconditioner_) { + if (options_.preconditioner_type == JACOBI) { A_->UpdateBlockDiagonalFtF(block_diagonal_FtF_inverse_.get()); } } @@ -91,7 +88,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrix& A, // contributions from the diagonal D if it is non-null. Add that to // the block diagonals and invert them. AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get()); - if (preconditioner_) { + if (options_.preconditioner_type == JACOBI) { AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(), block_diagonal_FtF_inverse_.get()); } diff --git a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h index c1bb6e19bab..c992bdc206e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h +++ b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h @@ -35,6 +35,7 @@ #define CERES_INTERNAL_IMPLICIT_SCHUR_COMPLEMENT_H_ #include "ceres/linear_operator.h" +#include "ceres/linear_solver.h" #include "ceres/partitioned_matrix_view.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" @@ -96,7 +97,7 @@ class ImplicitSchurComplement : public LinearOperator { // // TODO(sameeragarwal): Get rid of the two bools below and replace // them with enums. 
- ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner); + ImplicitSchurComplement(const LinearSolver::Options& options); virtual ~ImplicitSchurComplement(); // Initialize the Schur complement for a linear least squares @@ -142,10 +143,9 @@ class ImplicitSchurComplement : public LinearOperator { void AddDiagonalAndInvert(const double* D, BlockSparseMatrix* matrix); void UpdateRhs(); - int num_eliminate_blocks_; - bool preconditioner_; + const LinearSolver::Options& options_; - scoped_ptr<PartitionedMatrixView> A_; + scoped_ptr<PartitionedMatrixViewBase> A_; const double* D_; const double* b_; diff --git a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc index 1aac5657ce6..6de410bf80f 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc @@ -38,6 +38,7 @@ #include "ceres/block_sparse_matrix.h" #include "ceres/block_structure.h" #include "ceres/conjugate_gradients_solver.h" +#include "ceres/detect_structure.h" #include "ceres/implicit_schur_complement.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" @@ -69,35 +70,36 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( EventLogger event_logger("IterativeSchurComplementSolver::Solve"); CHECK_NOTNULL(A->block_structure()); - + const int num_eliminate_blocks = options_.elimination_groups[0]; // Initialize a ImplicitSchurComplement object. 
if (schur_complement_ == NULL) { - schur_complement_.reset( - new ImplicitSchurComplement(options_.elimination_groups[0], - options_.preconditioner_type == JACOBI)); + DetectStructure(*(A->block_structure()), + num_eliminate_blocks, + &options_.row_block_size, + &options_.e_block_size, + &options_.f_block_size); + schur_complement_.reset(new ImplicitSchurComplement(options_)); } schur_complement_->Init(*A, per_solve_options.D, b); const int num_schur_complement_blocks = - A->block_structure()->cols.size() - options_.elimination_groups[0]; + A->block_structure()->cols.size() - num_eliminate_blocks; if (num_schur_complement_blocks == 0) { VLOG(2) << "No parameter blocks left in the schur complement."; LinearSolver::Summary cg_summary; cg_summary.num_iterations = 0; - cg_summary.termination_type = TOLERANCE; + cg_summary.termination_type = LINEAR_SOLVER_SUCCESS; schur_complement_->BackSubstitute(NULL, x); return cg_summary; } // Initialize the solution to the Schur complement system to zero. - // - // TODO(sameeragarwal): There maybe a better initialization than an - // all zeros solution. Explore other cheap starting points. reduced_linear_system_solution_.resize(schur_complement_->num_rows()); reduced_linear_system_solution_.setZero(); - // Instantiate a conjugate gradient solver that runs on the Schur complement - // matrix with the block diagonal of the matrix F'F as the preconditioner. + // Instantiate a conjugate gradient solver that runs on the Schur + // complement matrix with the block diagonal of the matrix F'F as + // the preconditioner. 
LinearSolver::Options cg_options; cg_options.max_num_iterations = options_.max_num_iterations; ConjugateGradientsSolver cg_solver(cg_options); @@ -108,6 +110,8 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( Preconditioner::Options preconditioner_options; preconditioner_options.type = options_.preconditioner_type; + preconditioner_options.visibility_clustering_type = + options_.visibility_clustering_type; preconditioner_options.sparse_linear_algebra_library_type = options_.sparse_linear_algebra_library_type; preconditioner_options.num_threads = options_.num_threads; @@ -149,26 +153,26 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( preconditioner_->Update(*A, per_solve_options.D); cg_per_solve_options.preconditioner = preconditioner_.get(); } - event_logger.AddEvent("Setup"); LinearSolver::Summary cg_summary; cg_summary.num_iterations = 0; - cg_summary.termination_type = FAILURE; + cg_summary.termination_type = LINEAR_SOLVER_FAILURE; + // TODO(sameeragarwal): Refactor preconditioners to return a more + // sane message. 
+ cg_summary.message = "Preconditioner update failed."; if (preconditioner_update_was_successful) { cg_summary = cg_solver.Solve(schur_complement_.get(), schur_complement_->rhs().data(), cg_per_solve_options, reduced_linear_system_solution_.data()); - if (cg_summary.termination_type != FAILURE) { + if (cg_summary.termination_type != LINEAR_SOLVER_FAILURE && + cg_summary.termination_type != LINEAR_SOLVER_FATAL_ERROR) { schur_complement_->BackSubstitute( reduced_linear_system_solution_.data(), x); } } - - VLOG(2) << "CG Iterations : " << cg_summary.num_iterations; - event_logger.AddEvent("Solve"); return cg_summary; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/lapack.cc b/extern/libmv/third_party/ceres/internal/ceres/lapack.cc index 73bfa69cbbd..c4f9302515e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/lapack.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/lapack.cc @@ -29,6 +29,9 @@ // Author: sameeragarwal@google.com (Sameer Agarwal) #include "ceres/lapack.h" + +#include "ceres/internal/port.h" +#include "ceres/linear_solver.h" #include "glog/logging.h" // C interface to the LAPACK Cholesky factorization and triangular solve. 
@@ -63,12 +66,14 @@ extern "C" void dgels_(char* uplo, namespace ceres { namespace internal { -int LAPACK::SolveInPlaceUsingCholesky(int num_rows, - const double* in_lhs, - double* rhs_and_solution) { +LinearSolverTerminationType LAPACK::SolveInPlaceUsingCholesky( + int num_rows, + const double* in_lhs, + double* rhs_and_solution, + string* status) { #ifdef CERES_NO_LAPACK LOG(FATAL) << "Ceres was built without a BLAS library."; - return -1; + return LINEAR_SOLVER_FATAL_ERROR; #else char uplo = 'L'; int n = num_rows; @@ -77,17 +82,33 @@ int LAPACK::SolveInPlaceUsingCholesky(int num_rows, double* lhs = const_cast<double*>(in_lhs); dpotrf_(&uplo, &n, lhs, &n, &info); - if (info != 0) { - LOG(INFO) << "Cholesky factorization (dpotrf) failed: " << info; - return info; + if (info < 0) { + LOG(FATAL) << "Congratulations, you found a bug in Ceres." + << "Please report it." + << "LAPACK::dpotrf fatal error." + << "Argument: " << -info << " is invalid."; + return LINEAR_SOLVER_FATAL_ERROR; + } + + if (info > 0) { + *status = + StringPrintf( + "LAPACK::dpotrf numerical failure. " + "The leading minor of order %d is not positive definite.", info); + return LINEAR_SOLVER_FAILURE; } dpotrs_(&uplo, &n, &nrhs, lhs, &n, rhs_and_solution, &n, &info); - if (info != 0) { - LOG(INFO) << "Triangular solve (dpotrs) failed: " << info; + if (info < 0) { + LOG(FATAL) << "Congratulations, you found a bug in Ceres." + << "Please report it." + << "LAPACK::dpotrs fatal error." + << "Argument: " << -info << " is invalid."; + return LINEAR_SOLVER_FATAL_ERROR; } - return info; + *status = "Success"; + return LINEAR_SOLVER_SUCCESS; #endif }; @@ -113,20 +134,27 @@ int LAPACK::EstimateWorkSizeForQR(int num_rows, int num_cols) { &lwork, &info); - CHECK_EQ(info, 0); - return work; + if (info < 0) { + LOG(FATAL) << "Congratulations, you found a bug in Ceres." + << "Please report it." + << "LAPACK::dgels fatal error." 
+ << "Argument: " << info << " is invalid."; + } + return static_cast<int>(work); #endif } -int LAPACK::SolveUsingQR(int num_rows, - int num_cols, - const double* in_lhs, - int work_size, - double* work, - double* rhs_and_solution) { +LinearSolverTerminationType LAPACK::SolveInPlaceUsingQR( + int num_rows, + int num_cols, + const double* in_lhs, + int work_size, + double* work, + double* rhs_and_solution, + string* status) { #ifdef CERES_NO_LAPACK LOG(FATAL) << "Ceres was built without a LAPACK library."; - return -1; + return LINEAR_SOLVER_FATAL_ERROR; #else char trans = 'N'; int m = num_rows; @@ -149,7 +177,15 @@ int LAPACK::SolveUsingQR(int num_rows, &work_size, &info); - return info; + if (info < 0) { + LOG(FATAL) << "Congratulations, you found a bug in Ceres." + << "Please report it." + << "LAPACK::dgels fatal error." + << "Argument: " << -info << " is invalid."; + } + + *status = "Success."; + return LINEAR_SOLVER_SUCCESS; #endif } diff --git a/extern/libmv/third_party/ceres/internal/ceres/lapack.h b/extern/libmv/third_party/ceres/internal/ceres/lapack.h index 4f3a88c700a..53a33e1b19d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/lapack.h +++ b/extern/libmv/third_party/ceres/internal/ceres/lapack.h @@ -31,6 +31,10 @@ #ifndef CERES_INTERNAL_LAPACK_H_ #define CERES_INTERNAL_LAPACK_H_ +#include <string> +#include "ceres/internal/port.h" +#include "ceres/linear_solver.h" + namespace ceres { namespace internal { @@ -47,10 +51,14 @@ class LAPACK { // // This function uses the LAPACK dpotrf and dpotrs routines. // - // The return value is zero if the solve is successful. - static int SolveInPlaceUsingCholesky(int num_rows, - const double* lhs, - double* rhs_and_solution); + // The return value and the status string together describe whether + // the solver terminated successfully or not and if so, what was the + // reason for failure. 
+ static LinearSolverTerminationType SolveInPlaceUsingCholesky( + int num_rows, + const double* lhs, + double* rhs_and_solution, + string* status); // The SolveUsingQR function requires a buffer for its temporary // computation. This function given the size of the lhs matrix will @@ -73,13 +81,17 @@ class LAPACK { // // This function uses the LAPACK dgels routine. // - // The return value is zero if the solve is successful. - static int SolveUsingQR(int num_rows, - int num_cols, - const double* lhs, - int work_size, - double* work, - double* rhs_and_solution); + // The return value and the status string together describe whether + // the solver terminated successfully or not and if so, what was the + // reason for failure. + static LinearSolverTerminationType SolveInPlaceUsingQR( + int num_rows, + int num_cols, + const double* lhs, + int work_size, + double* work, + double* rhs_and_solution, + string* status); }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc index fad7c1f3258..ce3b69a8005 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc @@ -105,10 +105,13 @@ TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep( // do not need to be modified. LinearSolver::Summary linear_solver_summary = linear_solver_->Solve(jacobian, residuals, solve_options, step); - if (linear_solver_summary.termination_type == FAILURE || - !IsArrayValid(num_parameters, step)) { + + if (linear_solver_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) { + LOG(WARNING) << "Linear solver fatal error."; + } else if (linear_solver_summary.termination_type == LINEAR_SOLVER_FAILURE || + !IsArrayValid(num_parameters, step)) { LOG(WARNING) << "Linear solver failure. 
Failed to compute a finite step."; - linear_solver_summary.termination_type = FAILURE; + linear_solver_summary.termination_type = LINEAR_SOLVER_FAILURE; } else { VectorRef(step, num_parameters) *= -1.0; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search.cc index 8323896915a..77d1369634b 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/line_search.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search.cc @@ -29,6 +29,9 @@ // Author: sameeragarwal@google.com (Sameer Agarwal) #ifndef CERES_NO_LINE_SEARCH_MINIMIZER +#include <iomanip> +#include <iostream> // NOLINT + #include "ceres/line_search.h" #include "ceres/fpclassify.h" @@ -41,6 +44,8 @@ namespace ceres { namespace internal { namespace { +// Precision used for floating point values in error message output. +const int kErrorMessageNumericPrecision = 8; FunctionSample ValueSample(const double x, const double value) { FunctionSample sample; @@ -67,10 +72,7 @@ FunctionSample ValueAndGradientSample(const double x, // Convenience stream operator for pushing FunctionSamples into log messages. std::ostream& operator<<(std::ostream &os, const FunctionSample& sample) { - os << "[x: " << sample.x << ", value: " << sample.value - << ", gradient: " << sample.gradient << ", value_is_valid: " - << std::boolalpha << sample.value_is_valid << ", gradient_is_valid: " - << std::boolalpha << sample.gradient_is_valid << "]"; + os << sample.ToDebugString(); return os; } @@ -170,6 +172,7 @@ double LineSearch::InterpolatingPolynomialMinimizingStepSize( // to avoid replicating current.value_is_valid == false // behaviour in WolfeLineSearch. 
CHECK(lowerbound.value_is_valid) + << std::scientific << std::setprecision(kErrorMessageNumericPrecision) << "Ceres bug: lower-bound sample for interpolation is invalid, " << "please contact the developers!, interpolation_type: " << LineSearchInterpolationTypeToString(interpolation_type) @@ -237,20 +240,26 @@ void ArmijoLineSearch::Search(const double step_size_estimate, FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0); current.value_is_valid = false; - const bool interpolation_uses_gradients = + // As the Armijo line search algorithm always uses the initial point, for + // which both the function value and derivative are known, when fitting a + // minimizing polynomial, we can fit up to a quadratic without requiring the + // gradient at the current query point. + const bool interpolation_uses_gradient_at_current_sample = options().interpolation_type == CUBIC; const double descent_direction_max_norm = static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm(); ++summary->num_function_evaluations; - if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; } + if (interpolation_uses_gradient_at_current_sample) { + ++summary->num_gradient_evaluations; + } current.value_is_valid = function->Evaluate(current.x, ¤t.value, - interpolation_uses_gradients + interpolation_uses_gradient_at_current_sample ? 
¤t.gradient : NULL); current.gradient_is_valid = - interpolation_uses_gradients && current.value_is_valid; + interpolation_uses_gradient_at_current_sample && current.value_is_valid; while (!current.value_is_valid || current.value > (initial_cost + options().sufficient_decrease @@ -291,14 +300,16 @@ void ArmijoLineSearch::Search(const double step_size_estimate, current.x = step_size; ++summary->num_function_evaluations; - if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; } + if (interpolation_uses_gradient_at_current_sample) { + ++summary->num_gradient_evaluations; + } current.value_is_valid = function->Evaluate(current.x, ¤t.value, - interpolation_uses_gradients + interpolation_uses_gradient_at_current_sample ? ¤t.gradient : NULL); current.gradient_is_valid = - interpolation_uses_gradients && current.value_is_valid; + interpolation_uses_gradient_at_current_sample && current.value_is_valid; } summary->optimal_step_size = current.x; @@ -350,33 +361,36 @@ void WolfeLineSearch::Search(const double step_size_estimate, &bracket_low, &bracket_high, &do_zoom_search, - summary) && - summary->num_iterations < options().max_num_iterations) { - // Failed to find either a valid point or a valid bracket, but we did not - // run out of iterations. + summary)) { + // Failed to find either a valid point, a valid bracket satisfying the Wolfe + // conditions, or even a step size > minimum tolerance satisfying the Armijo + // condition. return; } + if (!do_zoom_search) { // Either: Bracketing phase already found a point satisfying the strong // Wolfe conditions, thus no Zoom required. // // Or: Bracketing failed to find a valid bracket or a point satisfying the - // strong Wolfe conditions within max_num_iterations. 
As this is an - // 'artificial' constraint, and we would otherwise fail to produce a valid - // point when ArmijoLineSearch would succeed, we return the lowest point - // found thus far which satsifies the Armijo condition (but not the Wolfe - // conditions). - CHECK(bracket_low.value_is_valid) - << "Ceres bug: Bracketing produced an invalid bracket_low, please " - << "contact the developers!, bracket_low: " << bracket_low - << ", bracket_high: " << bracket_high << ", num_iterations: " - << summary->num_iterations << ", max_num_iterations: " - << options().max_num_iterations; + // strong Wolfe conditions within max_num_iterations, or whilst searching + // shrank the bracket width until it was below our minimum tolerance. + // As these are 'artificial' constraints, and we would otherwise fail to + // produce a valid point when ArmijoLineSearch would succeed, we return the + // point with the lowest cost found thus far which satsifies the Armijo + // condition (but not the Wolfe conditions). summary->optimal_step_size = bracket_low.x; summary->success = true; return; } + VLOG(3) << std::scientific << std::setprecision(kErrorMessageNumericPrecision) + << "Starting line search zoom phase with bracket_low: " + << bracket_low << ", bracket_high: " << bracket_high + << ", bracket width: " << fabs(bracket_low.x - bracket_high.x) + << ", bracket abs delta cost: " + << fabs(bracket_low.value - bracket_high.value); + // Wolfe Zoom phase: Called when the Bracketing phase finds an interval of // non-zero, finite width that should bracket step sizes which satisfy the // (strong) Wolfe conditions (before finding a step size that satisfies the @@ -419,11 +433,22 @@ void WolfeLineSearch::Search(const double step_size_estimate, summary->success = true; } -// Returns true iff bracket_low & bracket_high bound a bracket that contains -// points which satisfy the strong Wolfe conditions. 
Otherwise, on return false, -// if we stopped searching due to the 'artificial' condition of reaching -// max_num_iterations, bracket_low is the step size amongst all those -// tested, which satisfied the Armijo decrease condition and minimized f(). +// Returns true if either: +// +// A termination condition satisfying the (strong) Wolfe bracketing conditions +// is found: +// +// - A valid point, defined as a bracket of zero width [zoom not required]. +// - A valid bracket (of width > tolerance), [zoom required]. +// +// Or, searching was stopped due to an 'artificial' constraint, i.e. not +// a condition imposed / required by the underlying algorithm, but instead an +// engineering / implementation consideration. But a step which exceeds the +// minimum step size, and satsifies the Armijo condition was still found, +// and should thus be used [zoom not required]. +// +// Returns false if no step size > minimum step size was found which +// satisfies at least the Armijo condition. bool WolfeLineSearch::BracketingPhase( const FunctionSample& initial_position, const double step_size_estimate, @@ -437,23 +462,28 @@ bool WolfeLineSearch::BracketingPhase( FunctionSample current = ValueAndGradientSample(step_size_estimate, 0.0, 0.0); current.value_is_valid = false; - const bool interpolation_uses_gradients = - options().interpolation_type == CUBIC; const double descent_direction_max_norm = static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm(); *do_zoom_search = false; *bracket_low = initial_position; + // As we require the gradient to evaluate the Wolfe condition, we always + // calculate it together with the value, irrespective of the interpolation + // type. As opposed to only calculating the gradient after the Armijo + // condition is satisifed, as the computational saving from this approach + // would be slight (perhaps even negative due to the extra call). 
Also, + // always calculating the value & gradient together protects against us + // reporting invalid solutions if the cost function returns slightly different + // function values when evaluated with / without gradients (due to numerical + // issues). ++summary->num_function_evaluations; - if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; } + ++summary->num_gradient_evaluations; current.value_is_valid = function->Evaluate(current.x, ¤t.value, - interpolation_uses_gradients - ? ¤t.gradient : NULL); - current.gradient_is_valid = - interpolation_uses_gradients && current.value_is_valid; + ¤t.gradient); + current.gradient_is_valid = current.value_is_valid; while (true) { ++summary->num_iterations; @@ -470,22 +500,14 @@ bool WolfeLineSearch::BracketingPhase( *do_zoom_search = true; *bracket_low = previous; *bracket_high = current; + VLOG(3) << std::scientific + << std::setprecision(kErrorMessageNumericPrecision) + << "Bracket found: current step (" << current.x + << ") violates Armijo sufficient condition, or has passed an " + << "inflection point of f() based on value."; break; } - // Irrespective of the interpolation type we are using, we now need the - // gradient at the current point (which satisfies the Armijo condition) - // in order to check the strong Wolfe conditions. - if (!interpolation_uses_gradients) { - ++summary->num_function_evaluations; - ++summary->num_gradient_evaluations; - current.value_is_valid = - function->Evaluate(current.x, - ¤t.value, - ¤t.gradient); - current.gradient_is_valid = current.value_is_valid; - } - if (current.value_is_valid && fabs(current.gradient) <= -options().sufficient_curvature_decrease * initial_position.gradient) { @@ -493,6 +515,11 @@ bool WolfeLineSearch::BracketingPhase( // valid termination point, therefore a Zoom not required. 
*bracket_low = current; *bracket_high = current; + VLOG(3) << std::scientific + << std::setprecision(kErrorMessageNumericPrecision) + << "Bracketing phase found step size: " << current.x + << ", satisfying strong Wolfe conditions, initial_position: " + << initial_position << ", current: " << current; break; } else if (current.value_is_valid && current.gradient >= 0) { @@ -505,6 +532,29 @@ bool WolfeLineSearch::BracketingPhase( // Note inverse ordering from first bracket case. *bracket_low = current; *bracket_high = previous; + VLOG(3) << "Bracket found: current step (" << current.x + << ") satisfies Armijo, but has gradient >= 0, thus have passed " + << "an inflection point of f()."; + break; + + } else if (current.value_is_valid && + fabs(current.x - previous.x) * descent_direction_max_norm + < options().min_step_size) { + // We have shrunk the search bracket to a width less than our tolerance, + // and still not found either a point satisfying the strong Wolfe + // conditions, or a valid bracket containing such a point. Stop searching + // and set bracket_low to the size size amongst all those tested which + // minimizes f() and satisfies the Armijo condition. + LOG(WARNING) << "Line search failed: Wolfe bracketing phase shrank " + << "bracket width: " << fabs(current.x - previous.x) + << ", to < tolerance: " << options().min_step_size + << ", with descent_direction_max_norm: " + << descent_direction_max_norm << ", and failed to find " + << "a point satisfying the strong Wolfe conditions or a " + << "bracketing containing such a point. Accepting " + << "point found satisfying Armijo condition only, to " + << "allow continuation."; + *bracket_low = current; break; } else if (summary->num_iterations >= options().max_num_iterations) { @@ -523,7 +573,7 @@ bool WolfeLineSearch::BracketingPhase( *bracket_low = current.value_is_valid && current.value < bracket_low->value ? 
current : *bracket_low; - return false; + break; } // Either: f(current) is invalid; or, f(current) is valid, but does not // satisfy the strong Wolfe conditions itself, or the conditions for @@ -563,17 +613,22 @@ bool WolfeLineSearch::BracketingPhase( current.x = step_size; ++summary->num_function_evaluations; - if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; } + ++summary->num_gradient_evaluations; current.value_is_valid = function->Evaluate(current.x, ¤t.value, - interpolation_uses_gradients - ? ¤t.gradient : NULL); - current.gradient_is_valid = - interpolation_uses_gradients && current.value_is_valid; + ¤t.gradient); + current.gradient_is_valid = current.value_is_valid; + } + + // Ensure that even if a valid bracket was found, we will only mark a zoom + // as required if the bracket's width is greater than our minimum tolerance. + if (*do_zoom_search && + fabs(bracket_high->x - bracket_low->x) * descent_direction_max_norm + < options().min_step_size) { + *do_zoom_search = false; } - // Either we have a valid point, defined as a bracket of zero width, in which - // case no zoom is required, or a valid bracket in which to zoom. + return true; } @@ -589,6 +644,7 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, Function* function = options().function; CHECK(bracket_low.value_is_valid && bracket_low.gradient_is_valid) + << std::scientific << std::setprecision(kErrorMessageNumericPrecision) << "Ceres bug: f_low input to Wolfe Zoom invalid, please contact " << "the developers!, initial_position: " << initial_position << ", bracket_low: " << bracket_low @@ -599,22 +655,46 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, // not have been calculated (if bracket_high.value does not satisfy the // Armijo sufficient decrease condition and interpolation method does not // require it). + // + // We also do not require that: bracket_low.value < bracket_high.value, + // although this is typical. 
This is to deal with the case when + // bracket_low = initial_position, bracket_high is the first sample, + // and bracket_high does not satisfy the Armijo condition, but still has + // bracket_high.value < initial_position.value. CHECK(bracket_high.value_is_valid) + << std::scientific << std::setprecision(kErrorMessageNumericPrecision) << "Ceres bug: f_high input to Wolfe Zoom invalid, please " << "contact the developers!, initial_position: " << initial_position << ", bracket_low: " << bracket_low << ", bracket_high: "<< bracket_high; - CHECK_LT(bracket_low.gradient * - (bracket_high.x - bracket_low.x), 0.0) - << "Ceres bug: f_high input to Wolfe Zoom does not satisfy gradient " - << "condition combined with f_low, please contact the developers!" - << ", initial_position: " << initial_position - << ", bracket_low: " << bracket_low - << ", bracket_high: "<< bracket_high; + + if (bracket_low.gradient * (bracket_high.x - bracket_low.x) >= 0) { + // The third condition for a valid initial bracket: + // + // 3. bracket_high is chosen after bracket_low, s.t. + // bracket_low.gradient * (bracket_high.x - bracket_low.x) < 0. + // + // is not satisfied. As this can happen when the users' cost function + // returns inconsistent gradient values relative to the function values, + // we do not CHECK_LT(), but we do stop processing and return an invalid + // value. 
+ summary->error = + StringPrintf("Line search failed: Wolfe zoom phase passed a bracket " + "which does not satisfy: bracket_low.gradient * " + "(bracket_high.x - bracket_low.x) < 0 [%.8e !< 0] " + "with initial_position: %s, bracket_low: %s, bracket_high:" + " %s, the most likely cause of which is the cost function " + "returning inconsistent gradient & function values.", + bracket_low.gradient * (bracket_high.x - bracket_low.x), + initial_position.ToDebugString().c_str(), + bracket_low.ToDebugString().c_str(), + bracket_high.ToDebugString().c_str()); + LOG(WARNING) << summary->error; + solution->value_is_valid = false; + return false; + } const int num_bracketing_iterations = summary->num_iterations; - const bool interpolation_uses_gradients = - options().interpolation_type == CUBIC; const double descent_direction_max_norm = static_cast<const LineSearchFunction*>(function)->DirectionInfinityNorm(); @@ -669,15 +749,23 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, upper_bound_step.x); // No check on magnitude of step size being too small here as it is // lower-bounded by the initial bracket start point, which was valid. + // + // As we require the gradient to evaluate the Wolfe condition, we always + // calculate it together with the value, irrespective of the interpolation + // type. As opposed to only calculating the gradient after the Armijo + // condition is satisifed, as the computational saving from this approach + // would be slight (perhaps even negative due to the extra call). Also, + // always calculating the value & gradient together protects against us + // reporting invalid solutions if the cost function returns slightly + // different function values when evaluated with / without gradients (due + // to numerical issues). 
++summary->num_function_evaluations; - if (interpolation_uses_gradients) { ++summary->num_gradient_evaluations; } + ++summary->num_gradient_evaluations; solution->value_is_valid = function->Evaluate(solution->x, &solution->value, - interpolation_uses_gradients - ? &solution->gradient : NULL); - solution->gradient_is_valid = - interpolation_uses_gradients && solution->value_is_valid; + &solution->gradient); + solution->gradient_is_valid = solution->value_is_valid; if (!solution->value_is_valid) { summary->error = StringPrintf("Line search failed: Wolfe Zoom phase found " @@ -689,6 +777,12 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, return false; } + VLOG(3) << "Zoom iteration: " + << summary->num_iterations - num_bracketing_iterations + << ", bracket_low: " << bracket_low + << ", bracket_high: " << bracket_high + << ", minimizing solution: " << *solution; + if ((solution->value > (initial_position.value + options().sufficient_decrease * initial_position.gradient @@ -701,31 +795,13 @@ bool WolfeLineSearch::ZoomPhase(const FunctionSample& initial_position, } // Armijo sufficient decrease satisfied, check strong Wolfe condition. - if (!interpolation_uses_gradients) { - // Irrespective of the interpolation type we are using, we now need the - // gradient at the current point (which satisfies the Armijo condition) - // in order to check the strong Wolfe conditions. 
- ++summary->num_function_evaluations; - ++summary->num_gradient_evaluations; - solution->value_is_valid = - function->Evaluate(solution->x, - &solution->value, - &solution->gradient); - solution->gradient_is_valid = solution->value_is_valid; - if (!solution->value_is_valid) { - summary->error = - StringPrintf("Line search failed: Wolfe Zoom phase found " - "step_size: %.5e, for which function is invalid, " - "between low_step: %.5e and high_step: %.5e " - "at which function is valid.", - solution->x, bracket_low.x, bracket_high.x); - LOG(WARNING) << summary->error; - return false; - } - } if (fabs(solution->gradient) <= -options().sufficient_curvature_decrease * initial_position.gradient) { // Found a valid termination point satisfying strong Wolfe conditions. + VLOG(3) << std::scientific + << std::setprecision(kErrorMessageNumericPrecision) + << "Zoom phase found step size: " << solution->x + << ", satisfying strong Wolfe conditions."; break; } else if (solution->gradient * (bracket_high.x - bracket_low.x) >= 0) { diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc index 8ded823e5bd..a865f11f9f1 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc @@ -121,6 +121,7 @@ class LBFGS : public LineSearchDirection { low_rank_inverse_hessian_.Update( previous.search_direction * previous.step_size, current.gradient - previous.gradient); + search_direction->setZero(); low_rank_inverse_hessian_.RightMultiply(current.gradient.data(), search_direction->data()); @@ -176,9 +177,46 @@ class BFGS : public LineSearchDirection { const Vector delta_gradient = current.gradient - previous.gradient; const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient); - if (delta_x_dot_delta_gradient <= 1e-10) { + // The (L)BFGS algorithm explicitly requires that the secant 
equation: + // + // B_{k+1} * s_k = y_k + // + // Is satisfied at each iteration, where B_{k+1} is the approximated + // Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and + // y_k = (grad_{k+1} - grad_{k}). As the approximated Hessian must be + // positive definite, this is equivalent to the condition: + // + // s_k^T * y_k > 0 [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0] + // + // This condition would always be satisfied if the function was strictly + // convex, alternatively, it is always satisfied provided that a Wolfe line + // search is used (even if the function is not strictly convex). See [1] + // (p138) for a proof. + // + // Although Ceres will always use a Wolfe line search when using (L)BFGS, + // practical implementation considerations mean that the line search + // may return a point that satisfies only the Armijo condition, and thus + // could violate the Secant equation. As such, we will only use a step + // to update the Hessian approximation if: + // + // s_k^T * y_k > tolerance + // + // It is important that tolerance is very small (and >=0), as otherwise we + // might skip the update too often and fail to capture important curvature + // information in the Hessian. For example going from 1e-10 -> 1e-14 + // improves the NIST benchmark score from 43/54 to 53/54. + // + // [1] Nocedal J, Wright S, Numerical Optimization, 2nd Ed. Springer, 1999. + // + // TODO(alexs.mac): Consider using Damped BFGS update instead of + // skipping update. + const double kBFGSSecantConditionHessianUpdateTolerance = 1e-14; + if (delta_x_dot_delta_gradient <= + kBFGSSecantConditionHessianUpdateTolerance) { VLOG(2) << "Skipping BFGS Update, delta_x_dot_delta_gradient too " - << "small: " << delta_x_dot_delta_gradient; + << "small: " << delta_x_dot_delta_gradient << ", tolerance: " + << kBFGSSecantConditionHessianUpdateTolerance + << " (Secant condition)."; } else { // Update dense inverse Hessian approximation. 
@@ -214,8 +252,13 @@ class BFGS : public LineSearchDirection { // Part II: Implementation and experiments, Management Science, // 20(5), 863-874, 1974. // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999. - inverse_hessian_ *= + const double approximate_eigenvalue_scale = delta_x_dot_delta_gradient / delta_gradient.dot(delta_gradient); + inverse_hessian_ *= approximate_eigenvalue_scale; + + VLOG(4) << "Applying approximate_eigenvalue_scale: " + << approximate_eigenvalue_scale << " to initial inverse " + << "Hessian approximation."; } initialized_ = true; diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc index 2cc89faf4c4..4590afedbed 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc @@ -69,6 +69,9 @@ namespace { // use. const double kEpsilon = 1e-12; +// TODO(sameeragarwal): I think there is a small bug here, in that if +// the evaluation fails, then the state can contain garbage. Look at +// this more carefully. bool Evaluate(Evaluator* evaluator, const Vector& x, LineSearchMinimizer::State* state) { @@ -90,6 +93,7 @@ bool Evaluate(Evaluator* evaluator, void LineSearchMinimizer::Minimize(const Minimizer::Options& options, double* parameters, Solver::Summary* summary) { + const bool is_not_silent = !options.is_silent; double start_time = WallTimeInSeconds(); double iteration_start_time = start_time; @@ -115,14 +119,16 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.step_is_successful = false; iteration_summary.cost_change = 0.0; iteration_summary.gradient_max_norm = 0.0; + iteration_summary.gradient_norm = 0.0; iteration_summary.step_norm = 0.0; iteration_summary.linear_solver_iterations = 0; iteration_summary.step_solver_time_in_seconds = 0; // Do initial cost and Jacobian evaluation. 
if (!Evaluate(evaluator, x, &current_state)) { - LOG(WARNING) << "Terminating: Cost and gradient evaluation failed."; + summary->error = "Terminating: Cost and gradient evaluation failed."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -130,6 +136,7 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.cost = current_state.cost + summary->fixed_cost; iteration_summary.gradient_max_norm = current_state.gradient_max_norm; + iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm); // The initial gradient max_norm is bounded from below so that we do // not divide by zero. @@ -139,11 +146,14 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, options.gradient_tolerance * initial_gradient_max_norm; if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e", + iteration_summary.gradient_max_norm / + initial_gradient_max_norm, + options.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; - VLOG(1) << "Terminating: Gradient tolerance reached." 
- << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / initial_gradient_max_norm - << " <= " << options.gradient_tolerance; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -190,9 +200,8 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, line_search_options, &summary->error)); if (line_search.get() == NULL) { - LOG(ERROR) << "Ceres bug: Unable to create a LineSearch object, please " - << "contact the developers!, error: " << summary->error; summary->termination_type = DID_NOT_RUN; + LOG_IF(ERROR, is_not_silent) << summary->error; return; } @@ -206,16 +215,18 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, iteration_start_time = WallTimeInSeconds(); if (iteration_summary.iteration >= options.max_num_iterations) { + summary->error = "Terminating: Maximum number of iterations reached."; summary->termination_type = NO_CONVERGENCE; - VLOG(1) << "Terminating: Maximum number of iterations reached."; + VLOG_IF(1, is_not_silent) << summary->error; break; } const double total_solver_time = iteration_start_time - start_time + summary->preprocessor_time_in_seconds; if (total_solver_time >= options.max_solver_time_in_seconds) { + summary->error = "Terminating: Maximum solver time reached."; summary->termination_type = NO_CONVERGENCE; - VLOG(1) << "Terminating: Maximum solver time reached."; + VLOG_IF(1, is_not_silent) << summary->error; break; } @@ -241,13 +252,12 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, // have already reached our specified maximum number of restarts, // terminate optimization. 
summary->error = - StringPrintf("Line search direction failure: specified " + StringPrintf("Termination: Line search direction failure: specified " "max_num_line_search_direction_restarts: %d reached.", options.max_num_line_search_direction_restarts); - LOG(WARNING) << summary->error << " terminating optimization."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; break; - } else if (!line_search_status) { // Restart line search direction with gradient descent on first iteration // as we have not yet reached our maximum number of restarts. @@ -255,13 +265,16 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, options.max_num_line_search_direction_restarts); ++num_line_search_direction_restarts; - LOG(WARNING) + LOG_IF(WARNING, is_not_silent) << "Line search direction algorithm: " - << LineSearchDirectionTypeToString(options.line_search_direction_type) - << ", failed to produce a valid new direction at iteration: " - << iteration_summary.iteration << ". Restarting, number of " - << "restarts: " << num_line_search_direction_restarts << " / " - << options.max_num_line_search_direction_restarts << " [max]."; + << LineSearchDirectionTypeToString( + options.line_search_direction_type) + << ", failed to produce a valid new direction at " + << "iteration: " << iteration_summary.iteration + << ". 
Restarting, number of restarts: " + << num_line_search_direction_restarts << " / " + << options.max_num_line_search_direction_restarts + << " [max]."; line_search_direction.reset( LineSearchDirection::Create(line_search_direction_options)); current_state.search_direction = -current_state.gradient; @@ -292,8 +305,8 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, "(current_cost - previous_cost): %.5e", initial_step_size, current_state.directional_derivative, (current_state.cost - previous_state.cost)); - LOG(WARNING) << summary->error; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; break; } @@ -301,6 +314,18 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, current_state.cost, current_state.directional_derivative, &line_search_summary); + if (!line_search_summary.success) { + summary->error = + StringPrintf("Numerical failure in line search, failed to find " + "a valid step size, (did not run out of iterations) " + "using initial_step_size: %.5e, initial_cost: %.5e, " + "initial_gradient: %.5e.", + initial_step_size, current_state.cost, + current_state.directional_derivative); + LOG_IF(WARNING, is_not_silent) << summary->error; + summary->termination_type = NUMERICAL_FAILURE; + break; + } current_state.step_size = line_search_summary.optimal_step_size; delta = current_state.step_size * current_state.search_direction; @@ -310,20 +335,34 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, WallTimeInSeconds() - iteration_start_time; // TODO(sameeragarwal): Collect stats. - if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data()) || - !Evaluate(evaluator, x_plus_delta, &current_state)) { - LOG(WARNING) << "Evaluation failed."; + // + TODO(sameeragarwal): This call to Plus() directly updates the parameter + vector via the VectorRef x. 
This is incorrect as we check the + gradient and cost changes to determine if the step is accepted + later, as such we could mutate x with a step that is not + subsequently accepted, thus it is possible that + summary->iterations.end()->x != x at termination. + if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) { + LOG_IF(WARNING, is_not_silent) + << "x_plus_delta = Plus(x, delta) failed. "; + } else if (!Evaluate(evaluator, x_plus_delta, &current_state)) { + LOG_IF(WARNING, is_not_silent) << "Step failed to evaluate. "; } else { x = x_plus_delta; } iteration_summary.gradient_max_norm = current_state.gradient_max_norm; + iteration_summary.gradient_norm = sqrt(current_state.gradient_squared_norm); + if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e. ", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; - VLOG(1) << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / initial_gradient_max_norm - << " <= " << options.gradient_tolerance; + VLOG_IF(1, is_not_silent) << summary->error; break; } @@ -331,11 +370,14 @@ void LineSearchMinimizer::Minimize(const Minimizer::Options& options, const double absolute_function_tolerance = options.function_tolerance * previous_state.cost; if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) { - VLOG(1) << "Terminating. Function tolerance reached. " - << "|cost_change|/cost: " - << fabs(iteration_summary.cost_change) / previous_state.cost - << " <= " << options.function_tolerance; + summary->error = + StringPrintf("Terminating. Function tolerance reached. 
" + "|cost_change|/cost: %e <= %e", + fabs(iteration_summary.cost_change) / + previous_state.cost, + options.function_tolerance); summary->termination_type = FUNCTION_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h index 22691b33e44..cb26356d203 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h @@ -50,6 +50,23 @@ namespace ceres { namespace internal { +enum LinearSolverTerminationType { + // Termination criterion was met. + LINEAR_SOLVER_SUCCESS, + + // Solver ran for max_num_iterations and terminated before the + // termination tolerance could be satisfied. + LINEAR_SOLVER_NO_CONVERGENCE, + + // Solver was terminated due to numerical problems, generally due to + // the linear system being poorly conditioned. + LINEAR_SOLVER_FAILURE, + + // Solver failed with a fatal error that cannot be recovered from. 
+ LINEAR_SOLVER_FATAL_ERROR +}; + + class LinearOperator; // Abstract base class for objects that implement algorithms for @@ -74,6 +91,7 @@ class LinearSolver { Options() : type(SPARSE_NORMAL_CHOLESKY), preconditioner_type(JACOBI), + visibility_clustering_type(CANONICAL_VIEWS), dense_linear_algebra_library_type(EIGEN), sparse_linear_algebra_library_type(SUITE_SPARSE), use_postordering(false), @@ -87,9 +105,8 @@ class LinearSolver { } LinearSolverType type; - PreconditionerType preconditioner_type; - + VisibilityClusteringType visibility_clustering_type; DenseLinearAlgebraLibraryType dense_linear_algebra_library_type; SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type; @@ -243,12 +260,13 @@ class LinearSolver { Summary() : residual_norm(0.0), num_iterations(-1), - termination_type(FAILURE) { + termination_type(LINEAR_SOLVER_FAILURE) { } double residual_norm; int num_iterations; LinearSolverTerminationType termination_type; + string message; }; virtual ~LinearSolver(); diff --git a/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc index 372165f9523..9aeafecfb36 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc @@ -35,6 +35,41 @@ namespace ceres { namespace internal { +// The (L)BFGS algorithm explicitly requires that the secant equation: +// +// B_{k+1} * s_k = y_k +// +// Is satisfied at each iteration, where B_{k+1} is the approximated +// Hessian at the k+1-th iteration, s_k = (x_{k+1} - x_{k}) and +// y_k = (grad_{k+1} - grad_{k}). 
As the approximated Hessian must be +// positive definite, this is equivalent to the condition: +// +// s_k^T * y_k > 0 [s_k^T * B_{k+1} * s_k = s_k^T * y_k > 0] +// +// This condition would always be satisfied if the function was strictly +// convex, alternatively, it is always satisfied provided that a Wolfe line +// search is used (even if the function is not strictly convex). See [1] +// (p138) for a proof. +// +// Although Ceres will always use a Wolfe line search when using (L)BFGS, +// practical implementation considerations mean that the line search +// may return a point that satisfies only the Armijo condition, and thus +// could violate the Secant equation. As such, we will only use a step +// to update the Hessian approximation if: +// +// s_k^T * y_k > tolerance +// +// It is important that tolerance is very small (and >=0), as otherwise we +// might skip the update too often and fail to capture important curvature +// information in the Hessian. For example going from 1e-10 -> 1e-14 improves +// the NIST benchmark score from 43/54 to 53/54. +// +// [1] Nocedal J., Wright S., Numerical Optimization, 2nd Ed. Springer, 1999. +// +// TODO(alexs.mac): Consider using Damped BFGS update instead of +// skipping update. 
+const double kLBFGSSecantConditionHessianUpdateTolerance = 1e-14; + LowRankInverseHessian::LowRankInverseHessian( int num_parameters, int max_num_corrections, @@ -52,9 +87,12 @@ LowRankInverseHessian::LowRankInverseHessian( bool LowRankInverseHessian::Update(const Vector& delta_x, const Vector& delta_gradient) { const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient); - if (delta_x_dot_delta_gradient <= 1e-10) { - VLOG(2) << "Skipping LBFGS Update, delta_x_dot_delta_gradient too small: " - << delta_x_dot_delta_gradient; + if (delta_x_dot_delta_gradient <= + kLBFGSSecantConditionHessianUpdateTolerance) { + VLOG(2) << "Skipping L-BFGS Update, delta_x_dot_delta_gradient too " + << "small: " << delta_x_dot_delta_gradient << ", tolerance: " + << kLBFGSSecantConditionHessianUpdateTolerance + << " (Secant condition)."; return false; } @@ -133,6 +171,10 @@ void LowRankInverseHessian::RightMultiply(const double* x_ptr, // 20(5), 863-874, 1974. // [2] Nocedal J., Wright S., Numerical Optimization, Springer, 1999. search_direction *= approximate_eigenvalue_scale_; + + VLOG(4) << "Applying approximate_eigenvalue_scale: " + << approximate_eigenvalue_scale_ << " to initial inverse Hessian " + << "approximation."; } for (int i = 0; i < num_corrections_; ++i) { diff --git a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h index 622e9cee1d0..3d9da997d24 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h +++ b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h @@ -107,6 +107,7 @@ class Minimizer { options.line_search_sufficient_curvature_decrease; max_line_search_step_expansion = options.max_line_search_step_expansion; + is_silent = false; evaluator = NULL; trust_region_strategy = NULL; jacobian = NULL; @@ -153,6 +154,8 @@ class Minimizer { double line_search_sufficient_curvature_decrease; double max_line_search_step_expansion; + // If true, then all logging is disabled. 
+ bool is_silent; // List of callbacks that are executed by the Minimizer at the end // of each iteration. diff --git a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc index 59eaff8ec1b..7936a401d0d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// Copyright 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -27,277 +27,141 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Template specialization of PartitionedMatrixView. +// +// ======================================== +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +// THIS FILE IS AUTOGENERATED. DO NOT EDIT. +//========================================= +// +// This file is generated using generate_eliminator_specialization.py. +// Editing it manually is not recommended. 
-#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10 - +#include "ceres/linear_solver.h" #include "ceres/partitioned_matrix_view.h" - -#include <algorithm> -#include <cstring> -#include <vector> -#include "ceres/block_sparse_matrix.h" -#include "ceres/block_structure.h" #include "ceres/internal/eigen.h" -#include "ceres/small_blas.h" -#include "glog/logging.h" namespace ceres { namespace internal { -PartitionedMatrixView::PartitionedMatrixView( - const BlockSparseMatrix& matrix, - int num_col_blocks_a) - : matrix_(matrix), - num_col_blocks_e_(num_col_blocks_a) { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - CHECK_NOTNULL(bs); - - num_col_blocks_f_ = bs->cols.size() - num_col_blocks_a; - - // Compute the number of row blocks in E. The number of row blocks - // in E maybe less than the number of row blocks in the input matrix - // as some of the row blocks at the bottom may not have any - // e_blocks. For a definition of what an e_block is, please see - // explicit_schur_complement_solver.h - num_row_blocks_e_ = 0; - for (int r = 0; r < bs->rows.size(); ++r) { - const vector<Cell>& cells = bs->rows[r].cells; - if (cells[0].block_id < num_col_blocks_a) { - ++num_row_blocks_e_; - } +PartitionedMatrixViewBase* +PartitionedMatrixViewBase::Create(const LinearSolver::Options& options, + const BlockSparseMatrix& matrix) { +#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION + if ((options.row_block_size == 2) && + (options.e_block_size == 2) && + (options.f_block_size == 2)) { + return new PartitionedMatrixView<2, 2, 2>( + matrix, options.elimination_groups[0]); } - - // Compute the number of columns in E and F. 
- num_cols_e_ = 0; - num_cols_f_ = 0; - - for (int c = 0; c < bs->cols.size(); ++c) { - const Block& block = bs->cols[c]; - if (c < num_col_blocks_a) { - num_cols_e_ += block.size; - } else { - num_cols_f_ += block.size; - } + if ((options.row_block_size == 2) && + (options.e_block_size == 2) && + (options.f_block_size == 3)) { + return new PartitionedMatrixView<2, 2, 3>( + matrix, options.elimination_groups[0]); } - - CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols()); -} - -PartitionedMatrixView::~PartitionedMatrixView() { -} - -// The next four methods don't seem to be particularly cache -// friendly. This is an artifact of how the BlockStructure of the -// input matrix is constructed. These methods will benefit from -// multithreading as well as improved data layout. - -void PartitionedMatrixView::RightMultiplyE(const double* x, double* y) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - - // Iterate over the first num_row_blocks_e_ row blocks, and multiply - // by the first cell in each row block. 
- const double* values = matrix_.values(); - for (int r = 0; r < num_row_blocks_e_; ++r) { - const Cell& cell = bs->rows[r].cells[0]; - const int row_block_pos = bs->rows[r].block.position; - const int row_block_size = bs->rows[r].block.size; - const int col_block_id = cell.block_id; - const int col_block_pos = bs->cols[col_block_id].position; - const int col_block_size = bs->cols[col_block_id].size; - MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cell.position, row_block_size, col_block_size, - x + col_block_pos, - y + row_block_pos); + if ((options.row_block_size == 2) && + (options.e_block_size == 2) && + (options.f_block_size == 4)) { + return new PartitionedMatrixView<2, 2, 4>( + matrix, options.elimination_groups[0]); } -} - -void PartitionedMatrixView::RightMultiplyF(const double* x, double* y) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - - // Iterate over row blocks, and if the row block is in E, then - // multiply by all the cells except the first one which is of type - // E. If the row block is not in E (i.e its in the bottom - // num_row_blocks - num_row_blocks_e row blocks), then all the cells - // are of type F and multiply by them all. - const double* values = matrix_.values(); - for (int r = 0; r < bs->rows.size(); ++r) { - const int row_block_pos = bs->rows[r].block.position; - const int row_block_size = bs->rows[r].block.size; - const vector<Cell>& cells = bs->rows[r].cells; - for (int c = (r < num_row_blocks_e_) ? 
1 : 0; c < cells.size(); ++c) { - const int col_block_id = cells[c].block_id; - const int col_block_pos = bs->cols[col_block_id].position; - const int col_block_size = bs->cols[col_block_id].size; - MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cells[c].position, row_block_size, col_block_size, - x + col_block_pos - num_cols_e(), - y + row_block_pos); - } + if ((options.row_block_size == 2) && + (options.e_block_size == 2) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<2, 2, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); } -} - -void PartitionedMatrixView::LeftMultiplyE(const double* x, double* y) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - - // Iterate over the first num_row_blocks_e_ row blocks, and multiply - // by the first cell in each row block. - const double* values = matrix_.values(); - for (int r = 0; r < num_row_blocks_e_; ++r) { - const Cell& cell = bs->rows[r].cells[0]; - const int row_block_pos = bs->rows[r].block.position; - const int row_block_size = bs->rows[r].block.size; - const int col_block_id = cell.block_id; - const int col_block_pos = bs->cols[col_block_id].position; - const int col_block_size = bs->cols[col_block_id].size; - MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cell.position, row_block_size, col_block_size, - x + row_block_pos, - y + col_block_pos); + if ((options.row_block_size == 2) && + (options.e_block_size == 3) && + (options.f_block_size == 3)) { + return new PartitionedMatrixView<2, 3, 3>( + matrix, options.elimination_groups[0]); } -} - -void PartitionedMatrixView::LeftMultiplyF(const double* x, double* y) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - - // Iterate over row blocks, and if the row block is in E, then - // multiply by all the cells except the first one which is of type - // E. 
If the row block is not in E (i.e its in the bottom - // num_row_blocks - num_row_blocks_e row blocks), then all the cells - // are of type F and multiply by them all. - const double* values = matrix_.values(); - for (int r = 0; r < bs->rows.size(); ++r) { - const int row_block_pos = bs->rows[r].block.position; - const int row_block_size = bs->rows[r].block.size; - const vector<Cell>& cells = bs->rows[r].cells; - for (int c = (r < num_row_blocks_e_) ? 1 : 0; c < cells.size(); ++c) { - const int col_block_id = cells[c].block_id; - const int col_block_pos = bs->cols[col_block_id].position; - const int col_block_size = bs->cols[col_block_id].size; - MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cells[c].position, row_block_size, col_block_size, - x + row_block_pos, - y + col_block_pos - num_cols_e()); - } + if ((options.row_block_size == 2) && + (options.e_block_size == 3) && + (options.f_block_size == 4)) { + return new PartitionedMatrixView<2, 3, 4>( + matrix, options.elimination_groups[0]); } -} - -// Given a range of columns blocks of a matrix m, compute the block -// structure of the block diagonal of the matrix m(:, -// start_col_block:end_col_block)'m(:, start_col_block:end_col_block) -// and return a BlockSparseMatrix with the this block structure. The -// caller owns the result. -BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalMatrixLayout( - int start_col_block, int end_col_block) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - CompressedRowBlockStructure* block_diagonal_structure = - new CompressedRowBlockStructure; - - int block_position = 0; - int diagonal_cell_position = 0; - - // Iterate over the column blocks, creating a new diagonal block for - // each column block. 
- for (int c = start_col_block; c < end_col_block; ++c) { - const Block& block = bs->cols[c]; - block_diagonal_structure->cols.push_back(Block()); - Block& diagonal_block = block_diagonal_structure->cols.back(); - diagonal_block.size = block.size; - diagonal_block.position = block_position; - - block_diagonal_structure->rows.push_back(CompressedRow()); - CompressedRow& row = block_diagonal_structure->rows.back(); - row.block = diagonal_block; - - row.cells.push_back(Cell()); - Cell& cell = row.cells.back(); - cell.block_id = c - start_col_block; - cell.position = diagonal_cell_position; - - block_position += block.size; - diagonal_cell_position += block.size * block.size; + if ((options.row_block_size == 2) && + (options.e_block_size == 3) && + (options.f_block_size == 9)) { + return new PartitionedMatrixView<2, 3, 9>( + matrix, options.elimination_groups[0]); } - - // Build a BlockSparseMatrix with the just computed block - // structure. - return new BlockSparseMatrix(block_diagonal_structure); -} - -BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalEtE() const { - BlockSparseMatrix* block_diagonal = - CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_); - UpdateBlockDiagonalEtE(block_diagonal); - return block_diagonal; -} - -BlockSparseMatrix* PartitionedMatrixView::CreateBlockDiagonalFtF() const { - BlockSparseMatrix* block_diagonal = - CreateBlockDiagonalMatrixLayout( - num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_); - UpdateBlockDiagonalFtF(block_diagonal); - return block_diagonal; -} - -// Similar to the code in RightMultiplyE, except instead of the matrix -// vector multiply its an outer product. 
-// -// block_diagonal = block_diagonal(E'E) -void PartitionedMatrixView::UpdateBlockDiagonalEtE( - BlockSparseMatrix* block_diagonal) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - const CompressedRowBlockStructure* block_diagonal_structure = - block_diagonal->block_structure(); - - block_diagonal->SetZero(); - const double* values = matrix_.values(); - for (int r = 0; r < num_row_blocks_e_ ; ++r) { - const Cell& cell = bs->rows[r].cells[0]; - const int row_block_size = bs->rows[r].block.size; - const int block_id = cell.block_id; - const int col_block_size = bs->cols[block_id].size; - const int cell_position = - block_diagonal_structure->rows[block_id].cells[0].position; - - MatrixTransposeMatrixMultiply - <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cell.position, row_block_size, col_block_size, - values + cell.position, row_block_size, col_block_size, - block_diagonal->mutable_values() + cell_position, - 0, 0, col_block_size, col_block_size); + if ((options.row_block_size == 2) && + (options.e_block_size == 3) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<2, 3, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); } -} - -// Similar to the code in RightMultiplyF, except instead of the matrix -// vector multiply its an outer product. -// -// block_diagonal = block_diagonal(F'F) -// -void PartitionedMatrixView::UpdateBlockDiagonalFtF( - BlockSparseMatrix* block_diagonal) const { - const CompressedRowBlockStructure* bs = matrix_.block_structure(); - const CompressedRowBlockStructure* block_diagonal_structure = - block_diagonal->block_structure(); - - block_diagonal->SetZero(); - const double* values = matrix_.values(); - for (int r = 0; r < bs->rows.size(); ++r) { - const int row_block_size = bs->rows[r].block.size; - const vector<Cell>& cells = bs->rows[r].cells; - for (int c = (r < num_row_blocks_e_) ? 
1 : 0; c < cells.size(); ++c) { - const int col_block_id = cells[c].block_id; - const int col_block_size = bs->cols[col_block_id].size; - const int diagonal_block_id = col_block_id - num_col_blocks_e_; - const int cell_position = - block_diagonal_structure->rows[diagonal_block_id].cells[0].position; - - MatrixTransposeMatrixMultiply - <Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>( - values + cells[c].position, row_block_size, col_block_size, - values + cells[c].position, row_block_size, col_block_size, - block_diagonal->mutable_values() + cell_position, - 0, 0, col_block_size, col_block_size); - } + if ((options.row_block_size == 2) && + (options.e_block_size == 4) && + (options.f_block_size == 3)) { + return new PartitionedMatrixView<2, 4, 3>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 2) && + (options.e_block_size == 4) && + (options.f_block_size == 4)) { + return new PartitionedMatrixView<2, 4, 4>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 2) && + (options.e_block_size == 4) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<2, 4, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 2) && + (options.e_block_size == Eigen::Dynamic) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<2, Eigen::Dynamic, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 4) && + (options.e_block_size == 4) && + (options.f_block_size == 2)) { + return new PartitionedMatrixView<4, 4, 2>( + matrix, options.elimination_groups[0]); } -} + if ((options.row_block_size == 4) && + (options.e_block_size == 4) && + (options.f_block_size == 3)) { + return new PartitionedMatrixView<4, 4, 3>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 4) && + (options.e_block_size == 4) && + (options.f_block_size == 4)) { + return new 
PartitionedMatrixView<4, 4, 4>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == 4) && + (options.e_block_size == 4) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<4, 4, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); + } + if ((options.row_block_size == Eigen::Dynamic) && + (options.e_block_size == Eigen::Dynamic) && + (options.f_block_size == Eigen::Dynamic)) { + return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); + } + +#endif + VLOG(1) << "Template specializations not found for <" + << options.row_block_size << "," + << options.e_block_size << "," + << options.f_block_size << ">"; + return new PartitionedMatrixView<Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic>( + matrix, options.elimination_groups[0]); +}; } // namespace internal } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.h b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.h index ebfbe403189..661252d404f 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.h +++ b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.h @@ -36,7 +36,15 @@ #ifndef CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_ #define CERES_INTERNAL_PARTITIONED_MATRIX_VIEW_H_ -#include "ceres/block_sparse_matrix.h" +#include <algorithm> +#include <cstring> +#include <vector> + +#include "ceres/block_structure.h" +#include "ceres/internal/eigen.h" +#include "ceres/linear_solver.h" +#include "ceres/small_blas.h" +#include "glog/logging.h" namespace ceres { namespace internal { @@ -51,57 +59,80 @@ namespace internal { // block structure of the matrix does not satisfy the requirements of // the Schur complement solver it will result in unpredictable and // wrong output. 
-// -// This class lives in the internal name space as its a utility class -// to be used by the IterativeSchurComplementSolver class, found in -// iterative_schur_complement_solver.h, and is not meant for general -// consumption. -class PartitionedMatrixView { +class PartitionedMatrixViewBase { public: - // matrix = [E F], where the matrix E contains the first - // num_col_blocks_a column blocks. - PartitionedMatrixView(const BlockSparseMatrix& matrix, - int num_col_blocks_a); - ~PartitionedMatrixView(); + virtual ~PartitionedMatrixViewBase() {} // y += E'x - void LeftMultiplyE(const double* x, double* y) const; + virtual void LeftMultiplyE(const double* x, double* y) const = 0; // y += F'x - void LeftMultiplyF(const double* x, double* y) const; + virtual void LeftMultiplyF(const double* x, double* y) const = 0; // y += Ex - void RightMultiplyE(const double* x, double* y) const; + virtual void RightMultiplyE(const double* x, double* y) const = 0; // y += Fx - void RightMultiplyF(const double* x, double* y) const; + virtual void RightMultiplyF(const double* x, double* y) const = 0; // Create and return the block diagonal of the matrix E'E. - BlockSparseMatrix* CreateBlockDiagonalEtE() const; + virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const = 0; - // Create and return the block diagonal of the matrix F'F. - BlockSparseMatrix* CreateBlockDiagonalFtF() const; + // Create and return the block diagonal of the matrix F'F. Caller + // owns the result. + virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const = 0; // Compute the block diagonal of the matrix E'E and store it in // block_diagonal. The matrix block_diagonal is expected to have a // BlockStructure (preferably created using // CreateBlockDiagonalMatrixEtE) which is has the same structure as // the block diagonal of E'E. 
- void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const; + virtual void UpdateBlockDiagonalEtE( + BlockSparseMatrix* block_diagonal) const = 0; // Compute the block diagonal of the matrix F'F and store it in // block_diagonal. The matrix block_diagonal is expected to have a // BlockStructure (preferably created using // CreateBlockDiagonalMatrixFtF) which is has the same structure as // the block diagonal of F'F. - void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const; - - int num_col_blocks_e() const { return num_col_blocks_e_; } - int num_col_blocks_f() const { return num_col_blocks_f_; } - int num_cols_e() const { return num_cols_e_; } - int num_cols_f() const { return num_cols_f_; } - int num_rows() const { return matrix_.num_rows(); } - int num_cols() const { return matrix_.num_cols(); } + virtual void UpdateBlockDiagonalFtF( + BlockSparseMatrix* block_diagonal) const = 0; + + virtual int num_col_blocks_e() const = 0; + virtual int num_col_blocks_f() const = 0; + virtual int num_cols_e() const = 0; + virtual int num_cols_f() const = 0; + virtual int num_rows() const = 0; + virtual int num_cols() const = 0; + + static PartitionedMatrixViewBase* Create(const LinearSolver::Options& options, + const BlockSparseMatrix& matrix); +}; + +template <int kRowBlockSize = Eigen::Dynamic, + int kEBlockSize = Eigen::Dynamic, + int kFBlockSize = Eigen::Dynamic > +class PartitionedMatrixView : public PartitionedMatrixViewBase { + public: + // matrix = [E F], where the matrix E contains the first + // num_col_blocks_a column blocks. 
+ PartitionedMatrixView(const BlockSparseMatrix& matrix, int num_col_blocks_e); + + virtual ~PartitionedMatrixView(); + virtual void LeftMultiplyE(const double* x, double* y) const; + virtual void LeftMultiplyF(const double* x, double* y) const; + virtual void RightMultiplyE(const double* x, double* y) const; + virtual void RightMultiplyF(const double* x, double* y) const; + virtual BlockSparseMatrix* CreateBlockDiagonalEtE() const; + virtual BlockSparseMatrix* CreateBlockDiagonalFtF() const; + virtual void UpdateBlockDiagonalEtE(BlockSparseMatrix* block_diagonal) const; + virtual void UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const; + virtual int num_col_blocks_e() const { return num_col_blocks_e_; } + virtual int num_col_blocks_f() const { return num_col_blocks_f_; } + virtual int num_cols_e() const { return num_cols_e_; } + virtual int num_cols_f() const { return num_cols_f_; } + virtual int num_rows() const { return matrix_.num_rows(); } + virtual int num_cols() const { return matrix_.num_cols(); } private: BlockSparseMatrix* CreateBlockDiagonalMatrixLayout(int start_col_block, diff --git a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view_impl.h b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view_impl.h new file mode 100644 index 00000000000..ae7f776e6b1 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view_impl.h @@ -0,0 +1,380 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/partitioned_matrix_view.h" + +#include <algorithm> +#include <cstring> +#include <vector> +#include "ceres/block_sparse_matrix.h" +#include "ceres/block_structure.h" +#include "ceres/internal/eigen.h" +#include "ceres/small_blas.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +PartitionedMatrixView( + const BlockSparseMatrix& matrix, + int num_col_blocks_e) + : matrix_(matrix), + num_col_blocks_e_(num_col_blocks_e) { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + CHECK_NOTNULL(bs); + + num_col_blocks_f_ = bs->cols.size() - num_col_blocks_e_; + + // Compute the number of row blocks in E. The number of row blocks + // in E maybe less than the number of row blocks in the input matrix + // as some of the row blocks at the bottom may not have any + // e_blocks. For a definition of what an e_block is, please see + // explicit_schur_complement_solver.h + num_row_blocks_e_ = 0; + for (int r = 0; r < bs->rows.size(); ++r) { + const vector<Cell>& cells = bs->rows[r].cells; + if (cells[0].block_id < num_col_blocks_e_) { + ++num_row_blocks_e_; + } + } + + // Compute the number of columns in E and F. + num_cols_e_ = 0; + num_cols_f_ = 0; + + for (int c = 0; c < bs->cols.size(); ++c) { + const Block& block = bs->cols[c]; + if (c < num_col_blocks_e_) { + num_cols_e_ += block.size; + } else { + num_cols_f_ += block.size; + } + } + + CHECK_EQ(num_cols_e_ + num_cols_f_, matrix_.num_cols()); +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +~PartitionedMatrixView() { +} + +// The next four methods don't seem to be particularly cache +// friendly. This is an artifact of how the BlockStructure of the +// input matrix is constructed. 
These methods will benefit from +// multithreading as well as improved data layout. + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +RightMultiplyE(const double* x, double* y) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + + // Iterate over the first num_row_blocks_e_ row blocks, and multiply + // by the first cell in each row block. + const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_; ++r) { + const Cell& cell = bs->rows[r].cells[0]; + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const int col_block_id = cell.block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixVectorMultiply<kRowBlockSize, kEBlockSize, 1>( + values + cell.position, row_block_size, col_block_size, + x + col_block_pos, + y + row_block_pos); + } +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +RightMultiplyF(const double* x, double* y) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + + // Iterate over row blocks, and if the row block is in E, then + // multiply by all the cells except the first one which is of type + // E. If the row block is not in E (i.e its in the bottom + // num_row_blocks - num_row_blocks_e row blocks), then all the cells + // are of type F and multiply by them all. 
+ const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_; ++r) { + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 1; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixVectorMultiply<kRowBlockSize, kFBlockSize, 1>( + values + cells[c].position, row_block_size, col_block_size, + x + col_block_pos - num_cols_e_, + y + row_block_pos); + } + } + + for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) { + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 0; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( + values + cells[c].position, row_block_size, col_block_size, + x + col_block_pos - num_cols_e_, + y + row_block_pos); + } + } +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +LeftMultiplyE(const double* x, double* y) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + + // Iterate over the first num_row_blocks_e_ row blocks, and multiply + // by the first cell in each row block. 
+ const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_; ++r) { + const Cell& cell = bs->rows[r].cells[0]; + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const int col_block_id = cell.block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixTransposeVectorMultiply<kRowBlockSize, kEBlockSize, 1>( + values + cell.position, row_block_size, col_block_size, + x + row_block_pos, + y + col_block_pos); + } +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +LeftMultiplyF(const double* x, double* y) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + + // Iterate over row blocks, and if the row block is in E, then + // multiply by all the cells except the first one which is of type + // E. If the row block is not in E (i.e its in the bottom + // num_row_blocks - num_row_blocks_e row blocks), then all the cells + // are of type F and multiply by them all. 
+ const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_; ++r) { + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 1; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixTransposeVectorMultiply<kRowBlockSize, kFBlockSize, 1>( + values + cells[c].position, row_block_size, col_block_size, + x + row_block_pos, + y + col_block_pos - num_cols_e_); + } + } + + for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) { + const int row_block_pos = bs->rows[r].block.position; + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 0; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_pos = bs->cols[col_block_id].position; + const int col_block_size = bs->cols[col_block_id].size; + MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>( + values + cells[c].position, row_block_size, col_block_size, + x + row_block_pos, + y + col_block_pos - num_cols_e_); + } + } +} + +// Given a range of columns blocks of a matrix m, compute the block +// structure of the block diagonal of the matrix m(:, +// start_col_block:end_col_block)'m(:, start_col_block:end_col_block) +// and return a BlockSparseMatrix with the this block structure. The +// caller owns the result. 
+template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +BlockSparseMatrix* +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +CreateBlockDiagonalMatrixLayout(int start_col_block, int end_col_block) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + CompressedRowBlockStructure* block_diagonal_structure = + new CompressedRowBlockStructure; + + int block_position = 0; + int diagonal_cell_position = 0; + + // Iterate over the column blocks, creating a new diagonal block for + // each column block. + for (int c = start_col_block; c < end_col_block; ++c) { + const Block& block = bs->cols[c]; + block_diagonal_structure->cols.push_back(Block()); + Block& diagonal_block = block_diagonal_structure->cols.back(); + diagonal_block.size = block.size; + diagonal_block.position = block_position; + + block_diagonal_structure->rows.push_back(CompressedRow()); + CompressedRow& row = block_diagonal_structure->rows.back(); + row.block = diagonal_block; + + row.cells.push_back(Cell()); + Cell& cell = row.cells.back(); + cell.block_id = c - start_col_block; + cell.position = diagonal_cell_position; + + block_position += block.size; + diagonal_cell_position += block.size * block.size; + } + + // Build a BlockSparseMatrix with the just computed block + // structure. 
+ return new BlockSparseMatrix(block_diagonal_structure); +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +BlockSparseMatrix* +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +CreateBlockDiagonalEtE() const { + BlockSparseMatrix* block_diagonal = + CreateBlockDiagonalMatrixLayout(0, num_col_blocks_e_); + UpdateBlockDiagonalEtE(block_diagonal); + return block_diagonal; +} + +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +BlockSparseMatrix* +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +CreateBlockDiagonalFtF() const { + BlockSparseMatrix* block_diagonal = + CreateBlockDiagonalMatrixLayout( + num_col_blocks_e_, num_col_blocks_e_ + num_col_blocks_f_); + UpdateBlockDiagonalFtF(block_diagonal); + return block_diagonal; +} + +// Similar to the code in RightMultiplyE, except instead of the matrix +// vector multiply its an outer product. +// +// block_diagonal = block_diagonal(E'E) +// +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +UpdateBlockDiagonalEtE( + BlockSparseMatrix* block_diagonal) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + const CompressedRowBlockStructure* block_diagonal_structure = + block_diagonal->block_structure(); + + block_diagonal->SetZero(); + const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_ ; ++r) { + const Cell& cell = bs->rows[r].cells[0]; + const int row_block_size = bs->rows[r].block.size; + const int block_id = cell.block_id; + const int col_block_size = bs->cols[block_id].size; + const int cell_position = + block_diagonal_structure->rows[block_id].cells[0].position; + + MatrixTransposeMatrixMultiply + <kRowBlockSize, kEBlockSize, kRowBlockSize, kEBlockSize, 1>( + values + cell.position, row_block_size, col_block_size, + values + cell.position, row_block_size, col_block_size, + block_diagonal->mutable_values() + 
cell_position, + 0, 0, col_block_size, col_block_size); + } +} + +// Similar to the code in RightMultiplyF, except instead of the matrix +// vector multiply its an outer product. +// +// block_diagonal = block_diagonal(F'F) +// +template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> +void +PartitionedMatrixView<kRowBlockSize, kEBlockSize, kFBlockSize>:: +UpdateBlockDiagonalFtF(BlockSparseMatrix* block_diagonal) const { + const CompressedRowBlockStructure* bs = matrix_.block_structure(); + const CompressedRowBlockStructure* block_diagonal_structure = + block_diagonal->block_structure(); + + block_diagonal->SetZero(); + const double* values = matrix_.values(); + for (int r = 0; r < num_row_blocks_e_; ++r) { + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 1; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_size = bs->cols[col_block_id].size; + const int diagonal_block_id = col_block_id - num_col_blocks_e_; + const int cell_position = + block_diagonal_structure->rows[diagonal_block_id].cells[0].position; + + MatrixTransposeMatrixMultiply + <kRowBlockSize, kFBlockSize, kRowBlockSize, kFBlockSize, 1>( + values + cells[c].position, row_block_size, col_block_size, + values + cells[c].position, row_block_size, col_block_size, + block_diagonal->mutable_values() + cell_position, + 0, 0, col_block_size, col_block_size); + } + } + + for (int r = num_row_blocks_e_; r < bs->rows.size(); ++r) { + const int row_block_size = bs->rows[r].block.size; + const vector<Cell>& cells = bs->rows[r].cells; + for (int c = 0; c < cells.size(); ++c) { + const int col_block_id = cells[c].block_id; + const int col_block_size = bs->cols[col_block_id].size; + const int diagonal_block_id = col_block_id - num_col_blocks_e_; + const int cell_position = + block_diagonal_structure->rows[diagonal_block_id].cells[0].position; + + MatrixTransposeMatrixMultiply + <Eigen::Dynamic, 
Eigen::Dynamic, Eigen::Dynamic, Eigen::Dynamic, 1>( + values + cells[c].position, row_block_size, col_block_size, + values + cells[c].position, row_block_size, col_block_size, + block_diagonal->mutable_values() + cell_position, + 0, 0, col_block_size, col_block_size); + } + } +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc b/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc index 3238b89670e..c4b0243d4f6 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc @@ -37,6 +37,7 @@ #include "Eigen/Dense" #include "ceres/internal/port.h" +#include "ceres/stringprintf.h" #include "glog/logging.h" namespace ceres { @@ -121,6 +122,63 @@ Vector RemoveLeadingZeros(const Vector& polynomial_in) { } } // namespace +void FindLinearPolynomialRoots(const Vector& polynomial, + Vector* real, + Vector* imaginary) { + CHECK_EQ(polynomial.size(), 2); + if (real != NULL) { + real->resize(1); + (*real)(0) = -polynomial(1) / polynomial(0); + } + + if (imaginary != NULL) { + imaginary->setZero(1); + } +} + +void FindQuadraticPolynomialRoots(const Vector& polynomial, + Vector* real, + Vector* imaginary) { + CHECK_EQ(polynomial.size(), 3); + const double a = polynomial(0); + const double b = polynomial(1); + const double c = polynomial(2); + const double D = b * b - 4 * a * c; + const double sqrt_D = sqrt(fabs(D)); + if (real != NULL) { + real->setZero(2); + } + if (imaginary != NULL) { + imaginary->setZero(2); + } + + // Real roots. + if (D >= 0) { + if (real != NULL) { + // Stable quadratic roots according to BKP Horn. 
+ // http://people.csail.mit.edu/bkph/articles/Quadratics.pdf + if (b >= 0) { + (*real)(0) = (-b - sqrt_D) / (2.0 * a); + (*real)(1) = (2.0 * c) / (-b - sqrt_D); + } else { + (*real)(0) = (2.0 * c) / (-b + sqrt_D); + (*real)(1) = (-b + sqrt_D) / (2.0 * a); + } + } + return; + } + + // Use the normal quadratic formula for the complex case. + if (real != NULL) { + (*real)(0) = -b / (2.0 * a); + (*real)(1) = -b / (2.0 * a); + } + if (imaginary != NULL) { + (*imaginary)(0) = sqrt_D / (2.0 * a); + (*imaginary)(1) = -sqrt_D / (2.0 * a); + } +} + bool FindPolynomialRoots(const Vector& polynomial_in, Vector* real, Vector* imaginary) { @@ -132,30 +190,40 @@ bool FindPolynomialRoots(const Vector& polynomial_in, Vector polynomial = RemoveLeadingZeros(polynomial_in); const int degree = polynomial.size() - 1; + VLOG(3) << "Input polynomial: " << polynomial_in.transpose(); + if (polynomial.size() != polynomial_in.size()) { + VLOG(3) << "Trimmed polynomial: " << polynomial.transpose(); + } + // Is the polynomial constant? if (degree == 0) { LOG(WARNING) << "Trying to extract roots from a constant " << "polynomial in FindPolynomialRoots"; + // We return true with no roots, not false, as if the polynomial is constant + // it is correct that there are no roots. It is not the case that they were + // there, but that we have failed to extract them. + return true; + } + + // Linear + if (degree == 1) { + FindLinearPolynomialRoots(polynomial, real, imaginary); + return true; + } + + // Quadratic + if (degree == 2) { + FindQuadraticPolynomialRoots(polynomial, real, imaginary); return true; } + // The degree is now known to be at least 3. For cubic or higher + // roots we use the method of companion matrices. + // Divide by leading term const double leading_term = polynomial(0); polynomial /= leading_term; - // Separately handle linear polynomials. 
- if (degree == 1) { - if (real != NULL) { - real->resize(1); - (*real)(0) = -polynomial(1); - } - if (imaginary != NULL) { - imaginary->resize(1); - imaginary->setZero(); - } - } - - // The degree is now known to be at least 2. // Build and balance the companion matrix to the polynomial. Matrix companion_matrix(degree, degree); BuildCompanionMatrix(polynomial, &companion_matrix); @@ -255,6 +323,12 @@ void MinimizePolynomial(const Vector& polynomial, } } +string FunctionSample::ToDebugString() const { + return StringPrintf("[x: %.8e, value: %.8e, gradient: %.8e, " + "value_is_valid: %d, gradient_is_valid: %d]", + x, value, gradient, value_is_valid, gradient_is_valid); +} + Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) { const int num_samples = samples.size(); int num_constraints = 0; @@ -268,6 +342,7 @@ Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) { } const int degree = num_constraints - 1; + Matrix lhs = Matrix::Zero(num_constraints, num_constraints); Vector rhs = Vector::Zero(num_constraints); diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial.h b/extern/libmv/third_party/ceres/internal/ceres/polynomial.h index 42ffdcb13c5..80ce77e6ae1 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/polynomial.h +++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial.h @@ -95,6 +95,7 @@ struct FunctionSample { gradient(0.0), gradient_is_valid(false) { } + string ToDebugString() const; double x; double value; // value = f(x) diff --git a/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h index af64e3c9a44..21cbc00b542 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h +++ b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h @@ -48,6 +48,7 @@ class Preconditioner : public LinearOperator { struct Options { Options() : type(JACOBI), + visibility_clustering_type(CANONICAL_VIEWS), 
sparse_linear_algebra_library_type(SUITE_SPARSE), num_threads(1), row_block_size(Eigen::Dynamic), @@ -56,7 +57,7 @@ class Preconditioner : public LinearOperator { } PreconditionerType type; - + VisibilityClusteringType visibility_clustering_type; SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type; // If possible, how many threads the preconditioner can use. diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem.cc b/extern/libmv/third_party/ceres/internal/ceres/problem.cc index 403e96a3ade..89821b9e360 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/problem.cc @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// Copyright 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -218,4 +218,23 @@ void Problem::GetParameterBlocks(vector<double*>* parameter_blocks) const { problem_impl_->GetParameterBlocks(parameter_blocks); } +void Problem::GetResidualBlocks( + vector<ResidualBlockId>* residual_blocks) const { + problem_impl_->GetResidualBlocks(residual_blocks); +} + +void Problem::GetParameterBlocksForResidualBlock( + const ResidualBlockId residual_block, + vector<double*>* parameter_blocks) const { + problem_impl_->GetParameterBlocksForResidualBlock(residual_block, + parameter_blocks); +} + +void Problem::GetResidualBlocksForParameterBlock( + const double* values, + vector<ResidualBlockId>* residual_blocks) const { + problem_impl_->GetResidualBlocksForParameterBlock(values, + residual_blocks); +} + } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc index 830270269c3..ae87fcb0317 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc +++ 
b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// Copyright 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -27,7 +27,7 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: sameeragarwal@google.com (Sameer Agarwal) -// keir@google.com (Keir Mierle) +// mierle@gmail.com (Keir Mierle) #include "ceres/problem_impl.h" @@ -452,7 +452,11 @@ template<typename Block> void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks, Block* block_to_remove) { CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove) - << "You found a Ceres bug! Block: " << block_to_remove->ToString(); + << "You found a Ceres bug! \n" + << "Block requested: " + << block_to_remove->ToString() << "\n" + << "Block present: " + << (*mutable_blocks)[block_to_remove->index()]->ToString(); // Prepare the to-be-moved block for the new, lower-in-index position by // setting the index to the blocks final location. 
@@ -731,6 +735,57 @@ void ProblemImpl::GetParameterBlocks(vector<double*>* parameter_blocks) const { } } +void ProblemImpl::GetResidualBlocks( + vector<ResidualBlockId>* residual_blocks) const { + CHECK_NOTNULL(residual_blocks); + *residual_blocks = program().residual_blocks(); +} + +void ProblemImpl::GetParameterBlocksForResidualBlock( + const ResidualBlockId residual_block, + vector<double*>* parameter_blocks) const { + int num_parameter_blocks = residual_block->NumParameterBlocks(); + CHECK_NOTNULL(parameter_blocks)->resize(num_parameter_blocks); + for (int i = 0; i < num_parameter_blocks; ++i) { + (*parameter_blocks)[i] = + residual_block->parameter_blocks()[i]->mutable_user_state(); + } +} + +void ProblemImpl::GetResidualBlocksForParameterBlock( + const double* values, + vector<ResidualBlockId>* residual_blocks) const { + ParameterBlock* parameter_block = + FindParameterBlockOrDie(parameter_block_map_, + const_cast<double*>(values)); + + if (options_.enable_fast_parameter_block_removal) { + // In this case the residual blocks that depend on the parameter block are + // stored in the parameter block already, so just copy them out. + CHECK_NOTNULL(residual_blocks)->resize( + parameter_block->mutable_residual_blocks()->size()); + std::copy(parameter_block->mutable_residual_blocks()->begin(), + parameter_block->mutable_residual_blocks()->end(), + residual_blocks->begin()); + return; + } + + // Find residual blocks that depend on the parameter block. + CHECK_NOTNULL(residual_blocks)->clear(); + const int num_residual_blocks = NumResidualBlocks(); + for (int i = 0; i < num_residual_blocks; ++i) { + ResidualBlock* residual_block = + (*(program_->mutable_residual_blocks()))[i]; + const int num_parameter_blocks = residual_block->NumParameterBlocks(); + for (int j = 0; j < num_parameter_blocks; ++j) { + if (residual_block->parameter_blocks()[j] == parameter_block) { + residual_blocks->push_back(residual_block); + // The parameter blocks are guaranteed unique. 
+ break; + } + } + } +} } // namespace internal } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h index ace27f56bb1..35c16cd8392 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h +++ b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h @@ -142,6 +142,15 @@ class ProblemImpl { int ParameterBlockSize(const double* parameter_block) const; int ParameterBlockLocalSize(const double* parameter_block) const; void GetParameterBlocks(vector<double*>* parameter_blocks) const; + void GetResidualBlocks(vector<ResidualBlockId>* residual_blocks) const; + + void GetParameterBlocksForResidualBlock( + const ResidualBlockId residual_block, + vector<double*>* parameter_blocks) const; + + void GetResidualBlocksForParameterBlock( + const double* values, + vector<ResidualBlockId>* residual_blocks) const; const Program& program() const { return *program_; } Program* mutable_program() { return program_.get(); } diff --git a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc deleted file mode 100644 index 7af275c1dd8..00000000000 --- a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc +++ /dev/null @@ -1,217 +0,0 @@ -// Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. -// http://code.google.com/p/ceres-solver/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of Google Inc. nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -// POSSIBILITY OF SUCH DAMAGE. -// -// Author: keir@google.com (Keir Mierle) -// -// Based on the templated version in public/numeric_diff_cost_function.h. 
- -#include "ceres/runtime_numeric_diff_cost_function.h" - -#include <algorithm> -#include <numeric> -#include <vector> -#include "Eigen/Dense" -#include "ceres/cost_function.h" -#include "ceres/internal/scoped_ptr.h" -#include "glog/logging.h" - -namespace ceres { -namespace internal { -namespace { - -bool EvaluateJacobianForParameterBlock(const CostFunction* function, - int parameter_block_size, - int parameter_block, - RuntimeNumericDiffMethod method, - double relative_step_size, - double const* residuals_at_eval_point, - double** parameters, - double** jacobians) { - using Eigen::Map; - using Eigen::Matrix; - using Eigen::Dynamic; - using Eigen::RowMajor; - - typedef Matrix<double, Dynamic, 1> ResidualVector; - typedef Matrix<double, Dynamic, 1> ParameterVector; - typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix; - - int num_residuals = function->num_residuals(); - - Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block], - num_residuals, - parameter_block_size); - - // Mutate one element at a time and then restore. - Map<ParameterVector> x_plus_delta(parameters[parameter_block], - parameter_block_size); - ParameterVector x(x_plus_delta); - ParameterVector step_size = x.array().abs() * relative_step_size; - - // To handle cases where a paremeter is exactly zero, instead use the mean - // step_size for the other dimensions. - double fallback_step_size = step_size.sum() / step_size.rows(); - if (fallback_step_size == 0.0) { - // If all the parameters are zero, there's no good answer. Use the given - // relative step_size as absolute step_size and hope for the best. - fallback_step_size = relative_step_size; - } - - // For each parameter in the parameter block, use finite differences to - // compute the derivative for that parameter. - for (int j = 0; j < parameter_block_size; ++j) { - if (step_size(j) == 0.0) { - // The parameter is exactly zero, so compromise and use the mean step_size - // from the other parameters. 
This can break in many cases, but it's hard - // to pick a good number without problem specific knowledge. - step_size(j) = fallback_step_size; - } - x_plus_delta(j) = x(j) + step_size(j); - - ResidualVector residuals(num_residuals); - if (!function->Evaluate(parameters, &residuals[0], NULL)) { - // Something went wrong; bail. - return false; - } - - // Compute this column of the jacobian in 3 steps: - // 1. Store residuals for the forward part. - // 2. Subtract residuals for the backward (or 0) part. - // 3. Divide out the run. - parameter_jacobian.col(j) = residuals; - - double one_over_h = 1 / step_size(j); - if (method == CENTRAL) { - // Compute the function on the other side of x(j). - x_plus_delta(j) = x(j) - step_size(j); - - if (!function->Evaluate(parameters, &residuals[0], NULL)) { - // Something went wrong; bail. - return false; - } - parameter_jacobian.col(j) -= residuals; - one_over_h /= 2; - } else { - // Forward difference only; reuse existing residuals evaluation. - parameter_jacobian.col(j) -= - Map<const ResidualVector>(residuals_at_eval_point, num_residuals); - } - x_plus_delta(j) = x(j); // Restore x_plus_delta. - - // Divide out the run to get slope. - parameter_jacobian.col(j) *= one_over_h; - } - return true; -} - -class RuntimeNumericDiffCostFunction : public CostFunction { - public: - RuntimeNumericDiffCostFunction(const CostFunction* function, - RuntimeNumericDiffMethod method, - double relative_step_size) - : function_(function), - method_(method), - relative_step_size_(relative_step_size) { - *mutable_parameter_block_sizes() = function->parameter_block_sizes(); - set_num_residuals(function->num_residuals()); - } - - virtual ~RuntimeNumericDiffCostFunction() { } - - virtual bool Evaluate(double const* const* parameters, - double* residuals, - double** jacobians) const { - // Get the function value (residuals) at the the point to evaluate. 
- bool success = function_->Evaluate(parameters, residuals, NULL); - if (!success) { - // Something went wrong; ignore the jacobian. - return false; - } - if (!jacobians) { - // Nothing to do; just forward. - return true; - } - - const vector<int16>& block_sizes = function_->parameter_block_sizes(); - CHECK(!block_sizes.empty()); - - // Create local space for a copy of the parameters which will get mutated. - int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0); - vector<double> parameters_copy(parameters_size); - vector<double*> parameters_references_copy(block_sizes.size()); - parameters_references_copy[0] = ¶meters_copy[0]; - for (int block = 1; block < block_sizes.size(); ++block) { - parameters_references_copy[block] = parameters_references_copy[block - 1] - + block_sizes[block - 1]; - } - - // Copy the parameters into the local temp space. - for (int block = 0; block < block_sizes.size(); ++block) { - memcpy(parameters_references_copy[block], - parameters[block], - block_sizes[block] * sizeof(*parameters[block])); - } - - for (int block = 0; block < block_sizes.size(); ++block) { - if (!jacobians[block]) { - // No jacobian requested for this parameter / residual pair. 
- continue; - } - if (!EvaluateJacobianForParameterBlock(function_, - block_sizes[block], - block, - method_, - relative_step_size_, - residuals, - ¶meters_references_copy[0], - jacobians)) { - return false; - } - } - return true; - } - - private: - const CostFunction* function_; - RuntimeNumericDiffMethod method_; - double relative_step_size_; -}; - -} // namespace - -CostFunction* CreateRuntimeNumericDiffCostFunction( - const CostFunction* cost_function, - RuntimeNumericDiffMethod method, - double relative_step_size) { - return new RuntimeNumericDiffCostFunction(cost_function, - method, - relative_step_size); -} - -} // namespace internal -} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.h b/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.h deleted file mode 100644 index 01b57f92ef3..00000000000 --- a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.h +++ /dev/null @@ -1,87 +0,0 @@ -// Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. -// http://code.google.com/p/ceres-solver/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// -// * Redistributions of source code must retain the above copyright notice, -// this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright notice, -// this list of conditions and the following disclaimer in the documentation -// and/or other materials provided with the distribution. -// * Neither the name of Google Inc. nor the names of its contributors may be -// used to endorse or promote products derived from this software without -// specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -// POSSIBILITY OF SUCH DAMAGE. -// -// Author: keir@google.com (Keir Mierle) -// -// Create CostFunctions as needed by the least squares framework with jacobians -// computed via numeric differentiation. -// -// To get a numerically differentiated cost function, define a subclass of -// CostFunction such that the Evaluate() function ignores the jacobian -// parameter. The numeric differentiation wrapper will fill in the jacobian -// parameter if nececssary by repeatedly calling the Evaluate() function with -// small changes to the appropriate parameters, and computing the slope. This -// implementation is not templated (hence the "Runtime" prefix), which is a bit -// slower than but is more convenient than the templated version in -// numeric_diff_cost_function.h -// -// The numerically differentiated version of a cost function for a cost function -// can be constructed as follows: -// -// CostFunction* cost_function = -// CreateRuntimeNumericDiffCostFunction(new MyCostFunction(...), -// CENTRAL, -// TAKE_OWNERSHIP); -// -// The central difference method is considerably more accurate; consider using -// to start and only after that works, trying forward difference. 
-// -// TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives. - -#ifndef CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_ -#define CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_ - -#include "ceres/cost_function.h" - -namespace ceres { -namespace internal { - -enum RuntimeNumericDiffMethod { - CENTRAL, - FORWARD, -}; - -// Create a cost function that evaluates the derivative with finite differences. -// The base cost_function's implementation of Evaluate() only needs to fill in -// the "residuals" argument and not the "jacobians". Any data written to the -// jacobians by the base cost_function is overwritten. -// -// Forward difference or central difference is selected with CENTRAL or FORWARD. -// The relative eps, which determines the step size for forward and central -// differencing, is set with relative eps. Caller owns the resulting cost -// function, and the resulting cost function does not own the base cost -// function. -CostFunction *CreateRuntimeNumericDiffCostFunction( - const CostFunction *cost_function, - RuntimeNumericDiffMethod method, - double relative_eps); - -} // namespace internal -} // namespace ceres - -#endif // CERES_INTERNAL_RUNTIME_NUMERIC_DIFF_COST_FUNCTION_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc index b192aa1172b..2f407fdd260 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc @@ -75,24 +75,19 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl( fill(x, x + A->num_cols(), 0.0); event_logger.AddEvent("Setup"); - LinearSolver::Summary summary; - summary.num_iterations = 1; - summary.termination_type = FAILURE; eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get()); event_logger.AddEvent("Eliminate"); double* reduced_solution = x + A->num_cols() - 
lhs_->num_cols(); - const bool status = SolveReducedLinearSystem(reduced_solution); + const LinearSolver::Summary summary = + SolveReducedLinearSystem(reduced_solution); event_logger.AddEvent("ReducedSolve"); - if (!status) { - return summary; + if (summary.termination_type == LINEAR_SOLVER_SUCCESS) { + eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x); + event_logger.AddEvent("BackSubstitute"); } - eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x); - summary.termination_type = TOLERANCE; - - event_logger.AddEvent("BackSubstitute"); return summary; } @@ -117,7 +112,13 @@ void DenseSchurComplementSolver::InitStorage( // Solve the system Sx = r, assuming that the matrix S is stored in a // BlockRandomAccessDenseMatrix. The linear system is solved using // Eigen's Cholesky factorization. -bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { +LinearSolver::Summary +DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { + LinearSolver::Summary summary; + summary.num_iterations = 0; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; + const BlockRandomAccessDenseMatrix* m = down_cast<const BlockRandomAccessDenseMatrix*>(lhs()); const int num_rows = m->num_rows(); @@ -125,25 +126,33 @@ bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { // The case where there are no f blocks, and the system is block // diagonal. if (num_rows == 0) { - return true; + return summary; } + summary.num_iterations = 1; + if (options().dense_linear_algebra_library_type == EIGEN) { - // TODO(sameeragarwal): Add proper error handling; this completely ignores - // the quality of the solution to the solve. 
- VectorRef(solution, num_rows) = + Eigen::LLT<Matrix, Eigen::Upper> llt = ConstMatrixRef(m->values(), num_rows, num_rows) .selfadjointView<Eigen::Upper>() - .llt() - .solve(ConstVectorRef(rhs(), num_rows)); - return true; + .llt(); + if (llt.info() != Eigen::Success) { + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = "Eigen LLT decomposition failed."; + return summary; + } + + VectorRef(solution, num_rows) = llt.solve(ConstVectorRef(rhs(), num_rows)); + } else { + VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows); + summary.termination_type = + LAPACK::SolveInPlaceUsingCholesky(num_rows, + m->values(), + solution, + &summary.message); } - VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows); - const int info = LAPACK::SolveInPlaceUsingCholesky(num_rows, - m->values(), - solution); - return (info == 0); + return summary; } #if !defined(CERES_NO_SUITESPARSE) || !defined(CERES_NO_CXSPARE) @@ -242,7 +251,8 @@ void SparseSchurComplementSolver::InitStorage( set_rhs(new double[lhs()->num_rows()]); } -bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { +LinearSolver::Summary +SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { switch (options().sparse_linear_algebra_library_type) { case SUITE_SPARSE: return SolveReducedLinearSystemUsingSuiteSparse(solution); @@ -253,29 +263,33 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { << options().sparse_linear_algebra_library_type; } - LOG(FATAL) << "Unknown sparse linear algebra library : " - << options().sparse_linear_algebra_library_type; - return false; + return LinearSolver::Summary(); } #ifndef CERES_NO_SUITESPARSE // Solve the system Sx = r, assuming that the matrix S is stored in a // BlockRandomAccessSparseMatrix. The linear system is solved using // CHOLMOD's sparse cholesky factorization routines. 
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( +LinearSolver::Summary +SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( double* solution) { + LinearSolver::Summary summary; + summary.num_iterations = 0; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; + TripletSparseMatrix* tsm = const_cast<TripletSparseMatrix*>( down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix()); - const int num_rows = tsm->num_rows(); // The case where there are no f blocks, and the system is block // diagonal. if (num_rows == 0) { - return true; + return summary; } + summary.num_iterations = 1; cholmod_sparse* cholmod_lhs = NULL; if (options().use_postordering) { // If we are going to do a full symbolic analysis of the schur @@ -288,7 +302,10 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( cholmod_lhs->stype = 1; if (factor_ == NULL) { - factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_); + factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, + blocks_, + blocks_, + &summary.message); } } else { // If we are going to use the natural ordering (i.e. 
rely on the @@ -301,33 +318,47 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( cholmod_lhs->stype = -1; if (factor_ == NULL) { - factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs); + factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(cholmod_lhs, + &summary.message); } } + if (factor_ == NULL) { + ss_.Free(cholmod_lhs); + summary.termination_type = LINEAR_SOLVER_FATAL_ERROR; + return summary; + } + + summary.termination_type = + ss_.Cholesky(cholmod_lhs, factor_, &summary.message); + if (summary.termination_type != LINEAR_SOLVER_SUCCESS) { + return summary; + } + cholmod_dense* cholmod_rhs = ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows); - cholmod_dense* cholmod_solution = - ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs); - + cholmod_dense* cholmod_solution = ss_.Solve(factor_, + cholmod_rhs, + &summary.message); ss_.Free(cholmod_lhs); ss_.Free(cholmod_rhs); if (cholmod_solution == NULL) { - LOG(WARNING) << "CHOLMOD solve failed."; - return false; + summary.termination_type = LINEAR_SOLVER_FAILURE; + return summary; } VectorRef(solution, num_rows) = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows); ss_.Free(cholmod_solution); - return true; + return summary; } #else -bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( +LinearSolver::Summary +SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( double* solution) { LOG(FATAL) << "No SuiteSparse support in Ceres."; - return false; + return LinearSolver::Summary(); } #endif // CERES_NO_SUITESPARSE @@ -335,19 +366,24 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( // Solve the system Sx = r, assuming that the matrix S is stored in a // BlockRandomAccessSparseMatrix. The linear system is solved using // CXSparse's sparse cholesky factorization routines. 
-bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse( +LinearSolver::Summary +SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse( double* solution) { + LinearSolver::Summary summary; + summary.num_iterations = 0; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; + // Extract the TripletSparseMatrix that is used for actually storing S. TripletSparseMatrix* tsm = const_cast<TripletSparseMatrix*>( down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix()); - const int num_rows = tsm->num_rows(); // The case where there are no f blocks, and the system is block // diagonal. if (num_rows == 0) { - return true; + return summary; } cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm)); @@ -355,21 +391,27 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse( // Compute symbolic factorization if not available. if (cxsparse_factor_ == NULL) { - cxsparse_factor_ = - CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_)); + cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(lhs, blocks_, blocks_); } - // Solve the linear system. - bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution); + if (cxsparse_factor_ == NULL) { + summary.termination_type = LINEAR_SOLVER_FATAL_ERROR; + summary.message = + "CXSparse failure. 
Unable to find symbolic factorization."; + } else if (!cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution)) { + summary.termination_type = LINEAR_SOLVER_FAILURE; + summary.message = "CXSparse::SolveCholesky failed."; + } cxsparse_.Free(lhs); - return ok; + return summary; } #else -bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse( +LinearSolver::Summary +SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse( double* solution) { LOG(FATAL) << "No CXSparse support in Ceres."; - return false; + return LinearSolver::Summary(); } #endif // CERES_NO_CXPARSE diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h index b5a1c74ab1a..a9978518b17 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h @@ -126,7 +126,8 @@ class SchurComplementSolver : public BlockSparseMatrixSolver { private: virtual void InitStorage(const CompressedRowBlockStructure* bs) = 0; - virtual bool SolveReducedLinearSystem(double* solution) = 0; + virtual LinearSolver::Summary SolveReducedLinearSystem( + double* solution) = 0; LinearSolver::Options options_; @@ -146,7 +147,8 @@ class DenseSchurComplementSolver : public SchurComplementSolver { private: virtual void InitStorage(const CompressedRowBlockStructure* bs); - virtual bool SolveReducedLinearSystem(double* solution); + virtual LinearSolver::Summary SolveReducedLinearSystem( + double* solution); CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver); }; @@ -160,9 +162,12 @@ class SparseSchurComplementSolver : public SchurComplementSolver { private: virtual void InitStorage(const CompressedRowBlockStructure* bs); - virtual bool SolveReducedLinearSystem(double* solution); - bool SolveReducedLinearSystemUsingSuiteSparse(double* solution); - bool SolveReducedLinearSystemUsingCXSparse(double* 
solution); + virtual LinearSolver::Summary SolveReducedLinearSystem( + double* solution); + LinearSolver::Summary SolveReducedLinearSystemUsingSuiteSparse( + double* solution); + LinearSolver::Summary SolveReducedLinearSystemUsingCXSparse( + double* solution); // Size of the blocks in the Schur complement. vector<int> blocks_; diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator.cc index 31f83547b5f..25103dc542a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator.cc @@ -37,7 +37,7 @@ // THIS FILE IS AUTOGENERATED. DO NOT EDIT. //========================================= // -// This file is generated using generate_eliminator_specializations.py. +// This file is generated using generate_eliminator_specialization.py. // Editing it manually is not recommended. #include "ceres/linear_solver.h" @@ -105,6 +105,11 @@ SchurEliminatorBase::Create(const LinearSolver::Options& options) { (options.f_block_size == Eigen::Dynamic)) { return new SchurEliminator<2, 4, Eigen::Dynamic>(options); } + if ((options.row_block_size == 2) && + (options.e_block_size == Eigen::Dynamic) && + (options.f_block_size == Eigen::Dynamic)) { + return new SchurEliminator<2, Eigen::Dynamic, Eigen::Dynamic>(options); + } if ((options.row_block_size == 4) && (options.e_block_size == 4) && (options.f_block_size == 2)) { diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc index 338df715c0a..6dc9e89d3cc 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc @@ -33,10 +33,9 @@ #include <utility> #include <vector> #include "Eigen/Dense" -#include "ceres/block_random_access_sparse_matrix.h" 
+#include "ceres/block_random_access_diagonal_matrix.h" #include "ceres/block_sparse_matrix.h" #include "ceres/collections_port.h" -#include "ceres/detect_structure.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/linear_solver.h" #include "ceres/schur_eliminator.h" @@ -57,16 +56,11 @@ SchurJacobiPreconditioner::SchurJacobiPreconditioner( << "SCHUR_JACOBI preconditioner."; block_size_.resize(num_blocks); - set<pair<int, int> > block_pairs; - - int num_block_diagonal_entries = 0; for (int i = 0; i < num_blocks; ++i) { block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size; - block_pairs.insert(make_pair(i, i)); - num_block_diagonal_entries += block_size_[i] * block_size_[i]; } - m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs)); + m_.reset(new BlockRandomAccessDiagonalMatrix(block_size_)); InitEliminator(bs); } @@ -77,17 +71,13 @@ SchurJacobiPreconditioner::~SchurJacobiPreconditioner() { void SchurJacobiPreconditioner::InitEliminator( const CompressedRowBlockStructure& bs) { LinearSolver::Options eliminator_options; - eliminator_options.elimination_groups = options_.elimination_groups; eliminator_options.num_threads = options_.num_threads; - - DetectStructure(bs, options_.elimination_groups[0], - &eliminator_options.row_block_size, - &eliminator_options.e_block_size, - &eliminator_options.f_block_size); - + eliminator_options.e_block_size = options_.e_block_size; + eliminator_options.f_block_size = options_.f_block_size; + eliminator_options.row_block_size = options_.row_block_size; eliminator_.reset(SchurEliminatorBase::Create(eliminator_options)); - eliminator_->Init(options_.elimination_groups[0], &bs); + eliminator_->Init(eliminator_options.elimination_groups[0], &bs); } // Update the values of the preconditioner matrix and factorize it. 
@@ -118,7 +108,7 @@ void SchurJacobiPreconditioner::RightMultiply(const double* x, CHECK_NOTNULL(y); const double* lhs_values = - down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->matrix()->values(); + down_cast<BlockRandomAccessDiagonalMatrix*>(m_.get())->matrix()->values(); // This loop can be easily multi-threaded with OpenMP if need be. for (int i = 0; i < block_size_.size(); ++i) { diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h index f6e7b0d37ef..aecb0151083 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h @@ -49,7 +49,7 @@ namespace ceres { namespace internal { -class BlockRandomAccessSparseMatrix; +class BlockRandomAccessDiagonalMatrix; class BlockSparseMatrix; struct CompressedRowBlockStructure; class SchurEliminatorBase; @@ -100,7 +100,7 @@ class SchurJacobiPreconditioner : public BlockSparseMatrixPreconditioner { scoped_ptr<SchurEliminatorBase> eliminator_; // Preconditioner matrix. - scoped_ptr<BlockRandomAccessSparseMatrix> m_; + scoped_ptr<BlockRandomAccessDiagonalMatrix> m_; CERES_DISALLOW_COPY_AND_ASSIGN(SchurJacobiPreconditioner); }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.cc b/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.cc new file mode 100644 index 00000000000..54b4379d5f2 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.cc @@ -0,0 +1,107 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_NO_SUITESPARSE + +#include "ceres/single_linkage_clustering.h" + +#include "ceres/graph.h" +#include "ceres/collections_port.h" +#include "ceres/graph_algorithms.h" + +namespace ceres { +namespace internal { + +int ComputeSingleLinkageClustering( + const SingleLinkageClusteringOptions& options, + const Graph<int>& graph, + HashMap<int, int>* membership) { + CHECK_NOTNULL(membership)->clear(); + + // Initially each vertex is in its own cluster. + const HashSet<int>& vertices = graph.vertices(); + for (HashSet<int>::const_iterator it = vertices.begin(); + it != vertices.end(); + ++it) { + (*membership)[*it] = *it; + } + + for (HashSet<int>::const_iterator it1 = vertices.begin(); + it1 != vertices.end(); + ++it1) { + const int vertex1 = *it1; + const HashSet<int>& neighbors = graph.Neighbors(vertex1); + for (HashSet<int>::const_iterator it2 = neighbors.begin(); + it2 != neighbors.end(); + ++it2) { + const int vertex2 = *it2; + + // Since the graph is undirected, only pay attention to one side + // of the edge and ignore weak edges. + if ((vertex1 > vertex2) || + (graph.EdgeWeight(vertex1, vertex2) < options.min_similarity)) { + continue; + } + + // Use a union-find algorithm to keep track of the clusters. + const int c1 = FindConnectedComponent(vertex1, membership); + const int c2 = FindConnectedComponent(vertex2, membership); + + if (c1 == c2) { + continue; + } + + if (c1 < c2) { + (*membership)[c2] = c1; + } else { + (*membership)[c1] = c2; + } + } + } + + // Make sure that every vertex is connected directly to the vertex + // identifying the cluster. 
+ int num_clusters = 0; + for (HashMap<int, int>::iterator it = membership->begin(); + it != membership->end(); + ++it) { + it->second = FindConnectedComponent(it->first, membership); + if (it->first == it->second) { + ++num_clusters; + } + } + + return num_clusters; +} + +} // namespace internal +} // namespace ceres + +#endif // CERES_NO_SUITESPARSE diff --git a/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.h b/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.h new file mode 100644 index 00000000000..9b137cfa67f --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/single_linkage_clustering.h @@ -0,0 +1,71 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_ +#define CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_ + +#ifndef CERES_NO_SUITESPARSE + +#include "ceres/collections_port.h" +#include "ceres/graph.h" + +namespace ceres { +namespace internal { + +struct SingleLinkageClusteringOptions { + SingleLinkageClusteringOptions() + : min_similarity(0.99) { + } + + // Graph edges with edge weight less than min_similarity are ignored + // during the clustering process. + double min_similarity; +}; + +// Compute a partitioning of the vertices of the graph using the +// single linkage clustering algorithm. Edges with weight less than +// SingleLinkageClusteringOptions::min_similarity will be ignored. +// +// membership upon return will contain a mapping from the vertices of +// the graph to an integer indicating the identity of the cluster that +// it belongs to. +// +// The return value of this function is the number of clusters +// identified by the algorithm. 
+int ComputeSingleLinkageClustering( + const SingleLinkageClusteringOptions& options, + const Graph<int>& graph, + HashMap<int, int>* membership); + +} // namespace internal +} // namespace ceres + +#endif // CERES_NO_SUITESPARSE +#endif // CERES_INTERNAL_SINGLE_LINKAGE_CLUSTERING_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver.cc b/extern/libmv/third_party/ceres/internal/ceres/solver.cc index 3b67746044c..67e168cd2d4 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/solver.cc @@ -86,6 +86,7 @@ Solver::Summary::Summary() // accidentally reporting default values. : minimizer_type(TRUST_REGION), termination_type(DID_NOT_RUN), + error("ceres::Solve was not called."), initial_cost(-1.0), final_cost(-1.0), fixed_cost(-1.0), @@ -119,20 +120,20 @@ Solver::Summary::Summary() inner_iterations_given(false), inner_iterations_used(false), preconditioner_type(IDENTITY), + visibility_clustering_type(CANONICAL_VIEWS), trust_region_strategy_type(LEVENBERG_MARQUARDT), dense_linear_algebra_library_type(EIGEN), sparse_linear_algebra_library_type(SUITE_SPARSE), line_search_direction_type(LBFGS), - line_search_type(ARMIJO) { + line_search_type(ARMIJO), + line_search_interpolation_type(BISECTION), + nonlinear_conjugate_gradient_type(FLETCHER_REEVES), + max_lbfgs_rank(-1) { } string Solver::Summary::BriefReport() const { string report = "Ceres Solver Report: "; if (termination_type == DID_NOT_RUN) { - CHECK(!error.empty()) - << "Solver terminated with DID_NOT_RUN but the solver did not " - << "return a reason. This is a Ceres error. 
Please report this " - << "to the Ceres team"; return report + "Termination: DID_NOT_RUN, because " + error; } @@ -237,6 +238,14 @@ string Solver::Summary::FullReport() const { PreconditionerTypeToString(preconditioner_type)); } + if (preconditioner_type == CLUSTER_JACOBI || + preconditioner_type == CLUSTER_TRIDIAGONAL) { + StringAppendF(&report, "Visibility clustering%24s%25s\n", + VisibilityClusteringTypeToString( + visibility_clustering_type), + VisibilityClusteringTypeToString( + visibility_clustering_type)); + } StringAppendF(&report, "Threads % 25d% 25d\n", num_threads_given, num_threads_used); StringAppendF(&report, "Linear solver threads % 23d% 25d\n", @@ -306,10 +315,6 @@ string Solver::Summary::FullReport() const { } if (termination_type == DID_NOT_RUN) { - CHECK(!error.empty()) - << "Solver terminated with DID_NOT_RUN but the solver did not " - << "return a reason. This is a Ceres error. Please report this " - << "to the Ceres team"; StringAppendF(&report, "Termination: %20s\n", "DID_NOT_RUN"); StringAppendF(&report, "Reason: %s\n", error.c_str()); diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc index 83faa0510c0..1ebf442131a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc @@ -454,6 +454,13 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, event_logger.AddEvent("ConstructOrdering"); } + if (original_options.inner_iteration_ordering != NULL) { + // Make a copy, as the options struct takes ownership of the + // ordering objects. + options.inner_iteration_ordering = + new ParameterBlockOrdering(*original_options.inner_iteration_ordering); + } + // Create the three objects needed to minimize: the transformed program, the // evaluator, and the linear solver. 
scoped_ptr<Program> reduced_program(CreateReducedProgram(&options, @@ -494,6 +501,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, // Ensure the program state is set to the user parameters on the way out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + original_program->SetParameterOffsetsAndIndex(); summary->postprocessor_time_in_seconds = WallTimeInSeconds() - post_process_start_time; @@ -511,6 +519,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, summary->linear_solver_type_used = options.linear_solver_type; summary->preconditioner_type = options.preconditioner_type; + summary->visibility_clustering_type = options.visibility_clustering_type; summary->num_linear_solver_threads_given = original_options.num_linear_solver_threads; @@ -542,7 +551,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, << "Disabling inner iterations."; } else { inner_iteration_minimizer.reset( - CreateInnerIterationMinimizer(original_options, + CreateInnerIterationMinimizer(options, *reduced_program, problem_impl->parameter_map(), summary)); @@ -596,6 +605,7 @@ void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, // Ensure the program state is set to the user parameters on the way // out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + original_program->SetParameterOffsetsAndIndex(); const map<string, double>& linear_solver_time_statistics = linear_solver->TimeStatistics(); @@ -839,6 +849,8 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options, // Ensure the program state is set to the user parameters on the way out. 
original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + original_program->SetParameterOffsetsAndIndex(); + summary->postprocessor_time_in_seconds = WallTimeInSeconds() - post_process_start_time; return; @@ -889,6 +901,7 @@ void SolverImpl::LineSearchSolve(const Solver::Options& original_options, // Ensure the program state is set to the user parameters on the way out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + original_program->SetParameterOffsetsAndIndex(); const map<string, double>& evaluator_time_statistics = evaluator->TimeStatistics(); @@ -968,14 +981,13 @@ bool SolverImpl::IsParameterBlockSetIndependent( // Strips varying parameters and residuals, maintaining order, and updating -// num_eliminate_blocks. -bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program, - ParameterBlockOrdering* ordering, - double* fixed_cost, - string* error) { - vector<ParameterBlock*>* parameter_blocks = - program->mutable_parameter_blocks(); - +// orderings. +bool SolverImpl::RemoveFixedBlocksFromProgram( + Program* program, + ParameterBlockOrdering* linear_solver_ordering, + ParameterBlockOrdering* inner_iteration_ordering, + double* fixed_cost, + string* error) { scoped_array<double> residual_block_evaluate_scratch; if (fixed_cost != NULL) { residual_block_evaluate_scratch.reset( @@ -983,70 +995,79 @@ bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program, *fixed_cost = 0.0; } - // Mark all the parameters as unused. Abuse the index member of the parameter - // blocks for the marking. + vector<ParameterBlock*>* parameter_blocks = + program->mutable_parameter_blocks(); + vector<ResidualBlock*>* residual_blocks = + program->mutable_residual_blocks(); + + // Mark all the parameters as unused. Abuse the index member of the + // parameter blocks for the marking. 
for (int i = 0; i < parameter_blocks->size(); ++i) { (*parameter_blocks)[i]->set_index(-1); } // Filter out residual that have all-constant parameters, and mark all the // parameter blocks that appear in residuals. - { - vector<ResidualBlock*>* residual_blocks = - program->mutable_residual_blocks(); - int j = 0; - for (int i = 0; i < residual_blocks->size(); ++i) { - ResidualBlock* residual_block = (*residual_blocks)[i]; - int num_parameter_blocks = residual_block->NumParameterBlocks(); - - // Determine if the residual block is fixed, and also mark varying - // parameters that appear in the residual block. - bool all_constant = true; - for (int k = 0; k < num_parameter_blocks; k++) { - ParameterBlock* parameter_block = residual_block->parameter_blocks()[k]; - if (!parameter_block->IsConstant()) { - all_constant = false; - parameter_block->set_index(1); - } + int num_active_residual_blocks = 0; + for (int i = 0; i < residual_blocks->size(); ++i) { + ResidualBlock* residual_block = (*residual_blocks)[i]; + int num_parameter_blocks = residual_block->NumParameterBlocks(); + + // Determine if the residual block is fixed, and also mark varying + // parameters that appear in the residual block. + bool all_constant = true; + for (int k = 0; k < num_parameter_blocks; k++) { + ParameterBlock* parameter_block = residual_block->parameter_blocks()[k]; + if (!parameter_block->IsConstant()) { + all_constant = false; + parameter_block->set_index(1); } + } - if (!all_constant) { - (*residual_blocks)[j++] = (*residual_blocks)[i]; - } else if (fixed_cost != NULL) { - // The residual is constant and will be removed, so its cost is - // added to the variable fixed_cost. 
- double cost = 0.0; - if (!residual_block->Evaluate(true, - &cost, - NULL, - NULL, - residual_block_evaluate_scratch.get())) { - *error = StringPrintf("Evaluation of the residual %d failed during " - "removal of fixed residual blocks.", i); - return false; - } - *fixed_cost += cost; + if (!all_constant) { + (*residual_blocks)[num_active_residual_blocks++] = residual_block; + } else if (fixed_cost != NULL) { + // The residual is constant and will be removed, so its cost is + // added to the variable fixed_cost. + double cost = 0.0; + if (!residual_block->Evaluate(true, + &cost, + NULL, + NULL, + residual_block_evaluate_scratch.get())) { + *error = StringPrintf("Evaluation of the residual %d failed during " + "removal of fixed residual blocks.", i); + return false; } + *fixed_cost += cost; } - residual_blocks->resize(j); - } - - // Filter out unused or fixed parameter blocks, and update - // the ordering. - { - vector<ParameterBlock*>* parameter_blocks = - program->mutable_parameter_blocks(); - int j = 0; - for (int i = 0; i < parameter_blocks->size(); ++i) { - ParameterBlock* parameter_block = (*parameter_blocks)[i]; - if (parameter_block->index() == 1) { - (*parameter_blocks)[j++] = parameter_block; - } else { - ordering->Remove(parameter_block->mutable_user_state()); + } + residual_blocks->resize(num_active_residual_blocks); + + // Filter out unused or fixed parameter blocks, and update the + // linear_solver_ordering and the inner_iteration_ordering (if + // present). + int num_active_parameter_blocks = 0; + for (int i = 0; i < parameter_blocks->size(); ++i) { + ParameterBlock* parameter_block = (*parameter_blocks)[i]; + if (parameter_block->index() == -1) { + // Parameter block is constant. + if (linear_solver_ordering != NULL) { + linear_solver_ordering->Remove(parameter_block->mutable_user_state()); + } + + // It is not necessary that the inner iteration ordering contain + // this parameter block. But calling Remove is safe, as it will + // just return false. 
+ if (inner_iteration_ordering != NULL) { + inner_iteration_ordering->Remove(parameter_block->mutable_user_state()); } + continue; } - parameter_blocks->resize(j); + + (*parameter_blocks)[num_active_parameter_blocks++] = parameter_block; } + parameter_blocks->resize(num_active_parameter_blocks); if (!(((program->NumResidualBlocks() == 0) && (program->NumParameterBlocks() == 0)) || @@ -1071,9 +1092,11 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options, options->linear_solver_ordering; const int min_group_id = linear_solver_ordering->group_to_elements().begin()->first; - + ParameterBlockOrdering* inner_iteration_ordering = + options->inner_iteration_ordering; if (!RemoveFixedBlocksFromProgram(transformed_program.get(), linear_solver_ordering, + inner_iteration_ordering, fixed_cost, error)) { return NULL; @@ -1239,6 +1262,8 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, options->max_linear_solver_iterations; linear_solver_options.type = options->linear_solver_type; linear_solver_options.preconditioner_type = options->preconditioner_type; + linear_solver_options.visibility_clustering_type = + options->visibility_clustering_type; linear_solver_options.sparse_linear_algebra_library_type = options->sparse_linear_algebra_library_type; linear_solver_options.dense_linear_algebra_library_type = diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h index 2b7ca3e3310..aef63b044dd 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h +++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h @@ -120,15 +120,24 @@ class SolverImpl { string* error); // Remove the fixed or unused parameter blocks and residuals - // depending only on fixed parameters from the problem. Also updates - // num_eliminate_blocks, since removed parameters changes the point - // at which the eliminated blocks is valid. 
If fixed_cost is not - // NULL, the residual blocks that are removed are evaluated and the - // sum of their cost is returned in fixed_cost. - static bool RemoveFixedBlocksFromProgram(Program* program, - ParameterBlockOrdering* ordering, - double* fixed_cost, - string* error); + // depending only on fixed parameters from the program. + // + // If either linear_solver_ordering or inner_iteration_ordering are + // not NULL, the constant parameter blocks are removed from them + // too. + // + // If fixed_cost is not NULL, the residual blocks that are removed + // are evaluated and the sum of their cost is returned in + // fixed_cost. + // + // If a failure is encountered, the function returns false with a + // description of the failure in error. + static bool RemoveFixedBlocksFromProgram( + Program* program, + ParameterBlockOrdering* linear_solver_ordering, + ParameterBlockOrdering* inner_iteration_ordering, + double* fixed_cost, + string* error); static bool IsOrderingValid(const Solver::Options& options, const ProblemImpl* problem_impl, diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc index f1a52378e2b..1ead8f70ae2 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc @@ -102,6 +102,9 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse( LinearSolver::Summary summary; summary.num_iterations = 1; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.message = "Success."; + const int num_cols = A->num_cols(); Vector Atb = Vector::Zero(num_cols); A->LeftMultiply(b, Atb.data()); @@ -137,21 +140,23 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse( // Compute symbolic factorization if not available. 
if (cxsparse_factor_ == NULL) { if (options_.use_postordering) { - cxsparse_factor_ = - CHECK_NOTNULL(cxsparse_.BlockAnalyzeCholesky(AtA, - A->col_blocks(), - A->col_blocks())); + cxsparse_factor_ = cxsparse_.BlockAnalyzeCholesky(AtA, + A->col_blocks(), + A->col_blocks()); } else { - cxsparse_factor_ = - CHECK_NOTNULL(cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA)); + cxsparse_factor_ = cxsparse_.AnalyzeCholeskyWithNaturalOrdering(AtA); } } event_logger.AddEvent("Analysis"); - // Solve the linear system. - if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) { + if (cxsparse_factor_ == NULL) { + summary.termination_type = LINEAR_SOLVER_FATAL_ERROR; + summary.message = + "CXSparse failure. Unable to find symbolic factorization."; + } else if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) { VectorRef(x, Atb.rows()) = Atb; - summary.termination_type = TOLERANCE; + } else { + summary.termination_type = LINEAR_SOLVER_FAILURE; } event_logger.AddEvent("Solve"); @@ -179,56 +184,68 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse( const LinearSolver::PerSolveOptions& per_solve_options, double * x) { EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve"); + LinearSolver::Summary summary; + summary.termination_type = LINEAR_SOLVER_SUCCESS; + summary.num_iterations = 1; + summary.message = "Success."; const int num_cols = A->num_cols(); - LinearSolver::Summary summary; Vector Atb = Vector::Zero(num_cols); A->LeftMultiply(b, Atb.data()); if (per_solve_options.D != NULL) { - // Temporarily append a diagonal block to the A matrix, but undo it before - // returning the matrix to the user. + // Temporarily append a diagonal block to the A matrix, but undo + // it before returning the matrix to the user. 
CompressedRowSparseMatrix D(per_solve_options.D, num_cols); A->AppendRows(D); } VectorRef(x, num_cols).setZero(); - cholmod_sparse lhs = ss_.CreateSparseMatrixTransposeView(A); - cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols); event_logger.AddEvent("Setup"); if (factor_ == NULL) { if (options_.use_postordering) { - factor_ = - CHECK_NOTNULL(ss_.BlockAnalyzeCholesky(&lhs, - A->col_blocks(), - A->row_blocks())); + factor_ = ss_.BlockAnalyzeCholesky(&lhs, + A->col_blocks(), + A->row_blocks(), + &summary.message); } else { - factor_ = - CHECK_NOTNULL(ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs)); + factor_ = ss_.AnalyzeCholeskyWithNaturalOrdering(&lhs, &summary.message); } } - event_logger.AddEvent("Analysis"); - cholmod_dense* sol = ss_.SolveCholesky(&lhs, factor_, rhs); + if (factor_ == NULL) { + if (per_solve_options.D != NULL) { + A->DeleteRows(num_cols); + } + summary.termination_type = LINEAR_SOLVER_FATAL_ERROR; + return summary; + } + + summary.termination_type = ss_.Cholesky(&lhs, factor_, &summary.message); + if (summary.termination_type != LINEAR_SOLVER_SUCCESS) { + if (per_solve_options.D != NULL) { + A->DeleteRows(num_cols); + } + return summary; + } + + cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols); + cholmod_dense* sol = ss_.Solve(factor_, rhs, &summary.message); event_logger.AddEvent("Solve"); ss_.Free(rhs); - rhs = NULL; - if (per_solve_options.D != NULL) { A->DeleteRows(num_cols); } - summary.num_iterations = 1; if (sol != NULL) { memcpy(x, sol->x, num_cols * sizeof(*x)); - ss_.Free(sol); - sol = NULL; - summary.termination_type = TOLERANCE; + } else { + summary.termination_type = LINEAR_SOLVER_FAILURE; } event_logger.AddEvent("Teardown"); diff --git a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc index ce204674dce..eabdcb697c9 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc +++ 
b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc @@ -43,7 +43,7 @@ namespace internal { #ifdef _MSC_VER enum { IS_COMPILER_MSVC = 1 }; -#define va_copy(d,s) ((d) = (s)) +#define va_copy(d, s) ((d) = (s)) #else enum { IS_COMPILER_MSVC = 0 }; #endif diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc index 9de32fd76ad..06cc0a8e6fa 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc @@ -35,6 +35,7 @@ #include "cholmod.h" #include "ceres/compressed_col_sparse_matrix_utils.h" #include "ceres/compressed_row_sparse_matrix.h" +#include "ceres/linear_solver.h" #include "ceres/triplet_sparse_matrix.h" namespace ceres { @@ -120,7 +121,8 @@ cholmod_dense* SuiteSparse::CreateDenseVector(const double* x, return v; } -cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) { +cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A, + string* status) { // Cholmod can try multiple re-ordering strategies to find a fill // reducing ordering. Here we just tell it use AMD with automatic // matrix dependence choice of supernodal versus simplicial @@ -130,31 +132,35 @@ cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) { cc_.supernodal = CHOLMOD_AUTO; cholmod_factor* factor = cholmod_analyze(A, &cc_); - CHECK_EQ(cc_.status, CHOLMOD_OK) - << "Cholmod symbolic analysis failed " << cc_.status; - CHECK_NOTNULL(factor); - if (VLOG_IS_ON(2)) { cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_); } - return factor; + if (cc_.status != CHOLMOD_OK) { + *status = StringPrintf("cholmod_analyze failed. 
error code: %d", + cc_.status); + return NULL; + } + + return CHECK_NOTNULL(factor); } cholmod_factor* SuiteSparse::BlockAnalyzeCholesky( cholmod_sparse* A, const vector<int>& row_blocks, - const vector<int>& col_blocks) { + const vector<int>& col_blocks, + string* status) { vector<int> ordering; if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) { return NULL; } - return AnalyzeCholeskyWithUserOrdering(A, ordering); + return AnalyzeCholeskyWithUserOrdering(A, ordering, status); } cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering( cholmod_sparse* A, - const vector<int>& ordering) { + const vector<int>& ordering, + string* status) { CHECK_EQ(ordering.size(), A->nrow); cc_.nmethods = 1; @@ -162,33 +168,36 @@ cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering( cholmod_factor* factor = cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_); - CHECK_EQ(cc_.status, CHOLMOD_OK) - << "Cholmod symbolic analysis failed " << cc_.status; - CHECK_NOTNULL(factor); - if (VLOG_IS_ON(2)) { cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_); } + if (cc_.status != CHOLMOD_OK) { + *status = StringPrintf("cholmod_analyze failed. error code: %d", + cc_.status); + return NULL; + } - return factor; + return CHECK_NOTNULL(factor); } cholmod_factor* SuiteSparse::AnalyzeCholeskyWithNaturalOrdering( - cholmod_sparse* A) { + cholmod_sparse* A, + string* status) { cc_.nmethods = 1; cc_.method[0].ordering = CHOLMOD_NATURAL; cc_.postorder = 0; cholmod_factor* factor = cholmod_analyze(A, &cc_); - CHECK_EQ(cc_.status, CHOLMOD_OK) - << "Cholmod symbolic analysis failed " << cc_.status; - CHECK_NOTNULL(factor); - if (VLOG_IS_ON(2)) { cholmod_print_common(const_cast<char*>("Symbolic Analysis"), &cc_); } + if (cc_.status != CHOLMOD_OK) { + *status = StringPrintf("cholmod_analyze failed. 
error code: %d", + cc_.status); + return NULL; + } - return factor; + return CHECK_NOTNULL(factor); } bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A, @@ -233,7 +242,9 @@ bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A, return true; } -bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) { +LinearSolverTerminationType SuiteSparse::Cholesky(cholmod_sparse* A, + cholmod_factor* L, + string* status) { CHECK_NOTNULL(A); CHECK_NOTNULL(L); @@ -245,7 +256,7 @@ bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) { cc_.print = 0; cc_.quick_return_if_not_posdef = 1; - int status = cholmod_factorize(A, L, &cc_); + int cholmod_status = cholmod_factorize(A, L, &cc_); cc_.print = old_print_level; // TODO(sameeragarwal): This switch statement is not consistent. It @@ -257,84 +268,73 @@ bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) { // (e.g. out of memory). switch (cc_.status) { case CHOLMOD_NOT_INSTALLED: - LOG(WARNING) << "CHOLMOD failure: Method not installed."; - return false; + *status = "CHOLMOD failure: Method not installed."; + return LINEAR_SOLVER_FATAL_ERROR; case CHOLMOD_OUT_OF_MEMORY: - LOG(WARNING) << "CHOLMOD failure: Out of memory."; - return false; + *status = "CHOLMOD failure: Out of memory."; + return LINEAR_SOLVER_FATAL_ERROR; case CHOLMOD_TOO_LARGE: - LOG(WARNING) << "CHOLMOD failure: Integer overflow occured."; - return false; + *status = "CHOLMOD failure: Integer overflow occured."; + return LINEAR_SOLVER_FATAL_ERROR; case CHOLMOD_INVALID: - LOG(WARNING) << "CHOLMOD failure: Invalid input."; - return false; + *status = "CHOLMOD failure: Invalid input."; + return LINEAR_SOLVER_FATAL_ERROR; case CHOLMOD_NOT_POSDEF: - // TODO(sameeragarwal): These two warnings require more - // sophisticated handling going forward. For now we will be - // strict and treat them as failures. 
- LOG(WARNING) << "CHOLMOD warning: Matrix not positive definite."; - return false; + *status = "CHOLMOD warning: Matrix not positive definite."; + return LINEAR_SOLVER_FAILURE; case CHOLMOD_DSMALL: - LOG(WARNING) << "CHOLMOD warning: D for LDL' or diag(L) or " - << "LL' has tiny absolute value."; - return false; + *status = "CHOLMOD warning: D for LDL' or diag(L) or " + "LL' has tiny absolute value."; + return LINEAR_SOLVER_FAILURE; case CHOLMOD_OK: - if (status != 0) { - return true; + if (cholmod_status != 0) { + return LINEAR_SOLVER_SUCCESS; } - LOG(WARNING) << "CHOLMOD failure: cholmod_factorize returned zero " - << "but cholmod_common::status is CHOLMOD_OK." - << "Please report this to ceres-solver@googlegroups.com."; - return false; + + *status = "CHOLMOD failure: cholmod_factorize returned false " + "but cholmod_common::status is CHOLMOD_OK." + "Please report this to ceres-solver@googlegroups.com."; + return LINEAR_SOLVER_FATAL_ERROR; default: - LOG(WARNING) << "Unknown cholmod return code. " - << "Please report this to ceres-solver@googlegroups.com."; - return false; + *status = + StringPrintf("Unknown cholmod return code: %d. " + "Please report this to ceres-solver@googlegroups.com.", + cc_.status); + return LINEAR_SOLVER_FATAL_ERROR; } - return false; + + return LINEAR_SOLVER_FATAL_ERROR; } cholmod_dense* SuiteSparse::Solve(cholmod_factor* L, - cholmod_dense* b) { + cholmod_dense* b, + string* status) { if (cc_.status != CHOLMOD_OK) { - LOG(WARNING) << "CHOLMOD status NOT OK"; + *status = "cholmod_solve failed. 
CHOLMOD status is not CHOLMOD_OK"; return NULL; } return cholmod_solve(CHOLMOD_A, L, b, &cc_); } -cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A, - cholmod_factor* L, - cholmod_dense* b) { - CHECK_NOTNULL(A); - CHECK_NOTNULL(L); - CHECK_NOTNULL(b); - - if (Cholesky(A, L)) { - return Solve(L, b); - } - - return NULL; -} - -void SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, +bool SuiteSparse::ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering) { - cholmod_amd(matrix, NULL, 0, ordering, &cc_); + return cholmod_amd(matrix, NULL, 0, ordering, &cc_); } -void SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering( +bool SuiteSparse::ConstrainedApproximateMinimumDegreeOrdering( cholmod_sparse* matrix, int* constraints, int* ordering) { #ifndef CERES_NO_CAMD - cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_); + return cholmod_camd(matrix, NULL, 0, constraints, ordering, &cc_); #else LOG(FATAL) << "Congratulations you have found a bug in Ceres." << "Ceres Solver was compiled with SuiteSparse " << "version 4.1.0 or less. Calling this function " << "in that case is a bug. Please contact the" << "the Ceres Solver developers."; + return false; #endif } diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h index 16f298ea79c..0604654f7c8 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h +++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h @@ -41,6 +41,7 @@ #include <vector> #include "ceres/internal/port.h" +#include "ceres/linear_solver.h" #include "cholmod.h" #include "glog/logging.h" #include "SuiteSparseQR.hpp" @@ -138,12 +139,15 @@ class SuiteSparse { // A is not modified, only the pattern of non-zeros of A is used, // the actual numerical values in A are of no consequence. // + // status contains an explanation of the failures if any. + // // Caller owns the result. 
- cholmod_factor* AnalyzeCholesky(cholmod_sparse* A); + cholmod_factor* AnalyzeCholesky(cholmod_sparse* A, string* status); cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A, const vector<int>& row_blocks, - const vector<int>& col_blocks); + const vector<int>& col_blocks, + string* status); // If A is symmetric, then compute the symbolic Cholesky // factorization of A(ordering, ordering). If A is unsymmetric, then @@ -153,33 +157,38 @@ class SuiteSparse { // A is not modified, only the pattern of non-zeros of A is used, // the actual numerical values in A are of no consequence. // + // status contains an explanation of the failures if any. + // // Caller owns the result. cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A, - const vector<int>& ordering); + const vector<int>& ordering, + string* status); // Perform a symbolic factorization of A without re-ordering A. No // postordering of the elimination tree is performed. This ensures // that the symbolic factor does not introduce an extra permutation // on the matrix. See the documentation for CHOLMOD for more details. - cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A); + // + // status contains an explanation of the failures if any. + cholmod_factor* AnalyzeCholeskyWithNaturalOrdering(cholmod_sparse* A, + string* status); // Use the symbolic factorization in L, to find the numerical // factorization for the matrix A or AA^T. Return true if // successful, false otherwise. L contains the numeric factorization // on return. - bool Cholesky(cholmod_sparse* A, cholmod_factor* L); + // + // status contains an explanation of the failures if any. + LinearSolverTerminationType Cholesky(cholmod_sparse* A, + cholmod_factor* L, + string* status); // Given a Cholesky factorization of a matrix A = LL^T, solve the // linear system Ax = b, and return the result. If the Solve fails // NULL is returned. Caller owns the result. 
- cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b); - - // Combine the calls to Cholesky and Solve into a single call. If - // the cholesky factorization or the solve fails, return - // NULL. Caller owns the result. - cholmod_dense* SolveCholesky(cholmod_sparse* A, - cholmod_factor* L, - cholmod_dense* b); + // + // status contains an explanation of the failures if any. + cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b, string* solve); // By virtue of the modeling layer in Ceres being block oriented, // all the matrices used by Ceres are also block oriented. When @@ -211,7 +220,7 @@ class SuiteSparse { // Find a fill reducing approximate minimum degree // ordering. ordering is expected to be large enough to hold the // ordering. - void ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering); + bool ApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* ordering); // Before SuiteSparse version 4.2.0, cholmod_camd was only enabled @@ -241,7 +250,7 @@ class SuiteSparse { // // If CERES_NO_CAMD is defined then calling this function will // result in a crash. 
- void ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, + bool ConstrainedApproximateMinimumDegreeOrdering(cholmod_sparse* matrix, int* constraints, int* ordering); diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc index 03d6c8e6b94..a613a655559 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc @@ -81,6 +81,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, double start_time = WallTimeInSeconds(); double iteration_start_time = start_time; Init(options); + const bool is_not_silent = !options.is_silent; summary->termination_type = NO_CONVERGENCE; summary->num_successful_steps = 0; @@ -112,11 +113,10 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.step_is_successful = false; iteration_summary.cost_change = 0.0; iteration_summary.gradient_max_norm = 0.0; + iteration_summary.gradient_norm = 0.0; iteration_summary.step_norm = 0.0; iteration_summary.relative_decrease = 0.0; iteration_summary.trust_region_radius = strategy->Radius(); - // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or - // something similar across the board. 
iteration_summary.eta = options_.eta; iteration_summary.linear_solver_iterations = 0; iteration_summary.step_solver_time_in_seconds = 0; @@ -128,8 +128,9 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, residuals.data(), gradient.data(), jacobian)) { - LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed."; + summary->error = "Terminating: Residual and Jacobian evaluation failed."; summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -143,6 +144,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, summary->initial_cost = cost + summary->fixed_cost; iteration_summary.cost = cost + summary->fixed_cost; iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>(); + iteration_summary.gradient_norm = gradient.norm(); // The initial gradient max_norm is bounded from below so that we do // not divide by zero. @@ -152,11 +154,13 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, options_.gradient_tolerance * initial_gradient_max_norm; if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->error = StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options_.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; - VLOG(1) << "Terminating: Gradient tolerance reached." 
- << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / initial_gradient_max_norm - << " <= " << options_.gradient_tolerance; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -184,17 +188,19 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_start_time = WallTimeInSeconds(); if (iteration_summary.iteration >= options_.max_num_iterations) { + summary->error = "Terminating: Maximum number of iterations reached."; summary->termination_type = NO_CONVERGENCE; - VLOG(1) << "Terminating: Maximum number of iterations reached."; - break; + VLOG_IF(1, is_not_silent) << summary->error; + return; } const double total_solver_time = iteration_start_time - start_time + summary->preprocessor_time_in_seconds; if (total_solver_time >= options_.max_solver_time_in_seconds) { + summary->error = "Terminating: Maximum solver time reached."; summary->termination_type = NO_CONVERGENCE; - VLOG(1) << "Terminating: Maximum solver time reached."; - break; + VLOG_IF(1, is_not_silent) << summary->error; + return; } const double strategy_start_time = WallTimeInSeconds(); @@ -221,6 +227,15 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, residuals.data(), trust_region_step.data()); + if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) { + summary->error = + "Terminating. Linear solver failed due to unrecoverable " + "non-numeric causes. Please see the error log for clues. 
"; + summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; + return; + } + iteration_summary = IterationSummary(); iteration_summary.iteration = summary->iterations.back().iteration + 1; iteration_summary.step_solver_time_in_seconds = @@ -230,8 +245,17 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.step_is_valid = false; iteration_summary.step_is_successful = false; + if (strategy_summary.termination_type == LINEAR_SOLVER_FATAL_ERROR) { + summary->error = + "Terminating. Linear solver failed due to unrecoverable " + "non-numeric causes. Please see the error log for clues. "; + summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; + return; + } + double model_cost_change = 0.0; - if (strategy_summary.termination_type != FAILURE) { + if (strategy_summary.termination_type != LINEAR_SOLVER_FAILURE) { // new_model_cost // = 1/2 [f + J * step]^2 // = 1/2 [ f'f + 2f'J * step + step' * J' * J * step ] @@ -245,9 +269,10 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, - model_residuals.dot(residuals + model_residuals / 2.0); if (model_cost_change < 0.0) { - VLOG(1) << "Invalid step: current_cost: " << cost - << " absolute difference " << model_cost_change - << " relative difference " << (model_cost_change / cost); + VLOG_IF(1, is_not_silent) + << "Invalid step: current_cost: " << cost + << " absolute difference " << model_cost_change + << " relative difference " << (model_cost_change / cost); } else { iteration_summary.step_is_valid = true; } @@ -259,13 +284,12 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // NUMERICAL_FAILURE if this limit is exceeded. if (++num_consecutive_invalid_steps >= options_.max_num_consecutive_invalid_steps) { - summary->termination_type = NUMERICAL_FAILURE; summary->error = StringPrintf( "Terminating. 
Number of successive invalid steps more " "than Solver::Options::max_num_consecutive_invalid_steps: %d", options_.max_num_consecutive_invalid_steps); - - LOG(WARNING) << summary->error; + summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } @@ -278,6 +302,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.cost_change = 0.0; iteration_summary.gradient_max_norm = summary->iterations.back().gradient_max_norm; + iteration_summary.gradient_norm = + summary->iterations.back().gradient_norm; iteration_summary.step_norm = 0.0; iteration_summary.relative_decrease = 0.0; iteration_summary.eta = options_.eta; @@ -287,24 +313,18 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // Undo the Jacobian column scaling. delta = (trust_region_step.array() * scale.array()).matrix(); - if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) { - summary->termination_type = NUMERICAL_FAILURE; - summary->error = - "Terminating. Failed to compute Plus(x, delta, x_plus_delta)."; - - LOG(WARNING) << summary->error; - return; - } - // Try this step. double new_cost = numeric_limits<double>::max(); - if (!evaluator->Evaluate(x_plus_delta.data(), - &new_cost, - NULL, NULL, NULL)) { - // If the evaluation of the new cost fails, treat it as a step - // with high cost. + if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) { + LOG(WARNING) << "x_plus_delta = Plus(x, delta) failed. " + << "Treating it as a step with infinite cost"; + } else if (!evaluator->Evaluate(x_plus_delta.data(), + &new_cost, + NULL, + NULL, + NULL)) { LOG(WARNING) << "Step failed to evaluate. " - << "Treating it as step with infinite cost"; + << "Treating it as a step with infinite cost"; new_cost = numeric_limits<double>::max(); } else { // Check if performing an inner iteration will make it better. 
@@ -320,30 +340,30 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, if (!evaluator->Evaluate(inner_iteration_x.data(), &new_cost, NULL, NULL, NULL)) { - VLOG(2) << "Inner iteration failed."; + VLOG_IF(2, is_not_silent) << "Inner iteration failed."; new_cost = x_plus_delta_cost; } else { x_plus_delta = inner_iteration_x; // Boost the model_cost_change, since the inner iteration // improvements are not accounted for by the trust region. model_cost_change += x_plus_delta_cost - new_cost; - VLOG(2) << "Inner iteration succeeded; current cost: " << cost - << " x_plus_delta_cost: " << x_plus_delta_cost - << " new_cost: " << new_cost; - const double inner_iteration_relative_progress = - 1.0 - new_cost / x_plus_delta_cost; - inner_iterations_are_enabled = - (inner_iteration_relative_progress > - options.inner_iteration_tolerance); + VLOG_IF(2, is_not_silent) + << "Inner iteration succeeded; Current cost: " << cost + << " Trust region step cost: " << x_plus_delta_cost + << " Inner iteration cost: " << new_cost; inner_iterations_were_useful = new_cost < cost; + const double inner_iteration_relative_progress = + 1.0 - new_cost / x_plus_delta_cost; // Disable inner iterations once the relative improvement // drops below tolerance. - if (!inner_iterations_are_enabled) { - VLOG(2) << "Disabling inner iterations. Progress : " - << inner_iteration_relative_progress; - } + inner_iterations_are_enabled = + (inner_iteration_relative_progress > + options.inner_iteration_tolerance); + VLOG_IF(2, is_not_silent && !inner_iterations_are_enabled) + << "Disabling inner iterations. 
Progress : " + << inner_iteration_relative_progress; } summary->inner_iteration_time_in_seconds += WallTimeInSeconds() - inner_iteration_start_time; @@ -356,12 +376,14 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, const double step_size_tolerance = options_.parameter_tolerance * (x_norm + options_.parameter_tolerance); if (iteration_summary.step_norm <= step_size_tolerance) { - VLOG(1) << "Terminating. Parameter tolerance reached. " - << "relative step_norm: " - << iteration_summary.step_norm / - (x_norm + options_.parameter_tolerance) - << " <= " << options_.parameter_tolerance; + summary->error = + StringPrintf("Terminating. Parameter tolerance reached. " + "relative step_norm: %e <= %e.", + (iteration_summary.step_norm / + (x_norm + options_.parameter_tolerance)), + options_.parameter_tolerance); summary->termination_type = PARAMETER_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -369,11 +391,13 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, const double absolute_function_tolerance = options_.function_tolerance * cost; if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) { - VLOG(1) << "Terminating. Function tolerance reached. " - << "|cost_change|/cost: " - << fabs(iteration_summary.cost_change) / cost - << " <= " << options_.function_tolerance; + summary->error = + StringPrintf("Terminating. Function tolerance reached. " + "|cost_change|/cost: %e <= %e", + fabs(iteration_summary.cost_change) / cost, + options_.function_tolerance); summary->termination_type = FUNCTION_TOLERANCE; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -447,10 +471,12 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, if (!inner_iterations_were_useful && relative_decrease <= options_.min_relative_decrease) { iteration_summary.step_is_nonmonotonic = true; - VLOG(2) << "Non-monotonic step! 
" - << " relative_decrease: " << relative_decrease - << " historical_relative_decrease: " - << historical_relative_decrease; + VLOG_IF(2, is_not_silent) + << "Non-monotonic step! " + << " relative_decrease: " + << relative_decrease + << " historical_relative_decrease: " + << historical_relative_decrease; } } } @@ -468,22 +494,25 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, residuals.data(), gradient.data(), jacobian)) { - summary->termination_type = NUMERICAL_FAILURE; summary->error = "Terminating: Residual and Jacobian evaluation failed."; - LOG(WARNING) << summary->error; + summary->termination_type = NUMERICAL_FAILURE; + LOG_IF(WARNING, is_not_silent) << summary->error; return; } iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>(); + iteration_summary.gradient_norm = gradient.norm(); if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->error = + StringPrintf("Terminating: Gradient tolerance reached. " + "Relative gradient max norm: %e <= %e", + (iteration_summary.gradient_max_norm / + initial_gradient_max_norm), + options_.gradient_tolerance); summary->termination_type = GRADIENT_TOLERANCE; - VLOG(1) << "Terminating: Gradient tolerance reached." - << "Relative gradient max norm: " - << (iteration_summary.gradient_max_norm / - initial_gradient_max_norm) - << " <= " << options_.gradient_tolerance; + VLOG_IF(1, is_not_silent) << summary->error; return; } @@ -511,7 +540,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, if (cost > candidate_cost) { // The current iterate is has a higher cost than the // candidate iterate. Set the candidate to this point. 
- VLOG(2) << "Updating the candidate iterate to the current point."; + VLOG_IF(2, is_not_silent) + << "Updating the candidate iterate to the current point."; candidate_cost = cost; accumulated_candidate_model_cost_change = 0.0; } @@ -525,7 +555,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // iterate. if (num_consecutive_nonmonotonic_steps == options.max_consecutive_nonmonotonic_steps) { - VLOG(2) << "Resetting the reference point to the candidate point"; + VLOG_IF(2, is_not_silent) + << "Resetting the reference point to the candidate point"; reference_cost = candidate_cost; accumulated_reference_model_cost_change = accumulated_candidate_model_cost_change; @@ -544,8 +575,9 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.trust_region_radius = strategy->Radius(); if (iteration_summary.trust_region_radius < options_.min_trust_region_radius) { + summary->error = "Termination. Minimum trust region radius reached."; summary->termination_type = PARAMETER_TOLERANCE; - VLOG(1) << "Termination. 
Minimum trust region radius reached."; + VLOG_IF(1, is_not_silent) << summary->error; return; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h index 0dcdbfef016..998514fbfa5 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h @@ -33,7 +33,7 @@ #include <string> #include "ceres/internal/port.h" -#include "ceres/types.h" +#include "ceres/linear_solver.h" namespace ceres { namespace internal { @@ -106,7 +106,7 @@ class TrustRegionStrategy { Summary() : residual_norm(0.0), num_iterations(-1), - termination_type(FAILURE) { + termination_type(LINEAR_SOLVER_FAILURE) { } // If the trust region problem is, diff --git a/extern/libmv/third_party/ceres/internal/ceres/types.cc b/extern/libmv/third_party/ceres/internal/ceres/types.cc index a97f1a55e6b..5f3455fdd41 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/types.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/types.cc @@ -278,6 +278,25 @@ bool StringToCovarianceAlgorithmType( return false; } +const char* VisibilityClusteringTypeToString( + VisibilityClusteringType type) { + switch (type) { + CASESTR(CANONICAL_VIEWS); + CASESTR(SINGLE_LINKAGE); + default: + return "UNKNOWN"; + } +} + +bool StringToVisibilityClusteringType( + string value, + VisibilityClusteringType* type) { + UpperCase(&value); + STRENUM(CANONICAL_VIEWS); + STRENUM(SINGLE_LINKAGE); + return false; +} + const char* SolverTerminationTypeToString(SolverTerminationType type) { switch (type) { CASESTR(NO_CONVERGENCE); @@ -293,18 +312,6 @@ const char* SolverTerminationTypeToString(SolverTerminationType type) { } } -const char* LinearSolverTerminationTypeToString( - LinearSolverTerminationType type) { - switch (type) { - CASESTR(TOLERANCE); - CASESTR(MAX_ITERATIONS); - CASESTR(STAGNATION); - CASESTR(FAILURE); - default: - return 
"UNKNOWN"; - } -} - #undef CASESTR #undef STRENUM diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc index 7af133905b3..a3bebed2c1d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc @@ -43,12 +43,12 @@ #include "ceres/block_sparse_matrix.h" #include "ceres/canonical_views_clustering.h" #include "ceres/collections_port.h" -#include "ceres/detect_structure.h" #include "ceres/graph.h" #include "ceres/graph_algorithms.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/linear_solver.h" #include "ceres/schur_eliminator.h" +#include "ceres/single_linkage_clustering.h" #include "ceres/visibility.h" #include "glog/logging.h" @@ -61,8 +61,9 @@ namespace internal { // // This will require some more work on the clustering algorithm and // possibly some more refactoring of the code. 
-static const double kSizePenaltyWeight = 3.0; -static const double kSimilarityPenaltyWeight = 0.0; +static const double kCanonicalViewsSizePenaltyWeight = 3.0; +static const double kCanonicalViewsSimilarityPenaltyWeight = 0.0; +static const double kSingleLinkageMinSimilarity = 0.9; VisibilityBasedPreconditioner::VisibilityBasedPreconditioner( const CompressedRowBlockStructure& bs, @@ -188,17 +189,31 @@ void VisibilityBasedPreconditioner::ClusterCameras( scoped_ptr<Graph<int> > schur_complement_graph( CHECK_NOTNULL(CreateSchurComplementGraph(visibility))); - CanonicalViewsClusteringOptions options; - options.size_penalty_weight = kSizePenaltyWeight; - options.similarity_penalty_weight = kSimilarityPenaltyWeight; - - vector<int> centers; HashMap<int, int> membership; - ComputeCanonicalViewsClustering(*schur_complement_graph, - options, - ¢ers, - &membership); - num_clusters_ = centers.size(); + + if (options_.visibility_clustering_type == CANONICAL_VIEWS) { + vector<int> centers; + CanonicalViewsClusteringOptions clustering_options; + clustering_options.size_penalty_weight = + kCanonicalViewsSizePenaltyWeight; + clustering_options.similarity_penalty_weight = + kCanonicalViewsSimilarityPenaltyWeight; + ComputeCanonicalViewsClustering(clustering_options, + *schur_complement_graph, + ¢ers, + &membership); + num_clusters_ = centers.size(); + } else if (options_.visibility_clustering_type == SINGLE_LINKAGE) { + SingleLinkageClusteringOptions clustering_options; + clustering_options.min_similarity = + kSingleLinkageMinSimilarity; + num_clusters_ = ComputeSingleLinkageClustering(clustering_options, + *schur_complement_graph, + &membership); + } else { + LOG(FATAL) << "Unknown visibility clustering algorithm."; + } + CHECK_GT(num_clusters_, 0); VLOG(2) << "num_clusters: " << num_clusters_; FlattenMembershipMap(membership, &cluster_membership_); @@ -313,14 +328,11 @@ void VisibilityBasedPreconditioner::InitEliminator( LinearSolver::Options eliminator_options; 
eliminator_options.elimination_groups = options_.elimination_groups; eliminator_options.num_threads = options_.num_threads; - - DetectStructure(bs, options_.elimination_groups[0], - &eliminator_options.row_block_size, - &eliminator_options.e_block_size, - &eliminator_options.f_block_size); - + eliminator_options.e_block_size = options_.e_block_size; + eliminator_options.f_block_size = options_.f_block_size; + eliminator_options.row_block_size = options_.row_block_size; eliminator_.reset(SchurEliminatorBase::Create(eliminator_options)); - eliminator_->Init(options_.elimination_groups[0], &bs); + eliminator_->Init(eliminator_options.elimination_groups[0], &bs); } // Update the values of the preconditioner matrix and factorize it. @@ -356,14 +368,18 @@ bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A, // // Doing the factorization like this saves us matrix mass when // scaling is not needed, which is quite often in our experience. - bool status = Factorize(); + LinearSolverTerminationType status = Factorize(); + + if (status == LINEAR_SOLVER_FATAL_ERROR) { + return false; + } // The scaling only affects the tri-diagonal case, since // ScaleOffDiagonalBlocks only pays attenion to the cells that // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI // case, the preconditioner is guaranteed to be positive // semidefinite. - if (!status && options_.type == CLUSTER_TRIDIAGONAL) { + if (status == LINEAR_SOLVER_FAILURE && options_.type == CLUSTER_TRIDIAGONAL) { VLOG(1) << "Unscaled factorization failed. 
Retrying with off-diagonal " << "scaling"; ScaleOffDiagonalCells(); @@ -371,7 +387,7 @@ bool VisibilityBasedPreconditioner::UpdateImpl(const BlockSparseMatrix& A, } VLOG(2) << "Compute time: " << time(NULL) - start_time; - return status; + return (status == LINEAR_SOLVER_SUCCESS); } // Consider the preconditioner matrix as meta-block matrix, whose @@ -408,7 +424,7 @@ void VisibilityBasedPreconditioner::ScaleOffDiagonalCells() { // Compute the sparse Cholesky factorization of the preconditioner // matrix. -bool VisibilityBasedPreconditioner::Factorize() { +LinearSolverTerminationType VisibilityBasedPreconditioner::Factorize() { // Extract the TripletSparseMatrix that is used for actually storing // S and convert it into a cholmod_sparse object. cholmod_sparse* lhs = ss_.CreateSparseMatrix( @@ -419,14 +435,21 @@ bool VisibilityBasedPreconditioner::Factorize() { // matrix contains the values. lhs->stype = 1; + // TODO(sameeragarwal): Refactor to pipe this up and out. + string status; + // Symbolic factorization is computed if we don't already have one handy. if (factor_ == NULL) { - factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_); + factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_, &status); } - bool status = ss_.Cholesky(lhs, factor_); + const LinearSolverTerminationType termination_type = + (factor_ != NULL) + ? ss_.Cholesky(lhs, factor_, &status) + : LINEAR_SOLVER_FATAL_ERROR; + ss_.Free(lhs); - return status; + return termination_type; } void VisibilityBasedPreconditioner::RightMultiply(const double* x, @@ -437,7 +460,10 @@ void VisibilityBasedPreconditioner::RightMultiply(const double* x, const int num_rows = m_->num_rows(); memcpy(CHECK_NOTNULL(tmp_rhs_)->x, x, m_->num_rows() * sizeof(*x)); - cholmod_dense* solution = CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_)); + // TODO(sameeragarwal): Better error handling. 
+ string status; + cholmod_dense* solution = + CHECK_NOTNULL(ss->Solve(factor_, tmp_rhs_, &status)); memcpy(y, solution->x, sizeof(*y) * num_rows); ss->Free(solution); } @@ -546,11 +572,17 @@ Graph<int>* VisibilityBasedPreconditioner::CreateClusterGraph( // cluster ids. Convert this into a flat array for quick lookup. It is // possible that some of the vertices may not be associated with any // cluster. In that case, randomly assign them to one of the clusters. +// +// The cluster ids can be non-contiguous integers. So as we flatten +// the membership_map, we also map the cluster ids to a contiguous set +// of integers so that the cluster ids are in [0, num_clusters_). void VisibilityBasedPreconditioner::FlattenMembershipMap( const HashMap<int, int>& membership_map, vector<int>* membership_vector) const { CHECK_NOTNULL(membership_vector)->resize(0); membership_vector->resize(num_blocks_, -1); + + HashMap<int, int> cluster_id_to_index; // Iterate over the cluster membership map and update the // cluster_membership_ vector assigning arbitrary cluster ids to // the few cameras that have not been clustered. 
@@ -571,7 +603,16 @@ void VisibilityBasedPreconditioner::FlattenMembershipMap( cluster_id = camera_id % num_clusters_; } - membership_vector->at(camera_id) = cluster_id; + const int index = FindWithDefault(cluster_id_to_index, + cluster_id, + cluster_id_to_index.size()); + + if (index == cluster_id_to_index.size()) { + cluster_id_to_index[cluster_id] = index; + } + + CHECK_LT(index, num_clusters_); + membership_vector->at(camera_id) = index; } } diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h index c58b1a7a90a..70cea83bf56 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h @@ -55,6 +55,7 @@ #include "ceres/graph.h" #include "ceres/internal/macros.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/linear_solver.h" #include "ceres/preconditioner.h" #include "ceres/suitesparse.h" @@ -147,7 +148,7 @@ class VisibilityBasedPreconditioner : public BlockSparseMatrixPreconditioner { void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs); void InitStorage(const CompressedRowBlockStructure& bs); void InitEliminator(const CompressedRowBlockStructure& bs); - bool Factorize(); + LinearSolverTerminationType Factorize(); void ScaleOffDiagonalCells(); void ClusterCameras(const vector< set<int> >& visibility); |