diff options
122 files changed, 8745 insertions, 2211 deletions
diff --git a/extern/libmv/third_party/ceres/CMakeLists.txt b/extern/libmv/third_party/ceres/CMakeLists.txt index f0c0a4b269f..5e58c2964ce 100644 --- a/extern/libmv/third_party/ceres/CMakeLists.txt +++ b/extern/libmv/third_party/ceres/CMakeLists.txt @@ -54,6 +54,7 @@ set(SRC internal/ceres/compressed_row_sparse_matrix.cc internal/ceres/conditioned_cost_function.cc internal/ceres/conjugate_gradients_solver.cc + internal/ceres/coordinate_descent_minimizer.cc internal/ceres/corrector.cc internal/ceres/cxsparse.cc internal/ceres/dense_normal_cholesky_solver.cc @@ -71,11 +72,18 @@ set(SRC internal/ceres/linear_least_squares_problems.cc internal/ceres/linear_operator.cc internal/ceres/linear_solver.cc + internal/ceres/line_search.cc + internal/ceres/line_search_direction.cc + internal/ceres/line_search_minimizer.cc internal/ceres/local_parameterization.cc internal/ceres/loss_function.cc + internal/ceres/low_rank_inverse_hessian.cc + internal/ceres/minimizer.cc internal/ceres/normal_prior.cc + internal/ceres/parameter_block_ordering.cc internal/ceres/partitioned_matrix_view.cc - internal/ceres/polynomial_solver.cc + internal/ceres/polynomial.cc + internal/ceres/preconditioner.cc internal/ceres/problem.cc internal/ceres/problem_impl.cc internal/ceres/program.cc @@ -84,7 +92,7 @@ set(SRC internal/ceres/runtime_numeric_diff_cost_function.cc internal/ceres/schur_complement_solver.cc internal/ceres/schur_eliminator.cc - internal/ceres/schur_ordering.cc + internal/ceres/schur_jacobi_preconditioner.cc internal/ceres/scratch_evaluate_preparer.cc internal/ceres/solver.cc internal/ceres/solver_impl.cc @@ -99,26 +107,34 @@ set(SRC internal/ceres/types.cc internal/ceres/visibility_based_preconditioner.cc internal/ceres/visibility.cc + internal/ceres/wall_time.cc include/ceres/autodiff_cost_function.h include/ceres/ceres.h include/ceres/conditioned_cost_function.h include/ceres/cost_function.h + include/ceres/cost_function_to_functor.h include/ceres/crs_matrix.h + 
include/ceres/dynamic_autodiff_cost_function.h include/ceres/fpclassify.h + include/ceres/gradient_checker.h include/ceres/internal/autodiff.h include/ceres/internal/eigen.h include/ceres/internal/fixed_array.h include/ceres/internal/macros.h include/ceres/internal/manual_constructor.h + include/ceres/internal/numeric_diff.h include/ceres/internal/port.h include/ceres/internal/scoped_ptr.h + include/ceres/internal/variadic_evaluate.h include/ceres/iteration_callback.h include/ceres/jet.h include/ceres/local_parameterization.h include/ceres/loss_function.h include/ceres/normal_prior.h include/ceres/numeric_diff_cost_function.h + include/ceres/numeric_diff_functor.h + include/ceres/ordered_groups.h include/ceres/problem.h include/ceres/rotation.h include/ceres/sized_cost_function.h @@ -141,6 +157,7 @@ set(SRC internal/ceres/compressed_row_jacobian_writer.h internal/ceres/compressed_row_sparse_matrix.h internal/ceres/conjugate_gradients_solver.h + internal/ceres/coordinate_descent_minimizer.h internal/ceres/corrector.h internal/ceres/cxsparse.h internal/ceres/dense_jacobian_writer.h @@ -150,6 +167,7 @@ set(SRC internal/ceres/detect_structure.h internal/ceres/dogleg_strategy.h internal/ceres/evaluator.h + internal/ceres/execution_summary.h internal/ceres/file.h internal/ceres/gradient_checking_cost_function.h internal/ceres/graph_algorithms.h @@ -161,13 +179,19 @@ set(SRC internal/ceres/linear_least_squares_problems.h internal/ceres/linear_operator.h internal/ceres/linear_solver.h + internal/ceres/line_search_direction.h + internal/ceres/line_search.h + internal/ceres/line_search_minimizer.h + internal/ceres/low_rank_inverse_hessian.h internal/ceres/map_util.h internal/ceres/matrix_proto.h internal/ceres/minimizer.h internal/ceres/mutex.h internal/ceres/parameter_block.h + internal/ceres/parameter_block_ordering.h internal/ceres/partitioned_matrix_view.h - internal/ceres/polynomial_solver.h + internal/ceres/polynomial.h + internal/ceres/preconditioner.h 
internal/ceres/problem_impl.h internal/ceres/program_evaluator.h internal/ceres/program.h @@ -178,7 +202,7 @@ set(SRC internal/ceres/schur_complement_solver.h internal/ceres/schur_eliminator.h internal/ceres/schur_eliminator_impl.h - internal/ceres/schur_ordering.h + internal/ceres/schur_jacobi_preconditioner.h internal/ceres/scratch_evaluate_preparer.h internal/ceres/solver_impl.h internal/ceres/sparse_matrix.h @@ -192,6 +216,7 @@ set(SRC internal/ceres/trust_region_strategy.h internal/ceres/visibility_based_preconditioner.h internal/ceres/visibility.h + internal/ceres/wall_time.h ) #if(FALSE) diff --git a/extern/libmv/third_party/ceres/ChangeLog b/extern/libmv/third_party/ceres/ChangeLog index 8b84328cf98..ebfb771be02 100644 --- a/extern/libmv/third_party/ceres/ChangeLog +++ b/extern/libmv/third_party/ceres/ChangeLog @@ -1,524 +1,572 @@ -commit 552f9f85bba89f00ca307bc18fbda1dff23bd0e4 +commit d2a5195b512164fec286c6a52b40d7766977caa3 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 31 07:27:22 2012 -0700 +Date: Sun Feb 24 15:09:17 2013 -0800 - Various minor bug fixes to the solver logic. + Version history update. - 1. CostFunction returning false is handled better. - If only the cost is being evaluated, it is possible to - use the false value as an infinite value signal/outside - a region of validity. This allows a weak form of constraint - handling. Useful for example in handling infinities. + Change-Id: I477ec05a78ca4cd735a525253c9b6adfa3bddea7 + +commit 2160c5b757c44206c6face6ca62d381f1db7a291 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Sun Feb 24 14:15:45 2013 -0800 + + Minor release script fixes. - 2. Changed the way how the slop around zero when model_cost - is larger than the current cost. Relative instead of absolute - tolerances are used. The same logic is propagated how the - corresponding clamping of the model_cost is done. 
+ Change-Id: Ifd0a7f4f584c85d4d9574eca46094b372a8d7aff + +commit b53c9667f508c125b8aa833e7a063fa44ef8a98e +Author: Sergey Sharybin <sergey.vfx@gmail.com> +Date: Mon Feb 25 01:14:26 2013 +0600 + + Solve No Previous Prototype GCC warning - 3. Fixed a minor indexing bug in nist.cc. + In some cases there were missing includes of own + header files from implementation files. - 4. Some minor logging fixes to nist.cc to make it more - compatible with the rest of ceres. + In other cases moved function which are only used + within single file into an anonymous namespace. - Together these changes, take the successful solve count from - 41/54 to 46/54 and eliminate all NUMERICAL_FAILURE problems. + Change-Id: I2c6b411bcfbc521e2a5f21265dc8e009a548b1c8 + +commit 267ccc45a3e875bf87832a8ad615be690b4926d3 +Author: Sergey Sharybin <sergey.vfx@gmail.com> +Date: Mon Feb 25 01:04:16 2013 +0600 + + Fix for MinGW build on Windows - Change-Id: If94170ea4731af5b243805c0200963dd31aa94a7 + GG_LONGLONG and GG_ULONGLONG shall use LL and ULL suffixes, + since MinGW is actuall a GCC compiler. + + Solved by checking whether compilation happens with MinGW + or not using standard MinGW's __MINGW32__ and __MINGW64__ + definitions. + + Change-Id: I789b34f6342a56ba42f4b280f7242700022ab7a1 -commit 0b776b5cc9634d3b88d623905b96006f7647ce3e +commit 509f68cfe3fd13b794c4e67ff38c761407c858cf Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 30 15:26:17 2012 -0700 +Date: Wed Feb 20 01:39:03 2013 -0800 - Update docs. + Problem::Evaluate implementation. + + 1. Add Problem::Evaluate and tests. + 2. Remove Solver::Summary::initial/final_* + 3. Remove Solver::Options::return_* members. + 4. Various cpplint cleanups. 
- Change-Id: I69d50bcd37aed3bea2190ca614f023e83172901b + Change-Id: I4266de53489896f72d9c6798c5efde6748d68a47 -commit 2d7176ad7c8fb7238ca8abd6de73415d95877494 -Author: Petter Strandmark <petter.strandmark@gmail.com> -Date: Thu Aug 30 19:51:24 2012 -0700 +commit d4a0bf86d688d1b68e00ff302858de5a4e0d9727 +Author: Keir Mierle <mierle@gmail.com> +Date: Sun Feb 24 10:35:44 2013 -0800 - max_consecutive_nonmonotonic_steps should be int + Fix threading build on Windows. + + On Windows, including the "windows.h" header defines an enormous number of + symbols; some of which are macros with common names. In particular, "ERROR" and + "min" and "max" get defined. This causes clashes when user code references + these names in a context other than the intended use in windows.h. + + To deal with this, the Microsoft engineers added the ability to control the + definition of these symbols by adding extra defines. In particular, including + windows.h in the following way - Found via Visual Studio warning. + #define NOGDI + #define NOMINMAX - Change-Id: Id2cd7de562dfc8cd35df5d5f5220dd2d7350eb2c + will reduce the number of macros defined. This way they will not conflict with + other uses in Ceres. For example, numeric_limits<double>::max() is impossible + to call without defining NOMINMAX. + + Change-Id: I166f5d3bb6dc0e2e4b2ebf800fb19e49206f7874 -commit 1a89bcc94e88933f89b20427a45bc40cdd23c056 +commit beb4505311011130a7e54632137b0fbb5824cc9b Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 30 15:26:17 2012 -0700 +Date: Fri Feb 22 13:37:01 2013 -0800 - Better reporting on the NIST problems. + Minor fixes + + Based on William Rucklidge's review, including + a nasty bug in parameter block removal. 
- Change-Id: I7cf774ec3242c0612dbe52fc233c3fc6cff3f031 + Change-Id: I3a692e589f600ff560ecae9fa85bb0b76063d403 -commit ea11704857a1e4a735e096896e4d775d83981499 +commit 9a88bd7c4b40e2a1e0cd9b0dc09a3517c467e04e Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 29 18:18:48 2012 -0700 +Date: Tue Feb 19 13:09:12 2013 -0800 - Basic harness for testing NIST problems. + Minor bug fixes - Change-Id: I5baaa24dbf0506ceedf4a9be4ed17c84974d71a1 + Change-Id: I94e4521adf76a6c77db954c4a8955168e9d37b55 -commit 98bf14d2b95386c2c4a6c29154637943dae4c36c +commit 956ed7e8f2054623615e6ae3601055d613897f26 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 30 10:26:44 2012 -0700 +Date: Tue Feb 19 07:06:15 2013 -0800 - Miscellaneous fixes. + Various minor fixes. + + 1. Unused variable warnings and fixes. + 2. Minor documentation update. - Change-Id: I521e11f2d20bf24960bbc6b5dab4ec8bb1503d23 + Change-Id: I815588a5806df1030a7c8750f4fb594c503f8998 -commit 1e3cbd9a4442cdd8fda43a7fb452f19dac8c74af -Author: Petter Strandmark <strandmark@google.com> -Date: Wed Aug 29 09:39:56 2012 -0700 +commit 3e2c4ef9ad35e94198f4f3367b99fd91e26996a1 +Author: Keir Mierle <mierle@gmail.com> +Date: Sun Feb 17 12:37:55 2013 -0800 - Caching the symbolic Cholesky factorization when using CXSparse - - Average factorization times for bundle adjustment test problem: - SuiteSparse: 0.2794 s. - CXSparse: 0.4039 s. - CXSparse cached: 0.2399 s. + Add adapters for column/row-major matrices to rotation.h - CXSparse will still be slower, though, because it has to compute - the transpose and J^T * J. + This patch introduces a matrix wrapper (MatrixAdapter) that allows to + transparently pass pointers to row-major or column-major matrices + to the conversion functions. 
- Change-Id: If9cdaa3dd520bee84b56e5fd4953b56a93db6bde + Change-Id: I7f1683a8722088cffcc542f593ce7eb46fca109b -commit 8b64140878ccd1e183d3715c38942a81fdecefde -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 29 05:41:22 2012 -0700 +commit 04938efe4bedec112083c5ceb227ba004f96bd01 +Author: Keir Mierle <mierle@gmail.com> +Date: Sun Feb 17 12:37:55 2013 -0800 - Documentation update + Add support for removing parameter and residual blocks. - Change-Id: I271a0422e7f6f42bcfd1dc6b5dc10c7a18f6a179 - -commit a5353acd85a9fd19370b3d74035d87b0f0bac230 -Author: Petter Strandmark <petter.strandmark@gmail.com> -Date: Tue Aug 28 18:16:41 2012 -0700 - - Adding gflags include to test_util.cc + This adds support for removing parameter and residual blocks. + There are two modes of operation: in the first, removals of + paremeter blocks are expensive, since each remove requires + scanning all residual blocks to find ones that depend on the + removed parameter. In the other, extra memory is sacrificed to + maintain a list of the residuals a parameter block depends on, + removing the need to scan. In both cases, removing residual blocks + is fast. - test_util seems to need gflags. + As a caveat, any removals destroys the ordering of the parameters, + so the residuals or jacobian returned from Solver::Solve() is + meaningless. There is some debate on the best way to handle this; + the details remain for a future change. - Change-Id: I0c4757960f8ac69ad599c138aea58e3c88a4ea28 - -commit 87ca1b2ba28ec512752bbcf5fc994ce1434eb765 -Author: Petter Strandmark <petter.strandmark@gmail.com> -Date: Tue Aug 28 18:05:20 2012 -0700 - - Changing random.h to use cstdlib for Windows compability. + This also adds some overhead, even in the case that fast removals + are not requested: - As discussed with Sameer today. + - 1 int32 to each residual, to track its position in the program. + - 1 pointer to each parameter, to store the dependent residuals. 
- Change-Id: If3d0284830c6591c71cc77b8400cafb45c0da61f + Change-Id: I71dcac8656679329a15ee7fc12c0df07030c12af -commit aeb00a07323808a0a1816e733ad18a87d5109ea3 -Author: Petter Strandmark <strandmark@google.com> -Date: Mon Aug 27 22:22:57 2012 -0700 +commit fa21df8cd969bb257b87c9ef7c0147d8d5ea8725 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 18 08:48:52 2013 -0800 - Removing gomp for Visual Studio + Add script for building documentation. + + Update make_release - Linking currently fails in Visual Studio due to a missing library - "gomp.lib". This is not needed in Visual Studio. OpenMP works - without it. + Minor documentation fixes. - Change-Id: I39e204a8dd4f1b7425df7d4b222d86a8bb961432 + Change-Id: I1248ec3f58be66b5929aee6f2aa392c15d53ed83 -commit 6f362464ba99b800494d2f15c27768a342ddaa68 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Tue Aug 28 01:03:38 2012 +0200 +commit 290b975d1d4eba44205bbeb0fa6b3ce8a6fa4a0c +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Sun Feb 17 16:50:37 2013 -0800 - Add some tests for DoglegStrategy. + Preconditioner refactoring. - Not necessarily a complete set. + 1. Added a Preconditioner interface. + 2. SCHUR_JACOBI is now its own class and is independent of + SuiteSparse. - Change-Id: I14eb3a38c6fe976c8212f3934655411b6d1e0aa4 + Change-Id: Id912ab19cf3736e61d1b90ddaf5bfba33e877ec4 -commit 122cf836a6dc9726489ce2fbecc6143bddc1caaf +commit d010de543530001fa917501a13ba02879c8ea52f Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 24 16:28:27 2012 -0700 +Date: Fri Feb 15 14:26:56 2013 -0800 - Documentation update. + Solver::Summary::FullReport() supports line search now. 
- Change-Id: I0a3c5ae4bc981a8f5bdd5a8905f923dc5f09a024 + Change-Id: Ib08d300198b85d9732cfb5785af4235ca4bd5226 -commit 69081719f73da8de2935774a42d237837a91952a -Author: Keir Mierle <mierle@gmail.com> -Date: Mon Aug 27 13:28:56 2012 -0700 +commit fbbea464d1c9575d8224220d3e61f92d93fe9739 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Fri Feb 15 11:25:03 2013 -0800 - Remove unnecessary overload for hash<> + Update documentation. - The overload for pointers in hash tables was applied in normal - usage of schur_ordering.cc. However, the tests did not include the - overload since they only included collections_port.h. As a result, - the routines in schur_ordering.cc were using a different hash - function than that inside the tests. - - The fix is to remove the specialization. If this breaks one of the - compiler configurations, we will find a workaround at that time. - - Change-Id: Idbf60415d5e2aec0c865b514ad0c577d21b91405 + Change-Id: Idb03741fab9facbbbda85d5a82723f0b4c1c6c60 -commit 1762420b6ed76b1c4d30b913b2cac1927b666534 +commit 8e1f83c4c457fb7238eb342eab744c5570b73c4d Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 22 10:01:31 2012 -0700 +Date: Fri Feb 15 08:35:40 2013 -0800 - Update changelog. + Speed up Problem construction and destruction. - Change-Id: Idf5af69d5a9dbe35f58e30a8afcbfcd29bb7ebfe + Change-Id: I3147b0b60eedf40f8453d5a39ff04a572c445a2f -commit 976ab7aca908309b8282cb40bc080ca859136854 -Author: Keir Mierle <mierle@gmail.com> -Date: Thu Aug 23 18:21:36 2012 -0700 +commit efb47f39c31f0ef1bb9c015c8c0d114153df6379 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Thu Feb 14 19:44:11 2013 -0800 - Remove Google-era vestigial unit test. 
+ Documentation update - Change-Id: Ia7a295a5c759a17c1675a3055d287d3e40e9e0fe + Change-Id: I0fec43bff4fe0ea6cd2d2a8b34dac2330a517da0 -commit 6ad6257de0e2152ac5e77dc003758de45187d6ea -Author: Keir Mierle <mierle@gmail.com> -Date: Wed Aug 22 11:10:31 2012 -0700 +commit be418a336cae5672111e0f6989e6d8d6c1fa24a6 +Author: Markus Moll <markus.moll@esat.kuleuven.be> +Date: Fri Feb 15 17:19:28 2013 +0100 - Add a workaround for an Android NDK compiler bug. - - On certain NDK build configurations, one of the innermost - parts of the Schur eliminator would get compiled - incorrectly. The compiler changed a -= to a +=. + Fix evaluation of initial cost and corresponding test - The normal Ceres unit tests caught the problem; however, - since it is not possible to build the tests with the NDK - (only with the standalone toolchain) this was difficult to - track down. Finding the issue involved pasting the schur - eliminator unit test inside of solver_impl.cc and other such - hacks. + Commit f102a68e411d11b4864e17b69a2d781e9c2692ad seems to have introduced + a bug in both solver_impl.cc and solver_impl_test.cc + solver_impl_test showed 3 errors, where two were due to ceres NOT + failing when the test expected that, and one was due to the initial cost + being wrong (-1 instead of 0.5) + Ceres now does not attempt to evaluate the initial cost if + options.return_initial_xxx is not set. It therefore did not fail in + the tests. + It also seems that the CERES_EVALUATE macro erroneously always sets + final_cost, even when called with 'initial' as argument. - Change-Id: Ie91bb545d74fe39f0c8cbd1a6eb69ee4d8b25fb2 + Change-Id: Ia3c3eeb476e7023a3f80b201124010d6c67e9824 -commit aecb2dc92b4aa7f3bf77a1ac918e62953602392b +commit 974513a41ff1ddc671d3dc6aa09ce708bbe447da Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 22 10:08:17 2012 -0700 +Date: Tue Feb 12 14:22:40 2013 -0800 - Fix relative path bug in bibtex call. 
+ Bug fix in DynamicAutoDiffCostFunction + + Add handling of constant parameter blocks. - Change-Id: I0d31786564320a6831259bcdf4c75a6b665c43ad + Change-Id: I8b2ea79f47e190604fc4bed27705798240689f71 -commit 1e2892009e591804df6286caebd5c960e7e3b099 -Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Tue Aug 21 18:00:54 2012 -0700 +commit 3130b3cea4028c71d9ae18b7465d7627f29fef7d +Author: Keir Mierle <mierle@gmail.com> +Date: Mon Feb 11 19:39:29 2013 -0800 - Update Summary::FullReport to report dogleg type. + Add support for dynamic autodiff - Change-Id: I0b4be8d7486c1c4b36b299693b3fe8b0d3426537 + Change-Id: I17d573696172ab691a9653db99a620e4bc1bd0d0 -commit 295ade1122a86b83e1ea605d5ca394f315874717 +commit c58e6dc3ea550302c8151003b17e9bc2a1acc316 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Wed Aug 22 06:51:22 2012 -0700 +Date: Mon Feb 11 16:41:06 2013 -0800 - Fix Eigen3 Row/Column Major storage issue. - - Eigen3 does not allow column vectors to be stored in row-major - format. NumericDiffCostFunction by default stores its Jacobian - matrices in row-major format. This works fine if the residual - contains more than one variable. But if the residual block - depends on one variable and has more than one residuals, the - resulting Jacobian matrix is a column matrix in row-major format - resulting in a compile time error. + More refined event logging in solver_impl.cc - The fix is to check the template parameters and switch to column-major - storage as needed. - - Thanks to Lena Gieseke for reporting this. 
- - Change-Id: Icc51c5b38e1f3609e0e1ecb3c4e4a02aecd72c3b + Change-Id: Ie3061c921c006d2600d16185c690f52ccf816f68 -commit 9ad27e8e9fb1bbd2054e2f6ae37623e01428f1c0 -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Tue Aug 21 09:56:30 2012 +0200 +commit f102a68e411d11b4864e17b69a2d781e9c2692ad +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 11 15:08:40 2013 -0800 - Add one uninstall target to remove all installed files + Remove extraneous initial and final evals. - Change-Id: Ifcf89a6c27b25f28403d95a50e29c093a525298f + Change-Id: I80ed87435f399cbf452c68be7ea1e7139696aa4a -commit 0c3a748ee49e04fe334f8f5a433649d18003d550 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Tue Aug 21 14:44:59 2012 +0200 +commit 0593747ee09e21a9c0a2b604d51e21a6cdd21315 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 11 13:57:12 2013 -0800 - Allow equal lower and upper bound for diagonal scaling. + Fix a memory leak in cxsparse.cc - This way, setting the lower and upper bound both to 1.0, one can disable - the automatic trust region scaling. + Thanks to Alexander Mordvintsev for reporting it. - Change-Id: Ifa317a6911b813a89c1cf7fdfde25af603705319 + Change-Id: Ia872be42ce80209e46722fc16a928496cf97e256 -commit 3d644b76adefac6475b91dc53c3ae5e01c4f4d66 -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Thu Aug 16 17:33:21 2012 +0200 +commit 0abfb8f46f534b05413bb4d64b960d6fd0a9befb +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 11 13:40:04 2013 -0800 - Install headers, libraries and pdf + Delete the tex documentation. 
- Headers are installed in ${CMAKE_INSTALL_PREFIX}/include/ceres - Libraries are installed in ${CMAKE_INSTALL_PREFIX}/lib - pdf is installed in ${CMAKE_INSTALL_PREFIX}/share/ceres/docs - - Change-Id: Ic175f2c2f5fa86820a1e8c64c2ed171f4a302a68 + Change-Id: I15c78a8b33c5fd94941238814ac023a8fb251a20 -commit d2fb5adea4d8c2aeb43c4289c6976798a54d3cf1 -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Fri Aug 17 10:11:02 2012 +0200 +commit 085cd4a6641c404334d17e5ea38f9e5b68a06ba7 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Wed Feb 6 14:31:07 2013 -0800 - Configure gerrit hook at CMake time + Rewrite of the tutorial. - If the source directory is a clone, at CMake time the commit-msg hook gets - downloaded and installed in the right location. + 1. Quicker starting point. + 2. Better discussion of derivatives. + 3. Better hyperlinking to code and class documentation. + 4. New robust estimation example. + 5. Better naming of example code. + 6. Removed dependency on gflags in all the core examples covered + in the tutorial. - Change-Id: I5fee17d050ca22d8b92a49fdcc2a1cd6659f209b + Change-Id: Ibf3c7fe946fa2b4d22f8916a9366df267d34ca26 -commit 73166098fc4b1072adc30321c666188a3909c43c -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Mon Aug 20 15:40:41 2012 +0200 +commit c0fdc9753909fc37fed2cb5e0fcc02fc65789d68 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Wed Feb 6 14:31:07 2013 -0800 - Add one CMake option to build the examples. + Update nist.cc to better evaluate results. - Currently the examples are always built. For external projects, it is useful - not to compile the examples. + Ceres beats Minpack and HBN handily. 
- Change-Id: I41d3bde19c7e742818e60f78222d39c43992ca8b + Change-Id: I7df8a47b753202ed0b53ab128ce48466bf9f8083 -commit 86d4f1ba41ef14eb1b6b61a7936af83387b35eb2 -Author: Keir Mierle <mierle@gmail.com> -Date: Mon Aug 20 11:52:04 2012 -0700 +commit d91b671798125fd4889914d92a29cf0f7a5fef21 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Wed Feb 6 01:08:40 2013 -0800 - Add missing return statement. + More small changes + More small changes to the docs. - Change-Id: I5eaf718318e27040e3c97e32ee46cf0a11176a37 - -commit 51eb229da34187a4e8ce73ed9cc0e731998bb2be -Author: Keir Mierle <mierle@gmail.com> -Date: Mon Aug 20 11:46:12 2012 -0700 - - Add Program::ToString() to aid debugging. + 1. Better landing page. + 2. Minor tweaks to the side bar. + 3. Reference to more example code. + 4. Local MathJax references. - Change-Id: I0ab37ed2fe0947ca87a152919d4e7dc9b56dedc6 + Change-Id: I39b9436dc2803732a875bbbee7f15802c4934031 -commit bcc7100635e2047dc2b77df19a4ded8a6ab4d4b9 -Author: Keir Mierle <mierle@gmail.com> -Date: Mon Aug 20 11:45:04 2012 -0700 +commit 42a84b87fa5cc34551244a3b2b6a3e1f13a29514 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Fri Feb 1 12:22:53 2013 -0800 - Ignore minted.sty. + Expand reporting of timing information. - Change-Id: I2467a6f801812b9007b51bf14b00757f026e4322 + 1. Add an ExecutionSummary object to record execution + information about Ceres objects. + 2. Add an EventLogger object to log events in a function call. + 3. Add a ScopedExecutionTimer object to log times in ExecutionSummary. + 4. Instrument ProgramEvaluator and all the linear solvers + to report their timing statistics. + 5. Connect the timing statistics to Summary::FullReport. + 6. Add high precision timer on unix systems using + gettimeofday() call. + 7. Various minor clean ups all around. 
+ + Change-Id: I5e09804b730b09535484124be7dbc1c58eccd1d4 -commit 9705a736dd3d6fbead0d8a6ff77102c69bbcdc08 -Author: Keir Mierle <mierle@gmail.com> -Date: Mon Aug 20 11:24:05 2012 -0700 +commit 08c891fcb6ea1bf66e6d4619273765a644605dfc +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 4 20:18:58 2013 -0800 - Add ParameterBlock::ToString() to aid debugging. + Various small changes. - Change-Id: Id3f5cb27b855c536dd65a986f345bd8eb2799dfa + 1. Compact build instructions. + 2. Lots of small edits by Simon Fuhrmann. + + Change-Id: I8c0c67922021041dcf7f4ecdb6c6e6dd2e2fd7e5 -commit 0c714a70e6123ceb68e5cfcd3cfbee0d09deb1db +commit e2e857ad6be322e9cf750d4b11ccf10800e57d96 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 20 11:18:16 2012 -0700 +Date: Mon Feb 4 19:40:45 2013 -0800 - Fix blanks before private in loss_function.h + Sidebar has global table of contents. - Change-Id: I068bed6431bc7c9b7958af391655df61499000b2 + Change-Id: I7fe9053868a4660b0db8d7607ee618fc30ddaefd -commit 51cf7cbe3bac45c6807c2703a2fc3175d76a1b47 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Mon Aug 20 20:10:20 2012 +0200 +commit b9182147d96f865673c2756ced4cbb127ca082a3 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Feb 4 17:55:25 2013 -0800 - Add the two-dimensional subspace search to DoglegStrategy + Change the theme to be closer to ReadTheDocs.org - Change-Id: I5163744c100cdf07dd93343d0734ffe0e80364f3 + Change-Id: I61a76f5b5e5c292b54fdf51b66940ce80bd1cd5f -commit ad1f7b772e559a911ac3a3b078b0aee1836fe785 +commit 3d87b72c895835bbfc10965d50dc96608632114d Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 20 11:10:34 2012 -0700 +Date: Sat Feb 2 00:49:31 2013 -0800 - Add ArcTanLoss, TolerantLoss and ComposedLossFunction. + Convert documentation from LaTeX to Sphinx. - Based on work by James Roseborough. 
+ A live version of the doc can be found at - Change-Id: Idc4e0b099028f67702bfc7fe3e43dbd96b6f9256 + http://homes.cs.washington.edu/~sagarwal/ceres-solver/ + + As it stands, the documentation has better hyperlinking + and coverage than the latex documentation now. + + Change-Id: I7ede3aa83b9b9ef25104caf331e5727b4f5beae5 -commit 05292bf8fc5208b86b4a13544615b584f6efa936 +commit 71c8058478311ff9b3087360827e048dec5dd69a Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Mon Aug 20 07:40:45 2012 -0700 +Date: Thu Jan 31 17:33:01 2013 -0800 - Add a TrustRegionStrategy::Summary object. + Remove ExecutionSummary from Evaluator and LinearSolver - Change-Id: I7caee35a3408ee4a0ec16ba407410d822929340d + Change-Id: If4dbaf516a8b14e0a79e1a2116ce66a99ed4a592 -commit b12b906c4d21c3949f0dce62c4c0d083c8edecf1 -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Wed Aug 15 16:27:38 2012 +0200 +commit fa1c31eee33051d6483bc90fa7b66c3664b23bf3 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Tue Jan 29 17:24:54 2013 -0800 - Add one option to generate the PDF from CMake at build time + Correct the documentation for crs_matrix.h - Make sure pygmentize is installed + Thanks to Joydeep Biswas for reporting this. - Change-Id: I068ba45c33a8e96acc906a464b12d10d58b3e231 + Change-Id: Iae5fc2274644aab40f2f922a671f65da15ae71fc -commit b9f15a59361c609ffc4a328aea9be3d265b5da81 +commit bdd87c03ed1cbac62990bf79aa6faed0a132bba9 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Sat Aug 18 13:06:19 2012 -0700 +Date: Tue Jan 29 16:24:31 2013 -0800 - Add a dense Cholesky factorization based linear solver. + Add an ExecutionSummary object that the Evaluator and LinearSolver can use to + report execution statistics of all kinds. 
- For problems with a small number of variables, but a large - number of residuals, it is sometimes beneficial to use the - Cholesky factorization on the normal equations, instead of - the dense QR factorization of the Jacobian, even though it - is numerically the better thing to do. + Currently a single map which maps arbitrary strings to doubles is supported, + which allows for precise timing information to be communicated. - Change-Id: I3506b006195754018deec964e6e190b7e8c9ac8f + Change-Id: Ibd930aca5c9e6cae89bcfeffe9b13e2887644881 -commit b3fa009435acf476cd373052e62988f6437970b1 -Author: Arnaud Gelas <arnaudgelas@gmail.com> -Date: Fri Aug 17 10:31:41 2012 +0200 +commit a2fd9ca8beb5aa11fcc5d2b32e23f161edc93d28 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Tue Jan 29 16:02:41 2013 -0800 - Set CMAKE_*_OUTPUT_DIRECTORY - - Gather - * all executables in ${CMAKE_BINARY_DIR}/bin - * all libraries (static and dynamic) in ${CMAKE_BINARY_DIR}/lib + Fix Android.mk - Change-Id: Ibc2fa1adfb6f0aea65d66d570259b79546bf3b07 + Change-Id: I1093c2731283890d1f3792bf8e6741f448f1465d -commit 1b8a4d5d11671ed83cf6077e363dd95333f08ef8 +commit 977be7cac37316524038fa0168cc5994a5654acd Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 17 16:49:11 2012 -0700 +Date: Sat Jan 26 16:01:54 2013 -0800 - Fix a minor bug in detect_structure logging. + Add support for reporting linear solver and inner iteration + orderings. - Change-Id: I117f7745e4c67595b3ff9244cde82b5b5b34ee4b + Change-Id: I0588a4285e0925ce689e47bd48ddcc61ce596a1f -commit 31c1e784ab2cb9294c6e05414cf06aae2b3766de -Author: Keir Mierle <mierle@gmail.com> -Date: Fri Aug 17 16:16:32 2012 -0700 +commit 146b9acb4d5570da311fedb5222ad65fe12f233c +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Mon Jan 21 16:16:58 2013 -0800 - Minor cleanups. + Update include/ceres.h to export headers. + Update the ABI version. 
- Change-Id: Ida4866997deeaa1bc2cebd6b69313a05ac82e457 + Change-Id: I5c1c4f110cddc816bbb5a737634f55b4cbea98e1 -commit e83f7879a8b21c6976e116958caf35bcdcf41cb0 +commit e837aeaf9e63936d745519fa53c726a2ca9d5822 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 17 15:34:42 2012 -0700 +Date: Mon Jan 21 13:05:01 2013 -0800 - Fix SuiteSparse3 UFConfig.h detection really. + Documentation update. - Change-Id: Id187102e755b7d778dff4363f22f9a4697ed12dd + Change-Id: Ica8681f4bb58c60349d0dae453c652f2522eebf6 -commit 96f25dc57658d296ee6b6633818b4f1e51d7d587 +commit 2f0d7249ccedac8183e6e5a9cb45ca7c51bb6b41 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 17 15:34:42 2012 -0700 +Date: Fri Jan 18 13:11:32 2013 -0800 - Fix SuiteSparse3 UFConfig.h detection. + NumericDiffFunctor. - Change-Id: Ia59aefdb0ad7f713f76ed79692f2db4fa2821e5b - -commit c497bd6cd9aa944f518aa491d3bc645851ff9594 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Fri Aug 17 14:40:13 2012 +0200 - - Add UFconfig and/or SuiteSparse_config test to CMakeLists.txt + A wrapper class that takes a variadic functor evaluating a + function, numerically differentiates it and makes it available as a + templated functor so that it can be easily used as part of Ceres' + automatic differentiation framework. - SuiteSparse 4 requires linking to libsuitesparseconfig.a. - Both SuiteSparse 3 and SuiteSparse 4 require an additional header - (either UFconfig.h or SuiteSparse_config.h) that is not found if it is - in a separate path. Therefore, add explicit checks. + The tests for NumericDiffCostFunction and NumericDiffFunctor have + a lot of stuff that is common, so refactor them to reduce code. 
- Change-Id: I699902b5db4f1b7f17134b5a54f9aa681445e294 + Change-Id: I83b01e58b05e575fb2530d15cbd611928298646a -commit 383c04f4236d92801c7c674892814362dedf7ad6 +commit 2fc0ed6143ad499d6dc82d621ff5ec69170beb52 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Fri Aug 17 10:14:04 2012 -0700 +Date: Tue Jan 15 11:34:10 2013 -0800 - Fix QuaternionToAngleAxis to ensure rotations are between -pi and pi. + Change NumericDiffCostFunction to accept variadic functors. + + The interface for NumericDiffCostFunction and AutoDiffCostFunction + are not comparable. They both accept variadic functors. - Thanks to Guoxuan Zhang for reporting this. + The change is backward compatible, as it still supports numeric + differentiation of CostFunction objects. - Change-Id: I2831ca3a04d5dc6467849c290461adbe23faaea3 + Some refactoring of documentation and code in auto_diff_cost_function + and its relatives was also done to make things consistent. + + Change-Id: Ib5f230a1d4a85738eb187803b9c1cd7166bb3b92 -commit dd2b17d7dd9750801ba4720bdece2062e59b7ae3 +commit 9c5acce674e3ec1ba08509123ff519f106cc4348 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 16 19:34:57 2012 -0700 +Date: Sun Jan 13 22:14:12 2013 -0800 - CERES_DONT_HAVE_PROTOCOL_BUFFERS -> CERES_NO_PROTOCOL_BUFFERS. + Add CostFunctionToFunctor. + + CostFunctionToFunctor wraps a CostFunction, and makes it available + as a templated functor that can be called from other templated + functors. This is useful for when one wants to mix automatic, + numeric and analytic differentiated functions. + + Also a bug fix in autodiff.h - Change-Id: I6c9f50e4c006faf4e75a8f417455db18357f3187 + Change-Id: If8ba281a89fda976ef2ce10a5844a74c4ac7b84a -commit 8b4cb7aa2c74a0da62c638b2023566aa242af995 +commit c89ea4b9de588e2e2e82c54cd1c30cddb11454c5 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 16 19:26:55 2012 -0700 +Date: Wed Jan 9 16:09:35 2013 -0800 - Fix sparse linear algebra library logging in Summary::FullReport. 
+ Minor corrections based on Jim Roseborough's comments - Change-Id: Id2c902dc86c00954fde7749c7b4a67dd94215a31 + Change-Id: I4a8c7a454ddf038a3ed2567c101f9aee582044bf -commit 47d26bcd3b38b5ff53b34768c33b499d47b26bd0 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Thu Aug 16 00:23:38 2012 +0200 +commit 00c8a061929b912bda3cfd4615fb8688c246c969 +Author: Keir Mierle <mierle@gmail.com> +Date: Sat Dec 1 13:22:59 2012 -0800 - Do not implicitly negate the step in the TrustRegionMinimizer. - - In the TrustRegionMinimizer, the step is currently implicitly negated. - This is done so that the linearized residual is |r - J*step|^2, which - corresponds to J*step = r, so neither J nor r have to be modified. - However, it leads to the rather unintuitive situation that the strategy - returns a step in positive gradient direction, which you would expect to - increase the function value. One way is to rename the "step" parameter in - the strategy to "negative_step" and document it. - This patch instead moves the negation inside the strategy, just around - the linear solver call, so that it is done in a local context and easier - to document. + Fix bug in DenseSparseMatrix::ToDenseMatrix(). - Change-Id: Idb258149a01f61c64e22128ea221c5a30cd89c89 + Change-Id: I74a1a03149d74fbc4268ec3ce9d20e09746a7227 -commit 51da590c8457e6664f76fe9813425a0c71351497 -Author: Markus Moll <markus.moll@esat.kuleuven.be> -Date: Fri Aug 17 12:56:09 2012 +0200 +commit bcac4de5b75cae210c5557c81239222176d2709a +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Fri Nov 30 23:11:26 2012 -0800 - Remove tmp file + Speedup corrector.cc + + Add a specialization for the common case where the residual block + outputs exactly one residual. - Change-Id: I07496fafae7b0c5c12cc26ae336e0db3b5592735 + The matrix routines used by Corrector can be then specialized to + a scalar and be made considerably faster. + + For denoising upto 400% speedup is observed. 
+ + Change-Id: I8e3f24b8ba41caa8e62ad97c5f5e96ab6ea47150 -commit 7006a1f2b1701b8d89b8d1525fc0101943802221 +commit 9883fc396b2913fbc597afa795c39d365229c299 Author: Sameer Agarwal <sameeragarwal@google.com> -Date: Thu Aug 16 18:04:22 2012 -0700 +Date: Fri Nov 30 12:32:43 2012 -0800 - Correct example code in Powell's function example. + Refactoring of the LineSearchMinimizer. - Thanks to Petter Strandmark for pointing this out. + 1. New LineSearchDirection interface, factory and instances. + 2. Cleanup of LineSearchMinimizer to use the State and Direction objects. + 3. LBFGS -> LowRankInverseHessian. + 4. Refactoring of the RunCallbacks function and share it across + LineSearchMinimizer and TrustRegionMinimizer. - Change-Id: I967632235dccdb481396e94904bb911c9a1efe1e + Change-Id: I19354afc6f5d6567b28918710c2012dc30ef8f32 -commit 57a44b27bc6fc95b4e70fdc25c25c9925a2072a0 -Author: Keir Mierle <mierle@gmail.com> -Date: Thu Aug 16 17:04:50 2012 -0700 +commit 2293cb5bc96a5b317ed4ca52aa3494cadecbc07c +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Thu Nov 29 16:00:18 2012 -0800 - Remove unnecessary flags in NDK build. + Add missing documentation to solver.h - Change-Id: Ib5b4d0b7f2d898671252734978c789b8171d96a8 + Change-Id: I86e7c4f1f6cc1e15d5eb2cf23e73c32d94d458c1 -commit f21bee247251a8b2e836c215a84c4668c31d75cd -Author: Keir Mierle <mierle@gmail.com> -Date: Thu Aug 16 16:27:10 2012 -0700 +commit aed99615c017839df09c98f518dcc0a59a9819ec +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Thu Nov 29 10:33:19 2012 -0800 - Fix for fpclassify.h NDK porting work. 
+ Expose lbfgs rank in solver.h - Change-Id: I69df1b4caf2941ed96a53e35e43ec54073f84f59 + Change-Id: Ibc184b1a2f94a4057fa6569d539ca3a55d6d6098 -commit 8ceb02cb75b66602de44a35e413225386cb21c27 -Author: Keir Mierle <mierle@gmail.com> -Date: Thu Aug 16 14:23:47 2012 -0700 +commit 1afd498f50ef520868c18a0f26b55409d8471ceb +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Thu Nov 29 10:28:11 2012 -0800 - Add Android NDK build files. + String to and from enum conversion routines. + + Update types.h/cc with stringication and unstringication + routines for the newly introduced enums. - This adds a Android.mk build that builds a Ceres static library - suitable for embetting in larger Android applications. This is - useful when needing to build Ceres without GPL'd components, since - the standalone toolchain (needed for the CMake Android build) does - not work with STLPort. + Change-Id: I0fe2842b5b1c75ba351f4ab87ec9fa60af2f9ed2 + +commit 3e8d192f2871bcf6d5f248c119c8a6eef19186d3 +Author: Sameer Agarwal <sameeragarwal@google.com> +Date: Wed Nov 28 17:20:22 2012 -0800 + + Add a rough implementation of LBFGS. - Change-Id: I8d857237f6f82658741017d161b2e31d9a20e5a7 + Change-Id: I2bc816adfe0c02773a23035ea31de3cddc1322a4 diff --git a/extern/libmv/third_party/ceres/bundle.sh b/extern/libmv/third_party/ceres/bundle.sh index 76630bc0626..b2c8330ec3e 100755 --- a/extern/libmv/third_party/ceres/bundle.sh +++ b/extern/libmv/third_party/ceres/bundle.sh @@ -9,7 +9,8 @@ fi repo="https://ceres-solver.googlesource.com/ceres-solver" branch="master" -tag="1.3.0" +#tag="1.4.0" +tag="" tmp=`mktemp -d` checkout="$tmp/ceres" @@ -120,7 +121,7 @@ set(INC include internal ../gflags - ../.. 
+ ../../ ) set(INC_SYS diff --git a/extern/libmv/third_party/ceres/files.txt b/extern/libmv/third_party/ceres/files.txt index 55083572977..8f4b7b97b50 100644 --- a/extern/libmv/third_party/ceres/files.txt +++ b/extern/libmv/third_party/ceres/files.txt @@ -2,21 +2,28 @@ include/ceres/autodiff_cost_function.h include/ceres/ceres.h include/ceres/conditioned_cost_function.h include/ceres/cost_function.h +include/ceres/cost_function_to_functor.h include/ceres/crs_matrix.h +include/ceres/dynamic_autodiff_cost_function.h include/ceres/fpclassify.h +include/ceres/gradient_checker.h include/ceres/internal/autodiff.h include/ceres/internal/eigen.h include/ceres/internal/fixed_array.h include/ceres/internal/macros.h include/ceres/internal/manual_constructor.h +include/ceres/internal/numeric_diff.h include/ceres/internal/port.h include/ceres/internal/scoped_ptr.h +include/ceres/internal/variadic_evaluate.h include/ceres/iteration_callback.h include/ceres/jet.h include/ceres/local_parameterization.h include/ceres/loss_function.h include/ceres/normal_prior.h include/ceres/numeric_diff_cost_function.h +include/ceres/numeric_diff_functor.h +include/ceres/ordered_groups.h include/ceres/problem.h include/ceres/rotation.h include/ceres/sized_cost_function.h @@ -54,6 +61,8 @@ internal/ceres/compressed_row_sparse_matrix.h internal/ceres/conditioned_cost_function.cc internal/ceres/conjugate_gradients_solver.cc internal/ceres/conjugate_gradients_solver.h +internal/ceres/coordinate_descent_minimizer.cc +internal/ceres/coordinate_descent_minimizer.h internal/ceres/corrector.cc internal/ceres/corrector.h internal/ceres/cxsparse.cc @@ -71,6 +80,7 @@ internal/ceres/dogleg_strategy.cc internal/ceres/dogleg_strategy.h internal/ceres/evaluator.cc internal/ceres/evaluator.h +internal/ceres/execution_summary.h internal/ceres/file.cc internal/ceres/file.h internal/ceres/generated/schur_eliminator_2_2_2.cc @@ -107,18 +117,31 @@ internal/ceres/linear_operator.cc internal/ceres/linear_operator.h 
internal/ceres/linear_solver.cc internal/ceres/linear_solver.h +internal/ceres/line_search.cc +internal/ceres/line_search_direction.cc +internal/ceres/line_search_direction.h +internal/ceres/line_search.h +internal/ceres/line_search_minimizer.cc +internal/ceres/line_search_minimizer.h internal/ceres/local_parameterization.cc internal/ceres/loss_function.cc +internal/ceres/low_rank_inverse_hessian.cc +internal/ceres/low_rank_inverse_hessian.h internal/ceres/map_util.h internal/ceres/matrix_proto.h +internal/ceres/minimizer.cc internal/ceres/minimizer.h internal/ceres/mutex.h internal/ceres/normal_prior.cc internal/ceres/parameter_block.h +internal/ceres/parameter_block_ordering.cc +internal/ceres/parameter_block_ordering.h internal/ceres/partitioned_matrix_view.cc internal/ceres/partitioned_matrix_view.h -internal/ceres/polynomial_solver.cc -internal/ceres/polynomial_solver.h +internal/ceres/polynomial.cc +internal/ceres/polynomial.h +internal/ceres/preconditioner.cc +internal/ceres/preconditioner.h internal/ceres/problem.cc internal/ceres/problem_impl.cc internal/ceres/problem_impl.h @@ -137,8 +160,8 @@ internal/ceres/schur_complement_solver.h internal/ceres/schur_eliminator.cc internal/ceres/schur_eliminator.h internal/ceres/schur_eliminator_impl.h -internal/ceres/schur_ordering.cc -internal/ceres/schur_ordering.h +internal/ceres/schur_jacobi_preconditioner.cc +internal/ceres/schur_jacobi_preconditioner.h internal/ceres/scratch_evaluate_preparer.cc internal/ceres/scratch_evaluate_preparer.h internal/ceres/solver.cc @@ -166,3 +189,5 @@ internal/ceres/visibility_based_preconditioner.cc internal/ceres/visibility_based_preconditioner.h internal/ceres/visibility.cc internal/ceres/visibility.h +internal/ceres/wall_time.cc +internal/ceres/wall_time.h diff --git a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h index da9ee2c7993..e758d3a2bd5 100644 --- 
a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h @@ -28,10 +28,10 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) // -// Helpers for making CostFunctions as needed by the least squares framework, -// with Jacobians computed via automatic differentiation. For more information -// on automatic differentation, see the wikipedia article at -// http://en.wikipedia.org/wiki/Automatic_differentiation +// Create CostFunctions as needed by the least squares framework, with +// Jacobians computed via automatic differentiation. For more +// information on automatic differentation, see the wikipedia article +// at http://en.wikipedia.org/wiki/Automatic_differentiation // // To get an auto differentiated cost function, you must define a class with a // templated operator() (a functor) that computes the cost function in terms of @@ -57,8 +57,8 @@ // To write an auto-differentiable cost function for the above model, first // define the object // -// class MyScalarCostFunction { -// MyScalarCostFunction(double k): k_(k) {} +// class MyScalarCostFunctor { +// MyScalarCostFunctor(double k): k_(k) {} // // template <typename T> // bool operator()(const T* const x , const T* const y, T* e) const { @@ -80,32 +80,32 @@ // it can be constructed as follows. // // CostFunction* cost_function -// = new AutoDiffCostFunction<MyScalarCostFunction, 1, 2, 2>( -// new MyScalarCostFunction(1.0)); ^ ^ ^ -// | | | -// Dimension of residual ------+ | | -// Dimension of x ----------------+ | -// Dimension of y -------------------+ +// = new AutoDiffCostFunction<MyScalarCostFunctor, 1, 2, 2>( +// new MyScalarCostFunctor(1.0)); ^ ^ ^ +// | | | +// Dimension of residual -----+ | | +// Dimension of x ---------------+ | +// Dimension of y ------------------+ // // In this example, there is usually an instance for each measumerent of k. 
// // In the instantiation above, the template parameters following -// "MyScalarCostFunction", "1, 2, 2", describe the functor as computing a +// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing a // 1-dimensional output from two arguments, both 2-dimensional. // // The autodiff cost function also supports cost functions with a // runtime-determined number of residuals. For example: // // CostFunction* cost_function -// = new AutoDiffCostFunction<MyScalarCostFunction, DYNAMIC, 2, 2>( -// new CostFunctionWithDynamicNumResiduals(1.0), ^ ^ ^ -// runtime_number_of_residuals); <----+ | | | -// | | | | -// | | | | -// Actual number of residuals ------+ | | | -// Indicate dynamic number of residuals ---------+ | | -// Dimension of x -------------------------------------+ | -// Dimension of y ----------------------------------------+ +// = new AutoDiffCostFunction<MyScalarCostFunctor, DYNAMIC, 2, 2>( +// new CostFunctorWithDynamicNumResiduals(1.0), ^ ^ ^ +// runtime_number_of_residuals); <----+ | | | +// | | | | +// | | | | +// Actual number of residuals ------+ | | | +// Indicate dynamic number of residuals --------+ | | +// Dimension of x ------------------------------------+ | +// Dimension of y ---------------------------------------+ // // The framework can currently accommodate cost functions of up to 6 independent // variables, and there is no limit on the dimensionality of each of them. @@ -119,7 +119,7 @@ // functions is to get the sizing wrong. In particular, there is a tendency to // set the template parameters to (dimension of residual, number of parameters) // instead of passing a dimension parameter for *every parameter*. In the -// example above, that would be <MyScalarCostFunction, 1, 2>, which is missing +// example above, that would be <MyScalarCostFunctor, 1, 2>, which is missing // the last '2' argument. Please be careful when setting the size parameters. 
#ifndef CERES_PUBLIC_AUTODIFF_COST_FUNCTION_H_ @@ -154,16 +154,21 @@ template <typename CostFunctor, int N2 = 0, // Number of parameters in block 2. int N3 = 0, // Number of parameters in block 3. int N4 = 0, // Number of parameters in block 4. - int N5 = 0> // Number of parameters in block 5. -class AutoDiffCostFunction : - public SizedCostFunction<M, N0, N1, N2, N3, N4, N5> { + int N5 = 0, // Number of parameters in block 5. + int N6 = 0, // Number of parameters in block 6. + int N7 = 0, // Number of parameters in block 7. + int N8 = 0, // Number of parameters in block 8. + int N9 = 0> // Number of parameters in block 9. +class AutoDiffCostFunction : public SizedCostFunction<M, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9> { public: // Takes ownership of functor. Uses the template-provided value for the // number of residuals ("M"). explicit AutoDiffCostFunction(CostFunctor* functor) : functor_(functor) { CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the " - << "number of residuals is set to ceres::DYNAMIC."; + << "number of residuals is set to ceres::DYNAMIC."; } // Takes ownership of functor. 
Ignores the template-provided number of @@ -174,8 +179,9 @@ class AutoDiffCostFunction : AutoDiffCostFunction(CostFunctor* functor, int num_residuals) : functor_(functor) { CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the " - << "number of residuals is not ceres::DYNAMIC."; - SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::set_num_residuals(num_residuals); + << "number of residuals is not ceres::DYNAMIC."; + SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> + ::set_num_residuals(num_residuals); } virtual ~AutoDiffCostFunction() {} @@ -190,14 +196,15 @@ class AutoDiffCostFunction : double** jacobians) const { if (!jacobians) { return internal::VariadicEvaluate< - CostFunctor, double, N0, N1, N2, N3, N4, N5> + CostFunctor, double, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> ::Call(*functor_, parameters, residuals); } return internal::AutoDiff<CostFunctor, double, - N0, N1, N2, N3, N4, N5>::Differentiate( + N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Differentiate( *functor_, parameters, - SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::num_residuals(), + SizedCostFunction<M, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9> + ::num_residuals(), residuals, jacobians); } diff --git a/extern/libmv/third_party/ceres/include/ceres/ceres.h b/extern/libmv/third_party/ceres/include/ceres/ceres.h index 22aaf8ff21a..7878806aa45 100644 --- a/extern/libmv/third_party/ceres/include/ceres/ceres.h +++ b/extern/libmv/third_party/ceres/include/ceres/ceres.h @@ -34,12 +34,20 @@ #ifndef CERES_PUBLIC_CERES_H_ #define CERES_PUBLIC_CERES_H_ +#define CERES_VERSION 1.5.0 +#define CERES_ABI_VERSION 1.5.0 + #include "ceres/autodiff_cost_function.h" #include "ceres/cost_function.h" +#include "ceres/cost_function_to_functor.h" +#include "ceres/crs_matrix.h" #include "ceres/iteration_callback.h" +#include "ceres/jet.h" #include "ceres/local_parameterization.h" #include "ceres/loss_function.h" #include "ceres/numeric_diff_cost_function.h" +#include "ceres/numeric_diff_functor.h" 
+#include "ceres/ordered_groups.h" #include "ceres/problem.h" #include "ceres/sized_cost_function.h" #include "ceres/solver.h" diff --git a/extern/libmv/third_party/ceres/include/ceres/cost_function_to_functor.h b/extern/libmv/third_party/ceres/include/ceres/cost_function_to_functor.h new file mode 100644 index 00000000000..b30ecd06983 --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/cost_function_to_functor.h @@ -0,0 +1,752 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// CostFunctionToFunctor is an adapter class that allows users to use +// CostFunction objects in templated functors which are to be used for +// automatic differentiation. This allows the user to seamlessly mix +// analytic, numeric and automatic differentiation. +// +// For example, let us assume that +// +// class IntrinsicProjection : public SizedCostFunction<2, 5, 3> { +// public: +// IntrinsicProjection(const double* observations); +// virtual bool Evaluate(double const* const* parameters, +// double* residuals, +// double** jacobians) const; +// }; +// +// is a cost function that implements the projection of a point in its +// local coordinate system onto its image plane and subtracts it from +// the observed point projection. It can compute its residual and +// either via analytic or numerical differentiation can compute its +// jacobians. +// +// Now we would like to compose the action of this CostFunction with +// the action of camera extrinsics, i.e., rotation and +// translation. 
Say we have a templated function +// +// template<typename T> +// void RotateAndTranslatePoint(const T* rotation, +// const T* translation, +// const T* point, +// T* result); +// +// Then we can now do the following, +// +// struct CameraProjection { +// CameraProjection(double* observation) { +// intrinsic_projection_.reset( +// new CostFunctionToFunctor<2, 5, 3>( +// new IntrinsicProjection(observation_))); +// } +// template <typename T> +// bool operator(const T* rotation, +// const T* translation, +// const T* intrinsics, +// const T* point, +// T* residual) const { +// T transformed_point[3]; +// RotateAndTranslatePoint(rotation, translation, point, transformed_point); +// +// // Note that we call intrinsic_projection_, just like it was +// // any other templated functor. +// +// return (*intrinsic_projection_)(intrinsics, transformed_point, residual); +// } +// +// private: +// scoped_ptr<CostFunctionToFunctor<2,5,3> > intrinsic_projection_; +// }; + +#ifndef CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ +#define CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ + +#include <numeric> +#include <vector> + +#include "ceres/cost_function.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/port.h" +#include "ceres/internal/scoped_ptr.h" + +namespace ceres { + +template <int kNumResiduals, + int N0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, + int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0> +class CostFunctionToFunctor { + public: + explicit CostFunctionToFunctor(CostFunction* cost_function) + : cost_function_(cost_function) { + CHECK_NOTNULL(cost_function); + + CHECK_GE(kNumResiduals, 0); + CHECK_EQ(cost_function->num_residuals(), kNumResiduals); + + // This block breaks the 80 column rule to keep it somewhat readable. 
+ CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) + << "Zero block cannot precede a non-zero block. Block sizes are " + << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", " + << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", " + << N8 << ", " << N9; + + const vector<int16>& parameter_block_sizes = + cost_function->parameter_block_sizes(); + const int num_parameter_blocks = + (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) + + (N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0); + CHECK_EQ(parameter_block_sizes.size(), num_parameter_blocks); + + CHECK_EQ(N0, parameter_block_sizes[0]); + if (parameter_block_sizes.size() > 1) CHECK_EQ(N1, parameter_block_sizes[1]); // NOLINT + if (parameter_block_sizes.size() > 2) CHECK_EQ(N2, parameter_block_sizes[2]); // NOLINT + if (parameter_block_sizes.size() > 3) CHECK_EQ(N3, parameter_block_sizes[3]); // NOLINT + if (parameter_block_sizes.size() > 4) CHECK_EQ(N4, parameter_block_sizes[4]); // NOLINT + if (parameter_block_sizes.size() > 5) CHECK_EQ(N5, parameter_block_sizes[5]); // NOLINT + if (parameter_block_sizes.size() > 6) CHECK_EQ(N6, 
parameter_block_sizes[6]); // NOLINT + if (parameter_block_sizes.size() > 7) CHECK_EQ(N7, parameter_block_sizes[7]); // NOLINT + if (parameter_block_sizes.size() > 8) CHECK_EQ(N8, parameter_block_sizes[8]); // NOLINT + if (parameter_block_sizes.size() > 9) CHECK_EQ(N9, parameter_block_sizes[9]); // NOLINT + + CHECK_EQ(accumulate(parameter_block_sizes.begin(), + parameter_block_sizes.end(), 0), + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9); + } + + bool operator()(const double* x0, double* residuals) const { + CHECK_NE(N0, 0); + CHECK_EQ(N1, 0); + CHECK_EQ(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + + return cost_function_->Evaluate(&x0, residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_EQ(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(2); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(3); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 
0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(4); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(5); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(6); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + parameter_blocks[5] = x5; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + 
internal::FixedArray<const double*> parameter_blocks(7); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + parameter_blocks[5] = x5; + parameter_blocks[6] = x6; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + const double* x7, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(8); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + parameter_blocks[5] = x5; + parameter_blocks[6] = x6; + parameter_blocks[7] = x7; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + const double* x7, + const double* x8, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_NE(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const double*> parameter_blocks(9); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + parameter_blocks[5] = x5; + parameter_blocks[6] = x6; + parameter_blocks[7] = x7; + parameter_blocks[8] = x8; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, 
+ const double* x4, + const double* x5, + const double* x6, + const double* x7, + const double* x8, + const double* x9, + double* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_NE(N8, 0); + CHECK_NE(N9, 0); + internal::FixedArray<const double*> parameter_blocks(10); + parameter_blocks[0] = x0; + parameter_blocks[1] = x1; + parameter_blocks[2] = x2; + parameter_blocks[3] = x3; + parameter_blocks[4] = x4; + parameter_blocks[5] = x5; + parameter_blocks[6] = x6; + parameter_blocks[7] = x7; + parameter_blocks[8] = x8; + parameter_blocks[9] = x9; + return cost_function_->Evaluate(parameter_blocks.get(), residuals, NULL); + } + + template <typename JetT> + bool operator()(const JetT* x0, JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_EQ(N1, 0); + CHECK_EQ(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + return EvaluateWithJets(&x0, residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_EQ(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(2); + jets[0] = x0; + jets[1] = x1; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_EQ(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(3); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename 
JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_EQ(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(4); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_EQ(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(5); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + const JetT* x5, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_EQ(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(6); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + jets[5] = x5; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + const JetT* x5, + const JetT* x6, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_EQ(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> 
jets(7); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + jets[5] = x5; + jets[6] = x6; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + const JetT* x5, + const JetT* x6, + const JetT* x7, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_EQ(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(8); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + jets[5] = x5; + jets[6] = x6; + jets[7] = x7; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + const JetT* x5, + const JetT* x6, + const JetT* x7, + const JetT* x8, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_NE(N8, 0); + CHECK_EQ(N9, 0); + internal::FixedArray<const JetT*> jets(9); + jets[0] = x0; + jets[1] = x1; + jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + jets[5] = x5; + jets[6] = x6; + jets[7] = x7; + jets[8] = x8; + return EvaluateWithJets(jets.get(), residuals); + } + + template <typename JetT> + bool operator()(const JetT* x0, + const JetT* x1, + const JetT* x2, + const JetT* x3, + const JetT* x4, + const JetT* x5, + const JetT* x6, + const JetT* x7, + const JetT* x8, + const JetT* x9, + JetT* residuals) const { + CHECK_NE(N0, 0); + CHECK_NE(N1, 0); + CHECK_NE(N2, 0); + CHECK_NE(N3, 0); + CHECK_NE(N4, 0); + CHECK_NE(N5, 0); + CHECK_NE(N6, 0); + CHECK_NE(N7, 0); + CHECK_NE(N8, 0); + CHECK_NE(N9, 0); + internal::FixedArray<const JetT*> jets(10); + jets[0] = x0; + jets[1] = x1; + 
jets[2] = x2; + jets[3] = x3; + jets[4] = x4; + jets[5] = x5; + jets[6] = x6; + jets[7] = x7; + jets[8] = x8; + jets[9] = x9; + return EvaluateWithJets(jets.get(), residuals); + } + + private: + template <typename JetT> + bool EvaluateWithJets(const JetT** inputs, JetT* output) const { + const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9; + const vector<int16>& parameter_block_sizes = + cost_function_->parameter_block_sizes(); + const int num_parameter_blocks = parameter_block_sizes.size(); + const int num_residuals = cost_function_->num_residuals(); + + internal::FixedArray<double> parameters(kNumParameters); + internal::FixedArray<double*> parameter_blocks(num_parameter_blocks); + internal::FixedArray<double> jacobians(num_residuals * kNumParameters); + internal::FixedArray<double*> jacobian_blocks(num_parameter_blocks); + internal::FixedArray<double> residuals(num_residuals); + + // Build a set of arrays to get the residuals and jacobians from + // the CostFunction wrapped by this functor. + double* parameter_ptr = parameters.get(); + double* jacobian_ptr = jacobians.get(); + for (int i = 0; i < num_parameter_blocks; ++i) { + parameter_blocks[i] = parameter_ptr; + jacobian_blocks[i] = jacobian_ptr; + for (int j = 0; j < parameter_block_sizes[i]; ++j) { + *parameter_ptr++ = inputs[i][j].a; + } + jacobian_ptr += num_residuals * parameter_block_sizes[i]; + } + + if (!cost_function_->Evaluate(parameter_blocks.get(), + residuals.get(), + jacobian_blocks.get())) { + return false; + } + + // Now that we have the incoming Jets, which are carrying the + // partial derivatives of each of the inputs w.r.t to some other + // underlying parameters. The derivative of the outputs of the + // cost function w.r.t to the same underlying parameters can now + // be computed by applying the chain rule. 
+ // + // d output[i] d output[i] d input[j] + // -------------- = sum_j ----------- * ------------ + // d parameter[k] d input[j] d parameter[k] + // + // d input[j] + // -------------- = inputs[j], so + // d parameter[k] + // + // outputJet[i] = sum_k jacobian[i][k] * inputJet[k] + // + // The following loop, iterates over the residuals, computing one + // output jet at a time. + for (int i = 0; i < num_residuals; ++i) { + output[i].a = residuals[i]; + output[i].v.setZero(); + + for (int j = 0; j < num_parameter_blocks; ++j) { + const int16 block_size = parameter_block_sizes[j]; + for (int k = 0; k < parameter_block_sizes[j]; ++k) { + output[i].v += + jacobian_blocks[j][i * block_size + k] * inputs[j][k].v; + } + } + } + + return true; + } + + private: + internal::scoped_ptr<CostFunction> cost_function_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_COST_FUNCTION_TO_FUNCTOR_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h b/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h index c9fe8f78b7c..8c470cd33f2 100644 --- a/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h +++ b/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h @@ -44,17 +44,35 @@ struct CRSMatrix { int num_rows; int num_cols; - // A compressed row matrix stores its contents in three arrays. - // The non-zero pattern of the i^th row is given by + // A compressed row matrix stores its contents in three arrays, + // rows, cols and values. // - // rows[cols[i] ... cols[i + 1]] + // rows is a num_rows + 1 sized array that points into the cols and + // values array. For each row i: // - // and the corresponding values by + // cols[rows[i]] ... cols[rows[i + 1] - 1] are the indices of the + // non-zero columns of row i. // - // values[cols[i] ... cols[i + 1]] + // values[rows[i]] .. values[rows[i + 1] - 1] are the values of the + // corresponding entries. 
// - // Thus, cols is a vector of size num_cols + 1, and rows and values - // have as many entries as number of non-zeros in the matrix. + // cols and values contain as many entries as there are non-zeros in + // the matrix. + // + // e.g, consider the 3x4 sparse matrix + // + // [ 0 10 0 4 ] + // [ 0 2 -3 2 ] + // [ 1 2 0 0 ] + // + // The three arrays will be: + // + // + // -row0- ---row1--- -row2- + // rows = [ 0, 2, 5, 7] + // cols = [ 1, 3, 1, 2, 3, 0, 1] + // values = [10, 4, 2, -3, 2, 1, 2] + vector<int> cols; vector<int> rows; vector<double> values; diff --git a/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h new file mode 100644 index 00000000000..861164a8253 --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/dynamic_autodiff_cost_function.h @@ -0,0 +1,215 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: mierle@gmail.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) +// thadh@gmail.com (Thad Hughes) +// +// This autodiff implementation differs from the one found in +// autodiff_cost_function.h by supporting autodiff on cost functions with +// variable numbers of parameters with variable sizes. With the other +// implementation, all the sizes (both the number of parameter blocks and the +// size of each block) must be fixed at compile time. +// +// The functor API differs slightly from the API for fixed size autodiff; the +// expected interface for the cost functors is: +// +// struct MyCostFunctor { +// template<typename T> +// bool operator()(T const* const* parameters, T* residuals) const { +// // Use parameters[i] to access the i'th parameter block. +// } +// } +// +// Since the sizing of the parameters is done at runtime, you must also specify +// the sizes after creating the dynamic autodiff cost function. 
For example: +// +// DynamicAutoDiffCostFunction<MyCostFunctor, 3> cost_function( +// new MyCostFunctor()); +// cost_function.AddParameterBlock(5); +// cost_function.AddParameterBlock(10); +// cost_function.SetNumResiduals(21); +// +// Under the hood, the implementation evaluates the cost function multiple +// times, computing a small set of the derivatives (four by default, controlled +// by the Stride template parameter) with each pass. There is a tradeoff with +// the size of the passes; you may want to experiment with the stride. + +#ifndef CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ +#define CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ + +#include <cmath> +#include <numeric> +#include <vector> + +#include "ceres/cost_function.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/jet.h" +#include "glog/logging.h" + +namespace ceres { + +template <typename CostFunctor, int Stride = 4> +class DynamicAutoDiffCostFunction : public CostFunction { + public: + explicit DynamicAutoDiffCostFunction(CostFunctor* functor) + : functor_(functor) {} + + virtual ~DynamicAutoDiffCostFunction() {} + + void AddParameterBlock(int size) { + mutable_parameter_block_sizes()->push_back(size); + } + + void SetNumResiduals(int num_residuals) { + set_num_residuals(num_residuals); + } + + virtual bool Evaluate(double const* const* parameters, + double* residuals, + double** jacobians) const { + CHECK_GT(num_residuals(), 0) + << "You must call DynamicAutoDiffCostFunction::SetNumResiduals() " + << "before DynamicAutoDiffCostFunction::Evaluate()."; + + if (jacobians == NULL) { + return (*functor_)(parameters, residuals); + } + + // The difficulty with Jets, as implemented in Ceres, is that they were + // originally designed for strictly compile-sized use. At this point, there + // is a large body of code that assumes inside a cost functor it is + // acceptable to do e.g. T(1.5) and get an appropriately sized jet back. 
+ // + // Unfortunately, it is impossible to communicate the expected size of a + // dynamically sized jet to the static instantiations that existing code + // depends on. + // + // To work around this issue, the solution here is to evaluate the + // jacobians in a series of passes, each one computing Stripe * + // num_residuals() derivatives. This is done with small, fixed-size jets. + const int num_parameter_blocks = parameter_block_sizes().size(); + const int num_parameters = std::accumulate(parameter_block_sizes().begin(), + parameter_block_sizes().end(), + 0); + + // Allocate scratch space for the strided evaluation. + vector<Jet<double, Stride> > input_jets(num_parameters); + vector<Jet<double, Stride> > output_jets(num_residuals()); + + // Make the parameter pack that is sent to the functor (reused). + vector<Jet<double, Stride>* > jet_parameters(num_parameter_blocks, NULL); + int num_active_parameters = 0; + int start_derivative_section = -1; + for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) { + jet_parameters[i] = &input_jets[parameter_cursor]; + + const int parameter_block_size = parameter_block_sizes()[i]; + if (jacobians[i] != NULL) { + start_derivative_section = + (start_derivative_section == -1) + ? parameter_cursor + : start_derivative_section; + num_active_parameters += parameter_block_size; + } + + for (int j = 0; j < parameter_block_size; ++j, parameter_cursor++) { + input_jets[parameter_cursor].a = parameters[i][j]; + } + } + + // Evaluate all of the strides. Each stride is a chunk of the derivative to + // evaluate, typically some size proportional to the size of the SIMD + // registers of the CPU. + int num_strides = static_cast<int>(ceil(num_active_parameters / + static_cast<float>(Stride))); + + for (int pass = 0; pass < num_strides; ++pass) { + // Set most of the jet components to zero, except for + // non-constant #Stride parameters. 
+ int active_parameter_count = 0; + int end_derivative_section = start_derivative_section; + for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) { + for (int j = 0; j < parameter_block_sizes()[i]; + ++j, parameter_cursor++) { + input_jets[parameter_cursor].v.setZero(); + if (parameter_cursor >= start_derivative_section && + active_parameter_count < Stride) { + if (jacobians[i] != NULL) { + input_jets[parameter_cursor] + .v[parameter_cursor - start_derivative_section] = 1.0; + ++active_parameter_count; + } + ++end_derivative_section; + } + } + } + + if (!(*functor_)(&jet_parameters[0], &output_jets[0])) { + return false; + } + + // Copy the pieces of the jacobians into their final place. + active_parameter_count = 0; + for (int i = 0, parameter_cursor = 0; i < num_parameter_blocks; ++i) { + for (int j = 0; j < parameter_block_sizes()[i]; + ++j, parameter_cursor++) { + if (parameter_cursor >= start_derivative_section && + active_parameter_count < Stride) { + if (jacobians[i] != NULL) { + for (int k = 0; k < num_residuals(); ++k) { + jacobians[i][k * parameter_block_sizes()[i] + j] = + output_jets[k].v[parameter_cursor - + start_derivative_section]; + } + ++active_parameter_count; + } + } + } + } + + // Only copy the residuals over once (even though we compute them on + // every loop). 
+ if (pass == num_strides - 1) { + for (int k = 0; k < num_residuals(); ++k) { + residuals[k] = output_jets[k].a; + } + } + + start_derivative_section = end_derivative_section; + } + return true; + } + + private: + internal::scoped_ptr<CostFunctor> functor_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_DYNAMIC_AUTODIFF_COST_FUNCTION_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/fpclassify.h b/extern/libmv/third_party/ceres/include/ceres/fpclassify.h index 5a9ea1599d2..b730832fd4b 100644 --- a/extern/libmv/third_party/ceres/include/ceres/fpclassify.h +++ b/extern/libmv/third_party/ceres/include/ceres/fpclassify.h @@ -41,6 +41,8 @@ #include <float.h> #endif +#include <limits> + namespace ceres { #if defined(_MSC_VER) diff --git a/extern/libmv/third_party/ceres/include/ceres/gradient_checker.h b/extern/libmv/third_party/ceres/include/ceres/gradient_checker.h new file mode 100644 index 00000000000..3ec8056a02d --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/gradient_checker.h @@ -0,0 +1,222 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// Copyright 2007 Google Inc. All Rights Reserved. +// +// Author: wjr@google.com (William Rucklidge) +// +// This file contains a class that exercises a cost function, to make sure +// that it is computing reasonable derivatives. It compares the Jacobians +// computed by the cost function with those obtained by finite +// differences. + +#ifndef CERES_PUBLIC_GRADIENT_CHECKER_H_ +#define CERES_PUBLIC_GRADIENT_CHECKER_H_ + +#include <cstddef> +#include <algorithm> +#include <vector> + +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/macros.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/numeric_diff_cost_function.h" +#include "glog/logging.h" + +namespace ceres { + +// An object that exercises a cost function, to compare the answers that it +// gives with derivatives estimated using finite differencing. +// +// The only likely usage of this is for testing. +// +// How to use: Fill in an array of pointers to parameter blocks for your +// CostFunction, and then call Probe(). Check that the return value is +// 'true'. See prober_test.cc for an example. 
+// +// This is templated similarly to NumericDiffCostFunction, as it internally +// uses that. +template <typename CostFunctionToProbe, + int M = 0, int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0> +class GradientChecker { + public: + // Here we stash some results from the probe, for later + // inspection. + struct GradientCheckResults { + // Computed cost. + Vector cost; + + // The sizes of these matrices are dictated by the cost function's + // parameter and residual block sizes. Each vector's length will + // term->parameter_block_sizes().size(), and each matrix is the + // Jacobian of the residual with respect to the corresponding parameter + // block. + + // Derivatives as computed by the cost function. + vector<Matrix> term_jacobians; + + // Derivatives as computed by finite differencing. + vector<Matrix> finite_difference_jacobians; + + // Infinity-norm of term_jacobians - finite_difference_jacobians. + double error_jacobians; + }; + + // Checks the Jacobian computed by a cost function. + // + // probe_point: The parameter values at which to probe. + // error_tolerance: A threshold for the infinity-norm difference + // between the Jacobians. If the Jacobians differ by more than + // this amount, then the probe fails. + // + // term: The cost function to test. Not retained after this call returns. + // + // results: On return, the two Jacobians (and other information) + // will be stored here. May be NULL. + // + // Returns true if no problems are detected and the difference between the + // Jacobians is less than error_tolerance. + static bool Probe(double const* const* probe_point, + double error_tolerance, + CostFunctionToProbe *term, + GradientCheckResults* results) { + CHECK_NOTNULL(probe_point); + CHECK_NOTNULL(term); + LOG(INFO) << "-------------------- Starting Probe() --------------------"; + + // We need a GradientCheckeresults, whether or not they supplied one. 
+ internal::scoped_ptr<GradientCheckResults> owned_results; + if (results == NULL) { + owned_results.reset(new GradientCheckResults); + results = owned_results.get(); + } + + // Do a consistency check between the term and the template parameters. + CHECK_EQ(M, term->num_residuals()); + const int num_residuals = M; + const vector<int16>& block_sizes = term->parameter_block_sizes(); + const int num_blocks = block_sizes.size(); + + CHECK_LE(num_blocks, 5) << "Unable to test functions that take more " + << "than 5 parameter blocks"; + if (N0) { + CHECK_EQ(N0, block_sizes[0]); + CHECK_GE(num_blocks, 1); + } else { + CHECK_LT(num_blocks, 1); + } + if (N1) { + CHECK_EQ(N1, block_sizes[1]); + CHECK_GE(num_blocks, 2); + } else { + CHECK_LT(num_blocks, 2); + } + if (N2) { + CHECK_EQ(N2, block_sizes[2]); + CHECK_GE(num_blocks, 3); + } else { + CHECK_LT(num_blocks, 3); + } + if (N3) { + CHECK_EQ(N3, block_sizes[3]); + CHECK_GE(num_blocks, 4); + } else { + CHECK_LT(num_blocks, 4); + } + if (N4) { + CHECK_EQ(N4, block_sizes[4]); + CHECK_GE(num_blocks, 5); + } else { + CHECK_LT(num_blocks, 5); + } + + results->term_jacobians.clear(); + results->term_jacobians.resize(num_blocks); + results->finite_difference_jacobians.clear(); + results->finite_difference_jacobians.resize(num_blocks); + + internal::FixedArray<double*> term_jacobian_pointers(num_blocks); + internal::FixedArray<double*> + finite_difference_jacobian_pointers(num_blocks); + for (int i = 0; i < num_blocks; i++) { + results->term_jacobians[i].resize(num_residuals, block_sizes[i]); + term_jacobian_pointers[i] = results->term_jacobians[i].data(); + results->finite_difference_jacobians[i].resize( + num_residuals, block_sizes[i]); + finite_difference_jacobian_pointers[i] = + results->finite_difference_jacobians[i].data(); + } + results->cost.resize(num_residuals, 1); + + CHECK(term->Evaluate(probe_point, results->cost.data(), + term_jacobian_pointers.get())); + NumericDiffCostFunction<CostFunctionToProbe, CENTRAL, M, N0, 
N1, N2, N3, N4> + numeric_term(term, DO_NOT_TAKE_OWNERSHIP); + CHECK(numeric_term.Evaluate(probe_point, results->cost.data(), + finite_difference_jacobian_pointers.get())); + + results->error_jacobians = 0; + for (int i = 0; i < num_blocks; i++) { + Matrix jacobian_difference = results->term_jacobians[i] - + results->finite_difference_jacobians[i]; + results->error_jacobians = + std::max(results->error_jacobians, + jacobian_difference.lpNorm<Eigen::Infinity>()); + } + + LOG(INFO) << "========== term-computed derivatives =========="; + for (int i = 0; i < num_blocks; i++) { + LOG(INFO) << "term_computed block " << i; + LOG(INFO) << "\n" << results->term_jacobians[i]; + } + + LOG(INFO) << "========== finite-difference derivatives =========="; + for (int i = 0; i < num_blocks; i++) { + LOG(INFO) << "finite_difference block " << i; + LOG(INFO) << "\n" << results->finite_difference_jacobians[i]; + } + + LOG(INFO) << "========== difference =========="; + for (int i = 0; i < num_blocks; i++) { + LOG(INFO) << "difference block " << i; + LOG(INFO) << (results->term_jacobians[i] - + results->finite_difference_jacobians[i]); + } + + LOG(INFO) << "||difference|| = " << results->error_jacobians; + + return results->error_jacobians < error_tolerance; + } + + private: + CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(GradientChecker); +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_GRADIENT_CHECKER_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h b/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h index 4f5081f8f66..011f2a05b65 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/autodiff.h @@ -146,6 +146,7 @@ #include "ceres/jet.h" #include "ceres/internal/eigen.h" #include "ceres/internal/fixed_array.h" +#include "ceres/internal/variadic_evaluate.h" namespace ceres { namespace internal { @@ -191,134 +192,71 @@ inline void Take1stOrderPart(const int M, 
const JetT *src, T *dst) { DCHECK(src); DCHECK(dst); for (int i = 0; i < M; ++i) { - Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) = src[i].v.template segment<N>(N0); + Eigen::Map<Eigen::Matrix<T, N, 1> >(dst + N * i, N) = + src[i].v.template segment<N>(N0); } } -// This block of quasi-repeated code calls the user-supplied functor, which may -// take a variable number of arguments. This is accomplished by specializing the -// struct based on the size of the trailing parameters; parameters with 0 size -// are assumed missing. -// -// Supporting variadic functions is the primary source of complexity in the -// autodiff implementation. - -template<typename Functor, typename T, - int N0, int N1, int N2, int N3, int N4, int N5> -struct VariadicEvaluate { - static bool Call(const Functor& functor, T const *const *input, T* output) { - return functor(input[0], - input[1], - input[2], - input[3], - input[4], - input[5], - output); - } -}; - -template<typename Functor, typename T, - int N0, int N1, int N2, int N3, int N4> -struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0> { - static bool Call(const Functor& functor, T const *const *input, T* output) { - return functor(input[0], - input[1], - input[2], - input[3], - input[4], - output); - } -}; - -template<typename Functor, typename T, - int N0, int N1, int N2, int N3> -struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0> { - static bool Call(const Functor& functor, T const *const *input, T* output) { - return functor(input[0], - input[1], - input[2], - input[3], - output); - } -}; - -template<typename Functor, typename T, - int N0, int N1, int N2> -struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0> { - static bool Call(const Functor& functor, T const *const *input, T* output) { - return functor(input[0], - input[1], - input[2], - output); - } -}; - -template<typename Functor, typename T, - int N0, int N1> -struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0> { - static bool Call(const Functor& 
functor, T const *const *input, T* output) { - return functor(input[0], - input[1], - output); - } -}; - -template<typename Functor, typename T, int N0> -struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0> { - static bool Call(const Functor& functor, T const *const *input, T* output) { - return functor(input[0], - output); - } -}; - -// This is in a struct because default template parameters on a function are not -// supported in C++03 (though it is available in C++0x). N0 through N5 are the -// dimension of the input arguments to the user supplied functor. +// This is in a struct because default template parameters on a +// function are not supported in C++03 (though it is available in +// C++0x). N0 through N5 are the dimension of the input arguments to +// the user supplied functor. template <typename Functor, typename T, - int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5=0> + int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, + int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0> struct AutoDiff { static bool Differentiate(const Functor& functor, T const *const *parameters, int num_outputs, T *function_value, T **jacobians) { - typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5> JetT; - - DCHECK_GT(N0, 0) - << "Cost functions must have at least one parameter block."; - DCHECK((!N1 && !N2 && !N3 && !N4 && !N5) || - ((N1 > 0) && !N2 && !N3 && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0))) + // This block breaks the 80 column rule to keep it somewhat readable. 
+ DCHECK_GT(num_outputs, 0); + CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) << "Zero block cannot precede a non-zero block. Block sizes are " << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", " - << N3 << ", " << N4 << ", " << N5; - - DCHECK_GT(num_outputs, 0); + << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", " + << N8 << ", " << N9; + typedef Jet<T, N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9> JetT; FixedArray<JetT, (256 * 7) / sizeof(JetT)> x( - N0 + N1 + N2 + N3 + N4 + N5 + num_outputs); + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9 + num_outputs); - // It's ugly, but it works. - const int jet0 = 0; - const int jet1 = N0; - const int jet2 = N0 + N1; - const int jet3 = N0 + N1 + N2; - const int jet4 = N0 + N1 + N2 + N3; - const int jet5 = N0 + N1 + N2 + N3 + N4; - const int jet6 = N0 + N1 + N2 + N3 + N4 + N5; + // These are the positions of the respective jets in the fixed array x. 
+ const int jet0 = 0; + const int jet1 = N0; + const int jet2 = N0 + N1; + const int jet3 = N0 + N1 + N2; + const int jet4 = N0 + N1 + N2 + N3; + const int jet5 = N0 + N1 + N2 + N3 + N4; + const int jet6 = N0 + N1 + N2 + N3 + N4 + N5; + const int jet7 = N0 + N1 + N2 + N3 + N4 + N5 + N6; + const int jet8 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7; + const int jet9 = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8; - const JetT *unpacked_parameters[6] = { + const JetT *unpacked_parameters[10] = { x.get() + jet0, x.get() + jet1, x.get() + jet2, x.get() + jet3, x.get() + jet4, x.get() + jet5, + x.get() + jet6, + x.get() + jet7, + x.get() + jet8, + x.get() + jet9, }; - JetT *output = x.get() + jet6; + + JetT* output = x.get() + N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9; #define CERES_MAKE_1ST_ORDER_PERTURBATION(i) \ if (N ## i) { \ @@ -333,10 +271,14 @@ struct AutoDiff { CERES_MAKE_1ST_ORDER_PERTURBATION(3); CERES_MAKE_1ST_ORDER_PERTURBATION(4); CERES_MAKE_1ST_ORDER_PERTURBATION(5); + CERES_MAKE_1ST_ORDER_PERTURBATION(6); + CERES_MAKE_1ST_ORDER_PERTURBATION(7); + CERES_MAKE_1ST_ORDER_PERTURBATION(8); + CERES_MAKE_1ST_ORDER_PERTURBATION(9); #undef CERES_MAKE_1ST_ORDER_PERTURBATION if (!VariadicEvaluate<Functor, JetT, - N0, N1, N2, N3, N4, N5>::Call( + N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call( functor, unpacked_parameters, output)) { return false; } @@ -359,6 +301,10 @@ struct AutoDiff { CERES_TAKE_1ST_ORDER_PERTURBATION(3); CERES_TAKE_1ST_ORDER_PERTURBATION(4); CERES_TAKE_1ST_ORDER_PERTURBATION(5); + CERES_TAKE_1ST_ORDER_PERTURBATION(6); + CERES_TAKE_1ST_ORDER_PERTURBATION(7); + CERES_TAKE_1ST_ORDER_PERTURBATION(8); + CERES_TAKE_1ST_ORDER_PERTURBATION(9); #undef CERES_TAKE_1ST_ORDER_PERTURBATION return true; } diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h index ce777d22dc7..fa4a339d757 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h 
+++ b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h @@ -168,11 +168,11 @@ inline FixedArray<T, S>::FixedArray(typename FixedArray<T, S>::size_type n) array_((n <= kInlineElements ? reinterpret_cast<InnerContainer*>(inline_space_) : new InnerContainer[n])) { - DCHECK_GE(n, 0); + DCHECK_GE(n, size_t(0)); // Construct only the elements actually used. if (array_ == reinterpret_cast<InnerContainer*>(inline_space_)) { - for (int i = 0; i != size_; ++i) { + for (size_t i = 0; i != size_; ++i) { inline_space_[i].Init(); } } @@ -183,7 +183,7 @@ inline FixedArray<T, S>::~FixedArray() { if (array_ != reinterpret_cast<InnerContainer*>(inline_space_)) { delete[] array_; } else { - for (int i = 0; i != size_; ++i) { + for (size_t i = 0; i != size_; ++i) { inline_space_[i].Destroy(); } } diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h index 83ec31193e7..388cf30fe70 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h @@ -132,16 +132,16 @@ char (&ArraySizeHelper(const T (&array)[N]))[N]; // - wan 2005-11-16 // // Starting with Visual C++ 2005, WinNT.h includes ARRAYSIZE. However, -// the definition comes from the over-broad windows.h header that +// the definition comes from the over-broad windows.h header that // introduces a macro, ERROR, that conflicts with the logging framework // that Ceres uses. Instead, rename ARRAYSIZE to CERES_ARRAYSIZE. -#define CERES_ARRAYSIZE(a) \ - ((sizeof(a) / sizeof(*(a))) / \ +#define CERES_ARRAYSIZE(a) \ + ((sizeof(a) / sizeof(*(a))) / \ static_cast<size_t>(!(sizeof(a) % sizeof(*(a))))) -// Tell the compiler to warn about unused return values for functions declared -// with this macro. 
The macro should be used on function declarations -// following the argument list: +// Tell the compiler to warn about unused return values for functions +// declared with this macro. The macro should be used on function +// declarations following the argument list: // // Sprocket* AllocateSprocket() MUST_USE_RESULT; // diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h b/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h index 174d35ee2bd..7ea723d2a83 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h @@ -110,56 +110,61 @@ class ManualConstructor { inline Type& operator*() { return *get(); } inline const Type& operator*() const { return *get(); } + // This is needed to get around the strict aliasing warning GCC generates. + inline void* space() { + return reinterpret_cast<void*>(space_); + } + // You can pass up to four constructor arguments as arguments of Init(). 
inline void Init() { - new(space_) Type; + new(space()) Type; } template <typename T1> inline void Init(const T1& p1) { - new(space_) Type(p1); + new(space()) Type(p1); } template <typename T1, typename T2> inline void Init(const T1& p1, const T2& p2) { - new(space_) Type(p1, p2); + new(space()) Type(p1, p2); } template <typename T1, typename T2, typename T3> inline void Init(const T1& p1, const T2& p2, const T3& p3) { - new(space_) Type(p1, p2, p3); + new(space()) Type(p1, p2, p3); } template <typename T1, typename T2, typename T3, typename T4> inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4) { - new(space_) Type(p1, p2, p3, p4); + new(space()) Type(p1, p2, p3, p4); } template <typename T1, typename T2, typename T3, typename T4, typename T5> inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5) { - new(space_) Type(p1, p2, p3, p4, p5); + new(space()) Type(p1, p2, p3, p4, p5); } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6> inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6) { - new(space_) Type(p1, p2, p3, p4, p5, p6); + new(space()) Type(p1, p2, p3, p4, p5, p6); } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7> inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6, const T7& p7) { - new(space_) Type(p1, p2, p3, p4, p5, p6, p7); + new(space()) Type(p1, p2, p3, p4, p5, p6, p7); } template <typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7, typename T8> inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6, const T7& p7, const T8& p8) { - new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8); + new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8); } template <typename T1, typename T2, typename T3, typename T4, typename T5, @@ -167,7 +172,7 @@ 
class ManualConstructor { inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6, const T7& p7, const T8& p8, const T9& p9) { - new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9); + new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9); } template <typename T1, typename T2, typename T3, typename T4, typename T5, @@ -175,7 +180,7 @@ class ManualConstructor { inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6, const T7& p7, const T8& p8, const T9& p9, const T10& p10) { - new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); + new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10); } template <typename T1, typename T2, typename T3, typename T4, typename T5, @@ -184,7 +189,7 @@ class ManualConstructor { inline void Init(const T1& p1, const T2& p2, const T3& p3, const T4& p4, const T5& p5, const T6& p6, const T7& p7, const T8& p8, const T9& p9, const T10& p10, const T11& p11) { - new(space_) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11); + new(space()) Type(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11); } inline void Destroy() { diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h b/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h new file mode 100644 index 00000000000..4058366c4a1 --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/internal/numeric_diff.h @@ -0,0 +1,199 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) +// +// Finite differencing routine used by NumericDiffCostFunction. + +#ifndef CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ +#define CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ + +#include <cstring> + +#include "Eigen/Dense" +#include "ceres/cost_function.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/internal/variadic_evaluate.h" +#include "ceres/types.h" +#include "glog/logging.h" + + +namespace ceres { +namespace internal { + +// Helper templates that allow evaluation of a variadic functor or a +// CostFunction object. 
+template <typename CostFunctor, + int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8, int N9 > +bool EvaluateImpl(const CostFunctor* functor, + double const* const* parameters, + double* residuals, + const void* /* NOT USED */) { + return VariadicEvaluate<CostFunctor, + double, + N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>::Call( + *functor, + parameters, + residuals); +} + +template <typename CostFunctor, + int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8, int N9 > +bool EvaluateImpl(const CostFunctor* functor, + double const* const* parameters, + double* residuals, + const CostFunction* /* NOT USED */) { + return functor->Evaluate(parameters, residuals, NULL); +} + +// This is split from the main class because C++ doesn't allow partial template +// specializations for member functions. The alternative is to repeat the main +// class for differing numbers of parameters, which is also unfortunate. +template <typename CostFunctor, + NumericDiffMethod kMethod, + int kNumResiduals, + int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8, int N9, + int kParameterBlock, + int kParameterBlockSize> +struct NumericDiff { + // Mutates parameters but must restore them before return. + static bool EvaluateJacobianForParameterBlock( + const CostFunctor* functor, + double const* residuals_at_eval_point, + const double relative_step_size, + double **parameters, + double *jacobian) { + using Eigen::Map; + using Eigen::Matrix; + using Eigen::RowMajor; + using Eigen::ColMajor; + + typedef Matrix<double, kNumResiduals, 1> ResidualVector; + typedef Matrix<double, kParameterBlockSize, 1> ParameterVector; + typedef Matrix<double, kNumResiduals, kParameterBlockSize, + (kParameterBlockSize == 1 && + kNumResiduals > 1) ? ColMajor : RowMajor> JacobianMatrix; + + + Map<JacobianMatrix> parameter_jacobian(jacobian, + kNumResiduals, + kParameterBlockSize); + + // Mutate 1 element at a time and then restore. 
+ Map<ParameterVector> x_plus_delta(parameters[kParameterBlock], + kParameterBlockSize); + ParameterVector x(x_plus_delta); + ParameterVector step_size = x.array().abs() * relative_step_size; + + // To handle cases where a parameter is exactly zero, instead use + // the mean step_size for the other dimensions. If all the + // parameters are zero, there's no good answer. Take + // relative_step_size as a guess and hope for the best. + const double fallback_step_size = + (step_size.sum() == 0) + ? relative_step_size + : step_size.sum() / step_size.rows(); + + // For each parameter in the parameter block, use finite differences to + // compute the derivative for that parameter. + for (int j = 0; j < kParameterBlockSize; ++j) { + const double delta = + (step_size(j) == 0.0) ? fallback_step_size : step_size(j); + + x_plus_delta(j) = x(j) + delta; + + double residuals[kNumResiduals]; // NOLINT + + if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( + functor, parameters, residuals, functor)) { + return false; + } + + // Compute this column of the jacobian in 3 steps: + // 1. Store residuals for the forward part. + // 2. Subtract residuals for the backward (or 0) part. + // 3. Divide out the run. + parameter_jacobian.col(j) = + Map<const ResidualVector>(residuals, kNumResiduals); + + double one_over_delta = 1.0 / delta; + if (kMethod == CENTRAL) { + // Compute the function on the other side of x(j). + x_plus_delta(j) = x(j) - delta; + + if (!EvaluateImpl<CostFunctor, N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( + functor, parameters, residuals, functor)) { + return false; + } + + parameter_jacobian.col(j) -= + Map<ResidualVector>(residuals, kNumResiduals, 1); + one_over_delta /= 2; + } else { + // Forward difference only; reuse existing residuals evaluation. + parameter_jacobian.col(j) -= + Map<const ResidualVector>(residuals_at_eval_point, kNumResiduals); + } + x_plus_delta(j) = x(j); // Restore x_plus_delta. + + // Divide out the run to get slope. 
+ parameter_jacobian.col(j) *= one_over_delta; + } + return true; + } +}; + +template <typename CostFunctor, + NumericDiffMethod kMethod, + int kNumResiduals, + int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8, int N9, + int kParameterBlock> +struct NumericDiff<CostFunctor, kMethod, kNumResiduals, + N0, N1, N2, N3, N4, N5, N6, N7, N8, N9, + kParameterBlock, 0> { + // Mutates parameters but must restore them before return. + static bool EvaluateJacobianForParameterBlock( + const CostFunctor* functor, + double const* residuals_at_eval_point, + const double relative_step_size, + double **parameters, + double *jacobian) { + LOG(FATAL) << "Control should never reach here."; + return true; + } +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_NUMERIC_DIFF_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/scoped_ptr.h b/extern/libmv/third_party/ceres/include/ceres/internal/scoped_ptr.h index 44f198b339d..5dfb551243c 100644 --- a/extern/libmv/third_party/ceres/include/ceres/internal/scoped_ptr.h +++ b/extern/libmv/third_party/ceres/include/ceres/internal/scoped_ptr.h @@ -38,6 +38,7 @@ #include <assert.h> #include <stdlib.h> #include <cstddef> +#include <algorithm> namespace ceres { namespace internal { @@ -49,18 +50,17 @@ template <class C> class scoped_array; template <class C> scoped_ptr<C> make_scoped_ptr(C *); -// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T> -// automatically deletes the pointer it holds (if any). That is, scoped_ptr<T> -// owns the T object that it points to. Like a T*, a scoped_ptr<T> may hold -// either NULL or a pointer to a T object. Also like T*, scoped_ptr<T> is -// thread-compatible, and once you dereference it, you get the threadsafety -// guarantees of T. +// A scoped_ptr<T> is like a T*, except that the destructor of +// scoped_ptr<T> automatically deletes the pointer it holds (if +// any). 
That is, scoped_ptr<T> owns the T object that it points +// to. Like a T*, a scoped_ptr<T> may hold either NULL or a pointer to +// a T object. Also like T*, scoped_ptr<T> is thread-compatible, and +// once you dereference it, you get the threadsafety guarantees of T. // // The size of a scoped_ptr is small: sizeof(scoped_ptr<C>) == sizeof(C*) template <class C> class scoped_ptr { public: - // The element type typedef C element_type; @@ -193,7 +193,6 @@ scoped_ptr<C> make_scoped_ptr(C *p) { template <class C> class scoped_array { public: - // The element type typedef C element_type; diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/variadic_evaluate.h b/extern/libmv/third_party/ceres/include/ceres/internal/variadic_evaluate.h new file mode 100644 index 00000000000..4b1e4bdc65a --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/internal/variadic_evaluate.h @@ -0,0 +1,182 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// mierle@gmail.com (Keir Mierle) + +#ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ +#define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ + +#include <stddef.h> + +#include <glog/logging.h> +#include "ceres/jet.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" + +namespace ceres { +namespace internal { + +// This block of quasi-repeated code calls the user-supplied functor, which may +// take a variable number of arguments. This is accomplished by specializing the +// struct based on the size of the trailing parameters; parameters with 0 size +// are assumed missing. 
+template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8, int N9> +struct VariadicEvaluate { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + input[5], + input[6], + input[7], + input[8], + input[9], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7, int N8> +struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, N8, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + input[5], + input[6], + input[7], + input[8], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4, + int N5, int N6, int N7> +struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, N7, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + input[5], + input[6], + input[7], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4, + int N5, int N6> +struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, N6, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + input[5], + input[6], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4, + int N5> +struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, N5, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + input[5], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3, int N4> 
+struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, N4, 0, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + input[4], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2, int N3> +struct VariadicEvaluate<Functor, T, N0, N1, N2, N3, 0, 0, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + input[3], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1, int N2> +struct VariadicEvaluate<Functor, T, N0, N1, N2, 0, 0, 0, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + input[2], + output); + } +}; + +template<typename Functor, typename T, int N0, int N1> +struct VariadicEvaluate<Functor, T, N0, N1, 0, 0, 0, 0, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + input[1], + output); + } +}; + +template<typename Functor, typename T, int N0> +struct VariadicEvaluate<Functor, T, N0, 0, 0, 0, 0, 0, 0, 0, 0, 0> { + static bool Call(const Functor& functor, T const *const *input, T* output) { + return functor(input[0], + output); + } +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h index 29157d380f2..0dc4c96b441 100644 --- a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h +++ b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h @@ -42,6 +42,23 @@ namespace ceres { // This struct describes the state of the optimizer after each // iteration of the minimization. 
struct IterationSummary { + IterationSummary() + : iteration(0), + step_is_valid(false), + step_is_nonmonotonic(false), + step_is_successful(false), + cost(0.0), + cost_change(0.0), + gradient_max_norm(0.0), + step_norm(0.0), + eta(0.0), + step_size(0.0), + line_search_function_evaluations(0), + linear_solver_iterations(0), + iteration_time_in_seconds(0.0), + step_solver_time_in_seconds(0.0), + cumulative_time_in_seconds(0.0) {} + // Current iteration number. int32 iteration; @@ -51,7 +68,22 @@ struct IterationSummary { // Note: step_is_valid is false when iteration = 0. bool step_is_valid; - // Whether or not the algorithm made progress in this iteration. + // Step did not reduce the value of the objective function + // sufficiently, but it was accepted because of the relaxed + // acceptance criterion used by the non-monotonic trust region + // algorithm. + // + // Note: step_is_nonmonotonic is false when iteration = 0; + bool step_is_nonmonotonic; + + // Whether or not the minimizer accepted this step or not. If the + // ordinary trust region algorithm is used, this means that the + // relative reduction in the objective function value was greater + // than Solver::Options::min_relative_decrease. However, if the + // non-monotonic trust region algorithm is used + // (Solver::Options:use_nonmonotonic_steps = true), then even if the + // relative decrease is not sufficient, the algorithm may accept the + // step and the step is declared successful. // // Note: step_is_successful is false when iteration = 0. bool step_is_successful; @@ -60,8 +92,7 @@ struct IterationSummary { double cost; // Change in the value of the objective function in this - // iteration. This can be positive or negative. Negative change - // means that the step was not successful. + // iteration. This can be positive or negative. double cost_change; // Infinity norm of the gradient vector. @@ -87,6 +118,12 @@ struct IterationSummary { // ignore it. 
double eta; + // Step size computed by the line search algorithm. + double step_size; + + // Number of function evaluations used by the line search algorithm. + int line_search_function_evaluations; + // Number of iterations taken by the linear solver to solve for the // Newton step. int linear_solver_iterations; diff --git a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h index 8544e44d0bc..555bc3d073f 100644 --- a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h @@ -27,19 +27,109 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) +// sameeragarwal@google.com (Sameer Agarwal) // // Create CostFunctions as needed by the least squares framework with jacobians // computed via numeric (a.k.a. finite) differentiation. For more details see // http://en.wikipedia.org/wiki/Numerical_differentiation. // -// To get a numerically differentiated cost function, define a subclass of -// CostFunction such that the Evaluate() function ignores the jacobian -// parameter. The numeric differentiation wrapper will fill in the jacobian -// parameter if nececssary by repeatedly calling the Evaluate() function with -// small changes to the appropriate parameters, and computing the slope. For -// performance, the numeric differentiation wrapper class is templated on the -// concrete cost function, even though it could be implemented only in terms of -// the virtual CostFunction interface. +// To get a numerically differentiated cost function, you must define +// a class with an operator() (a functor) that computes the residuals. +// +// The function must write the computed value in the last argument (the only +// non-const one) and return true to indicate success.
+// +// For example, consider a scalar error e = k - x'y, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and k is a constant. The form of this error, which is the +// difference between a constant and an expression, is a common pattern in least +// squares problems. For example, the value x'y might be the model expectation +// for a series of measurements, where there is an instance of the cost function +// for each measurement k. +// +// The actual cost added to the total problem is e^2, or (k - x'y)^2; however, +// the squaring is implicitly done by the optimization framework. +// +// To write a numerically-differentiable cost function for the above model, first +// define the object +// +// class MyScalarCostFunctor { +// MyScalarCostFunctor(double k): k_(k) {} +// +// bool operator()(const double* const x, +// const double* const y, +// double* residuals) const { +// residuals[0] = k_ - x[0] * y[0] - x[1] * y[1]; +// return true; +// } +// +// private: +// double k_; +// }; +// +// Note that in the declaration of operator() the input parameters x +// and y come first, and are passed as const pointers to arrays of +// doubles. If there were three input parameters, then the third input +// parameter would come after y. The output is always the last +// parameter, and is also a pointer to an array. In the example above, +// the residual is a scalar, so only residuals[0] is set. +// +// Then given this class definition, the numerically differentiated +// cost function with central differences used for computing the +// derivative can be constructed as follows.
+// +// CostFunction* cost_function +// = new NumericDiffCostFunction<MyScalarCostFunctor, CENTRAL, 1, 2, 2>( +// new MyScalarCostFunctor(1.0)); ^ ^ ^ +// | | | | +// Finite Differencing Scheme -+ | | | +// Dimension of residual ----------+ | | +// Dimension of x --------------------+ | +// Dimension of y -----------------------+ +// +// In this example, there is usually an instance for each measurement of k. +// +// In the instantiation above, the template parameters following +// "MyScalarCostFunctor", "1, 2, 2", describe the functor as computing +// a 1-dimensional output from two arguments, both 2-dimensional. +// +// The framework can currently accommodate cost functions of up to 10 +// independent variables, and there is no limit on the dimensionality +// of each of them. +// +// The central difference method is considerably more accurate at the cost of +// twice as many function evaluations as forward difference. Consider using +// central differences to begin with, and only after that works, trying forward +// difference to improve performance. +// +// TODO(sameeragarwal): Add support for dynamic number of residuals. +// +// WARNING #1: A common beginner's error when first using +// NumericDiffCostFunction is to get the sizing wrong. In particular, +// there is a tendency to set the template parameters to (dimension of +// residual, number of parameters) instead of passing a dimension +// parameter for *every parameter*. In the example above, that would +// be <MyScalarCostFunctor, 1, 2>, which is missing the last '2' +// argument. Please be careful when setting the size parameters. +// +//////////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////////// +// +// ALTERNATE INTERFACE +// +// For a variety of reasons, including compatibility with legacy code, +// NumericDiffCostFunction can also take CostFunction objects as +// input. The following describes how.
+// +// To get a numerically differentiated cost function, define a +// subclass of CostFunction such that the Evaluate() function ignores +// the jacobian parameter. The numeric differentiation wrapper will +// fill in the jacobian parameter if necessary by repeatedly calling +// the Evaluate() function with small changes to the appropriate +// parameters, and computing the slope. For performance, the numeric +// differentiation wrapper class is templated on the concrete cost +// function, even though it could be implemented only in terms of the +// virtual CostFunction interface. // // The numerically differentiated version of a cost function for a cost function // can be constructed as follows: @@ -51,233 +141,153 @@ // where MyCostFunction has 1 residual and 2 parameter blocks with sizes 4 and 8 // respectively. Look at the tests for a more detailed example. // -// The central difference method is considerably more accurate at the cost of -// twice as many function evaluations than forward difference. Consider using -// central differences begin with, and only after that works, trying forward -// difference to improve performance. -// // TODO(keir): Characterize accuracy; mention pitfalls; provide alternatives. #ifndef CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_ #define CERES_PUBLIC_NUMERIC_DIFF_COST_FUNCTION_H_ -#include <cstring> #include <glog/logging.h> #include "Eigen/Dense" +#include "ceres/cost_function.h" +#include "ceres/internal/numeric_diff.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/sized_cost_function.h" #include "ceres/types.h" namespace ceres { -enum NumericDiffMethod { - CENTRAL, - FORWARD -}; - -// This is split from the main class because C++ doesn't allow partial template -// specializations for member functions. The alternative is to repeat the main -// class for differing numbers of parameters, which is also unfortunate.
-template <typename CostFunctionNoJacobian, - int num_residuals, - int parameter_block_size, - int parameter_block, - NumericDiffMethod method> -struct Differencer { - // Mutates parameters but must restore them before return. - static bool EvaluateJacobianForParameterBlock( - const CostFunctionNoJacobian *function, - double const* residuals_at_eval_point, - double **parameters, - double **jacobians) { - using Eigen::Map; - using Eigen::Matrix; - using Eigen::RowMajor; - using Eigen::ColMajor; - - typedef Matrix<double, num_residuals, 1> ResidualVector; - typedef Matrix<double, parameter_block_size, 1> ParameterVector; - typedef Matrix<double, num_residuals, parameter_block_size, - (parameter_block_size == 1 && - num_residuals > 1) ? ColMajor : RowMajor> JacobianMatrix; - - Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block], - num_residuals, - parameter_block_size); - - // Mutate 1 element at a time and then restore. - Map<ParameterVector> x_plus_delta(parameters[parameter_block], - parameter_block_size); - ParameterVector x(x_plus_delta); - - // TODO(keir): Pick a smarter number! In theory a good choice is sqrt(eps) * - // x, which for doubles means about 1e-8 * x. However, I have found this - // number too optimistic. This number should be exposed for users to change. - const double kRelativeStepSize = 1e-6; - - ParameterVector step_size = x.array().abs() * kRelativeStepSize; - - // To handle cases where a parameter is exactly zero, instead use the mean - // step_size for the other dimensions. - double fallback_step_size = step_size.sum() / step_size.rows(); - if (fallback_step_size == 0.0) { - // If all the parameters are zero, there's no good answer. Take - // kRelativeStepSize as a guess and hope for the best. - fallback_step_size = kRelativeStepSize; - } - - // For each parameter in the parameter block, use finite differences to - // compute the derivative for that parameter. 
- for (int j = 0; j < parameter_block_size; ++j) { - if (step_size(j) == 0.0) { - // The parameter is exactly zero, so compromise and use the mean - // step_size from the other parameters. This can break in many cases, - // but it's hard to pick a good number without problem specific - // knowledge. - step_size(j) = fallback_step_size; - } - x_plus_delta(j) = x(j) + step_size(j); - - double residuals[num_residuals]; // NOLINT - if (!function->Evaluate(parameters, residuals, NULL)) { - // Something went wrong; bail. - return false; - } - - // Compute this column of the jacobian in 3 steps: - // 1. Store residuals for the forward part. - // 2. Subtract residuals for the backward (or 0) part. - // 3. Divide out the run. - parameter_jacobian.col(j) = - Map<const ResidualVector>(residuals, num_residuals); - - double one_over_h = 1 / step_size(j); - if (method == CENTRAL) { - // Compute the function on the other side of x(j). - x_plus_delta(j) = x(j) - step_size(j); - - if (!function->Evaluate(parameters, residuals, NULL)) { - // Something went wrong; bail. - return false; - } - parameter_jacobian.col(j) -= - Map<ResidualVector>(residuals, num_residuals, 1); - one_over_h /= 2; - } else { - // Forward difference only; reuse existing residuals evaluation. - parameter_jacobian.col(j) -= - Map<const ResidualVector>(residuals_at_eval_point, num_residuals); - } - x_plus_delta(j) = x(j); // Restore x_plus_delta. - - // Divide out the run to get slope. - parameter_jacobian.col(j) *= one_over_h; - } - return true; - } -}; - -// Prevent invalid instantiations. 
-template <typename CostFunctionNoJacobian, - int num_residuals, - int parameter_block, - NumericDiffMethod method> -struct Differencer<CostFunctionNoJacobian, - num_residuals, - 0 /* parameter_block_size */, - parameter_block, - method> { - static bool EvaluateJacobianForParameterBlock( - const CostFunctionNoJacobian *function, - double const* residuals_at_eval_point, - double **parameters, - double **jacobians) { - LOG(FATAL) << "Shouldn't get here."; - return true; - } -}; - -template <typename CostFunctionNoJacobian, - NumericDiffMethod method = CENTRAL, int M = 0, - int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5 = 0> +template <typename CostFunctor, + NumericDiffMethod method = CENTRAL, + int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC + int N0 = 0, // Number of parameters in block 0. + int N1 = 0, // Number of parameters in block 1. + int N2 = 0, // Number of parameters in block 2. + int N3 = 0, // Number of parameters in block 3. + int N4 = 0, // Number of parameters in block 4. + int N5 = 0, // Number of parameters in block 5. + int N6 = 0, // Number of parameters in block 6. + int N7 = 0, // Number of parameters in block 7. + int N8 = 0, // Number of parameters in block 8. + int N9 = 0> // Number of parameters in block 9. 
class NumericDiffCostFunction - : public SizedCostFunction<M, N0, N1, N2, N3, N4, N5> { + : public SizedCostFunction<kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9> { public: - NumericDiffCostFunction(CostFunctionNoJacobian* function, - Ownership ownership) - : function_(function), ownership_(ownership) {} + NumericDiffCostFunction(CostFunctor* functor, + const double relative_step_size = 1e-6) + :functor_(functor), + ownership_(TAKE_OWNERSHIP), + relative_step_size_(relative_step_size) {} - virtual ~NumericDiffCostFunction() { + NumericDiffCostFunction(CostFunctor* functor, + Ownership ownership, + const double relative_step_size = 1e-6) + : functor_(functor), + ownership_(ownership), + relative_step_size_(relative_step_size) {} + + ~NumericDiffCostFunction() { if (ownership_ != TAKE_OWNERSHIP) { - function_.release(); + functor_.release(); } } virtual bool Evaluate(double const* const* parameters, double* residuals, double** jacobians) const { + using internal::FixedArray; + using internal::NumericDiff; + + const int kNumParameters = N0 + N1 + N2 + N3 + N4 + N5 + N6 + N7 + N8 + N9; + const int kNumParameterBlocks = + (N0 > 0) + (N1 > 0) + (N2 > 0) + (N3 > 0) + (N4 > 0) + + (N5 > 0) + (N6 > 0) + (N7 > 0) + (N8 > 0) + (N9 > 0); + // Get the function value (residuals) at the the point to evaluate. - bool success = function_->Evaluate(parameters, residuals, NULL); - if (!success) { - // Something went wrong; ignore the jacobian. + if (!internal::EvaluateImpl<CostFunctor, + N0, N1, N2, N3, N4, N5, N6, N7, N8, N9>( + functor_.get(), + parameters, + residuals, + functor_.get())) { return false; } + if (!jacobians) { - // Nothing to do; just forward. return true; } // Create a copy of the parameters which will get mutated. 
-    const int kParametersSize = N0 + N1 + N2 + N3 + N4 + N5;
-    double parameters_copy[kParametersSize];
-    double *parameters_references_copy[6];
-    parameters_references_copy[0] = &parameters_copy[0];
-    parameters_references_copy[1] = &parameters_copy[0] + N0;
-    parameters_references_copy[2] = &parameters_copy[0] + N0 + N1;
-    parameters_references_copy[3] = &parameters_copy[0] + N0 + N1 + N2;
-    parameters_references_copy[4] = &parameters_copy[0] + N0 + N1 + N2 + N3;
-    parameters_references_copy[5] =
-        &parameters_copy[0] + N0 + N1 + N2 + N3 + N4;
+    FixedArray<double> parameters_copy(kNumParameters);
+    FixedArray<double*> parameters_reference_copy(kNumParameterBlocks);
+
+    parameters_reference_copy[0] = parameters_copy.get();
+    if (N1) parameters_reference_copy[1] = parameters_reference_copy[0] + N0;
+    if (N2) parameters_reference_copy[2] = parameters_reference_copy[1] + N1;
+    if (N3) parameters_reference_copy[3] = parameters_reference_copy[2] + N2;
+    if (N4) parameters_reference_copy[4] = parameters_reference_copy[3] + N3;
+    if (N5) parameters_reference_copy[5] = parameters_reference_copy[4] + N4;
+    if (N6) parameters_reference_copy[6] = parameters_reference_copy[5] + N5;
+    if (N7) parameters_reference_copy[7] = parameters_reference_copy[6] + N6;
+    if (N8) parameters_reference_copy[8] = parameters_reference_copy[7] + N7;
+    if (N9) parameters_reference_copy[9] = parameters_reference_copy[8] + N8;
+
+#define COPY_PARAMETER_BLOCK(block) \
+    if (N ## block) memcpy(parameters_reference_copy[block], \
+                           parameters[block], \
+                           sizeof(double) * N ## block);  // NOLINT
-#define COPY_PARAMETER_BLOCK(block) \
-    if (N ## block) memcpy(parameters_references_copy[block], \
-                           parameters[block], \
-                           sizeof(double) * N ## block);  // NOLINT
     COPY_PARAMETER_BLOCK(0);
     COPY_PARAMETER_BLOCK(1);
     COPY_PARAMETER_BLOCK(2);
     COPY_PARAMETER_BLOCK(3);
     COPY_PARAMETER_BLOCK(4);
     COPY_PARAMETER_BLOCK(5);
+    COPY_PARAMETER_BLOCK(6);
+    COPY_PARAMETER_BLOCK(7);
+    COPY_PARAMETER_BLOCK(8);
+    COPY_PARAMETER_BLOCK(9);
+
 #undef COPY_PARAMETER_BLOCK
 
-#define EVALUATE_JACOBIAN_FOR_BLOCK(block) \
-    if (N ## block && jacobians[block]) { \
-      if (!Differencer<CostFunctionNoJacobian,  /* NOLINT */ \
-                       M, \
-                       N ## block, \
-                       block, \
-                       method>::EvaluateJacobianForParameterBlock( \
-                           function_.get(), \
-                           residuals, \
-                           parameters_references_copy, \
-                           jacobians)) { \
-        return false; \
-      } \
+#define EVALUATE_JACOBIAN_FOR_BLOCK(block) \
+    if (N ## block && jacobians[block] != NULL) { \
+      if (!NumericDiff<CostFunctor, \
+                       method, \
+                       kNumResiduals, \
+                       N0, N1, N2, N3, N4, N5, N6, N7, N8, N9, \
+                       block, \
+                       N ## block >::EvaluateJacobianForParameterBlock( \
+                           functor_.get(), \
+                           residuals, \
+                           relative_step_size_, \
+                           parameters_reference_copy.get(), \
+                           jacobians[block])) { \
+        return false; \
+      } \
     }
+
     EVALUATE_JACOBIAN_FOR_BLOCK(0);
     EVALUATE_JACOBIAN_FOR_BLOCK(1);
     EVALUATE_JACOBIAN_FOR_BLOCK(2);
     EVALUATE_JACOBIAN_FOR_BLOCK(3);
     EVALUATE_JACOBIAN_FOR_BLOCK(4);
     EVALUATE_JACOBIAN_FOR_BLOCK(5);
+    EVALUATE_JACOBIAN_FOR_BLOCK(6);
+    EVALUATE_JACOBIAN_FOR_BLOCK(7);
+    EVALUATE_JACOBIAN_FOR_BLOCK(8);
+    EVALUATE_JACOBIAN_FOR_BLOCK(9);
+
 #undef EVALUATE_JACOBIAN_FOR_BLOCK
+
     return true;
   }
 
  private:
-  internal::scoped_ptr<CostFunctionNoJacobian> function_;
+  internal::scoped_ptr<CostFunctor> functor_;
   Ownership ownership_;
+  const double relative_step_size_;
 };
 
 }  // namespace ceres
diff --git a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h
new file mode 100644
index 00000000000..593c3718bf5
--- /dev/null
+++ b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_functor.h
@@ -0,0 +1,346 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2013 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// A wrapper class that takes a variadic functor evaluating a +// function, numerically differentiates it and makes it available as a +// templated functor so that it can be easily used as part of Ceres' +// automatic differentiation framework. 
+// +// For example: +// +// For example, let us assume that +// +// struct IntrinsicProjection +// IntrinsicProjection(const double* observations); +// bool operator()(const double* calibration, +// const double* point, +// double* residuals); +// }; +// +// is a functor that implements the projection of a point in its local +// coordinate system onto its image plane and subtracts it from the +// observed point projection. +// +// Now we would like to compose the action of this functor with the +// action of camera extrinsics, i.e., rotation and translation, which +// is given by the following templated function +// +// template<typename T> +// void RotateAndTranslatePoint(const T* rotation, +// const T* translation, +// const T* point, +// T* result); +// +// To compose the extrinsics and intrinsics, we can construct a +// CameraProjection functor as follows. +// +// struct CameraProjection { +// typedef NumericDiffFunctor<IntrinsicProjection, CENTRAL, 2, 5, 3> +// IntrinsicProjectionFunctor; +// +// CameraProjection(double* observation) { +// intrinsic_projection_.reset( +// new IntrinsicProjectionFunctor(observation)) { +// } +// +// template <typename T> +// bool operator(const T* rotation, +// const T* translation, +// const T* intrinsics, +// const T* point, +// T* residuals) const { +// T transformed_point[3]; +// RotateAndTranslatePoint(rotation, translation, point, transformed_point); +// return (*intrinsic_projection_)(intrinsics, transformed_point, residual); +// } +// +// private: +// scoped_ptr<IntrinsicProjectionFunctor> intrinsic_projection_; +// }; +// +// Here, we made the choice of using CENTRAL differences to compute +// the jacobian of IntrinsicProjection. 
+// +// Now, we are ready to construct an automatically differentiated cost +// function as +// +// CostFunction* cost_function = +// new AutoDiffCostFunction<CameraProjection, 2, 3, 3, 5>( +// new CameraProjection(observations)); +// +// cost_function now seamlessly integrates automatic differentiation +// of RotateAndTranslatePoint with a numerically differentiated +// version of IntrinsicProjection. + +#ifndef CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_ +#define CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_ + +#include "ceres/numeric_diff_cost_function.h" +#include "ceres/types.h" +#include "ceres/cost_function_to_functor.h" + +namespace ceres { + +template<typename Functor, + NumericDiffMethod kMethod = CENTRAL, + int kNumResiduals = 0, + int N0 = 0, int N1 = 0 , int N2 = 0, int N3 = 0, int N4 = 0, + int N5 = 0, int N6 = 0 , int N7 = 0, int N8 = 0, int N9 = 0> +class NumericDiffFunctor { + public: + // relative_step_size controls the step size used by the numeric + // differentiation process. + explicit NumericDiffFunctor(double relative_step_size = 1e-6) + : functor_( + new NumericDiffCostFunction<Functor, + kMethod, + kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9>(new Functor, + relative_step_size)) { + } + + NumericDiffFunctor(Functor* functor, double relative_step_size = 1e-6) + : functor_(new NumericDiffCostFunction<Functor, + kMethod, + kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9>( + functor, relative_step_size)) { + } + + bool operator()(const double* x0, double* residuals) const { + return functor_(x0, residuals); + } + + bool operator()(const double* x0, + const double* x1, + double* residuals) const { + return functor_(x0, x1, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + double* residuals) const { + return functor_(x0, x1, x2, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + double* residuals) const { + return functor_(x0, 
x1, x2, x3, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + const double* x7, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + const double* x7, + const double* x8, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, residuals); + } + + bool operator()(const double* x0, + const double* x1, + const double* x2, + const double* x3, + const double* x4, + const double* x5, + const double* x6, + const double* x7, + const double* x8, + const double* x9, + double* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, residuals); + } + + template <typename T> + bool operator()(const T* x0, T* residuals) const { + return functor_(x0, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + T* residuals) const { + return functor_(x0, x1, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + T* residuals) const { + return functor_(x0, x1, x2, 
residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + T* residuals) const { + return functor_(x0, x1, x2, x3, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + const T* x5, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + const T* x5, + const T* x6, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + const T* x5, + const T* x6, + const T* x7, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + const T* x5, + const T* x6, + const T* x7, + const T* x8, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, residuals); + } + + template <typename T> + bool operator()(const T* x0, + const T* x1, + const T* x2, + const T* x3, + const T* x4, + const T* x5, + const T* x6, + const T* x7, + const T* x8, + const T* x9, + T* residuals) const { + return functor_(x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, residuals); + } + + + private: + CostFunctionToFunctor<kNumResiduals, + N0, N1, N2, N3, N4, + N5, N6, N7, N8, N9> functor_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_NUMERIC_DIFF_FUNCTOR_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h b/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h new file mode 100644 index 
00000000000..e373d35b9d7 --- /dev/null +++ b/extern/libmv/third_party/ceres/include/ceres/ordered_groups.h @@ -0,0 +1,176 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_ORDERED_GROUPS_H_ +#define CERES_PUBLIC_ORDERED_GROUPS_H_ + +#include <map> +#include <set> +#include "ceres/internal/port.h" + +namespace ceres { + +// A class for storing and manipulating an ordered collection of +// groups/sets with the following semantics: +// +// Group ids are non-negative integer values. Elements are any type +// that can serve as a key in a map or an element of a set. +// +// An element can only belong to one group at a time. A group may +// contain an arbitrary number of elements. +// +// Groups are ordered by their group id. +template <typename T> +class OrderedGroups { + public: + // Add an element to a group. If a group with this id does not + // exist, one is created. This method can be called any number of + // times for the same element. Group ids should be non-negative + // numbers. + // + // Return value indicates if adding the element was a success. + bool AddElementToGroup(const T element, const int group) { + if (group < 0) { + return false; + } + + typename map<T, int>::const_iterator it = element_to_group_.find(element); + if (it != element_to_group_.end()) { + if (it->second == group) { + // Element is already in the right group, nothing to do. + return true; + } + + group_to_elements_[it->second].erase(element); + if (group_to_elements_[it->second].size() == 0) { + group_to_elements_.erase(it->second); + } + } + + element_to_group_[element] = group; + group_to_elements_[group].insert(element); + return true; + } + + void Clear() { + group_to_elements_.clear(); + element_to_group_.clear(); + } + + // Remove the element, no matter what group it is in. If the element + // is not a member of any group, calling this method will result in + // a crash. + // + // Return value indicates if the element was actually removed. 
+ bool Remove(const T element) { + const int current_group = GroupId(element); + if (current_group < 0) { + return false; + } + + group_to_elements_[current_group].erase(element); + + if (group_to_elements_[current_group].size() == 0) { + // If the group is empty, then get rid of it. + group_to_elements_.erase(current_group); + } + + element_to_group_.erase(element); + return true; + } + + // Reverse the order of the groups in place. + void Reverse() { + typename map<int, set<T> >::reverse_iterator it = + group_to_elements_.rbegin(); + map<int, set<T> > new_group_to_elements; + new_group_to_elements[it->first] = it->second; + + int new_group_id = it->first + 1; + for (++it; it != group_to_elements_.rend(); ++it) { + for (typename set<T>::const_iterator element_it = it->second.begin(); + element_it != it->second.end(); + ++element_it) { + element_to_group_[*element_it] = new_group_id; + } + new_group_to_elements[new_group_id] = it->second; + new_group_id++; + } + + group_to_elements_.swap(new_group_to_elements); + } + + // Return the group id for the element. If the element is not a + // member of any group, return -1. + int GroupId(const T element) const { + typename map<T, int>::const_iterator it = element_to_group_.find(element); + if (it == element_to_group_.end()) { + return -1; + } + return it->second; + } + + bool IsMember(const T element) const { + typename map<T, int>::const_iterator it = element_to_group_.find(element); + return (it != element_to_group_.end()); + } + + // This function always succeeds, i.e., implicitly there exists a + // group for every integer. + int GroupSize(const int group) const { + typename map<int, set<T> >::const_iterator it = + group_to_elements_.find(group); + return (it == group_to_elements_.end()) ? 0 : it->second.size(); + } + + int NumElements() const { + return element_to_group_.size(); + } + + // Number of groups with one or more elements. 
+ int NumGroups() const { + return group_to_elements_.size(); + } + + const map<int, set<T> >& group_to_elements() const { + return group_to_elements_; + } + + private: + map<int, set<T> > group_to_elements_; + map<T, int> element_to_group_; +}; + +// Typedef for the most commonly used version of OrderedGroups. +typedef OrderedGroups<double*> ParameterBlockOrdering; + +} // namespace ceres + +#endif // CERES_PUBLIC_ORDERED_GROUP_H_ diff --git a/extern/libmv/third_party/ceres/include/ceres/problem.h b/extern/libmv/third_party/ceres/include/ceres/problem.h index 2b08c6723e8..bccb329dc55 100644 --- a/extern/libmv/third_party/ceres/include/ceres/problem.h +++ b/extern/libmv/third_party/ceres/include/ceres/problem.h @@ -39,11 +39,12 @@ #include <set> #include <vector> -#include <glog/logging.h> #include "ceres/internal/macros.h" #include "ceres/internal/port.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/types.h" +#include "glog/logging.h" + namespace ceres { @@ -51,6 +52,7 @@ class CostFunction; class LossFunction; class LocalParameterization; class Solver; +struct CRSMatrix; namespace internal { class Preprocessor; @@ -59,10 +61,9 @@ class ParameterBlock; class ResidualBlock; } // namespace internal -// A ResidualBlockId is a handle clients can use to delete residual -// blocks after creating them. They are opaque for any purposes other -// than that. -typedef const internal::ResidualBlock* ResidualBlockId; +// A ResidualBlockId is an opaque handle clients can use to remove residual +// blocks from a Problem after adding them. +typedef internal::ResidualBlock* ResidualBlockId; // A class to represent non-linear least squares problems. 
Such // problems have a cost function that is a sum of error terms (known @@ -122,7 +123,9 @@ class Problem { Options() : cost_function_ownership(TAKE_OWNERSHIP), loss_function_ownership(TAKE_OWNERSHIP), - local_parameterization_ownership(TAKE_OWNERSHIP) {} + local_parameterization_ownership(TAKE_OWNERSHIP), + enable_fast_parameter_block_removal(false), + disable_all_safety_checks(false) {} // These flags control whether the Problem object owns the cost // functions, loss functions, and parameterizations passed into @@ -134,6 +137,29 @@ class Problem { Ownership cost_function_ownership; Ownership loss_function_ownership; Ownership local_parameterization_ownership; + + // If true, trades memory for a faster RemoveParameterBlock() operation. + // + // RemoveParameterBlock() takes time proportional to the size of the entire + // Problem. If you only remove parameter blocks from the Problem + // occassionaly, this may be acceptable. However, if you are modifying the + // Problem frequently, and have memory to spare, then flip this switch to + // make RemoveParameterBlock() take time proportional to the number of + // residual blocks that depend on it. The increase in memory usage is an + // additonal hash set per parameter block containing all the residuals that + // depend on the parameter block. + bool enable_fast_parameter_block_removal; + + // By default, Ceres performs a variety of safety checks when constructing + // the problem. There is a small but measurable performance penalty to + // these checks, typically around 5% of construction time. If you are sure + // your problem construction is correct, and 5% of the problem construction + // time is truly an overhead you want to avoid, then you can set + // disable_all_safety_checks to true. + // + // WARNING: Do not set this to true, unless you are absolutely sure of what + // you are doing. 
+ bool disable_all_safety_checks; }; // The default constructor is equivalent to the @@ -208,6 +234,27 @@ class Problem { LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3, double* x4, double* x5); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8, + double* x9); // Add a parameter block with appropriate size to the problem. // Repeated calls with the same arguments are ignored. Repeated @@ -223,6 +270,33 @@ class Problem { int size, LocalParameterization* local_parameterization); + // Remove a parameter block from the problem. The parameterization of the + // parameter block, if it exists, will persist until the deletion of the + // problem (similar to cost/loss functions in residual block removal). Any + // residual blocks that depend on the parameter are also removed, as + // described above in RemoveResidualBlock(). + // + // If Problem::Options::enable_fast_parameter_block_removal is true, then the + // removal is fast (almost constant time). Otherwise, removing a parameter + // block will incur a scan of the entire Problem object. 
+ // + // WARNING: Removing a residual or parameter block will destroy the implicit + // ordering, rendering the jacobian or residuals returned from the solver + // uninterpretable. If you depend on the evaluated jacobian, do not use + // remove! This may change in a future release. + void RemoveParameterBlock(double* values); + + // Remove a residual block from the problem. Any parameters that the residual + // block depends on are not removed. The cost and loss functions for the + // residual block will not get deleted immediately; won't happen until the + // problem itself is deleted. + // + // WARNING: Removing a residual or parameter block will destroy the implicit + // ordering, rendering the jacobian or residuals returned from the solver + // uninterpretable. If you depend on the evaluated jacobian, do not use + // remove! This may change in a future release. + void RemoveResidualBlock(ResidualBlockId residual_block); + // Hold the indicated parameter block constant during optimization. void SetParameterBlockConstant(double* values); @@ -254,6 +328,76 @@ class Problem { // sizes of all of the residual blocks. int NumResiduals() const; + // Options struct to control Problem::Evaluate. + struct EvaluateOptions { + EvaluateOptions() + : num_threads(1) { + } + + // The set of parameter blocks for which evaluation should be + // performed. This vector determines the order that parameter + // blocks occur in the gradient vector and in the columns of the + // jacobian matrix. If parameter_blocks is empty, then it is + // assumed to be equal to vector containing ALL the parameter + // blocks. Generally speaking the parameter blocks will occur in + // the order in which they were added to the problem. But, this + // may change if the user removes any parameter blocks from the + // problem. + // + // NOTE: This vector should contain the same pointers as the ones + // used to add parameter blocks to the Problem. 
These parmeter + // block should NOT point to new memory locations. Bad things will + // happen otherwise. + vector<double*> parameter_blocks; + + // The set of residual blocks to evaluate. This vector determines + // the order in which the residuals occur, and how the rows of the + // jacobian are ordered. If residual_blocks is empty, then it is + // assumed to be equal to the vector containing all the residual + // blocks. If this vector is empty, then it is assumed to be equal + // to a vector containing ALL the residual blocks. Generally + // speaking the residual blocks will occur in the order in which + // they were added to the problem. But, this may change if the + // user removes any residual blocks from the problem. + vector<ResidualBlockId> residual_blocks; + int num_threads; + }; + + // Evaluate Problem. Any of the output pointers can be NULL. Which + // residual blocks and parameter blocks are used is controlled by + // the EvaluateOptions struct above. + // + // Note 1: The evaluation will use the values stored in the memory + // locations pointed to by the parameter block pointers used at the + // time of the construction of the problem. i.e., + // + // Problem problem; + // double x = 1; + // problem.Add(new MyCostFunction, NULL, &x); + // + // double cost = 0.0; + // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL); + // + // The cost is evaluated at x = 1. If you wish to evaluate the + // problem at x = 2, then + // + // x = 2; + // problem.Evaluate(Problem::EvaluateOptions(), &cost, NULL, NULL, NULL); + // + // is the way to do so. + // + // Note 2: If no local parameterizations are used, then the size of + // the gradient vector (and the number of columns in the jacobian) + // is the sum of the sizes of all the parameter blocks. If a + // parameter block has a local parameterization, then it contributes + // "LocalSize" entries to the gradient vecto (and the number of + // columns in the jacobian). 
+ bool Evaluate(const EvaluateOptions& options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian); + private: friend class Solver; internal::scoped_ptr<internal::ProblemImpl> problem_impl_; diff --git a/extern/libmv/third_party/ceres/include/ceres/rotation.h b/extern/libmv/third_party/ceres/include/ceres/rotation.h index 0d8a390d5d1..ffac4f1dc0c 100644 --- a/extern/libmv/third_party/ceres/include/ceres/rotation.h +++ b/extern/libmv/third_party/ceres/include/ceres/rotation.h @@ -51,6 +51,31 @@ namespace ceres { +// Trivial wrapper to index linear arrays as matrices, given a fixed +// column and row stride. When an array "T* array" is wrapped by a +// +// (const) MatrixAdapter<T, row_stride, col_stride> M" +// +// the expression M(i, j) is equivalent to +// +// arrary[i * row_stride + j * col_stride] +// +// Conversion functions to and from rotation matrices accept +// MatrixAdapters to permit using row-major and column-major layouts, +// and rotation matrices embedded in larger matrices (such as a 3x4 +// projection matrix). +template <typename T, int row_stride, int col_stride> +struct MatrixAdapter; + +// Convenience functions to create a MatrixAdapter that treats the +// array pointed to by "pointer" as a 3x3 (contiguous) column-major or +// row-major matrix. +template <typename T> +MatrixAdapter<T, 1, 3> ColumnMajorAdapter3x3(T* pointer); + +template <typename T> +MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer); + // Convert a value in combined axis-angle representation to a quaternion. // The value angle_axis is a triple whose norm is an angle in radians, // and whose direction is aligned with the axis of rotation, @@ -73,9 +98,20 @@ void QuaternionToAngleAxis(T const* quaternion, T* angle_axis); // axis-angle rotation representations. Templated for use with // autodifferentiation. 
template <typename T> -void RotationMatrixToAngleAxis(T const * R, T * angle_axis); +void RotationMatrixToAngleAxis(T const* R, T* angle_axis); + +template <typename T, int row_stride, int col_stride> +void RotationMatrixToAngleAxis( + const MatrixAdapter<const T, row_stride, col_stride>& R, + T* angle_axis); + template <typename T> -void AngleAxisToRotationMatrix(T const * angle_axis, T * R); +void AngleAxisToRotationMatrix(T const* angle_axis, T* R); + +template <typename T, int row_stride, int col_stride> +void AngleAxisToRotationMatrix( + T const* angle_axis, + const MatrixAdapter<T, row_stride, col_stride>& R); // Conversions between 3x3 rotation matrix (in row major order) and // Euler angle (in degrees) rotation representations. @@ -86,6 +122,11 @@ void AngleAxisToRotationMatrix(T const * angle_axis, T * R); template <typename T> void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R); +template <typename T, int row_stride, int col_stride> +void EulerAnglesToRotationMatrix( + const T* euler, + const MatrixAdapter<T, row_stride, col_stride>& R); + // Convert a 4-vector to a 3x3 scaled rotation matrix. // // The choice of rotation is such that the quaternion [1 0 0 0] goes to an @@ -108,11 +149,21 @@ void EulerAnglesToRotationMatrix(const T* euler, int row_stride, T* R); template <typename T> inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]); +template <typename T, int row_stride, int col_stride> inline +void QuaternionToScaledRotation( + const T q[4], + const MatrixAdapter<T, row_stride, col_stride>& R); + // Same as above except that the rotation matrix is normalized by the // Frobenius norm, so that R * R' = I (and det(R) = 1). 
template <typename T> inline void QuaternionToRotation(const T q[4], T R[3 * 3]); +template <typename T, int row_stride, int col_stride> inline +void QuaternionToRotation( + const T q[4], + const MatrixAdapter<T, row_stride, col_stride>& R); + // Rotates a point pt by a quaternion q: // // result = R(q) * pt @@ -146,6 +197,28 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]); // --- IMPLEMENTATION +template<typename T, int row_stride, int col_stride> +struct MatrixAdapter { + T* pointer_; + explicit MatrixAdapter(T* pointer) + : pointer_(pointer) + {} + + T& operator()(int r, int c) const { + return pointer_[r * row_stride + c * col_stride]; + } +}; + +template <typename T> +MatrixAdapter<T, 1, 3> ColumnMajorAdapter3x3(T* pointer) { + return MatrixAdapter<T, 1, 3>(pointer); +} + +template <typename T> +MatrixAdapter<T, 3, 1> RowMajorAdapter3x3(T* pointer) { + return MatrixAdapter<T, 3, 1>(pointer); +} + template<typename T> inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) { const T& a0 = angle_axis[0]; @@ -228,17 +301,24 @@ inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) { // to not perform division by a small number. template <typename T> inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) { + RotationMatrixToAngleAxis(ColumnMajorAdapter3x3(R), angle_axis); +} + +template <typename T, int row_stride, int col_stride> +void RotationMatrixToAngleAxis( + const MatrixAdapter<const T, row_stride, col_stride>& R, + T * angle_axis) { // x = k * 2 * sin(theta), where k is the axis of rotation. - angle_axis[0] = R[5] - R[7]; - angle_axis[1] = R[6] - R[2]; - angle_axis[2] = R[1] - R[3]; + angle_axis[0] = R(2, 1) - R(1, 2); + angle_axis[1] = R(0, 2) - R(2, 0); + angle_axis[2] = R(1, 0) - R(0, 1); static const T kOne = T(1.0); static const T kTwo = T(2.0); // Since the right hand side may give numbers just above 1.0 or // below -1.0 leading to atan misbehaving, we threshold. 
- T costheta = std::min(std::max((R[0] + R[4] + R[8] - kOne) / kTwo, + T costheta = std::min(std::max((R(0, 0) + R(1, 1) + R(2, 2) - kOne) / kTwo, T(-1.0)), kOne); @@ -296,7 +376,7 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) { // with the sign of sin(theta). If they are the same, then // angle_axis[i] should be positive, otherwise negative. for (int i = 0; i < 3; ++i) { - angle_axis[i] = theta * sqrt((R[i*4] - costheta) * inv_one_minus_costheta); + angle_axis[i] = theta * sqrt((R(i, i) - costheta) * inv_one_minus_costheta); if (((sintheta < 0.0) && (angle_axis[i] > 0.0)) || ((sintheta > 0.0) && (angle_axis[i] < 0.0))) { angle_axis[i] = -angle_axis[i]; @@ -306,6 +386,13 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) { template <typename T> inline void AngleAxisToRotationMatrix(const T * angle_axis, T * R) { + AngleAxisToRotationMatrix(angle_axis, ColumnMajorAdapter3x3(R)); +} + +template <typename T, int row_stride, int col_stride> +void AngleAxisToRotationMatrix( + const T * angle_axis, + const MatrixAdapter<T, row_stride, col_stride>& R) { static const T kOne = T(1.0); const T theta2 = DotProduct(angle_axis, angle_axis); if (theta2 > 0.0) { @@ -320,33 +407,41 @@ inline void AngleAxisToRotationMatrix(const T * angle_axis, T * R) { const T costheta = cos(theta); const T sintheta = sin(theta); - R[0] = costheta + wx*wx*(kOne - costheta); - R[1] = wz*sintheta + wx*wy*(kOne - costheta); - R[2] = -wy*sintheta + wx*wz*(kOne - costheta); - R[3] = wx*wy*(kOne - costheta) - wz*sintheta; - R[4] = costheta + wy*wy*(kOne - costheta); - R[5] = wx*sintheta + wy*wz*(kOne - costheta); - R[6] = wy*sintheta + wx*wz*(kOne - costheta); - R[7] = -wx*sintheta + wy*wz*(kOne - costheta); - R[8] = costheta + wz*wz*(kOne - costheta); + R(0, 0) = costheta + wx*wx*(kOne - costheta); + R(1, 0) = wz*sintheta + wx*wy*(kOne - costheta); + R(2, 0) = -wy*sintheta + wx*wz*(kOne - costheta); + R(0, 1) = wx*wy*(kOne - costheta) - wz*sintheta; + R(1, 1) 
= costheta + wy*wy*(kOne - costheta); + R(2, 1) = wx*sintheta + wy*wz*(kOne - costheta); + R(0, 2) = wy*sintheta + wx*wz*(kOne - costheta); + R(1, 2) = -wx*sintheta + wy*wz*(kOne - costheta); + R(2, 2) = costheta + wz*wz*(kOne - costheta); } else { // At zero, we switch to using the first order Taylor expansion. - R[0] = kOne; - R[1] = -angle_axis[2]; - R[2] = angle_axis[1]; - R[3] = angle_axis[2]; - R[4] = kOne; - R[5] = -angle_axis[0]; - R[6] = -angle_axis[1]; - R[7] = angle_axis[0]; - R[8] = kOne; + R(0, 0) = kOne; + R(1, 0) = -angle_axis[2]; + R(2, 0) = angle_axis[1]; + R(0, 1) = angle_axis[2]; + R(1, 1) = kOne; + R(2, 1) = -angle_axis[0]; + R(0, 2) = -angle_axis[1]; + R(1, 2) = angle_axis[0]; + R(2, 2) = kOne; } } template <typename T> inline void EulerAnglesToRotationMatrix(const T* euler, - const int row_stride, + const int row_stride_parameter, T* R) { + CHECK_EQ(row_stride_parameter, 3); + EulerAnglesToRotationMatrix(euler, RowMajorAdapter3x3(R)); +} + +template <typename T, int row_stride, int col_stride> +void EulerAnglesToRotationMatrix( + const T* euler, + const MatrixAdapter<T, row_stride, col_stride>& R) { const double kPi = 3.14159265358979323846; const T degrees_to_radians(kPi / 180.0); @@ -361,26 +456,28 @@ inline void EulerAnglesToRotationMatrix(const T* euler, const T c3 = cos(pitch); const T s3 = sin(pitch); - // Rows of the rotation matrix. 
- T* R1 = R; - T* R2 = R1 + row_stride; - T* R3 = R2 + row_stride; - - R1[0] = c1*c2; - R1[1] = -s1*c3 + c1*s2*s3; - R1[2] = s1*s3 + c1*s2*c3; + R(0, 0) = c1*c2; + R(0, 1) = -s1*c3 + c1*s2*s3; + R(0, 2) = s1*s3 + c1*s2*c3; - R2[0] = s1*c2; - R2[1] = c1*c3 + s1*s2*s3; - R2[2] = -c1*s3 + s1*s2*c3; + R(1, 0) = s1*c2; + R(1, 1) = c1*c3 + s1*s2*s3; + R(1, 2) = -c1*s3 + s1*s2*c3; - R3[0] = -s2; - R3[1] = c2*s3; - R3[2] = c2*c3; + R(2, 0) = -s2; + R(2, 1) = c2*s3; + R(2, 2) = c2*c3; } template <typename T> inline void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) { + QuaternionToScaledRotation(q, RowMajorAdapter3x3(R)); +} + +template <typename T, int row_stride, int col_stride> inline +void QuaternionToScaledRotation( + const T q[4], + const MatrixAdapter<T, row_stride, col_stride>& R) { // Make convenient names for elements of q. T a = q[0]; T b = q[1]; @@ -399,21 +496,29 @@ void QuaternionToScaledRotation(const T q[4], T R[3 * 3]) { T cd = c * d; T dd = d * d; - R[0] = aa + bb - cc - dd; R[1] = T(2) * (bc - ad); R[2] = T(2) * (ac + bd); // NOLINT - R[3] = T(2) * (ad + bc); R[4] = aa - bb + cc - dd; R[5] = T(2) * (cd - ab); // NOLINT - R[6] = T(2) * (bd - ac); R[7] = T(2) * (ab + cd); R[8] = aa - bb - cc + dd; // NOLINT + R(0, 0) = aa + bb - cc - dd; R(0, 1) = T(2) * (bc - ad); R(0, 2) = T(2) * (ac + bd); // NOLINT + R(1, 0) = T(2) * (ad + bc); R(1, 1) = aa - bb + cc - dd; R(1, 2) = T(2) * (cd - ab); // NOLINT + R(2, 0) = T(2) * (bd - ac); R(2, 1) = T(2) * (ab + cd); R(2, 2) = aa - bb - cc + dd; // NOLINT } template <typename T> inline void QuaternionToRotation(const T q[4], T R[3 * 3]) { + QuaternionToRotation(q, RowMajorAdapter3x3(R)); +} + +template <typename T, int row_stride, int col_stride> inline +void QuaternionToRotation(const T q[4], + const MatrixAdapter<T, row_stride, col_stride>& R) { QuaternionToScaledRotation(q, R); T normalizer = q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3]; CHECK_NE(normalizer, T(0)); normalizer = T(1) / normalizer; - for (int i 
= 0; i < 9; ++i) { - R[i] *= normalizer; + for (int i = 0; i < 3; ++i) { + for (int j = 0; j < 3; ++j) { + R(i, j) *= normalizer; + } } } @@ -433,7 +538,6 @@ void UnitQuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) { result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT } - template <typename T> inline void QuaternionRotatePoint(const T q[4], const T pt[3], T result[3]) { // 'scale' is 1 / norm(q). diff --git a/extern/libmv/third_party/ceres/include/ceres/sized_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/sized_cost_function.h index 2894a9fba5c..6bfc1af31a2 100644 --- a/extern/libmv/third_party/ceres/include/ceres/sized_cost_function.h +++ b/extern/libmv/third_party/ceres/include/ceres/sized_cost_function.h @@ -45,25 +45,29 @@ namespace ceres { template<int kNumResiduals, - int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, int N5 = 0> + int N0 = 0, int N1 = 0, int N2 = 0, int N3 = 0, int N4 = 0, + int N5 = 0, int N6 = 0, int N7 = 0, int N8 = 0, int N9 = 0> class SizedCostFunction : public CostFunction { public: SizedCostFunction() { - // Sanity checking. CHECK(kNumResiduals > 0 || kNumResiduals == DYNAMIC) << "Cost functions must have at least one residual block."; - CHECK_GT(N0, 0) - << "Cost functions must have at least one parameter block."; - CHECK((!N1 && !N2 && !N3 && !N4 && !N5) || - ((N1 > 0) && !N2 && !N3 && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5) || - ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0))) + // This block breaks the 80 column rule to keep it somewhat readable. 
+ CHECK((!N1 && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && !N2 && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && !N3 && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && !N4 && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && !N5 && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && !N6 && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && !N7 && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && !N8 && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && !N9) || + ((N1 > 0) && (N2 > 0) && (N3 > 0) && (N4 > 0) && (N5 > 0) && (N6 > 0) && (N7 > 0) && (N8 > 0) && (N9 > 0))) << "Zero block cannot precede a non-zero block. Block sizes are " << "(ignore trailing 0s): " << N0 << ", " << N1 << ", " << N2 << ", " - << N3 << ", " << N4 << ", " << N5; + << N3 << ", " << N4 << ", " << N5 << ", " << N6 << ", " << N7 << ", " + << N8 << ", " << N9; set_num_residuals(kNumResiduals); @@ -75,6 +79,10 @@ class SizedCostFunction : public CostFunction { ADD_PARAMETER_BLOCK(N3); ADD_PARAMETER_BLOCK(N4); ADD_PARAMETER_BLOCK(N5); + ADD_PARAMETER_BLOCK(N6); + ADD_PARAMETER_BLOCK(N7); + ADD_PARAMETER_BLOCK(N8); + ADD_PARAMETER_BLOCK(N9); #undef ADD_PARAMETER_BLOCK } diff --git a/extern/libmv/third_party/ceres/include/ceres/solver.h b/extern/libmv/third_party/ceres/include/ceres/solver.h index 31d5e8d7987..122870c86c8 100644 --- a/extern/libmv/third_party/ceres/include/ceres/solver.h +++ b/extern/libmv/third_party/ceres/include/ceres/solver.h @@ -38,6 +38,7 @@ #include "ceres/internal/macros.h" #include "ceres/internal/port.h" #include "ceres/iteration_callback.h" +#include "ceres/ordered_groups.h" #include "ceres/types.h" namespace ceres { @@ -57,6 +58,11 @@ class Solver { 
struct Options { // Default constructor that sets up a generic sparse problem. Options() { + minimizer_type = TRUST_REGION; + line_search_direction_type = LBFGS; + line_search_type = ARMIJO; + nonlinear_conjugate_gradient_type = FLETCHER_REEVES; + max_lbfgs_rank = 20; trust_region_strategy_type = LEVENBERG_MARQUARDT; dogleg_type = TRADITIONAL_DOGLEG; use_nonmonotonic_steps = false; @@ -89,27 +95,21 @@ class Solver { #endif num_linear_solver_threads = 1; - num_eliminate_blocks = 0; - ordering_type = NATURAL; #if defined(CERES_NO_SUITESPARSE) use_block_amd = false; #else use_block_amd = true; #endif - + linear_solver_ordering = NULL; + use_inner_iterations = false; + inner_iteration_ordering = NULL; linear_solver_min_num_iterations = 1; linear_solver_max_num_iterations = 500; eta = 1e-1; jacobi_scaling = true; logging_type = PER_MINIMIZER_ITERATION; minimizer_progress_to_stdout = false; - return_initial_residuals = false; - return_initial_gradient = false; - return_initial_jacobian = false; - return_final_residuals = false; - return_final_gradient = false; - return_final_jacobian = false; lsqp_dump_directory = "/tmp"; lsqp_dump_format_type = TEXTFILE; check_gradients = false; @@ -118,8 +118,64 @@ class Solver { update_state_every_iteration = false; } + ~Options(); // Minimizer options ---------------------------------------- + // Ceres supports the two major families of optimization strategies - + // Trust Region and Line Search. + // + // 1. The line search approach first finds a descent direction + // along which the objective function will be reduced and then + // computes a step size that decides how far should move along + // that direction. The descent direction can be computed by + // various methods, such as gradient descent, Newton's method and + // Quasi-Newton method. The step size can be determined either + // exactly or inexactly. + // + // 2. 
The trust region approach approximates the objective + // function using using a model function (often a quadratic) over + // a subset of the search space known as the trust region. If the + // model function succeeds in minimizing the true objective + // function the trust region is expanded; conversely, otherwise it + // is contracted and the model optimization problem is solved + // again. + // + // Trust region methods are in some sense dual to line search methods: + // trust region methods first choose a step size (the size of the + // trust region) and then a step direction while line search methods + // first choose a step direction and then a step size. + MinimizerType minimizer_type; + + LineSearchDirectionType line_search_direction_type; + LineSearchType line_search_type; + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type; + + // The LBFGS hessian approximation is a low rank approximation to + // the inverse of the Hessian matrix. The rank of the + // approximation determines (linearly) the space and time + // complexity of using the approximation. Higher the rank, the + // better is the quality of the approximation. The increase in + // quality is however is bounded for a number of reasons. + // + // 1. The method only uses secant information and not actual + // derivatives. + // + // 2. The Hessian approximation is constrained to be positive + // definite. + // + // So increasing this rank to a large number will cost time and + // space complexity without the corresponding increase in solution + // quality. There are no hard and fast rules for choosing the + // maximum rank. The best choice usually requires some problem + // specific experimentation. + // + // For more theoretical and implementation details of the LBFGS + // method, please see: + // + // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with + // Limited Storage". Mathematics of Computation 35 (151): 773–782. 
+ int max_lbfgs_rank; + TrustRegionStrategyType trust_region_strategy_type; // Type of dogleg strategy to use. @@ -229,29 +285,76 @@ class Solver { // using this setting. int num_linear_solver_threads; - // For Schur reduction based methods, the first 0 to num blocks are - // eliminated using the Schur reduction. For example, when solving - // traditional structure from motion problems where the parameters are in - // two classes (cameras and points) then num_eliminate_blocks would be the - // number of points. - // - // This parameter is used in conjunction with the ordering. - // Applies to: Preprocessor and linear least squares solver. - int num_eliminate_blocks; - - // Internally Ceres reorders the parameter blocks to help the - // various linear solvers. This parameter allows the user to - // influence the re-ordering strategy used. For structure from - // motion problems use SCHUR, for other problems NATURAL (default) - // is a good choice. In case you wish to specify your own ordering - // scheme, for example in conjunction with num_eliminate_blocks, - // use USER. - OrderingType ordering_type; - - // The ordering of the parameter blocks. The solver pays attention - // to it if the ordering_type is set to USER and the vector is - // non-empty. - vector<double*> ordering; + // The order in which variables are eliminated in a linear solver + // can have a significant of impact on the efficiency and accuracy + // of the method. e.g., when doing sparse Cholesky factorization, + // there are matrices for which a good ordering will give a + // Cholesky factor with O(n) storage, where as a bad ordering will + // result in an completely dense factor. + // + // Ceres allows the user to provide varying amounts of hints to + // the solver about the variable elimination ordering to use. 
This + // can range from no hints, where the solver is free to decide the + // best possible ordering based on the user's choices like the + // linear solver being used, to an exact order in which the + // variables should be eliminated, and a variety of possibilities + // in between. + // + // Instances of the ParameterBlockOrdering class are used to + // communicate this information to Ceres. + // + // Formally an ordering is an ordered partitioning of the + // parameter blocks, i.e, each parameter block belongs to exactly + // one group, and each group has a unique non-negative integer + // associated with it, that determines its order in the set of + // groups. + // + // Given such an ordering, Ceres ensures that the parameter blocks in + // the lowest numbered group are eliminated first, and then the + // parmeter blocks in the next lowest numbered group and so on. Within + // each group, Ceres is free to order the parameter blocks as it + // chooses. + // + // If NULL, then all parameter blocks are assumed to be in the + // same group and the solver is free to decide the best + // ordering. + // + // e.g. Consider the linear system + // + // x + y = 3 + // 2x + 3y = 7 + // + // There are two ways in which it can be solved. First eliminating x + // from the two equations, solving for y and then back substituting + // for x, or first eliminating y, solving for x and back substituting + // for y. The user can construct three orderings here. + // + // {0: x}, {1: y} - eliminate x first. + // {0: y}, {1: x} - eliminate y first. + // {0: x, y} - Solver gets to decide the elimination order. + // + // Thus, to have Ceres determine the ordering automatically using + // heuristics, put all the variables in group 0 and to control the + // ordering for every variable, create groups 0..N-1, one per + // variable, in the desired order. 
+ // + // Bundle Adjustment + // ----------------- + // + // A particular case of interest is bundle adjustment, where the user + // has two options. The default is to not specify an ordering at all, + // the solver will see that the user wants to use a Schur type solver + // and figure out the right elimination ordering. + // + // But if the user already knows what parameter blocks are points and + // what are cameras, they can save preprocessing time by partitioning + // the parameter blocks into two groups, one for the points and one + // for the cameras, where the group containing the points has an id + // smaller than the group containing cameras. + // + // Once assigned, Solver::Options owns this pointer and will + // deallocate the memory when destroyed. + ParameterBlockOrdering* linear_solver_ordering; // By virtue of the modeling layer in Ceres being block oriented, // all the matrices used by Ceres are also block oriented. When @@ -267,6 +370,77 @@ class Solver { // sparse_linear_algebra_library = SUITE_SPARSE. bool use_block_amd; + // Some non-linear least squares problems have additional + // structure in the way the parameter blocks interact that it is + // beneficial to modify the way the trust region step is computed. + // + // e.g., consider the following regression problem + // + // y = a_1 exp(b_1 x) + a_2 exp(b_3 x^2 + c_1) + // + // Given a set of pairs{(x_i, y_i)}, the user wishes to estimate + // a_1, a_2, b_1, b_2, and c_1. + // + // Notice here that the expression on the left is linear in a_1 + // and a_2, and given any value for b_1, b_2 and c_1, it is + // possible to use linear regression to estimate the optimal + // values of a_1 and a_2. Indeed, its possible to analytically + // eliminate the variables a_1 and a_2 from the problem all + // together. 
Problems like these are known as separable least + // squares problem and the most famous algorithm for solving them + // is the Variable Projection algorithm invented by Golub & + // Pereyra. + // + // Similar structure can be found in the matrix factorization with + // missing data problem. There the corresponding algorithm is + // known as Wiberg's algorithm. + // + // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares + // Problems, SIAM Reviews, 22(3), 1980) present an analyis of + // various algorithms for solving separable non-linear least + // squares problems and refer to "Variable Projection" as + // Algorithm I in their paper. + // + // Implementing Variable Projection is tedious and expensive, and + // they present a simpler algorithm, which they refer to as + // Algorithm II, where once the Newton/Trust Region step has been + // computed for the whole problem (a_1, a_2, b_1, b_2, c_1) and + // additional optimization step is performed to estimate a_1 and + // a_2 exactly. + // + // This idea can be generalized to cases where the residual is not + // linear in a_1 and a_2, i.e., Solve for the trust region step + // for the full problem, and then use it as the starting point to + // further optimize just a_1 and a_2. For the linear case, this + // amounts to doing a single linear least squares solve. For + // non-linear problems, any method for solving the a_1 and a_2 + // optimization problems will do. The only constraint on a_1 and + // a_2 is that they do not co-occur in any residual block. + // + // This idea can be further generalized, by not just optimizing + // (a_1, a_2), but decomposing the graph corresponding to the + // Hessian matrix's sparsity structure in a collection of + // non-overlapping independent sets and optimizing each of them. + // + // Setting "use_inner_iterations" to true enables the use of this + // non-linear generalization of Ruhe & Wedin's Algorithm II. 
This + // version of Ceres has a higher iteration complexity, but also + // displays better convergence behaviour per iteration. Setting + // Solver::Options::num_threads to the maximum number possible is + // highly recommended. + bool use_inner_iterations; + + // If inner_iterations is true, then the user has two choices. + // + // 1. Let the solver heuristically decide which parameter blocks + // to optimize in each inner iteration. To do this leave + // Solver::Options::inner_iteration_ordering untouched. + // + // 2. Specify a collection of of ordered independent sets. Where + // the lower numbered groups are optimized before the higher + // number groups. Each group must be an independent set. + ParameterBlockOrdering* inner_iteration_ordering; + // Minimum number of iterations for which the linear solver should // run, even if the convergence criterion is satisfied. int linear_solver_min_num_iterations; @@ -301,14 +475,6 @@ class Solver { // is sent to STDOUT. bool minimizer_progress_to_stdout; - bool return_initial_residuals; - bool return_initial_gradient; - bool return_initial_jacobian; - - bool return_final_residuals; - bool return_final_gradient; - bool return_final_jacobian; - // List of iterations at which the optimizer should dump the // linear least squares problem to disk. Useful for testing and // benchmarking. If empty (default), no problems are dumped. @@ -398,6 +564,8 @@ class Solver { string FullReport() const; // Minimizer summary ------------------------------------------------- + MinimizerType minimizer_type; + SolverTerminationType termination_type; // If the solver did not run, or there was a failure, a @@ -414,54 +582,6 @@ class Solver { // blocks that they depend on were fixed. double fixed_cost; - // Vectors of residuals before and after the optimization. The - // entries of these vectors are in the order in which - // ResidualBlocks were added to the Problem object. 
- // - // Whether the residual vectors are populated with values is - // controlled by Solver::Options::return_initial_residuals and - // Solver::Options::return_final_residuals respectively. - vector<double> initial_residuals; - vector<double> final_residuals; - - // Gradient vectors, before and after the optimization. The rows - // are in the same order in which the ParameterBlocks were added - // to the Problem object. - // - // NOTE: Since AddResidualBlock adds ParameterBlocks to the - // Problem automatically if they do not already exist, if you wish - // to have explicit control over the ordering of the vectors, then - // use Problem::AddParameterBlock to explicitly add the - // ParameterBlocks in the order desired. - // - // Whether the vectors are populated with values is controlled by - // Solver::Options::return_initial_gradient and - // Solver::Options::return_final_gradient respectively. - vector<double> initial_gradient; - vector<double> final_gradient; - - // Jacobian matrices before and after the optimization. The rows - // of these matrices are in the same order in which the - // ResidualBlocks were added to the Problem object. The columns - // are in the same order in which the ParameterBlocks were added - // to the Problem object. - // - // NOTE: Since AddResidualBlock adds ParameterBlocks to the - // Problem automatically if they do not already exist, if you wish - // to have explicit control over the column ordering of the - // matrix, then use Problem::AddParameterBlock to explicitly add - // the ParameterBlocks in the order desired. - // - // The Jacobian matrices are stored as compressed row sparse - // matrices. Please see ceres/crs_matrix.h for more details of the - // format. - // - // Whether the Jacboan matrices are populated with values is - // controlled by Solver::Options::return_initial_jacobian and - // Solver::Options::return_final_jacobian respectively. 
- CRSMatrix initial_jacobian; - CRSMatrix final_jacobian; - vector<IterationSummary> iterations; int num_successful_steps; @@ -484,6 +604,10 @@ class Solver { // Some total of all time spent inside Ceres when Solve is called. double total_time_in_seconds; + double linear_solver_time_in_seconds; + double residual_evaluation_time_in_seconds; + double jacobian_evaluation_time_in_seconds; + // Preprocessor summary. int num_parameter_blocks; int num_parameters; @@ -507,12 +631,23 @@ class Solver { LinearSolverType linear_solver_type_given; LinearSolverType linear_solver_type_used; + vector<int> linear_solver_ordering_given; + vector<int> linear_solver_ordering_used; + PreconditionerType preconditioner_type; - OrderingType ordering_type; TrustRegionStrategyType trust_region_strategy_type; DoglegType dogleg_type; + bool inner_iterations; + SparseLinearAlgebraLibraryType sparse_linear_algebra_library; + + LineSearchDirectionType line_search_direction_type; + LineSearchType line_search_type; + int max_lbfgs_rank; + + vector<int> inner_iteration_ordering_given; + vector<int> inner_iteration_ordering_used; }; // Once a least squares problem has been built, this function takes diff --git a/extern/libmv/third_party/ceres/include/ceres/types.h b/extern/libmv/third_party/ceres/include/ceres/types.h index 3980885b53c..5512340f7b3 100644 --- a/extern/libmv/third_party/ceres/include/ceres/types.h +++ b/extern/libmv/third_party/ceres/include/ceres/types.h @@ -37,6 +37,8 @@ #ifndef CERES_PUBLIC_TYPES_H_ #define CERES_PUBLIC_TYPES_H_ +#include "ceres/internal/port.h" + namespace ceres { // Basic integer types. These typedefs are in the Ceres namespace to avoid @@ -99,8 +101,7 @@ enum PreconditionerType { JACOBI, // Block diagonal of the Schur complement. This preconditioner may - // only be used with the ITERATIVE_SCHUR solver. Requires - // SuiteSparse/CHOLMOD. + // only be used with the ITERATIVE_SCHUR solver. SCHUR_JACOBI, // Visibility clustering based preconditioners. 
@@ -143,18 +144,6 @@ enum LinearSolverTerminationType { FAILURE }; -enum OrderingType { - // The order in which the parameter blocks were defined. - NATURAL, - - // Use the ordering specificed in the vector ordering. - USER, - - // Automatically figure out the best ordering to use the schur - // complement based solver. - SCHUR -}; - // Logging options // The options get progressively noisier. enum LoggingType { @@ -162,6 +151,55 @@ enum LoggingType { PER_MINIMIZER_ITERATION }; +enum MinimizerType { + LINE_SEARCH, + TRUST_REGION +}; + +enum LineSearchDirectionType { + // Negative of the gradient. + STEEPEST_DESCENT, + + // A generalization of the Conjugate Gradient method to non-linear + // functions. The generalization can be performed in a number of + // different ways, resulting in a variety of search directions. The + // precise choice of the non-linear conjugate gradient algorithm + // used is determined by NonlinerConjuateGradientType. + NONLINEAR_CONJUGATE_GRADIENT, + + // A limited memory approximation to the inverse Hessian is + // maintained and used to compute a quasi-Newton step. + // + // For more details see + // + // Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited + // Storage". Mathematics of Computation 35 (151): 773–782. + // + // Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994). + // "Representations of Quasi-Newton Matrices and their use in + // Limited Memory Methods". Mathematical Programming 63 (4): + // 129–156. + LBFGS, +}; + +// Nonliner conjugate gradient methods are a generalization of the +// method of Conjugate Gradients for linear systems. The +// generalization can be carried out in a number of different ways +// leading to number of different rules for computing the search +// direction. Ceres provides a number of different variants. For more +// details see Numerical Optimization by Nocedal & Wright. 
+enum NonlinearConjugateGradientType { + FLETCHER_REEVES, + POLAK_RIBIRERE, + HESTENES_STIEFEL, +}; + +enum LineSearchType { + // Backtracking line search with polynomial interpolation or + // bisection. + ARMIJO, +}; + // Ceres supports different strategies for computing the trust region // step. enum TrustRegionStrategyType { @@ -296,18 +334,54 @@ enum DimensionType { DYNAMIC = -1 }; +enum NumericDiffMethod { + CENTRAL, + FORWARD +}; + const char* LinearSolverTypeToString(LinearSolverType type); +bool StringToLinearSolverType(string value, LinearSolverType* type); + const char* PreconditionerTypeToString(PreconditionerType type); +bool StringToPreconditionerType(string value, PreconditionerType* type); + const char* SparseLinearAlgebraLibraryTypeToString( SparseLinearAlgebraLibraryType type); +bool StringToSparseLinearAlgebraLibraryType( + string value, + SparseLinearAlgebraLibraryType* type); + +const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type); +bool StringToTrustRegionStrategyType(string value, + TrustRegionStrategyType* type); + +const char* DoglegTypeToString(DoglegType type); +bool StringToDoglegType(string value, DoglegType* type); + +const char* MinimizerTypeToString(MinimizerType type); +bool StringToMinimizerType(string value, MinimizerType* type); + +const char* LineSearchDirectionTypeToString(LineSearchDirectionType type); +bool StringToLineSearchDirectionType(string value, + LineSearchDirectionType* type); + +const char* LineSearchTypeToString(LineSearchType type); +bool StringToLineSearchType(string value, LineSearchType* type); + +const char* NonlinearConjugateGradientTypeToString( + NonlinearConjugateGradientType type); +bool StringToNonlinearConjugateGradientType( + string value, NonlinearConjugateGradientType* type); + const char* LinearSolverTerminationTypeToString( LinearSolverTerminationType type); -const char* OrderingTypeToString(OrderingType type); + const char* SolverTerminationTypeToString(SolverTerminationType 
type); -const char* SparseLinearAlgebraLibraryTypeToString( - SparseLinearAlgebraLibraryType type); -const char* TrustRegionStrategyTypeToString( TrustRegionStrategyType type); + bool IsSchurType(LinearSolverType type); +bool IsSparseLinearAlgebraLibraryTypeAvailable( + SparseLinearAlgebraLibraryType type); + } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/array_utils.h b/extern/libmv/third_party/ceres/internal/ceres/array_utils.h index 99cc8d8ebbf..742f439d886 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/array_utils.h +++ b/extern/libmv/third_party/ceres/internal/ceres/array_utils.h @@ -28,7 +28,7 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) // -// Utility routines for validating arrays. +// Utility routines for validating arrays. // // These are useful for detecting two common class of errors. // diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc index 474c37f7ca4..1d5f9d77ab0 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc @@ -40,10 +40,10 @@ namespace ceres { namespace internal { -BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A) +BlockJacobiPreconditioner::BlockJacobiPreconditioner( + const BlockSparseMatrixBase& A) : num_rows_(A.num_rows()), - block_structure_( - *(down_cast<const BlockSparseMatrix*>(&A)->block_structure())) { + block_structure_(*A.block_structure()) { // Calculate the amount of storage needed. 
int storage_needed = 0; for (int c = 0; c < block_structure_.cols.size(); ++c) { @@ -64,11 +64,10 @@ BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A) } } -BlockJacobiPreconditioner::~BlockJacobiPreconditioner() { -} +BlockJacobiPreconditioner::~BlockJacobiPreconditioner() {} -void BlockJacobiPreconditioner::Update(const LinearOperator& matrix, const double* D) { - const BlockSparseMatrix& A = *(down_cast<const BlockSparseMatrix*>(&matrix)); +bool BlockJacobiPreconditioner::Update(const BlockSparseMatrixBase& A, + const double* D) { const CompressedRowBlockStructure* bs = A.block_structure(); // Compute the diagonal blocks by block inner products. @@ -107,16 +106,19 @@ void BlockJacobiPreconditioner::Update(const LinearOperator& matrix, const doubl MatrixRef block(blocks_[c], size, size); if (D != NULL) { - block.diagonal() += ConstVectorRef(D + position, size).array().square().matrix(); + block.diagonal() += + ConstVectorRef(D + position, size).array().square().matrix(); } block = block.selfadjointView<Eigen::Upper>() .ldlt() .solve(Matrix::Identity(size, size)); } + return true; } -void BlockJacobiPreconditioner::RightMultiply(const double* x, double* y) const { +void BlockJacobiPreconditioner::RightMultiply(const double* x, + double* y) const { for (int c = 0; c < block_structure_.cols.size(); ++c) { const int size = block_structure_.cols[c].size; const int position = block_structure_.cols[c].position; diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.h index 91cfeddb688..ed5eebc8dc6 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.h +++ b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.h @@ -32,38 +32,33 @@ #define CERES_INTERNAL_BLOCK_JACOBI_PRECONDITIONER_H_ #include <vector> -#include "ceres/linear_operator.h" +#include "ceres/preconditioner.h" namespace 
ceres { namespace internal { -class CompressedRowBlockStructure; +class BlockSparseMatrixBase; +struct CompressedRowBlockStructure; class LinearOperator; -class SparseMatrix; -// A block Jacobi preconditioner. This is intended for use with conjugate -// gradients, or other iterative symmetric solvers. To use the preconditioner, -// create one by passing a BlockSparseMatrix as the linear operator "A" to the -// constructor. This fixes the sparsity pattern to the pattern of the matrix -// A^TA. +// A block Jacobi preconditioner. This is intended for use with +// conjugate gradients, or other iterative symmetric solvers. To use +// the preconditioner, create one by passing a BlockSparseMatrix "A" +// to the constructor. This fixes the sparsity pattern to the pattern +// of the matrix A^TA. // // Before each use of the preconditioner in a solve with conjugate gradients, // update the matrix by running Update(A, D). The values of the matrix A are // inspected to construct the preconditioner. The vector D is applied as the // D^TD diagonal term. -class BlockJacobiPreconditioner : public LinearOperator { +class BlockJacobiPreconditioner : public Preconditioner { public: // A must remain valid while the BlockJacobiPreconditioner is. - BlockJacobiPreconditioner(const LinearOperator& A); + explicit BlockJacobiPreconditioner(const BlockSparseMatrixBase& A); virtual ~BlockJacobiPreconditioner(); - // Update the preconditioner with the values found in A. The sparsity pattern - // must match that of the A passed to the constructor. D is a vector that - // must have the same number of rows as A, and is applied as a diagonal in - // addition to the block diagonals of A. - void Update(const LinearOperator& A, const double* D); - - // LinearOperator interface. 
+ // Preconditioner interface + virtual bool Update(const BlockSparseMatrixBase& A, const double* D); virtual void RightMultiply(const double* x, double* y) const; virtual void LeftMultiply(const double* x, double* y) const; virtual int num_rows() const { return num_rows_; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc index 0f95e8932b8..aedfc745f22 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc @@ -28,12 +28,12 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) -#include "glog/logging.h" #include "ceres/block_random_access_dense_matrix.h" #include <vector> #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "glog/logging.h" namespace ceres { namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc index 9ed62ce948b..f789436364a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc @@ -28,7 +28,6 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) -#include "glog/logging.h" #include "ceres/block_random_access_sparse_matrix.h" #include <algorithm> @@ -40,6 +39,7 @@ #include "ceres/mutex.h" #include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h index 2d1eb403995..5f8e4e3e5dd 100644 --- 
a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h +++ b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.h @@ -43,16 +43,16 @@ #include <vector> -#include <glog/logging.h> #include "ceres/collections_port.h" #include "ceres/graph.h" -#include "ceres/map_util.h" #include "ceres/internal/macros.h" +#include "ceres/map_util.h" +#include "glog/logging.h" namespace ceres { namespace internal { -class CanonicalViewsClusteringOptions; +struct CanonicalViewsClusteringOptions; // Compute a partitioning of the vertices of the graph using the // canonical views clustering algorithm. diff --git a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc index ccc8026f9f7..e2e799fe607 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.cc @@ -30,25 +30,28 @@ #include "ceres/cgnr_solver.h" -#include "glog/logging.h" -#include "ceres/linear_solver.h" +#include "ceres/block_jacobi_preconditioner.h" #include "ceres/cgnr_linear_operator.h" #include "ceres/conjugate_gradients_solver.h" -#include "ceres/block_jacobi_preconditioner.h" +#include "ceres/linear_solver.h" +#include "ceres/wall_time.h" +#include "glog/logging.h" namespace ceres { namespace internal { CgnrSolver::CgnrSolver(const LinearSolver::Options& options) : options_(options), - jacobi_preconditioner_(NULL) { + preconditioner_(NULL) { } -LinearSolver::Summary CgnrSolver::Solve( - LinearOperator* A, +LinearSolver::Summary CgnrSolver::SolveImpl( + BlockSparseMatrixBase* A, const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) { + EventLogger event_logger("CgnrSolver::Solve"); + // Form z = Atb. scoped_array<double> z(new double[A->num_cols()]); std::fill(z.get(), z.get() + A->num_cols(), 0.0); @@ -57,11 +60,11 @@ LinearSolver::Summary CgnrSolver::Solve( // Precondition if necessary. 
LinearSolver::PerSolveOptions cg_per_solve_options = per_solve_options; if (options_.preconditioner_type == JACOBI) { - if (jacobi_preconditioner_.get() == NULL) { - jacobi_preconditioner_.reset(new BlockJacobiPreconditioner(*A)); + if (preconditioner_.get() == NULL) { + preconditioner_.reset(new BlockJacobiPreconditioner(*A)); } - jacobi_preconditioner_->Update(*A, per_solve_options.D); - cg_per_solve_options.preconditioner = jacobi_preconditioner_.get(); + preconditioner_->Update(*A, per_solve_options.D); + cg_per_solve_options.preconditioner = preconditioner_.get(); } else if (options_.preconditioner_type != IDENTITY) { LOG(FATAL) << "CGNR only supports IDENTITY and JACOBI preconditioners."; } @@ -69,11 +72,14 @@ LinearSolver::Summary CgnrSolver::Solve( // Solve (AtA + DtD)x = z (= Atb). std::fill(x, x + A->num_cols(), 0.0); CgnrLinearOperator lhs(*A, per_solve_options.D); + event_logger.AddEvent("Setup"); + ConjugateGradientsSolver conjugate_gradient_solver(options_); - return conjugate_gradient_solver.Solve(&lhs, - z.get(), - cg_per_solve_options, - x); + LinearSolver::Summary summary = + conjugate_gradient_solver.Solve(&lhs, z.get(), cg_per_solve_options, x); + event_logger.AddEvent("Solve"); + + return summary; } } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h index 877b4c4ceea..d560a9de58d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h @@ -37,6 +37,8 @@ namespace ceres { namespace internal { +class Preconditioner; + class BlockJacobiPreconditioner; // A conjugate gradients on the normal equations solver. This directly solves @@ -46,17 +48,18 @@ class BlockJacobiPreconditioner; // // as required for solving for x in the least squares sense. Currently only // block diagonal preconditioning is supported. 
-class CgnrSolver : public LinearSolver { +class CgnrSolver : public BlockSparseMatrixBaseSolver { public: explicit CgnrSolver(const LinearSolver::Options& options); - virtual Summary Solve(LinearOperator* A, - const double* b, - const LinearSolver::PerSolveOptions& per_solve_options, - double* x); + virtual Summary SolveImpl( + BlockSparseMatrixBase* A, + const double* b, + const LinearSolver::PerSolveOptions& per_solve_options, + double* x); private: const LinearSolver::Options options_; - scoped_ptr<BlockJacobiPreconditioner> jacobi_preconditioner_; + scoped_ptr<Preconditioner> preconditioner_; CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver); }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h index c2fce9033cd..715c975e00e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h +++ b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h @@ -37,7 +37,7 @@ # include <map> # include <set> #else -# if defined(_MSC_VER) && _MSC_VER <= 1600 +# if defined(_MSC_VER) # include <unordered_map> # include <unordered_set> # else diff --git a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc index 912c4845441..bbadb772805 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc @@ -135,7 +135,8 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const { // Populate the row and column block vectors for use by block // oriented ordering algorithms. This is useful when // Solver::Options::use_block_amd = true. 
- const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks(); + const vector<ParameterBlock*>& parameter_blocks = + program_->parameter_blocks(); vector<int>& col_blocks = *(jacobian->mutable_col_blocks()); col_blocks.resize(parameter_blocks.size()); for (int i = 0; i < parameter_blocks.size(); ++i) { diff --git a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h index 6a9d82842e5..c9c904bf63c 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h +++ b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h @@ -32,17 +32,18 @@ #define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_ #include <vector> -#include <glog/logging.h> -#include "ceres/sparse_matrix.h" -#include "ceres/triplet_sparse_matrix.h" + #include "ceres/internal/eigen.h" #include "ceres/internal/macros.h" #include "ceres/internal/port.h" +#include "ceres/sparse_matrix.h" +#include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { -class CRSMatrix; +struct CRSMatrix; namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc new file mode 100644 index 00000000000..c4da987919a --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.cc @@ -0,0 +1,236 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/coordinate_descent_minimizer.h" + +#ifdef CERES_USE_OPENMP +#include <omp.h> +#endif + +#include <iterator> +#include <numeric> +#include <vector> +#include "ceres/evaluator.h" +#include "ceres/linear_solver.h" +#include "ceres/minimizer.h" +#include "ceres/ordered_groups.h" +#include "ceres/parameter_block.h" +#include "ceres/problem_impl.h" +#include "ceres/program.h" +#include "ceres/residual_block.h" +#include "ceres/solver.h" +#include "ceres/solver_impl.h" +#include "ceres/trust_region_minimizer.h" +#include "ceres/trust_region_strategy.h" + +namespace ceres { +namespace internal { + +CoordinateDescentMinimizer::~CoordinateDescentMinimizer() { +} + +bool CoordinateDescentMinimizer::Init( + const Program& program, + const ProblemImpl::ParameterMap& parameter_map, + const ParameterBlockOrdering& ordering, + string* error) { + parameter_blocks_.clear(); + independent_set_offsets_.clear(); + independent_set_offsets_.push_back(0); + + // Serialize the OrderedGroups into a vector of parameter block + // offsets for parallel access. + map<ParameterBlock*, int> parameter_block_index; + map<int, set<double*> > group_to_elements = ordering.group_to_elements(); + for (map<int, set<double*> >::const_iterator it = group_to_elements.begin(); + it != group_to_elements.end(); + ++it) { + for (set<double*>::const_iterator ptr_it = it->second.begin(); + ptr_it != it->second.end(); + ++ptr_it) { + parameter_blocks_.push_back(parameter_map.find(*ptr_it)->second); + parameter_block_index[parameter_blocks_.back()] = + parameter_blocks_.size() - 1; + } + independent_set_offsets_.push_back( + independent_set_offsets_.back() + it->second.size()); + } + + // The ordering does not have to contain all parameter blocks, so + // assign zero offsets/empty independent sets to these parameter + // blocks. 
+ const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks(); + for (int i = 0; i < parameter_blocks.size(); ++i) { + if (!ordering.IsMember(parameter_blocks[i]->mutable_user_state())) { + parameter_blocks_.push_back(parameter_blocks[i]); + independent_set_offsets_.push_back(independent_set_offsets_.back()); + } + } + + // Compute the set of residual blocks that depend on each parameter + // block. + residual_blocks_.resize(parameter_block_index.size()); + const vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); + for (int i = 0; i < residual_blocks.size(); ++i) { + ResidualBlock* residual_block = residual_blocks[i]; + const int num_parameter_blocks = residual_block->NumParameterBlocks(); + for (int j = 0; j < num_parameter_blocks; ++j) { + ParameterBlock* parameter_block = residual_block->parameter_blocks()[j]; + const map<ParameterBlock*, int>::const_iterator it = + parameter_block_index.find(parameter_block); + if (it != parameter_block_index.end()) { + residual_blocks_[it->second].push_back(residual_block); + } + } + } + + evaluator_options_.linear_solver_type = DENSE_QR; + evaluator_options_.num_eliminate_blocks = 0; + evaluator_options_.num_threads = 1; + + return true; +} + +void CoordinateDescentMinimizer::Minimize( + const Minimizer::Options& options, + double* parameters, + Solver::Summary* summary) { + // Set the state and mark all parameter blocks constant. 
+ for (int i = 0; i < parameter_blocks_.size(); ++i) { + ParameterBlock* parameter_block = parameter_blocks_[i]; + parameter_block->SetState(parameters + parameter_block->state_offset()); + parameter_block->SetConstant(); + } + + scoped_array<LinearSolver*> linear_solvers( + new LinearSolver*[options.num_threads]); + + LinearSolver::Options linear_solver_options; + linear_solver_options.type = DENSE_QR; + + for (int i = 0; i < options.num_threads; ++i) { + linear_solvers[i] = LinearSolver::Create(linear_solver_options); + } + + for (int i = 0; i < independent_set_offsets_.size() - 1; ++i) { + // No point paying the price for an OpemMP call if the set if of + // size zero. + if (independent_set_offsets_[i] == independent_set_offsets_[i + 1]) { + continue; + } + + // The parameter blocks in each independent set can be optimized + // in parallel, since they do not co-occur in any residual block. +#pragma omp parallel for num_threads(options.num_threads) + for (int j = independent_set_offsets_[i]; + j < independent_set_offsets_[i + 1]; + ++j) { +#ifdef CERES_USE_OPENMP + int thread_id = omp_get_thread_num(); +#else + int thread_id = 0; +#endif + + ParameterBlock* parameter_block = parameter_blocks_[j]; + const int old_index = parameter_block->index(); + const int old_delta_offset = parameter_block->delta_offset(); + parameter_block->SetVarying(); + parameter_block->set_index(0); + parameter_block->set_delta_offset(0); + + Program inner_program; + inner_program.mutable_parameter_blocks()->push_back(parameter_block); + *inner_program.mutable_residual_blocks() = residual_blocks_[j]; + + // TODO(sameeragarwal): Better error handling. Right now we + // assume that this is not going to lead to problems of any + // sort. Basically we should be checking for numerical failure + // of some sort. + // + // On the other hand, if the optimization is a failure, that in + // some ways is fine, since it won't change the parameters and + // we are fine. 
+ Solver::Summary inner_summary; + Solve(&inner_program, + linear_solvers[thread_id], + parameters + parameter_block->state_offset(), + &inner_summary); + + parameter_block->set_index(old_index); + parameter_block->set_delta_offset(old_delta_offset); + parameter_block->SetState(parameters + parameter_block->state_offset()); + parameter_block->SetConstant(); + } + } + + for (int i = 0; i < parameter_blocks_.size(); ++i) { + parameter_blocks_[i]->SetVarying(); + } + + for (int i = 0; i < options.num_threads; ++i) { + delete linear_solvers[i]; + } +} + +// Solve the optimization problem for one parameter block. +void CoordinateDescentMinimizer::Solve(Program* program, + LinearSolver* linear_solver, + double* parameter, + Solver::Summary* summary) { + *summary = Solver::Summary(); + summary->initial_cost = 0.0; + summary->fixed_cost = 0.0; + summary->final_cost = 0.0; + string error; + + scoped_ptr<Evaluator> evaluator( + Evaluator::Create(evaluator_options_, program, &error)); + CHECK_NOTNULL(evaluator.get()); + + scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian()); + CHECK_NOTNULL(jacobian.get()); + + TrustRegionStrategy::Options trs_options; + trs_options.linear_solver = linear_solver; + + scoped_ptr<TrustRegionStrategy>trust_region_strategy( + CHECK_NOTNULL(TrustRegionStrategy::Create(trs_options))); + + Minimizer::Options minimizer_options; + minimizer_options.evaluator = evaluator.get(); + minimizer_options.jacobian = jacobian.get(); + minimizer_options.trust_region_strategy = trust_region_strategy.get(); + + TrustRegionMinimizer minimizer; + minimizer.Minimize(minimizer_options, parameter, summary); +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.h new file mode 100644 index 00000000000..3dcf8faee59 --- /dev/null +++ 
b/extern/libmv/third_party/ceres/internal/ceres/coordinate_descent_minimizer.h @@ -0,0 +1,88 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_ +#define CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_ + +#include <vector> + +#include "ceres/evaluator.h" +#include "ceres/minimizer.h" +#include "ceres/problem_impl.h" +#include "ceres/program.h" +#include "ceres/solver.h" + +namespace ceres { +namespace internal { + +// Given a Program, and a ParameterBlockOrdering which partitions +// (non-exhaustively) the Hessian matrix into independent sets, +// perform coordinate descent on the parameter blocks in the +// ordering. The independent set structure allows for all parameter +// blocks in the same independent set to be optimized in parallel, and +// the order of the independent set determines the order in which the +// parameter block groups are optimized. +// +// The minimizer assumes that none of the parameter blocks in the +// program are constant. +class CoordinateDescentMinimizer : public Minimizer { + public: + bool Init(const Program& program, + const ProblemImpl::ParameterMap& parameter_map, + const ParameterBlockOrdering& ordering, + string* error); + + // Minimizer interface. + virtual ~CoordinateDescentMinimizer(); + virtual void Minimize(const Minimizer::Options& options, + double* parameters, + Solver::Summary* summary); + + private: + void Solve(Program* program, + LinearSolver* linear_solver, + double* parameters, + Solver::Summary* summary); + + vector<ParameterBlock*> parameter_blocks_; + vector<vector<ResidualBlock*> > residual_blocks_; + // The optimization is performed in rounds. In each round all the + // parameter blocks that form one independent set are optimized in + // parallel. This array, marks the boundaries of the independent + // sets in parameter_blocks_. 
+ vector<int> independent_set_offsets_; + + Evaluator::Options evaluator_options_; +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_COORDINATE_DESCENT_MINIMIZER_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc index eff4dff8566..c3858abd2f4 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc @@ -113,12 +113,19 @@ void Corrector::CorrectJacobian(int nrow, int ncol, double* residuals, double* jacobian) { DCHECK(residuals != NULL); DCHECK(jacobian != NULL); - ConstVectorRef r_ref(residuals, nrow); - MatrixRef j_ref(jacobian, nrow, ncol); - // Equation 11 in BANS. - j_ref = sqrt_rho1_ * (j_ref - alpha_sq_norm_ * - r_ref * (r_ref.transpose() * j_ref)); + if (nrow == 1) { + // Specialization for the case where the residual is a scalar. + VectorRef j_ref(jacobian, ncol); + j_ref *= sqrt_rho1_ * (1.0 - alpha_sq_norm_ * pow(*residuals, 2)); + } else { + ConstVectorRef r_ref(residuals, nrow); + MatrixRef j_ref(jacobian, nrow, ncol); + + // Equation 11 in BANS. 
+ j_ref = sqrt_rho1_ * (j_ref - alpha_sq_norm_ * + r_ref * (r_ref.transpose() * j_ref)); + } } } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc index ca36ce07614..19fa17cc37d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc @@ -39,7 +39,7 @@ namespace ceres { namespace internal { -CXSparse::CXSparse() : scratch_size_(0), scratch_(NULL) { +CXSparse::CXSparse() : scratch_(NULL), scratch_size_(0) { } CXSparse::~CXSparse() { @@ -116,12 +116,12 @@ cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) { return cs_compress(&tsm_wrapper); } -void CXSparse::Free(cs_di* factor) { - cs_free(factor); +void CXSparse::Free(cs_di* sparse_matrix) { + cs_di_spfree(sparse_matrix); } -void CXSparse::Free(cs_dis* factor) { - cs_sfree(factor); +void CXSparse::Free(cs_dis* symbolic_factorization) { + cs_di_sfree(symbolic_factorization); } } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h index d3b64fcd1a5..dd5eadc8da8 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h +++ b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h @@ -72,9 +72,8 @@ class CXSparse { // The returned matrix should be deallocated with Free when not used anymore. cs_dis* AnalyzeCholesky(cs_di* A); - // Deallocates the memory of a matrix obtained from AnalyzeCholesky. 
- void Free(cs_di* factor); - void Free(cs_dis* factor); + void Free(cs_di* sparse_matrix); + void Free(cs_dis* symbolic_factorization); private: // Cached scratch space diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_jacobian_writer.h b/extern/libmv/third_party/ceres/internal/ceres/dense_jacobian_writer.h index 1177b83a556..be743a8591c 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_jacobian_writer.h +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_jacobian_writer.h @@ -62,7 +62,8 @@ class DenseJacobianWriter { SparseMatrix* CreateJacobian() const { return new DenseSparseMatrix(program_->NumResiduals(), - program_->NumEffectiveParameters()); + program_->NumEffectiveParameters(), + true); } void Write(int residual_id, @@ -87,10 +88,10 @@ class DenseJacobianWriter { continue; } - int parameter_block_size = parameter_block->LocalSize(); - MatrixRef parameter_jacobian(jacobians[j], - num_residuals, - parameter_block_size); + const int parameter_block_size = parameter_block->LocalSize(); + ConstMatrixRef parameter_jacobian(jacobians[j], + num_residuals, + parameter_block_size); dense_jacobian->mutable_matrix().block( residual_offset, diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc index f6bb99abf63..a340e1664f0 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc @@ -34,10 +34,11 @@ #include "Eigen/Dense" #include "ceres/dense_sparse_matrix.h" -#include "ceres/linear_solver.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/linear_solver.h" #include "ceres/types.h" +#include "ceres/wall_time.h" namespace ceres { namespace internal { @@ -51,6 +52,8 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl( const double* b, const 
LinearSolver::PerSolveOptions& per_solve_options, double* x) { + EventLogger event_logger("DenseNormalCholeskySolver::Solve"); + const int num_rows = A->num_rows(); const int num_cols = A->num_cols(); @@ -58,6 +61,7 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl( Matrix lhs(num_cols, num_cols); lhs.setZero(); + event_logger.AddEvent("Setup"); // lhs += A'A // // Using rankUpdate instead of GEMM, exposes the fact that its the @@ -73,12 +77,13 @@ LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl( lhs += D.array().square().matrix().asDiagonal(); } - VectorRef(x, num_cols) = - lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs); - LinearSolver::Summary summary; summary.num_iterations = 1; summary.termination_type = TOLERANCE; + VectorRef(x, num_cols) = + lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs); + event_logger.AddEvent("Solve"); + return summary; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc index 2b329ee0e9c..1fb9709b42a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc @@ -34,10 +34,11 @@ #include "Eigen/Dense" #include "ceres/dense_sparse_matrix.h" -#include "ceres/linear_solver.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/linear_solver.h" #include "ceres/types.h" +#include "ceres/wall_time.h" namespace ceres { namespace internal { @@ -50,10 +51,10 @@ LinearSolver::Summary DenseQRSolver::SolveImpl( const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) { + EventLogger event_logger("DenseQRSolver::Solve"); + const int num_rows = A->num_rows(); const int num_cols = A->num_cols(); - VLOG(2) << "DenseQRSolver: " - << num_rows << " x " << num_cols << " system."; if (per_solve_options.D != NULL) { // Temporarily append a diagonal block to the A matrix, but undo 
@@ -62,18 +63,18 @@ LinearSolver::Summary DenseQRSolver::SolveImpl( } // rhs = [b;0] to account for the additional rows in the lhs. - Vector rhs(num_rows + ((per_solve_options.D != NULL) ? num_cols : 0)); - rhs.setZero(); - rhs.head(num_rows) = ConstVectorRef(b, num_rows); + const int augmented_num_rows = + num_rows + ((per_solve_options.D != NULL) ? num_cols : 0); + if (rhs_.rows() != augmented_num_rows) { + rhs_.resize(augmented_num_rows); + rhs_.setZero(); + } + rhs_.head(num_rows) = ConstVectorRef(b, num_rows); + event_logger.AddEvent("Setup"); // Solve the system. - VectorRef(x, num_cols) = A->matrix().colPivHouseholderQr().solve(rhs); - - VLOG(3) << "A:\n" << A->matrix(); - VLOG(3) << "x:\n" << VectorRef(x, num_cols); - VLOG(3) << "b:\n" << rhs; - VLOG(3) << "error: " << (A->matrix() * VectorRef(x, num_cols) - rhs).norm(); - + VectorRef(x, num_cols) = A->matrix().colPivHouseholderQr().solve(rhs_); + event_logger.AddEvent("Solve"); if (per_solve_options.D != NULL) { // Undo the modifications to the matrix A. 
@@ -86,6 +87,8 @@ LinearSolver::Summary DenseQRSolver::SolveImpl( LinearSolver::Summary summary; summary.num_iterations = 1; summary.termination_type = TOLERANCE; + + event_logger.AddEvent("TearDown"); return summary; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h index dd683a8c4ea..f78fa72c5f3 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h @@ -33,6 +33,7 @@ #define CERES_INTERNAL_DENSE_QR_SOLVER_H_ #include "ceres/linear_solver.h" +#include "ceres/internal/eigen.h" #include "ceres/internal/macros.h" namespace ceres { @@ -90,6 +91,7 @@ class DenseQRSolver: public DenseSparseMatrixSolver { double* x); const LinearSolver::Options options_; + Vector rhs_; CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver); }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc index 86730cc101b..978ac6abe15 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc @@ -47,6 +47,20 @@ DenseSparseMatrix::DenseSparseMatrix(int num_rows, int num_cols) m_.setZero(); } +DenseSparseMatrix::DenseSparseMatrix(int num_rows, + int num_cols, + bool reserve_diagonal) + : has_diagonal_appended_(false), + has_diagonal_reserved_(reserve_diagonal) { + // Allocate enough space for the diagonal. 
+ if (reserve_diagonal) { + m_.resize(num_rows + num_cols, num_cols); + } else { + m_.resize(num_rows, num_cols); + } + m_.setZero(); +} + DenseSparseMatrix::DenseSparseMatrix(const TripletSparseMatrix& m) : m_(Eigen::MatrixXd::Zero(m.num_rows(), m.num_cols())), has_diagonal_appended_(false), @@ -105,7 +119,7 @@ void DenseSparseMatrix::ScaleColumns(const double* scale) { } void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const { - *dense_matrix = m_; + *dense_matrix = m_.block(0, 0, num_rows(), num_cols()); } #ifndef CERES_NO_PROTOCOL_BUFFERS diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h index e7ad14d0ee6..1e4d499b631 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h +++ b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h @@ -57,6 +57,7 @@ class DenseSparseMatrix : public SparseMatrix { #endif DenseSparseMatrix(int num_rows, int num_cols); + DenseSparseMatrix(int num_rows, int num_cols, bool reserve_diagonal); virtual ~DenseSparseMatrix() {} diff --git a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc index 668fa54b8b8..a330ad2c7a2 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc @@ -35,7 +35,7 @@ #include "ceres/array_utils.h" #include "ceres/internal/eigen.h" #include "ceres/linear_solver.h" -#include "ceres/polynomial_solver.h" +#include "ceres/polynomial.h" #include "ceres/sparse_matrix.h" #include "ceres/trust_region_strategy.h" #include "ceres/types.h" @@ -87,7 +87,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep( // Gauss-Newton and gradient vectors are always available, only a // new interpolant need to be computed. 
For the subspace case, // the subspace and the two-dimensional model are also still valid. - switch(dogleg_type_) { + switch (dogleg_type_) { case TRADITIONAL_DOGLEG: ComputeTraditionalDoglegStep(step); break; @@ -135,7 +135,7 @@ TrustRegionStrategy::Summary DoglegStrategy::ComputeStep( summary.termination_type = linear_solver_summary.termination_type; if (linear_solver_summary.termination_type != FAILURE) { - switch(dogleg_type_) { + switch (dogleg_type_) { // Interpolate the Cauchy point and the Gauss-Newton step. case TRADITIONAL_DOGLEG: ComputeTraditionalDoglegStep(step); @@ -415,15 +415,15 @@ Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const { const double trB = subspace_B_.trace(); const double r2 = radius_ * radius_; Matrix2d B_adj; - B_adj << subspace_B_(1,1) , -subspace_B_(0,1), - -subspace_B_(1,0) , subspace_B_(0,0); + B_adj << subspace_B_(1, 1) , -subspace_B_(0, 1), + -subspace_B_(1, 0) , subspace_B_(0, 0); Vector polynomial(5); polynomial(0) = r2; polynomial(1) = 2.0 * r2 * trB; - polynomial(2) = r2 * ( trB * trB + 2.0 * detB ) - subspace_g_.squaredNorm(); - polynomial(3) = -2.0 * ( subspace_g_.transpose() * B_adj * subspace_g_ - - r2 * detB * trB ); + polynomial(2) = r2 * (trB * trB + 2.0 * detB) - subspace_g_.squaredNorm(); + polynomial(3) = -2.0 * (subspace_g_.transpose() * B_adj * subspace_g_ + - r2 * detB * trB); polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm(); return polynomial; @@ -598,7 +598,7 @@ void DoglegStrategy::StepAccepted(double step_quality) { // Reduce the regularization multiplier, in the hope that whatever // was causing the rank deficiency has gone away and we can return // to doing a pure Gauss-Newton solve. 
- mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_ ); + mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_); reuse_ = false; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h index bff1689aa4a..7131467d6ce 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h +++ b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h @@ -53,8 +53,8 @@ namespace internal { // This finds the exact optimum over the two-dimensional subspace // spanned by the two Dogleg vectors. class DoglegStrategy : public TrustRegionStrategy { -public: - DoglegStrategy(const TrustRegionStrategy::Options& options); + public: + explicit DoglegStrategy(const TrustRegionStrategy::Options& options); virtual ~DoglegStrategy() {} // TrustRegionStrategy interface diff --git a/extern/libmv/third_party/ceres/internal/ceres/evaluator.h b/extern/libmv/third_party/ceres/internal/ceres/evaluator.h index 6aa30d7b739..14a88188145 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/evaluator.h +++ b/extern/libmv/third_party/ceres/internal/ceres/evaluator.h @@ -32,14 +32,17 @@ #ifndef CERES_INTERNAL_EVALUATOR_H_ #define CERES_INTERNAL_EVALUATOR_H_ +#include <map> #include <string> #include <vector> + +#include "ceres/execution_summary.h" #include "ceres/internal/port.h" #include "ceres/types.h" namespace ceres { -class CRSMatrix; +struct CRSMatrix; namespace internal { @@ -152,6 +155,18 @@ class Evaluator { // The number of residuals in the optimization problem. virtual int NumResiduals() const = 0; + + // The following two methods return copies instead of references so + // that the base class implementation does not have to worry about + // life time issues. Further, these calls are not expected to be + // frequent or performance sensitive. 
+ virtual map<string, int> CallStatistics() const { + return map<string, int>(); + } + + virtual map<string, double> TimeStatistics() const { + return map<string, double>(); + } }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/execution_summary.h b/extern/libmv/third_party/ceres/internal/ceres/execution_summary.h new file mode 100644 index 00000000000..29bdc69ecd7 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/execution_summary.h @@ -0,0 +1,90 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_EXECUTION_SUMMARY_H_ +#define CERES_INTERNAL_EXECUTION_SUMMARY_H_ + +#include <map> +#include <string> + +#include "ceres/internal/port.h" +#include "ceres/wall_time.h" +#include "ceres/mutex.h" + +namespace ceres { +namespace internal { + +// Struct used by various objects to report statistics and other +// information about their execution. e.g., ExecutionSummary::times +// can be used for reporting times associated with various activities. 
+class ExecutionSummary { + public: + void IncrementTimeBy(const string& name, const double value) { + CeresMutexLock l(×_mutex_); + times_[name] += value; + } + + void IncrementCall(const string& name) { + CeresMutexLock l(&calls_mutex_); + calls_[name] += 1; + } + + const map<string, double>& times() const { return times_; } + const map<string, int>& calls() const { return calls_; } + + private: + Mutex times_mutex_; + map<string, double> times_; + + Mutex calls_mutex_; + map<string, int> calls_; +}; + +class ScopedExecutionTimer { + public: + ScopedExecutionTimer(const string& name, ExecutionSummary* summary) + : start_time_(WallTimeInSeconds()), + name_(name), + summary_(summary) {} + + ~ScopedExecutionTimer() { + summary_->IncrementTimeBy(name_, WallTimeInSeconds() - start_time_); + } + + private: + const double start_time_; + const string name_; + ExecutionSummary* summary_; +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_EXECUTION_SUMMARY_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/file.cc b/extern/libmv/third_party/ceres/internal/ceres/file.cc index 6fe7557246d..5226c85e6ee 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/file.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/file.cc @@ -30,8 +30,9 @@ // // Really simple file IO. 
+#include "ceres/file.h" + #include <cstdio> -#include "file.h" #include "glog/logging.h" namespace ceres { diff --git a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc index 7fb3ed7b3a8..3edf95da6e0 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc @@ -154,8 +154,8 @@ class GradientCheckingCostFunction : public CostFunction { "Jacobian for " "block %d: (%ld by %ld)) " "==========\n", k, - term_jacobians[k].rows(), - term_jacobians[k].cols()); + static_cast<long>(term_jacobians[k].rows()), + static_cast<long>(term_jacobians[k].cols())); // The funny spacing creates appropriately aligned column headers. m += " block row col user dx/dy num diff dx/dy " "abs error relative error parameter residual\n"; diff --git a/extern/libmv/third_party/ceres/internal/ceres/graph.h b/extern/libmv/third_party/ceres/internal/ceres/graph.h index 2c0f6d28e54..5f92d4d4df2 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/graph.h +++ b/extern/libmv/third_party/ceres/internal/ceres/graph.h @@ -32,12 +32,13 @@ #define CERES_INTERNAL_GRAPH_H_ #include <limits> -#include <glog/logging.h> +#include <utility> #include "ceres/integral_types.h" #include "ceres/map_util.h" #include "ceres/collections_port.h" #include "ceres/internal/macros.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { namespace internal { @@ -65,6 +66,28 @@ class Graph { AddVertex(vertex, 1.0); } + bool RemoveVertex(const Vertex& vertex) { + if (vertices_.find(vertex) == vertices_.end()) { + return false; + } + + vertices_.erase(vertex); + vertex_weights_.erase(vertex); + const HashSet<Vertex>& sinks = edges_[vertex]; + for (typename HashSet<Vertex>::const_iterator it = sinks.begin(); + it != sinks.end(); ++it) { + if (vertex < *it) { + 
edge_weights_.erase(make_pair(vertex, *it)); + } else { + edge_weights_.erase(make_pair(*it, vertex)); + } + edges_[*it].erase(vertex); + } + + edges_.erase(vertex); + return true; + } + // Add a weighted edge between the vertex1 and vertex2. Calling // AddEdge on a pair of vertices which do not exist in the graph yet // will result in undefined behavior. diff --git a/extern/libmv/third_party/ceres/internal/ceres/graph_algorithms.h b/extern/libmv/third_party/ceres/internal/ceres/graph_algorithms.h index 3b42d936336..2e6eec0e6d8 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/graph_algorithms.h +++ b/extern/libmv/third_party/ceres/internal/ceres/graph_algorithms.h @@ -33,10 +33,12 @@ #ifndef CERES_INTERNAL_GRAPH_ALGORITHMS_H_ #define CERES_INTERNAL_GRAPH_ALGORITHMS_H_ +#include <algorithm> #include <vector> -#include <glog/logging.h> +#include <utility> #include "ceres/collections_port.h" #include "ceres/graph.h" +#include "glog/logging.h" namespace ceres { namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc index 679c41f2431..cf5562ac771 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc @@ -33,6 +33,7 @@ #include <algorithm> #include <cstring> #include <vector> + #include "Eigen/Dense" #include "ceres/block_sparse_matrix.h" #include "ceres/block_structure.h" @@ -41,9 +42,12 @@ #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/linear_solver.h" +#include "ceres/preconditioner.h" +#include "ceres/schur_jacobi_preconditioner.h" #include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" #include "ceres/visibility_based_preconditioner.h" +#include "ceres/wall_time.h" #include "glog/logging.h" namespace ceres { @@ -62,12 +66,14 @@ 
LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) { + EventLogger event_logger("IterativeSchurComplementSolver::Solve"); + CHECK_NOTNULL(A->block_structure()); // Initialize a ImplicitSchurComplement object. if (schur_complement_ == NULL) { schur_complement_.reset( - new ImplicitSchurComplement(options_.num_eliminate_blocks, + new ImplicitSchurComplement(options_.elimination_groups[0], options_.preconditioner_type == JACOBI)); } schur_complement_->Init(*A, per_solve_options.D, b); @@ -89,44 +95,58 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( cg_per_solve_options.r_tolerance = per_solve_options.r_tolerance; cg_per_solve_options.q_tolerance = per_solve_options.q_tolerance; - bool is_preconditioner_good = false; + Preconditioner::Options preconditioner_options; + preconditioner_options.type = options_.preconditioner_type; + preconditioner_options.sparse_linear_algebra_library = + options_.sparse_linear_algebra_library; + preconditioner_options.use_block_amd = options_.use_block_amd; + preconditioner_options.num_threads = options_.num_threads; + preconditioner_options.row_block_size = options_.row_block_size; + preconditioner_options.e_block_size = options_.e_block_size; + preconditioner_options.f_block_size = options_.f_block_size; + preconditioner_options.elimination_groups = options_.elimination_groups; + switch (options_.preconditioner_type) { case IDENTITY: - is_preconditioner_good = true; break; case JACOBI: - // We need to strip the constness of the block_diagonal_FtF_inverse - // matrix here because the only other way to initialize the struct - // cg_solve_options would be to add a constructor to it. We know - // that the only method ever called on the preconditioner is the - // RightMultiply which is a const method so we don't need to worry - // about the object getting modified. 
- cg_per_solve_options.preconditioner = - const_cast<BlockSparseMatrix*>( - schur_complement_->block_diagonal_FtF_inverse()); - is_preconditioner_good = true; + preconditioner_.reset( + new SparseMatrixPreconditionerWrapper( + schur_complement_->block_diagonal_FtF_inverse())); break; case SCHUR_JACOBI: + if (preconditioner_.get() == NULL) { + preconditioner_.reset( + new SchurJacobiPreconditioner( + *A->block_structure(), preconditioner_options)); + } + break; case CLUSTER_JACOBI: case CLUSTER_TRIDIAGONAL: - if (visibility_based_preconditioner_.get() == NULL) { - visibility_based_preconditioner_.reset( - new VisibilityBasedPreconditioner(*A->block_structure(), options_)); + if (preconditioner_.get() == NULL) { + preconditioner_.reset( + new VisibilityBasedPreconditioner( + *A->block_structure(), preconditioner_options)); } - is_preconditioner_good = - visibility_based_preconditioner_->Update(*A, per_solve_options.D); - cg_per_solve_options.preconditioner = - visibility_based_preconditioner_.get(); break; default: LOG(FATAL) << "Unknown Preconditioner Type"; } + bool preconditioner_update_was_successful = true; + if (preconditioner_.get() != NULL) { + preconditioner_update_was_successful = + preconditioner_->Update(*A, per_solve_options.D); + cg_per_solve_options.preconditioner = preconditioner_.get(); + } + + event_logger.AddEvent("Setup"); + LinearSolver::Summary cg_summary; cg_summary.num_iterations = 0; cg_summary.termination_type = FAILURE; - if (is_preconditioner_good) { + if (preconditioner_update_was_successful) { cg_summary = cg_solver.Solve(schur_complement_.get(), schur_complement_->rhs().data(), cg_per_solve_options, @@ -138,6 +158,8 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl( } VLOG(2) << "CG Iterations : " << cg_summary.num_iterations; + + event_logger.AddEvent("Solve"); return cg_summary; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.h 
b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.h index cfeb65e1eec..f8abe04c142 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.h @@ -39,13 +39,12 @@ namespace ceres { namespace internal { -class BlockSparseMatrix; class BlockSparseMatrixBase; class ImplicitSchurComplement; -class VisibilityBasedPreconditioner; +class Preconditioner; // This class implements an iterative solver for the linear least -// squares problems that have a bi-partitte sparsity structure common +// squares problems that have a bi-partite sparsity structure common // to Structure from Motion problems. // // The algorithm used by this solver was developed in a series of @@ -70,9 +69,7 @@ class VisibilityBasedPreconditioner; // "Iterative Methods for Sparse Linear Systems". class IterativeSchurComplementSolver : public BlockSparseMatrixBaseSolver { public: - explicit IterativeSchurComplementSolver( - const LinearSolver::Options& options); - + explicit IterativeSchurComplementSolver(const LinearSolver::Options& options); virtual ~IterativeSchurComplementSolver(); private: @@ -84,8 +81,9 @@ class IterativeSchurComplementSolver : public BlockSparseMatrixBaseSolver { LinearSolver::Options options_; scoped_ptr<internal::ImplicitSchurComplement> schur_complement_; - scoped_ptr<VisibilityBasedPreconditioner> visibility_based_preconditioner_; + scoped_ptr<Preconditioner> preconditioner_; Vector reduced_linear_system_solution_; + CERES_DISALLOW_COPY_AND_ASSIGN(IterativeSchurComplementSolver); }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h index 90c21789797..344e3285422 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h +++ 
b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h @@ -43,8 +43,9 @@ namespace internal { // // http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf class LevenbergMarquardtStrategy : public TrustRegionStrategy { -public: - LevenbergMarquardtStrategy(const TrustRegionStrategy::Options& options); + public: + explicit LevenbergMarquardtStrategy( + const TrustRegionStrategy::Options& options); virtual ~LevenbergMarquardtStrategy(); // TrustRegionStrategy interface diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search.cc new file mode 100644 index 00000000000..e7508caec56 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search.cc @@ -0,0 +1,211 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/line_search.h" + +#include <glog/logging.h> +#include "ceres/fpclassify.h" +#include "ceres/evaluator.h" +#include "ceres/internal/eigen.h" +#include "ceres/polynomial.h" + + +namespace ceres { +namespace internal { +namespace { + +FunctionSample ValueSample(const double x, const double value) { + FunctionSample sample; + sample.x = x; + sample.value = value; + sample.value_is_valid = true; + return sample; +}; + +FunctionSample ValueAndGradientSample(const double x, + const double value, + const double gradient) { + FunctionSample sample; + sample.x = x; + sample.value = value; + sample.gradient = gradient; + sample.value_is_valid = true; + sample.gradient_is_valid = true; + return sample; +}; + +} // namespace + +LineSearchFunction::LineSearchFunction(Evaluator* evaluator) + : evaluator_(evaluator), + position_(evaluator->NumParameters()), + direction_(evaluator->NumEffectiveParameters()), + evaluation_point_(evaluator->NumParameters()), + scaled_direction_(evaluator->NumEffectiveParameters()), + gradient_(evaluator->NumEffectiveParameters()) { +} + +void LineSearchFunction::Init(const Vector& position, + const Vector& direction) { + position_ = position; + direction_ = direction; +} + +bool LineSearchFunction::Evaluate(const double x, double* f, double* g) { + scaled_direction_ = x * direction_; + if (!evaluator_->Plus(position_.data(), + 
scaled_direction_.data(), + evaluation_point_.data())) { + return false; + } + + if (g == NULL) { + return (evaluator_->Evaluate(evaluation_point_.data(), + f, NULL, NULL, NULL) && + IsFinite(*f)); + } + + if (!evaluator_->Evaluate(evaluation_point_.data(), + f, + NULL, + gradient_.data(), NULL)) { + return false; + } + + *g = direction_.dot(gradient_); + return IsFinite(*f) && IsFinite(*g); +} + +void ArmijoLineSearch::Search(const LineSearch::Options& options, + const double initial_step_size, + const double initial_cost, + const double initial_gradient, + Summary* summary) { + *CHECK_NOTNULL(summary) = LineSearch::Summary(); + Function* function = options.function; + + double previous_step_size = 0.0; + double previous_cost = 0.0; + double previous_gradient = 0.0; + bool previous_step_size_is_valid = false; + + double step_size = initial_step_size; + double cost = 0.0; + double gradient = 0.0; + bool step_size_is_valid = false; + + ++summary->num_evaluations; + step_size_is_valid = + function->Evaluate(step_size, + &cost, + options.interpolation_degree < 2 ? NULL : &gradient); + while (!step_size_is_valid || cost > (initial_cost + + options.sufficient_decrease + * initial_gradient + * step_size)) { + // If step_size_is_valid is not true we treat it as if the cost at + // that point is not large enough to satisfy the sufficient + // decrease condition. + + const double current_step_size = step_size; + // Backtracking search. Each iteration of this loop finds a new point + + if ((options.interpolation_degree == 0) || !step_size_is_valid) { + // Backtrack by halving the step_size; + step_size *= 0.5; + } else { + // Backtrack by interpolating the function and gradient values + // and minimizing the corresponding polynomial. 
+ + vector<FunctionSample> samples; + samples.push_back(ValueAndGradientSample(0.0, + initial_cost, + initial_gradient)); + + if (options.interpolation_degree == 1) { + // Two point interpolation using function values and the + // initial gradient. + samples.push_back(ValueSample(step_size, cost)); + + if (options.use_higher_degree_interpolation_when_possible && + summary->num_evaluations > 1 && + previous_step_size_is_valid) { + // Three point interpolation, using function values and the + // initial gradient. + samples.push_back(ValueSample(previous_step_size, previous_cost)); + } + } else { + // Two point interpolation using the function values and the gradients. + samples.push_back(ValueAndGradientSample(step_size, + cost, + gradient)); + + if (options.use_higher_degree_interpolation_when_possible && + summary->num_evaluations > 1 && + previous_step_size_is_valid) { + // Three point interpolation using the function values and + // the gradients. + samples.push_back(ValueAndGradientSample(previous_step_size, + previous_cost, + previous_gradient)); + } + } + + double min_value; + MinimizeInterpolatingPolynomial(samples, 0.0, current_step_size, + &step_size, &min_value); + step_size = + min(max(step_size, + options.min_relative_step_size_change * current_step_size), + options.max_relative_step_size_change * current_step_size); + } + + previous_step_size = current_step_size; + previous_cost = cost; + previous_gradient = gradient; + + if (fabs(initial_gradient) * step_size < options.step_size_threshold) { + LOG(WARNING) << "Line search failed: step_size too small: " << step_size; + return; + } + + ++summary->num_evaluations; + step_size_is_valid = + function->Evaluate(step_size, + &cost, + options.interpolation_degree < 2 ? 
NULL : &gradient); + } + + summary->optimal_step_size = step_size; + summary->success = true; +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search.h b/extern/libmv/third_party/ceres/internal/ceres/line_search.h new file mode 100644 index 00000000000..fccf63b598a --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search.h @@ -0,0 +1,212 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Interface for and implementation of various Line search algorithms. + +#ifndef CERES_INTERNAL_LINE_SEARCH_H_ +#define CERES_INTERNAL_LINE_SEARCH_H_ + +#include <glog/logging.h> +#include <vector> +#include "ceres/internal/eigen.h" +#include "ceres/internal/port.h" + +namespace ceres { +namespace internal { + +class Evaluator; + +// Line search is another name for a one dimensional optimization +// algorithm. The name "line search" comes from the fact one +// dimensional optimization problems that arise as subproblems of +// general multidimensional optimization problems. +// +// While finding the exact minimum of a one dimensionl function is +// hard, instances of LineSearch find a point that satisfies a +// sufficient decrease condition. Depending on the particular +// condition used, we get a variety of different line search +// algorithms, e.g., Armijo, Wolfe etc. +class LineSearch { + public: + class Function; + + struct Options { + Options() + : interpolation_degree(1), + use_higher_degree_interpolation_when_possible(false), + sufficient_decrease(1e-4), + min_relative_step_size_change(1e-3), + max_relative_step_size_change(0.6), + step_size_threshold(1e-9), + function(NULL) {} + + // TODO(sameeragarwal): Replace this with enums which are common + // across various line searches. + // + // Degree of the polynomial used to approximate the objective + // function. 
Valid values are {0, 1, 2}. + // + // For Armijo line search + // + // 0: Bisection based backtracking search. + // 1: Quadratic interpolation. + // 2: Cubic interpolation. + int interpolation_degree; + + // Usually its possible to increase the degree of the + // interpolation polynomial by storing and using an extra point. + bool use_higher_degree_interpolation_when_possible; + + // Armijo line search parameters. + + // Solving the line search problem exactly is computationally + // prohibitive. Fortunately, line search based optimization + // algorithms can still guarantee convergence if instead of an + // exact solution, the line search algorithm returns a solution + // which decreases the value of the objective function + // sufficiently. More precisely, we are looking for a step_size + // s.t. + // + // f(step_size) <= f(0) + sufficient_decrease * f'(0) * step_size + double sufficient_decrease; + + // In each iteration of the Armijo line search, + // + // new_step_size >= min_relative_step_size_change * step_size + double min_relative_step_size_change; + + // In each iteration of the Armijo line search, + // + // new_step_size <= max_relative_step_size_change * step_size + double max_relative_step_size_change; + + // If during the line search, the step_size falls below this + // value, it is truncated to zero. + double step_size_threshold; + + // The one dimensional function that the line search algorithm + // minimizes. + Function* function; + }; + + // An object used by the line search to access the function values + // and gradient of the one dimensional function being optimized. + // + // In practice, this object will provide access to the objective + // function value and the directional derivative of the underlying + // optimization problem along a specific search direction. + // + // See LineSearchFunction for an example implementation. 
+ class Function { + public: + virtual ~Function() {} + // Evaluate the line search objective + // + // f(x) = p(position + x * direction) + // + // Where, p is the objective function of the general optimization + // problem. + // + // g is the gradient f'(x) at x. + // + // f must not be null. The gradient is computed only if g is not null. + virtual bool Evaluate(double x, double* f, double* g) = 0; + }; + + // Result of the line search. + struct Summary { + Summary() + : success(false), + optimal_step_size(0.0), + num_evaluations(0) {} + + bool success; + double optimal_step_size; + int num_evaluations; + }; + + virtual ~LineSearch() {} + + // Perform the line search. + // + // initial_step_size must be a positive number. + // + // initial_cost and initial_gradient are the values and gradient of + // the function at zero. + // summary must not be null and will contain the result of the line + // search. + // + // Summary::success is true if a non-zero step size is found. + virtual void Search(const LineSearch::Options& options, + double initial_step_size, + double initial_cost, + double initial_gradient, + Summary* summary) = 0; +}; + +class LineSearchFunction : public LineSearch::Function { + public: + explicit LineSearchFunction(Evaluator* evaluator); + virtual ~LineSearchFunction() {} + void Init(const Vector& position, const Vector& direction); + virtual bool Evaluate(const double x, double* f, double* g); + + private: + Evaluator* evaluator_; + Vector position_; + Vector direction_; + + // evaluation_point = Evaluator::Plus(position_, x * direction_); + Vector evaluation_point_; + + // scaled_direction = x * direction_; + Vector scaled_direction_; + Vector gradient_; +}; + +// Backtracking and interpolation based Armijo line search. This +// implementation is based on the Armijo line search that ships in the +// minFunc package by Mark Schmidt. 
+// +// For more details: http://www.di.ens.fr/~mschmidt/Software/minFunc.html +class ArmijoLineSearch : public LineSearch { + public: + virtual ~ArmijoLineSearch() {} + virtual void Search(const LineSearch::Options& options, + double initial_step_size, + double initial_cost, + double initial_gradient, + Summary* summary); +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_LINE_SEARCH_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc new file mode 100644 index 00000000000..2f27a78301a --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.cc @@ -0,0 +1,145 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/line_search_direction.h" +#include "ceres/line_search_minimizer.h" +#include "ceres/low_rank_inverse_hessian.h" +#include "ceres/internal/eigen.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +class SteepestDescent : public LineSearchDirection { + public: + virtual ~SteepestDescent() {} + bool NextDirection(const LineSearchMinimizer::State& previous, + const LineSearchMinimizer::State& current, + Vector* search_direction) { + *search_direction = -current.gradient; + return true; + } +}; + +class NonlinearConjugateGradient : public LineSearchDirection { + public: + NonlinearConjugateGradient(const NonlinearConjugateGradientType type, + const double function_tolerance) + : type_(type), + function_tolerance_(function_tolerance) { + } + + bool NextDirection(const LineSearchMinimizer::State& previous, + const LineSearchMinimizer::State& current, + Vector* search_direction) { + double beta = 0.0; + Vector gradient_change; + switch (type_) { + case FLETCHER_REEVES: + beta = current.gradient_squared_norm / previous.gradient_squared_norm; + break; + case POLAK_RIBIRERE: + gradient_change = current.gradient - previous.gradient; + beta = (current.gradient.dot(gradient_change) / + previous.gradient_squared_norm); + break; + case HESTENES_STIEFEL: + gradient_change = current.gradient - previous.gradient; + beta = 
(current.gradient.dot(gradient_change) / + previous.search_direction.dot(gradient_change)); + break; + default: + LOG(FATAL) << "Unknown nonlinear conjugate gradient type: " << type_; + } + + *search_direction = -current.gradient + beta * previous.search_direction; + const double directional_derivative = + current. gradient.dot(*search_direction); + if (directional_derivative > -function_tolerance_) { + LOG(WARNING) << "Restarting non-linear conjugate gradients: " + << directional_derivative; + *search_direction = -current.gradient; + }; + + return true; + } + + private: + const NonlinearConjugateGradientType type_; + const double function_tolerance_; +}; + +class LBFGS : public LineSearchDirection { + public: + LBFGS(const int num_parameters, const int max_lbfgs_rank) + : low_rank_inverse_hessian_(num_parameters, max_lbfgs_rank) {} + + virtual ~LBFGS() {} + + bool NextDirection(const LineSearchMinimizer::State& previous, + const LineSearchMinimizer::State& current, + Vector* search_direction) { + low_rank_inverse_hessian_.Update( + previous.search_direction * previous.step_size, + current.gradient - previous.gradient); + search_direction->setZero(); + low_rank_inverse_hessian_.RightMultiply(current.gradient.data(), + search_direction->data()); + *search_direction *= -1.0; + return true; + } + + private: + LowRankInverseHessian low_rank_inverse_hessian_; +}; + +LineSearchDirection* +LineSearchDirection::Create(const LineSearchDirection::Options& options) { + if (options.type == STEEPEST_DESCENT) { + return new SteepestDescent; + } + + if (options.type == NONLINEAR_CONJUGATE_GRADIENT) { + return new NonlinearConjugateGradient( + options.nonlinear_conjugate_gradient_type, + options.function_tolerance); + } + + if (options.type == ceres::LBFGS) { + return new ceres::internal::LBFGS(options.num_parameters, + options.max_lbfgs_rank); + } + + LOG(ERROR) << "Unknown line search direction type: " << options.type; + return NULL; +} + +} // namespace internal +} // namespace 
ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.h index 1cf07ddb549..71063ab8414 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_direction.h @@ -26,40 +26,45 @@ // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // -// Author: moll.markus@arcor.de (Markus Moll) +// Author: sameeragarwal@google.com (Sameer Agarwal) -#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ -#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ +#ifndef CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_ +#define CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_ #include "ceres/internal/eigen.h" +#include "ceres/line_search_minimizer.h" +#include "ceres/types.h" namespace ceres { namespace internal { -// Use the companion matrix eigenvalues to determine the roots of the polynomial -// -// sum_{i=0}^N polynomial(i) x^{N-i}. -// -// This function returns true on success, false otherwise. -// Failure indicates that the polynomial is invalid (of size 0) or -// that the eigenvalues of the companion matrix could not be computed. -// On failure, a more detailed message will be written to LOG(ERROR). -// If real is not NULL, the real parts of the roots will be returned in it. -// Likewise, if imaginary is not NULL, imaginary parts will be returned in it. 
-bool FindPolynomialRoots(const Vector& polynomial, - Vector* real, - Vector* imaginary); +class LineSearchDirection { + public: + struct Options { + Options() + : num_parameters(0), + type(LBFGS), + nonlinear_conjugate_gradient_type(FLETCHER_REEVES), + function_tolerance(1e-12), + max_lbfgs_rank(20) { + } + + int num_parameters; + LineSearchDirectionType type; + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type; + double function_tolerance; + int max_lbfgs_rank; + }; + + static LineSearchDirection* Create(const Options& options); -// Evaluate the polynomial at x using the Horner scheme. -inline double EvaluatePolynomial(const Vector& polynomial, double x) { - double v = 0.0; - for (int i = 0; i < polynomial.size(); ++i) { - v = v * x + polynomial(i); - } - return v; -} + virtual ~LineSearchDirection() {} + virtual bool NextDirection(const LineSearchMinimizer::State& previous, + const LineSearchMinimizer::State& current, + Vector* search_direction) = 0; +}; } // namespace internal } // namespace ceres -#endif // CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ +#endif // CERES_INTERNAL_LINE_SEARCH_DIRECTION_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc new file mode 100644 index 00000000000..ca7d639c5ef --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.cc @@ -0,0 +1,283 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Generic loop for line search based optimization algorithms. +// +// This is primarily inpsired by the minFunc packaged written by Mark +// Schmidt. +// +// http://www.di.ens.fr/~mschmidt/Software/minFunc.html +// +// For details on the theory and implementation see "Numerical +// Optimization" by Nocedal & Wright. 
+ +#include "ceres/line_search_minimizer.h" + +#include <algorithm> +#include <cstdlib> +#include <cmath> +#include <string> +#include <vector> + +#include "Eigen/Dense" +#include "ceres/array_utils.h" +#include "ceres/evaluator.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/port.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/line_search.h" +#include "ceres/line_search_direction.h" +#include "ceres/stringprintf.h" +#include "ceres/types.h" +#include "ceres/wall_time.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { +namespace { +// Small constant for various floating point issues. +// TODO(sameeragarwal): Change to a better name if this has only one +// use. +const double kEpsilon = 1e-12; + +bool Evaluate(Evaluator* evaluator, + const Vector& x, + LineSearchMinimizer::State* state) { + const bool status = evaluator->Evaluate(x.data(), + &(state->cost), + NULL, + state->gradient.data(), + NULL); + if (status) { + state->gradient_squared_norm = state->gradient.squaredNorm(); + state->gradient_max_norm = state->gradient.lpNorm<Eigen::Infinity>(); + } + + return status; +} + +} // namespace + +void LineSearchMinimizer::Minimize(const Minimizer::Options& options, + double* parameters, + Solver::Summary* summary) { + double start_time = WallTimeInSeconds(); + double iteration_start_time = start_time; + + Evaluator* evaluator = CHECK_NOTNULL(options.evaluator); + const int num_parameters = evaluator->NumParameters(); + const int num_effective_parameters = evaluator->NumEffectiveParameters(); + + summary->termination_type = NO_CONVERGENCE; + summary->num_successful_steps = 0; + summary->num_unsuccessful_steps = 0; + + VectorRef x(parameters, num_parameters); + + State current_state(num_parameters, num_effective_parameters); + State previous_state(num_parameters, num_effective_parameters); + + Vector delta(num_effective_parameters); + Vector x_plus_delta(num_parameters); + + IterationSummary iteration_summary; + 
 iteration_summary.iteration = 0; + iteration_summary.step_is_valid = false; + iteration_summary.step_is_successful = false; + iteration_summary.cost_change = 0.0; + iteration_summary.gradient_max_norm = 0.0; + iteration_summary.step_norm = 0.0; + iteration_summary.linear_solver_iterations = 0; + iteration_summary.step_solver_time_in_seconds = 0; + + // Do initial cost and Jacobian evaluation. + if (!Evaluate(evaluator, x, &current_state)) { + LOG(WARNING) << "Terminating: Cost and gradient evaluation failed."; + summary->termination_type = NUMERICAL_FAILURE; + return; + } + + summary->initial_cost = current_state.cost + summary->fixed_cost; + iteration_summary.cost = current_state.cost + summary->fixed_cost; + + iteration_summary.gradient_max_norm = current_state.gradient_max_norm; + + // The initial gradient max_norm is bounded from below so that we do + // not divide by zero. + const double initial_gradient_max_norm = + max(iteration_summary.gradient_max_norm, kEpsilon); + const double absolute_gradient_tolerance = + options.gradient_tolerance * initial_gradient_max_norm; + + if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->termination_type = GRADIENT_TOLERANCE; + VLOG(1) << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: " + << iteration_summary.gradient_max_norm / initial_gradient_max_norm + << " <= " << options.gradient_tolerance; + return; + } + + iteration_summary.iteration_time_in_seconds = + WallTimeInSeconds() - iteration_start_time; + iteration_summary.cumulative_time_in_seconds = + WallTimeInSeconds() - start_time + + summary->preprocessor_time_in_seconds; + summary->iterations.push_back(iteration_summary); + + LineSearchDirection::Options line_search_direction_options; + line_search_direction_options.num_parameters = num_effective_parameters; + line_search_direction_options.type = options.line_search_direction_type; + line_search_direction_options.nonlinear_conjugate_gradient_type = + options.nonlinear_conjugate_gradient_type; + line_search_direction_options.max_lbfgs_rank = options.max_lbfgs_rank; + scoped_ptr<LineSearchDirection> line_search_direction( + LineSearchDirection::Create(line_search_direction_options)); + + LineSearchFunction line_search_function(evaluator); + LineSearch::Options line_search_options; + line_search_options.function = &line_search_function; + + // TODO(sameeragarwal): Make this parameterizable over different + // line searches. 
+ ArmijoLineSearch line_search; + LineSearch::Summary line_search_summary; + + while (true) { + if (!RunCallbacks(options.callbacks, iteration_summary, summary)) { + return; + } + + iteration_start_time = WallTimeInSeconds(); + if (iteration_summary.iteration >= options.max_num_iterations) { + summary->termination_type = NO_CONVERGENCE; + VLOG(1) << "Terminating: Maximum number of iterations reached."; + break; + } + + const double total_solver_time = iteration_start_time - start_time + + summary->preprocessor_time_in_seconds; + if (total_solver_time >= options.max_solver_time_in_seconds) { + summary->termination_type = NO_CONVERGENCE; + VLOG(1) << "Terminating: Maximum solver time reached."; + break; + } + + iteration_summary = IterationSummary(); + iteration_summary.iteration = summary->iterations.back().iteration + 1; + + bool line_search_status = true; + if (iteration_summary.iteration == 1) { + current_state.search_direction = -current_state.gradient; + } else { + line_search_status = line_search_direction->NextDirection( + previous_state, + current_state, + &current_state.search_direction); + } + + if (!line_search_status) { + LOG(WARNING) << "Line search direction computation failed. " + "Resorting to steepest descent."; + current_state.search_direction = -current_state.gradient; + } + + line_search_function.Init(x, current_state.search_direction); + current_state.directional_derivative = + current_state.gradient.dot(current_state.search_direction); + + // TODO(sameeragarwal): Refactor this into its own object and add + // explanations for the various choices. + const double initial_step_size = (iteration_summary.iteration == 1) + ? 
min(1.0, 1.0 / current_state.gradient_max_norm) + : min(1.0, 2.0 * (current_state.cost - previous_state.cost) / + current_state.directional_derivative); + + line_search.Search(line_search_options, + initial_step_size, + current_state.cost, + current_state.directional_derivative, + &line_search_summary); + + current_state.step_size = line_search_summary.optimal_step_size; + delta = current_state.step_size * current_state.search_direction; + + previous_state = current_state; + + // TODO(sameeragarwal): Collect stats. + if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data()) || + !Evaluate(evaluator, x_plus_delta, &current_state)) { + LOG(WARNING) << "Evaluation failed."; + } else { + x = x_plus_delta; + } + + iteration_summary.gradient_max_norm = current_state.gradient_max_norm; + if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { + summary->termination_type = GRADIENT_TOLERANCE; + VLOG(1) << "Terminating: Gradient tolerance reached." + << "Relative gradient max norm: " + << iteration_summary.gradient_max_norm / initial_gradient_max_norm + << " <= " << options.gradient_tolerance; + break; + } + + iteration_summary.cost_change = previous_state.cost - current_state.cost; + const double absolute_function_tolerance = + options.function_tolerance * previous_state.cost; + if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) { + VLOG(1) << "Terminating. Function tolerance reached. 
" + << "|cost_change|/cost: " + << fabs(iteration_summary.cost_change) / previous_state.cost + << " <= " << options.function_tolerance; + summary->termination_type = FUNCTION_TOLERANCE; + return; + } + + iteration_summary.cost = current_state.cost + summary->fixed_cost; + iteration_summary.step_norm = delta.norm(); + iteration_summary.step_is_valid = true; + iteration_summary.step_is_successful = true; + iteration_summary.step_norm = delta.norm(); + iteration_summary.step_size = current_state.step_size; + iteration_summary.line_search_function_evaluations = + line_search_summary.num_evaluations; + iteration_summary.iteration_time_in_seconds = + WallTimeInSeconds() - iteration_start_time; + iteration_summary.cumulative_time_in_seconds = + WallTimeInSeconds() - start_time + + summary->preprocessor_time_in_seconds; + + summary->iterations.push_back(iteration_summary); + } +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.h new file mode 100644 index 00000000000..f82f13984a8 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/line_search_minimizer.h @@ -0,0 +1,77 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_ +#define CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_ + +#include "ceres/minimizer.h" +#include "ceres/solver.h" +#include "ceres/types.h" +#include "ceres/internal/eigen.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +// Generic line search minimization algorithm. +// +// For example usage, see SolverImpl::Minimize. 
+class LineSearchMinimizer : public Minimizer { + public: + struct State { + State(int num_parameters, + int num_effective_parameters) + : cost(0.0), + gradient(num_effective_parameters), + gradient_squared_norm(0.0), + search_direction(num_effective_parameters), + directional_derivative(0.0), + step_size(0.0) { + } + + double cost; + Vector gradient; + double gradient_squared_norm; + double gradient_max_norm; + Vector search_direction; + double directional_derivative; + double step_size; + }; + + ~LineSearchMinimizer() {} + virtual void Minimize(const Minimizer::Options& options, + double* parameters, + Solver::Summary* summary); +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_LINE_SEARCH_MINIMIZER_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc index a91e254a663..6c886a1be38 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc @@ -573,13 +573,14 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3() { return problem; } -static bool DumpLinearLeastSquaresProblemToConsole(const string& directory, - int iteration, - const SparseMatrix* A, - const double* D, - const double* b, - const double* x, - int num_eliminate_blocks) { +namespace { +bool DumpLinearLeastSquaresProblemToConsole(const string& directory, + int iteration, + const SparseMatrix* A, + const double* D, + const double* b, + const double* x, + int num_eliminate_blocks) { CHECK_NOTNULL(A); Matrix AA; A->ToDenseMatrix(&AA); @@ -601,13 +602,13 @@ static bool DumpLinearLeastSquaresProblemToConsole(const string& directory, }; #ifndef CERES_NO_PROTOCOL_BUFFERS -static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, - int iteration, - const SparseMatrix* A, - const double* D, - const double* b, - 
const double* x, - int num_eliminate_blocks) { +bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, + int iteration, + const SparseMatrix* A, + const double* D, + const double* b, + const double* x, + int num_eliminate_blocks) { CHECK_NOTNULL(A); LinearLeastSquaresProblemProto lsqp; A->ToProto(lsqp.mutable_a()); @@ -641,13 +642,13 @@ static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& director return true; } #else -static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, - int iteration, - const SparseMatrix* A, - const double* D, - const double* b, - const double* x, - int num_eliminate_blocks) { +bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, + int iteration, + const SparseMatrix* A, + const double* D, + const double* b, + const double* x, + int num_eliminate_blocks) { LOG(ERROR) << "Dumping least squares problems is only " << "supported when Ceres is compiled with " << "protocol buffer support."; @@ -655,9 +656,9 @@ static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& director } #endif -static void WriteArrayToFileOrDie(const string& filename, - const double* x, - const int size) { +void WriteArrayToFileOrDie(const string& filename, + const double* x, + const int size) { CHECK_NOTNULL(x); VLOG(2) << "Writing array to: " << filename; FILE* fptr = fopen(filename.c_str(), "w"); @@ -668,13 +669,13 @@ static void WriteArrayToFileOrDie(const string& filename, fclose(fptr); } -static bool DumpLinearLeastSquaresProblemToTextFile(const string& directory, - int iteration, - const SparseMatrix* A, - const double* D, - const double* b, - const double* x, - int num_eliminate_blocks) { +bool DumpLinearLeastSquaresProblemToTextFile(const string& directory, + int iteration, + const SparseMatrix* A, + const double* D, + const double* b, + const double* x, + int num_eliminate_blocks) { CHECK_NOTNULL(A); string format_string = JoinPath(directory, 
"lm_iteration_%03d"); @@ -732,9 +733,10 @@ static bool DumpLinearLeastSquaresProblemToTextFile(const string& directory, WriteStringToFileOrDie(matlab_script, matlab_filename); return true; } +} // namespace bool DumpLinearLeastSquaresProblem(const string& directory, - int iteration, + int iteration, DumpFormatType dump_format_type, const SparseMatrix* A, const double* D, @@ -742,18 +744,18 @@ bool DumpLinearLeastSquaresProblem(const string& directory, const double* x, int num_eliminate_blocks) { switch (dump_format_type) { - case (CONSOLE): + case CONSOLE: return DumpLinearLeastSquaresProblemToConsole(directory, iteration, A, D, b, x, num_eliminate_blocks); - case (PROTOBUF): + case PROTOBUF: return DumpLinearLeastSquaresProblemToProtocolBuffer( directory, iteration, A, D, b, x, num_eliminate_blocks); - case (TEXTFILE): + case TEXTFILE: return DumpLinearLeastSquaresProblemToTextFile(directory, iteration, A, D, b, x, diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.h b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.h index 553cc0d3db3..c76ae91c7d8 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.h +++ b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.h @@ -74,7 +74,7 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3(); // Write the linear least squares problem to disk. The exact format // depends on dump_format_type. 
bool DumpLinearLeastSquaresProblem(const string& directory, - int iteration, + int iteration, DumpFormatType dump_format_type, const SparseMatrix* A, const double* D, diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h index 31f88740b9f..a98051468e7 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h @@ -35,14 +35,16 @@ #define CERES_INTERNAL_LINEAR_SOLVER_H_ #include <cstddef> - -#include <glog/logging.h> +#include <map> +#include <vector> #include "ceres/block_sparse_matrix.h" #include "ceres/casts.h" #include "ceres/compressed_row_sparse_matrix.h" #include "ceres/dense_sparse_matrix.h" +#include "ceres/execution_summary.h" #include "ceres/triplet_sparse_matrix.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { namespace internal { @@ -76,7 +78,6 @@ class LinearSolver { min_num_iterations(1), max_num_iterations(1), num_threads(1), - num_eliminate_blocks(0), residual_reset_period(10), row_block_size(Dynamic), e_block_size(Dynamic), @@ -100,15 +101,23 @@ class LinearSolver { // If possible, how many threads can the solver use. int num_threads; - // Eliminate 0 to num_eliminate_blocks - 1 from the Normal - // equations to form a schur complement. Only used by the Schur - // complement based solver. The most common use for this parameter - // is in the case of structure from motion problems where we have - // camera blocks and point blocks. Then setting the - // num_eliminate_blocks to the number of points allows the solver - // to use the Schur complement trick. For more details see the - // description of this parameter in solver.h. - int num_eliminate_blocks; + // Hints about the order in which the parameter blocks should be + // eliminated by the linear solver. 
+ // + // For example if elimination_groups is a vector of size k, then + // the linear solver is informed that it should eliminate the + // parameter blocks 0 - elimination_groups[0] - 1 first, and then + // elimination_groups[0] - elimination_groups[1] and so on. Within + // each elimination group, the linear solver is free to choose how + // the parameter blocks are ordered. Different linear solvers have + // differing requirements on elimination_groups. + // + // The most common use is for Schur type solvers, where there + // should be at least two elimination groups and the first + // elimination group must form an independent set in the normal + // equations. The first elimination group corresponds to the + // num_eliminate_blocks in the Schur type solvers. + vector<int> elimination_groups; // Iterative solvers, e.g. Preconditioned Conjugate Gradients // maintain a cheap estimate of the residual which may become @@ -247,6 +256,18 @@ class LinearSolver { const PerSolveOptions& per_solve_options, double* x) = 0; + // The following two methods return copies instead of references so + // that the base class implementation does not have to worry about + // life time issues. Further, these calls are not expected to be + // frequent or performance sensitive. 
+ virtual map<string, int> CallStatistics() const { + return map<string, int>(); + } + + virtual map<string, double> TimeStatistics() const { + return map<string, double>(); + } + // Factory static LinearSolver* Create(const Options& options); }; @@ -267,18 +288,29 @@ class TypedLinearSolver : public LinearSolver { const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) { + ScopedExecutionTimer total_time("LinearSolver::Solve", &execution_summary_); CHECK_NOTNULL(A); CHECK_NOTNULL(b); CHECK_NOTNULL(x); return SolveImpl(down_cast<MatrixType*>(A), b, per_solve_options, x); } + virtual map<string, int> CallStatistics() const { + return execution_summary_.calls(); + } + + virtual map<string, double> TimeStatistics() const { + return execution_summary_.times(); + } + private: virtual LinearSolver::Summary SolveImpl( MatrixType* A, const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) = 0; + + ExecutionSummary execution_summary_; }; // Linear solvers that depend on acccess to the low level structure of diff --git a/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc new file mode 100644 index 00000000000..3fe113f1afb --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.cc @@ -0,0 +1,109 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/internal/eigen.h" +#include "ceres/low_rank_inverse_hessian.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +LowRankInverseHessian::LowRankInverseHessian(int num_parameters, + int max_num_corrections) + : num_parameters_(num_parameters), + max_num_corrections_(max_num_corrections), + num_corrections_(0), + diagonal_(1.0), + delta_x_history_(num_parameters, max_num_corrections), + delta_gradient_history_(num_parameters, max_num_corrections), + delta_x_dot_delta_gradient_(max_num_corrections) { +} + +bool LowRankInverseHessian::Update(const Vector& delta_x, + const Vector& delta_gradient) { + const double delta_x_dot_delta_gradient = delta_x.dot(delta_gradient); + if (delta_x_dot_delta_gradient <= 1e-10) { + VLOG(2) << "Skipping LBFGS Update. " << delta_x_dot_delta_gradient; + return false; + } + + if (num_corrections_ == max_num_corrections_) { + // TODO(sameeragarwal): This can be done more efficiently using + // a circular buffer/indexing scheme, but for simplicity we will + // do the expensive copy for now. 
+ delta_x_history_.block(0, 0, num_parameters_, max_num_corrections_ - 2) = + delta_x_history_ + .block(0, 1, num_parameters_, max_num_corrections_ - 1); + + delta_gradient_history_ + .block(0, 0, num_parameters_, max_num_corrections_ - 2) = + delta_gradient_history_ + .block(0, 1, num_parameters_, max_num_corrections_ - 1); + + delta_x_dot_delta_gradient_.head(num_corrections_ - 2) = + delta_x_dot_delta_gradient_.tail(num_corrections_ - 1); + } else { + ++num_corrections_; + } + + delta_x_history_.col(num_corrections_ - 1) = delta_x; + delta_gradient_history_.col(num_corrections_ - 1) = delta_gradient; + delta_x_dot_delta_gradient_(num_corrections_ - 1) = + delta_x_dot_delta_gradient; + diagonal_ = delta_x_dot_delta_gradient / delta_gradient.squaredNorm(); + return true; +} + +void LowRankInverseHessian::RightMultiply(const double* x_ptr, + double* y_ptr) const { + ConstVectorRef gradient(x_ptr, num_parameters_); + VectorRef search_direction(y_ptr, num_parameters_); + + search_direction = gradient; + + Vector alpha(num_corrections_); + + for (int i = num_corrections_ - 1; i >= 0; --i) { + alpha(i) = delta_x_history_.col(i).dot(search_direction) / + delta_x_dot_delta_gradient_(i); + search_direction -= alpha(i) * delta_gradient_history_.col(i); + } + + search_direction *= diagonal_; + + for (int i = 0; i < num_corrections_; ++i) { + const double beta = delta_gradient_history_.col(i).dot(search_direction) / + delta_x_dot_delta_gradient_(i); + search_direction += delta_x_history_.col(i) * (alpha(i) - beta); + } +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.h b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.h new file mode 100644 index 00000000000..6f3fc0c9d00 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/low_rank_inverse_hessian.h @@ -0,0 +1,99 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google 
Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Limited memory positive definite approximation to the inverse +// Hessian, using the LBFGS algorithm + +#ifndef CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_ +#define CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_ + +#include "ceres/internal/eigen.h" +#include "ceres/linear_operator.h" + +namespace ceres { +namespace internal { + +// LowRankInverseHessian is a positive definite approximation to the +// Hessian using the limited memory variant of the +// Broyden-Fletcher-Goldfarb-Shanno (BFGS)secant formula for +// approximating the Hessian. +// +// Other update rules like the Davidon-Fletcher-Powell (DFP) are +// possible, but the BFGS rule is considered the best performing one. +// +// The limited memory variant was developed by Nocedal and further +// enhanced with scaling rule by Byrd, Nocedal and Schanbel. +// +// Nocedal, J. (1980). "Updating Quasi-Newton Matrices with Limited +// Storage". Mathematics of Computation 35 (151): 773–782. +// +// Byrd, R. H.; Nocedal, J.; Schnabel, R. B. (1994). +// "Representations of Quasi-Newton Matrices and their use in +// Limited Memory Methods". Mathematical Programming 63 (4): +class LowRankInverseHessian : public LinearOperator { + public: + // num_parameters is the row/column size of the Hessian. + // max_num_corrections is the rank of the Hessian approximation. + // The approximation uses: + // 2 * max_num_corrections * num_parameters + max_num_corrections + // doubles. + LowRankInverseHessian(int num_parameters, int max_num_corrections); + virtual ~LowRankInverseHessian() {} + + // Update the low rank approximation. delta_x is the change in the + // domain of Hessian, and delta_gradient is the change in the + // gradient. The update copies the delta_x and delta_gradient + // vectors, and gets rid of the oldest delta_x and delta_gradient + // vectors if the number of corrections is already equal to + // max_num_corrections. 
+ bool Update(const Vector& delta_x, const Vector& delta_gradient); + + // LinearOperator interface + virtual void RightMultiply(const double* x, double* y) const; + virtual void LeftMultiply(const double* x, double* y) const { + RightMultiply(x, y); + } + virtual int num_rows() const { return num_parameters_; } + virtual int num_cols() const { return num_parameters_; } + + private: + const int num_parameters_; + const int max_num_corrections_; + int num_corrections_; + double diagonal_; + Matrix delta_x_history_; + Matrix delta_gradient_history_; + Vector delta_x_dot_delta_gradient_; +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_LOW_RANK_INVERSE_HESSIAN_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/map_util.h b/extern/libmv/third_party/ceres/internal/ceres/map_util.h index ddf1252f674..929c6b36982 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/map_util.h +++ b/extern/libmv/third_party/ceres/internal/ceres/map_util.h @@ -35,6 +35,7 @@ #include <utility> #include "ceres/internal/port.h" +#include "glog/logging.h" namespace ceres { diff --git a/extern/libmv/third_party/ceres/internal/ceres/minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/minimizer.cc new file mode 100644 index 00000000000..2e2c15ac612 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/minimizer.cc @@ -0,0 +1,67 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/minimizer.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +Minimizer::~Minimizer() {} + +bool Minimizer::RunCallbacks(const vector<IterationCallback*> callbacks, + const IterationSummary& iteration_summary, + Solver::Summary* summary) { + CallbackReturnType status = SOLVER_CONTINUE; + int i = 0; + while (status == SOLVER_CONTINUE && i < callbacks.size()) { + status = (*callbacks[i])(iteration_summary); + ++i; + } + switch (status) { + case SOLVER_CONTINUE: + return true; + case SOLVER_TERMINATE_SUCCESSFULLY: + summary->termination_type = USER_SUCCESS; + VLOG(1) << "Terminating: User callback returned USER_SUCCESS."; + return false; + case SOLVER_ABORT: + summary->termination_type = USER_ABORT; + VLOG(1) << "Terminating: User callback returned USER_ABORT."; + return false; + default: + LOG(FATAL) << "Unknown type of user callback status"; + } + return false; +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h index cfc98a3ebd0..708974d63c2 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h +++ b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h @@ -32,8 +32,9 @@ #define CERES_INTERNAL_MINIMIZER_H_ #include <vector> -#include "ceres/solver.h" +#include "ceres/internal/port.h" #include "ceres/iteration_callback.h" +#include "ceres/solver.h" namespace ceres { namespace internal { @@ -59,6 +60,7 @@ class Minimizer { } void Init(const Solver::Options& options) { + num_threads = options.num_threads; max_num_iterations = options.max_num_iterations; max_solver_time_in_seconds = options.max_solver_time_in_seconds; max_step_solver_retries = 5; @@ -74,18 +76,24 @@ class Minimizer { lsqp_dump_directory = options.lsqp_dump_directory; lsqp_iterations_to_dump = options.lsqp_iterations_to_dump; 
lsqp_dump_format_type = options.lsqp_dump_format_type; - num_eliminate_blocks = options.num_eliminate_blocks; max_num_consecutive_invalid_steps = options.max_num_consecutive_invalid_steps; min_trust_region_radius = options.min_trust_region_radius; + line_search_direction_type = options.line_search_direction_type; + line_search_type = options.line_search_type; + nonlinear_conjugate_gradient_type = + options.nonlinear_conjugate_gradient_type; + max_lbfgs_rank = options.max_lbfgs_rank; evaluator = NULL; trust_region_strategy = NULL; jacobian = NULL; callbacks = options.callbacks; + inner_iteration_minimizer = NULL; } int max_num_iterations; double max_solver_time_in_seconds; + int num_threads; // Number of times the linear solver should be retried in case of // numerical failure. The retries are done by exponentially scaling up @@ -104,9 +112,12 @@ class Minimizer { vector<int> lsqp_iterations_to_dump; DumpFormatType lsqp_dump_format_type; string lsqp_dump_directory; - int num_eliminate_blocks; int max_num_consecutive_invalid_steps; int min_trust_region_radius; + LineSearchDirectionType line_search_direction_type; + LineSearchType line_search_type; + NonlinearConjugateGradientType nonlinear_conjugate_gradient_type; + int max_lbfgs_rank; // List of callbacks that are executed by the Minimizer at the end // of each iteration. @@ -128,10 +139,15 @@ class Minimizer { // and will remain constant for the life time of the // optimization. The Options struct does not own this pointer. SparseMatrix* jacobian; + + Minimizer* inner_iteration_minimizer; }; - virtual ~Minimizer() {} + static bool RunCallbacks(const vector<IterationCallback*> callbacks, + const IterationSummary& iteration_summary, + Solver::Summary* summary); + virtual ~Minimizer(); // Note: The minimizer is expected to update the state of the // parameters array every iteration. This is required for the // StateUpdatingCallback to work. 
diff --git a/extern/libmv/third_party/ceres/internal/ceres/mutex.h b/extern/libmv/third_party/ceres/internal/ceres/mutex.h index 5090a71b78d..410748ff0ab 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/mutex.h +++ b/extern/libmv/third_party/ceres/internal/ceres/mutex.h @@ -107,10 +107,12 @@ # define _WIN32_WINNT 0x0400 # endif # endif +// Unfortunately, windows.h defines a bunch of macros with common +// names. Two in particular need avoiding: ERROR and min/max. // To avoid macro definition of ERROR. -# define CERES_NOGDI +# define NOGDI // To avoid macro definition of min/max. -# define CERES_NOMINMAX +# define NOMINMAX # include <windows.h> typedef CRITICAL_SECTION MutexType; #elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK) diff --git a/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h b/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h index f20805ca873..b1e8d938b8a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h +++ b/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2010, 2011, 2012 Google Inc. All rights reserved. +// Copyright 2010, 2011, 2012, 2013 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without @@ -34,6 +34,7 @@ #include <cstdlib> #include <string> #include "ceres/array_utils.h" +#include "ceres/collections_port.h" #include "ceres/integral_types.h" #include "ceres/internal/eigen.h" #include "ceres/internal/port.h" @@ -46,6 +47,7 @@ namespace ceres { namespace internal { class ProblemImpl; +class ResidualBlock; // The parameter block encodes the location of the user's original value, and // also the "current state" of the parameter. The evaluator uses whatever is in @@ -58,13 +60,29 @@ class ProblemImpl; // responsible for the proper disposal of the local parameterization. 
class ParameterBlock { public: - ParameterBlock(double* user_state, int size) { - Init(user_state, size, NULL); + // TODO(keir): Decide what data structure is best here. Should this be a set? + // Probably not, because sets are memory inefficient. However, if it's a + // vector, you can get into pathological linear performance when removing a + // residual block from a problem where all the residual blocks depend on one + // parameter; for example, shared focal length in a bundle adjustment + // problem. It might be worth making a custom structure that is just an array + // when it is small, but transitions to a hash set when it has more elements. + // + // For now, use a hash set. + typedef HashSet<ResidualBlock*> ResidualBlockSet; + + // Create a parameter block with the user state, size, and index specified. + // The size is the size of the parameter block and the index is the position + // of the parameter block inside a Program (if any). + ParameterBlock(double* user_state, int size, int index) { + Init(user_state, size, index, NULL); } + ParameterBlock(double* user_state, int size, + int index, LocalParameterization* local_parameterization) { - Init(user_state, size, local_parameterization); + Init(user_state, size, index, local_parameterization); } // The size of the parameter block. 
@@ -187,12 +205,43 @@ class ParameterBlock { delta_offset_); } + void EnableResidualBlockDependencies() { + CHECK(residual_blocks_.get() == NULL) + << "Ceres bug: There is already a residual block collection " + << "for parameter block: " << ToString(); + residual_blocks_.reset(new ResidualBlockSet); + } + + void AddResidualBlock(ResidualBlock* residual_block) { + CHECK(residual_blocks_.get() != NULL) + << "Ceres bug: The residual block collection is null for parameter " + << "block: " << ToString(); + residual_blocks_->insert(residual_block); + } + + void RemoveResidualBlock(ResidualBlock* residual_block) { + CHECK(residual_blocks_.get() != NULL) + << "Ceres bug: The residual block collection is null for parameter " + << "block: " << ToString(); + CHECK(residual_blocks_->find(residual_block) != residual_blocks_->end()) + << "Ceres bug: Missing residual for parameter block: " << ToString(); + residual_blocks_->erase(residual_block); + } + + // This is only intended for iterating; perhaps this should only expose + // .begin() and .end(). + ResidualBlockSet* mutable_residual_blocks() { + return residual_blocks_.get(); + } + private: void Init(double* user_state, int size, + int index, LocalParameterization* local_parameterization) { user_state_ = user_state; size_ = size; + index_ = index; is_constant_ = false; state_ = user_state_; @@ -201,7 +250,6 @@ class ParameterBlock { SetParameterization(local_parameterization); } - index_ = -1; state_offset_ = -1; delta_offset_ = -1; } @@ -261,6 +309,9 @@ class ParameterBlock { // The offset of this parameter block inside a larger delta vector. int32 delta_offset_; + // If non-null, contains the residual blocks this parameter block is in. + scoped_ptr<ResidualBlockSet> residual_blocks_; + // Necessary so ProblemImpl can clean up the parameterizations. 
friend class ProblemImpl; }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc b/extern/libmv/third_party/ceres/internal/ceres/parameter_block_ordering.cc index 1cdff4e6dec..e8f626f8e80 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/parameter_block_ordering.cc @@ -28,7 +28,7 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) -#include "ceres/schur_ordering.h" +#include "ceres/parameter_block_ordering.h" #include "ceres/graph.h" #include "ceres/graph_algorithms.h" @@ -46,8 +46,7 @@ int ComputeSchurOrdering(const Program& program, vector<ParameterBlock*>* ordering) { CHECK_NOTNULL(ordering)->clear(); - scoped_ptr<Graph< ParameterBlock*> > graph( - CHECK_NOTNULL(CreateHessianGraph(program))); + scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program)); int independent_set_size = IndependentSetOrdering(*graph, ordering); const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks(); @@ -62,9 +61,31 @@ int ComputeSchurOrdering(const Program& program, return independent_set_size; } +void ComputeRecursiveIndependentSetOrdering(const Program& program, + ParameterBlockOrdering* ordering) { + CHECK_NOTNULL(ordering)->Clear(); + const vector<ParameterBlock*> parameter_blocks = program.parameter_blocks(); + scoped_ptr<Graph< ParameterBlock*> > graph(CreateHessianGraph(program)); + + int num_covered = 0; + int round = 0; + while (num_covered < parameter_blocks.size()) { + vector<ParameterBlock*> independent_set_ordering; + const int independent_set_size = + IndependentSetOrdering(*graph, &independent_set_ordering); + for (int i = 0; i < independent_set_size; ++i) { + ParameterBlock* parameter_block = independent_set_ordering[i]; + ordering->AddElementToGroup(parameter_block->mutable_user_state(), round); + graph->RemoveVertex(parameter_block); + } + num_covered += independent_set_size; + ++round; + } +} + Graph<ParameterBlock*>* 
CreateHessianGraph(const Program& program) { - Graph<ParameterBlock*>* graph = new Graph<ParameterBlock*>; + Graph<ParameterBlock*>* graph = CHECK_NOTNULL(new Graph<ParameterBlock*>); const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks(); for (int i = 0; i < parameter_blocks.size(); ++i) { ParameterBlock* parameter_block = parameter_blocks[i]; diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.h b/extern/libmv/third_party/ceres/internal/ceres/parameter_block_ordering.h index 1f9a4ff354f..a5277a44c70 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.h +++ b/extern/libmv/third_party/ceres/internal/ceres/parameter_block_ordering.h @@ -27,14 +27,12 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: sameeragarwal@google.com (Sameer Agarwal) -// -// Compute a parameter block ordering for use with the Schur -// complement based algorithms. -#ifndef CERES_INTERNAL_SCHUR_ORDERING_H_ -#define CERES_INTERNAL_SCHUR_ORDERING_H_ +#ifndef CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_ +#define CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_ #include <vector> +#include "ceres/ordered_groups.h" #include "ceres/graph.h" #include "ceres/types.h" @@ -60,6 +58,12 @@ class ParameterBlock; int ComputeSchurOrdering(const Program& program, vector<ParameterBlock* >* ordering); +// Use an approximate independent set ordering to decompose the +// parameter blocks of a problem in a sequence of independent +// sets. The ordering covers all the non-constant parameter blocks in +// the program. +void ComputeRecursiveIndependentSetOrdering(const Program& program, + ParameterBlockOrdering* ordering); // Builds a graph on the parameter blocks of a Problem, whose // structure reflects the sparsity structure of the Hessian. 
Each @@ -71,4 +75,4 @@ Graph<ParameterBlock*>* CreateHessianGraph(const Program& program); } // namespace internal } // namespace ceres -#endif // CERES_INTERNAL_SCHUR_ORDERING_H_ +#endif // CERES_INTERNAL_PARAMETER_BLOCK_ORDERING_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc index 20c01566a89..3238b89670e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial.cc @@ -27,11 +27,14 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: moll.markus@arcor.de (Markus Moll) +// sameeragarwal@google.com (Sameer Agarwal) -#include "ceres/polynomial_solver.h" +#include "ceres/polynomial.h" #include <cmath> #include <cstddef> +#include <vector> + #include "Eigen/Dense" #include "ceres/internal/port.h" #include "glog/logging.h" @@ -159,8 +162,7 @@ bool FindPolynomialRoots(const Vector& polynomial_in, BalanceCompanionMatrix(&companion_matrix); // Find its (complex) eigenvalues. - Eigen::EigenSolver<Matrix> solver(companion_matrix, - Eigen::EigenvaluesOnly); + Eigen::EigenSolver<Matrix> solver(companion_matrix, false); if (solver.info() != Eigen::Success) { LOG(ERROR) << "Failed to extract eigenvalues from companion matrix."; return false; @@ -180,5 +182,138 @@ bool FindPolynomialRoots(const Vector& polynomial_in, return true; } +Vector DifferentiatePolynomial(const Vector& polynomial) { + const int degree = polynomial.rows() - 1; + CHECK_GE(degree, 0); + + // Degree zero polynomials are constants, and their derivative does + // not result in a smaller degree polynomial, just a degree zero + // polynomial with value zero. 
+ if (degree == 0) { + return Eigen::VectorXd::Zero(1); + } + + Vector derivative(degree); + for (int i = 0; i < degree; ++i) { + derivative(i) = (degree - i) * polynomial(i); + } + + return derivative; +} + +void MinimizePolynomial(const Vector& polynomial, + const double x_min, + const double x_max, + double* optimal_x, + double* optimal_value) { + // Find the minimum of the polynomial at the two ends. + // + // We start by inspecting the middle of the interval. Technically + // this is not needed, but we do this to make this code as close to + // the minFunc package as possible. + *optimal_x = (x_min + x_max) / 2.0; + *optimal_value = EvaluatePolynomial(polynomial, *optimal_x); + + const double x_min_value = EvaluatePolynomial(polynomial, x_min); + if (x_min_value < *optimal_value) { + *optimal_value = x_min_value; + *optimal_x = x_min; + } + + const double x_max_value = EvaluatePolynomial(polynomial, x_max); + if (x_max_value < *optimal_value) { + *optimal_value = x_max_value; + *optimal_x = x_max; + } + + // If the polynomial is linear or constant, we are done. + if (polynomial.rows() <= 2) { + return; + } + + const Vector derivative = DifferentiatePolynomial(polynomial); + Vector roots_real; + if (!FindPolynomialRoots(derivative, &roots_real, NULL)) { + LOG(WARNING) << "Unable to find the critical points of " + << "the interpolating polynomial."; + return; + } + + // This is a bit of an overkill, as some of the roots may actually + // have a complex part, but its simpler to just check these values. 
+ for (int i = 0; i < roots_real.rows(); ++i) { + const double root = roots_real(i); + if ((root < x_min) || (root > x_max)) { + continue; + } + + const double value = EvaluatePolynomial(polynomial, root); + if (value < *optimal_value) { + *optimal_value = value; + *optimal_x = root; + } + } +} + +Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples) { + const int num_samples = samples.size(); + int num_constraints = 0; + for (int i = 0; i < num_samples; ++i) { + if (samples[i].value_is_valid) { + ++num_constraints; + } + if (samples[i].gradient_is_valid) { + ++num_constraints; + } + } + + const int degree = num_constraints - 1; + Matrix lhs = Matrix::Zero(num_constraints, num_constraints); + Vector rhs = Vector::Zero(num_constraints); + + int row = 0; + for (int i = 0; i < num_samples; ++i) { + const FunctionSample& sample = samples[i]; + if (sample.value_is_valid) { + for (int j = 0; j <= degree; ++j) { + lhs(row, j) = pow(sample.x, degree - j); + } + rhs(row) = sample.value; + ++row; + } + + if (sample.gradient_is_valid) { + for (int j = 0; j < degree; ++j) { + lhs(row, j) = (degree - j) * pow(sample.x, degree - j - 1); + } + rhs(row) = sample.gradient; + ++row; + } + } + + return lhs.fullPivLu().solve(rhs); +} + +void MinimizeInterpolatingPolynomial(const vector<FunctionSample>& samples, + double x_min, + double x_max, + double* optimal_x, + double* optimal_value) { + const Vector polynomial = FindInterpolatingPolynomial(samples); + MinimizePolynomial(polynomial, x_min, x_max, optimal_x, optimal_value); + for (int i = 0; i < samples.size(); ++i) { + const FunctionSample& sample = samples[i]; + if ((sample.x < x_min) || (sample.x > x_max)) { + continue; + } + + const double value = EvaluatePolynomial(polynomial, sample.x); + if (value < *optimal_value) { + *optimal_x = sample.x; + *optimal_value = value; + } + } +} + } // namespace internal } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial.h 
b/extern/libmv/third_party/ceres/internal/ceres/polynomial.h new file mode 100644 index 00000000000..42ffdcb13c5 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial.h @@ -0,0 +1,134 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: moll.markus@arcor.de (Markus Moll) +// sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ +#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ + +#include <vector> +#include "ceres/internal/eigen.h" +#include "ceres/internal/port.h" + +namespace ceres { +namespace internal { + +// All polynomials are assumed to be the form +// +// sum_{i=0}^N polynomial(i) x^{N-i}. +// +// and are given by a vector of coefficients of size N + 1. + +// Evaluate the polynomial at x using the Horner scheme. +inline double EvaluatePolynomial(const Vector& polynomial, double x) { + double v = 0.0; + for (int i = 0; i < polynomial.size(); ++i) { + v = v * x + polynomial(i); + } + return v; +} + +// Use the companion matrix eigenvalues to determine the roots of the +// polynomial. +// +// This function returns true on success, false otherwise. +// Failure indicates that the polynomial is invalid (of size 0) or +// that the eigenvalues of the companion matrix could not be computed. +// On failure, a more detailed message will be written to LOG(ERROR). +// If real is not NULL, the real parts of the roots will be returned in it. +// Likewise, if imaginary is not NULL, imaginary parts will be returned in it. +bool FindPolynomialRoots(const Vector& polynomial, + Vector* real, + Vector* imaginary); + +// Return the derivative of the given polynomial. It is assumed that +// the input polynomial is at least of degree zero. +Vector DifferentiatePolynomial(const Vector& polynomial); + +// Find the minimum value of the polynomial in the interval [x_min, +// x_max]. The minimum is obtained by computing all the roots of the +// derivative of the input polynomial. All real roots within the +// interval [x_min, x_max] are considered as well as the end points +// x_min and x_max. Since polynomials are differentiable functions, +// this ensures that the true minimum is found. 
+void MinimizePolynomial(const Vector& polynomial, + double x_min, + double x_max, + double* optimal_x, + double* optimal_value); + +// Structure for storing sample values of a function. +// +// Clients can use this struct to communicate the value of the +// function and or its gradient at a given point x. +struct FunctionSample { + FunctionSample() + : x(0.0), + value(0.0), + value_is_valid(false), + gradient(0.0), + gradient_is_valid(false) { + } + + double x; + double value; // value = f(x) + bool value_is_valid; + double gradient; // gradient = f'(x) + bool gradient_is_valid; +}; + +// Given a set of function value and/or gradient samples, find a +// polynomial whose value and gradients are exactly equal to the ones +// in samples. +// +// Generally speaking, +// +// degree = # values + # gradients - 1 +// +// Of course its possible to sample a polynomial any number of times, +// in which case, generally speaking the spurious higher order +// coefficients will be zero. +Vector FindInterpolatingPolynomial(const vector<FunctionSample>& samples); + +// Interpolate the function described by samples with a polynomial, +// and minimize it on the interval [x_min, x_max]. Depending on the +// input samples, it is possible that the interpolation or the root +// finding algorithms may fail due to numerical difficulties. But the +// function is guaranteed to return its best guess of an answer, by +// considering the samples and the end points as possible solutions. 
+void MinimizeInterpolatingPolynomial(const vector<FunctionSample>& samples, + double x_min, + double x_max, + double* optimal_x, + double* optimal_value); + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_POLYNOMIAL_SOLVER_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.cc new file mode 100644 index 00000000000..05e539f9fb1 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.cc @@ -0,0 +1,63 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/preconditioner.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +Preconditioner::~Preconditioner() { +} + +SparseMatrixPreconditionerWrapper::SparseMatrixPreconditionerWrapper( + const SparseMatrix* matrix) + : matrix_(CHECK_NOTNULL(matrix)) { +} + +SparseMatrixPreconditionerWrapper::~SparseMatrixPreconditionerWrapper() { +} + +bool SparseMatrixPreconditionerWrapper::Update(const BlockSparseMatrixBase& A, + const double* D) { + return true; +} + +void SparseMatrixPreconditionerWrapper::RightMultiply(const double* x, + double* y) const { + matrix_->RightMultiply(x, y); +} + +int SparseMatrixPreconditionerWrapper::num_rows() const { + return matrix_->num_rows(); +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h new file mode 100644 index 00000000000..5bb077e0e33 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/preconditioner.h @@ -0,0 +1,148 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_INTERNAL_PRECONDITIONER_H_ +#define CERES_INTERNAL_PRECONDITIONER_H_ + +#include <vector> +#include "ceres/linear_operator.h" +#include "ceres/sparse_matrix.h" + +namespace ceres { +namespace internal { + +class BlockSparseMatrixBase; +class SparseMatrix; + +class Preconditioner : public LinearOperator { + public: + struct Options { + Options() + : type(JACOBI), + sparse_linear_algebra_library(SUITE_SPARSE), + use_block_amd(true), + num_threads(1), + row_block_size(Dynamic), + e_block_size(Dynamic), + f_block_size(Dynamic) { + } + + PreconditionerType type; + + SparseLinearAlgebraLibraryType sparse_linear_algebra_library; + + // See solver.h for explanation of this option. + bool use_block_amd; + + // If possible, how many threads the preconditioner can use. + int num_threads; + + // Hints about the order in which the parameter blocks should be + // eliminated by the linear solver. + // + // For example if elimination_groups is a vector of size k, then + // the linear solver is informed that it should eliminate the + // parameter blocks 0 ... elimination_groups[0] - 1 first, and + // then elimination_groups[0] ... elimination_groups[1] and so + // on. Within each elimination group, the linear solver is free to + // choose how the parameter blocks are ordered. Different linear + // solvers have differing requirements on elimination_groups. + // + // The most common use is for Schur type solvers, where there + // should be at least two elimination groups and the first + // elimination group must form an independent set in the normal + // equations. The first elimination group corresponds to the + // num_eliminate_blocks in the Schur type solvers. + vector<int> elimination_groups; + + // If the block sizes in a BlockSparseMatrix are fixed, then in + // some cases the Schur complement based solvers can detect and + // specialize on them. 
+ // + // It is expected that these parameters are set programmatically + // rather than manually. + // + // Please see schur_complement_solver.h and schur_eliminator.h for + // more details. + int row_block_size; + int e_block_size; + int f_block_size; + }; + + virtual ~Preconditioner(); + + // Update the numerical value of the preconditioner for the linear + // system: + // + // | A | x = |b| + // |diag(D)| |0| + // + // for some vector b. It is important that the matrix A have the + // same block structure as the one used to construct this object. + // + // D can be NULL, in which case its interpreted as a diagonal matrix + // of size zero. + virtual bool Update(const BlockSparseMatrixBase& A, const double* D) = 0; + + // LinearOperator interface. Since the operator is symmetric, + // LeftMultiply and num_cols are just calls to RightMultiply and + // num_rows respectively. Update() must be called before + // RightMultiply can be called. + virtual void RightMultiply(const double* x, double* y) const = 0; + virtual void LeftMultiply(const double* x, double* y) const { + return RightMultiply(x, y); + } + + virtual int num_rows() const = 0; + virtual int num_cols() const { + return num_rows(); + } +}; + +// Wrap a SparseMatrix object as a preconditioner. +class SparseMatrixPreconditionerWrapper : public Preconditioner { + public: + // Wrapper does NOT take ownership of the matrix pointer. 
+ explicit SparseMatrixPreconditionerWrapper(const SparseMatrix* matrix); + virtual ~SparseMatrixPreconditionerWrapper(); + + // Preconditioner interface + virtual bool Update(const BlockSparseMatrixBase& A, const double* D); + virtual void RightMultiply(const double* x, double* y) const; + virtual int num_rows() const; + + private: + const SparseMatrix* matrix_; +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_PRECONDITIONER_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem.cc b/extern/libmv/third_party/ceres/internal/ceres/problem.cc index b8c25d9db84..43e78835b15 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/problem.cc @@ -32,12 +32,11 @@ #include "ceres/problem.h" #include <vector> +#include "ceres/crs_matrix.h" #include "ceres/problem_impl.h" namespace ceres { -class ResidualBlock; - Problem::Problem() : problem_impl_(new internal::ProblemImpl) {} Problem::Problem(const Problem::Options& options) : problem_impl_(new internal::ProblemImpl(options)) {} @@ -106,6 +105,47 @@ ResidualBlockId Problem::AddResidualBlock( x0, x1, x2, x3, x4, x5); } +ResidualBlockId Problem::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6) { + return problem_impl_->AddResidualBlock(cost_function, + loss_function, + x0, x1, x2, x3, x4, x5, x6); +} + +ResidualBlockId Problem::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6, double* x7) { + return problem_impl_->AddResidualBlock(cost_function, + loss_function, + x0, x1, x2, x3, x4, x5, x6, x7); +} + +ResidualBlockId Problem::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + 
double* x6, double* x7, double* x8) { + return problem_impl_->AddResidualBlock(cost_function, + loss_function, + x0, x1, x2, x3, x4, x5, x6, x7, x8); +} + +ResidualBlockId Problem::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8, double* x9) { + return problem_impl_->AddResidualBlock( + cost_function, + loss_function, + x0, x1, x2, x3, x4, x5, x6, x7, x8, x9); +} + void Problem::AddParameterBlock(double* values, int size) { problem_impl_->AddParameterBlock(values, size); } @@ -116,6 +156,14 @@ void Problem::AddParameterBlock(double* values, problem_impl_->AddParameterBlock(values, size, local_parameterization); } +void Problem::RemoveResidualBlock(ResidualBlockId residual_block) { + problem_impl_->RemoveResidualBlock(residual_block); +} + +void Problem::RemoveParameterBlock(double* values) { + problem_impl_->RemoveParameterBlock(values); +} + void Problem::SetParameterBlockConstant(double* values) { problem_impl_->SetParameterBlockConstant(values); } @@ -130,6 +178,18 @@ void Problem::SetParameterization( problem_impl_->SetParameterization(values, local_parameterization); } +bool Problem::Evaluate(const EvaluateOptions& evaluate_options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian) { + return problem_impl_->Evaluate(evaluate_options, + cost, + residuals, + gradient, + jacobian); +} + int Problem::NumParameterBlocks() const { return problem_impl_->NumParameterBlocks(); } diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc index c186f527be8..bc378aaafff 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc @@ -37,7 +37,11 @@ #include <string> #include <utility> #include <vector> +#include "ceres/casts.h" 
+#include "ceres/compressed_row_sparse_matrix.h" #include "ceres/cost_function.h" +#include "ceres/crs_matrix.h" +#include "ceres/evaluator.h" #include "ceres/loss_function.h" #include "ceres/map_util.h" #include "ceres/parameter_block.h" @@ -73,54 +77,103 @@ static void CheckForNoAliasing(double* existing_block, << "size " << new_block_size << "."; } -static ParameterBlock* InternalAddParameterBlock( - double* values, - int size, - ParameterMap* parameter_map, - vector<ParameterBlock*>* parameter_blocks) { - CHECK(values) << "Null pointer passed to AddParameterBlock for a parameter " - << "with size " << size; +ParameterBlock* ProblemImpl::InternalAddParameterBlock(double* values, + int size) { + CHECK(values != NULL) << "Null pointer passed to AddParameterBlock " + << "for a parameter with size " << size; // Ignore the request if there is a block for the given pointer already. - ParameterMap::iterator it = parameter_map->find(values); - if (it != parameter_map->end()) { - int existing_size = it->second->Size(); - CHECK(size == existing_size) - << "Tried adding a parameter block with the same double pointer, " - << values << ", twice, but with different block sizes. Original " - << "size was " << existing_size << " but new size is " - << size; + ParameterMap::iterator it = parameter_block_map_.find(values); + if (it != parameter_block_map_.end()) { + if (!options_.disable_all_safety_checks) { + int existing_size = it->second->Size(); + CHECK(size == existing_size) + << "Tried adding a parameter block with the same double pointer, " + << values << ", twice, but with different block sizes. Original " + << "size was " << existing_size << " but new size is " + << size; + } return it->second; } - // Before adding the parameter block, also check that it doesn't alias any - // other parameter blocks. 
- if (!parameter_map->empty()) { - ParameterMap::iterator lb = parameter_map->lower_bound(values); - - // If lb is not the first block, check the previous block for aliasing. - if (lb != parameter_map->begin()) { - ParameterMap::iterator previous = lb; - --previous; - CheckForNoAliasing(previous->first, - previous->second->Size(), - values, - size); - } - // If lb is not off the end, check lb for aliasing. - if (lb != parameter_map->end()) { - CheckForNoAliasing(lb->first, - lb->second->Size(), - values, - size); + if (!options_.disable_all_safety_checks) { + // Before adding the parameter block, also check that it doesn't alias any + // other parameter blocks. + if (!parameter_block_map_.empty()) { + ParameterMap::iterator lb = parameter_block_map_.lower_bound(values); + + // If lb is not the first block, check the previous block for aliasing. + if (lb != parameter_block_map_.begin()) { + ParameterMap::iterator previous = lb; + --previous; + CheckForNoAliasing(previous->first, + previous->second->Size(), + values, + size); + } + + // If lb is not off the end, check lb for aliasing. + if (lb != parameter_block_map_.end()) { + CheckForNoAliasing(lb->first, + lb->second->Size(), + values, + size); + } } } - ParameterBlock* new_parameter_block = new ParameterBlock(values, size); - (*parameter_map)[values] = new_parameter_block; - parameter_blocks->push_back(new_parameter_block); + + // Pass the index of the new parameter block as well to keep the index in + // sync with the position of the parameter in the program's parameter vector. + ParameterBlock* new_parameter_block = + new ParameterBlock(values, size, program_->parameter_blocks_.size()); + + // For dynamic problems, add the list of dependent residual blocks, which is + // empty to start. 
+ if (options_.enable_fast_parameter_block_removal) { + new_parameter_block->EnableResidualBlockDependencies(); + } + parameter_block_map_[values] = new_parameter_block; + program_->parameter_blocks_.push_back(new_parameter_block); return new_parameter_block; } +// Deletes the residual block in question, assuming there are no other +// references to it inside the problem (e.g. by another parameter). Referenced +// cost and loss functions are tucked away for future deletion, since it is not +// possible to know whether other parts of the problem depend on them without +// doing a full scan. +void ProblemImpl::DeleteBlock(ResidualBlock* residual_block) { + // The const casts here are legit, since ResidualBlock holds these + // pointers as const pointers but we have ownership of them and + // have the right to destroy them when the destructor is called. + if (options_.cost_function_ownership == TAKE_OWNERSHIP && + residual_block->cost_function() != NULL) { + cost_functions_to_delete_.push_back( + const_cast<CostFunction*>(residual_block->cost_function())); + } + if (options_.loss_function_ownership == TAKE_OWNERSHIP && + residual_block->loss_function() != NULL) { + loss_functions_to_delete_.push_back( + const_cast<LossFunction*>(residual_block->loss_function())); + } + delete residual_block; +} + +// Deletes the parameter block in question, assuming there are no other +// references to it inside the problem (e.g. by any residual blocks). +// Referenced parameterizations are tucked away for future deletion, since it +// is not possible to know whether other parts of the problem depend on them +// without doing a full scan. 
+void ProblemImpl::DeleteBlock(ParameterBlock* parameter_block) { + if (options_.local_parameterization_ownership == TAKE_OWNERSHIP && + parameter_block->local_parameterization() != NULL) { + local_parameterizations_to_delete_.push_back( + parameter_block->mutable_local_parameterization()); + } + parameter_block_map_.erase(parameter_block->mutable_user_state()); + delete parameter_block; +} + ProblemImpl::ProblemImpl() : program_(new internal::Program) {} ProblemImpl::ProblemImpl(const Problem::Options& options) : options_(options), @@ -128,48 +181,28 @@ ProblemImpl::ProblemImpl(const Problem::Options& options) ProblemImpl::~ProblemImpl() { // Collect the unique cost/loss functions and delete the residuals. - set<CostFunction*> cost_functions; - set<LossFunction*> loss_functions; + const int num_residual_blocks = program_->residual_blocks_.size(); + cost_functions_to_delete_.reserve(num_residual_blocks); + loss_functions_to_delete_.reserve(num_residual_blocks); for (int i = 0; i < program_->residual_blocks_.size(); ++i) { - ResidualBlock* residual_block = program_->residual_blocks_[i]; - - // The const casts here are legit, since ResidualBlock holds these - // pointers as const pointers but we have ownership of them and - // have the right to destroy them when the destructor is called. - if (options_.cost_function_ownership == TAKE_OWNERSHIP) { - cost_functions.insert( - const_cast<CostFunction*>(residual_block->cost_function())); - } - if (options_.loss_function_ownership == TAKE_OWNERSHIP) { - loss_functions.insert( - const_cast<LossFunction*>(residual_block->loss_function())); - } - - delete residual_block; + DeleteBlock(program_->residual_blocks_[i]); } // Collect the unique parameterizations and delete the parameters. 
- set<LocalParameterization*> local_parameterizations; for (int i = 0; i < program_->parameter_blocks_.size(); ++i) { - ParameterBlock* parameter_block = program_->parameter_blocks_[i]; - - if (options_.local_parameterization_ownership == TAKE_OWNERSHIP) { - local_parameterizations.insert(parameter_block->local_parameterization_); - } - - delete parameter_block; + DeleteBlock(program_->parameter_blocks_[i]); } // Delete the owned cost/loss functions and parameterizations. - STLDeleteContainerPointers(local_parameterizations.begin(), - local_parameterizations.end()); - STLDeleteContainerPointers(cost_functions.begin(), - cost_functions.end()); - STLDeleteContainerPointers(loss_functions.begin(), - loss_functions.end()); + STLDeleteUniqueContainerPointers(local_parameterizations_to_delete_.begin(), + local_parameterizations_to_delete_.end()); + STLDeleteUniqueContainerPointers(cost_functions_to_delete_.begin(), + cost_functions_to_delete_.end()); + STLDeleteUniqueContainerPointers(loss_functions_to_delete_.begin(), + loss_functions_to_delete_.end()); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, const vector<double*>& parameter_blocks) { @@ -180,25 +213,28 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( // Check the sizes match. const vector<int16>& parameter_block_sizes = cost_function->parameter_block_sizes(); - CHECK_EQ(parameter_block_sizes.size(), parameter_blocks.size()) - << "Number of blocks input is different than the number of blocks " - << "that the cost function expects."; - - // Check for duplicate parameter blocks. 
- vector<double*> sorted_parameter_blocks(parameter_blocks); - sort(sorted_parameter_blocks.begin(), sorted_parameter_blocks.end()); - vector<double*>::const_iterator duplicate_items = - unique(sorted_parameter_blocks.begin(), - sorted_parameter_blocks.end()); - if (duplicate_items != sorted_parameter_blocks.end()) { - string blocks; - for (int i = 0; i < parameter_blocks.size(); ++i) { - blocks += internal::StringPrintf(" %p ", parameter_blocks[i]); - } - LOG(FATAL) << "Duplicate parameter blocks in a residual parameter " - << "are not allowed. Parameter block pointers: [" - << blocks << "]"; + if (!options_.disable_all_safety_checks) { + CHECK_EQ(parameter_block_sizes.size(), parameter_blocks.size()) + << "Number of blocks input is different than the number of blocks " + << "that the cost function expects."; + + // Check for duplicate parameter blocks. + vector<double*> sorted_parameter_blocks(parameter_blocks); + sort(sorted_parameter_blocks.begin(), sorted_parameter_blocks.end()); + vector<double*>::const_iterator duplicate_items = + unique(sorted_parameter_blocks.begin(), + sorted_parameter_blocks.end()); + if (duplicate_items != sorted_parameter_blocks.end()) { + string blocks; + for (int i = 0; i < parameter_blocks.size(); ++i) { + blocks += StringPrintf(" %p ", parameter_blocks[i]); + } + + LOG(FATAL) << "Duplicate parameter blocks in a residual parameter " + << "are not allowed. Parameter block pointers: [" + << blocks << "]"; + } } // Add parameter blocks and convert the double*'s to parameter blocks. @@ -206,33 +242,42 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( for (int i = 0; i < parameter_blocks.size(); ++i) { parameter_block_ptrs[i] = InternalAddParameterBlock(parameter_blocks[i], - parameter_block_sizes[i], - ¶meter_block_map_, - &program_->parameter_blocks_); + parameter_block_sizes[i]); } - // Check that the block sizes match the block sizes expected by the - // cost_function. 
- for (int i = 0; i < parameter_block_ptrs.size(); ++i) { - CHECK_EQ(cost_function->parameter_block_sizes()[i], - parameter_block_ptrs[i]->Size()) - << "The cost function expects parameter block " << i - << " of size " << cost_function->parameter_block_sizes()[i] - << " but was given a block of size " - << parameter_block_ptrs[i]->Size(); + if (!options_.disable_all_safety_checks) { + // Check that the block sizes match the block sizes expected by the + // cost_function. + for (int i = 0; i < parameter_block_ptrs.size(); ++i) { + CHECK_EQ(cost_function->parameter_block_sizes()[i], + parameter_block_ptrs[i]->Size()) + << "The cost function expects parameter block " << i + << " of size " << cost_function->parameter_block_sizes()[i] + << " but was given a block of size " + << parameter_block_ptrs[i]->Size(); + } } ResidualBlock* new_residual_block = new ResidualBlock(cost_function, loss_function, - parameter_block_ptrs); + parameter_block_ptrs, + program_->residual_blocks_.size()); + + // Add dependencies on the residual to the parameter blocks. + if (options_.enable_fast_parameter_block_removal) { + for (int i = 0; i < parameter_blocks.size(); ++i) { + parameter_block_ptrs[i]->AddResidualBlock(new_residual_block); + } + } + program_->residual_blocks_.push_back(new_residual_block); return new_residual_block; } // Unfortunately, macros don't help much to reduce this code, and var args don't // work because of the ambiguous case that there is no loss function. 
-const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0) { @@ -241,7 +286,7 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0, double* x1) { @@ -251,7 +296,7 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0, double* x1, double* x2) { @@ -262,7 +307,7 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3) { @@ -274,7 +319,7 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3, double* x4) { @@ -287,7 +332,7 @@ const ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } -const ResidualBlock* ProblemImpl::AddResidualBlock( +ResidualBlock* ProblemImpl::AddResidualBlock( CostFunction* cost_function, LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3, double* x4, double* x5) { @@ -301,12 +346,78 @@ const 
ResidualBlock* ProblemImpl::AddResidualBlock( return AddResidualBlock(cost_function, loss_function, residual_parameters); } +ResidualBlock* ProblemImpl::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6) { + vector<double*> residual_parameters; + residual_parameters.push_back(x0); + residual_parameters.push_back(x1); + residual_parameters.push_back(x2); + residual_parameters.push_back(x3); + residual_parameters.push_back(x4); + residual_parameters.push_back(x5); + residual_parameters.push_back(x6); + return AddResidualBlock(cost_function, loss_function, residual_parameters); +} + +ResidualBlock* ProblemImpl::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6, double* x7) { + vector<double*> residual_parameters; + residual_parameters.push_back(x0); + residual_parameters.push_back(x1); + residual_parameters.push_back(x2); + residual_parameters.push_back(x3); + residual_parameters.push_back(x4); + residual_parameters.push_back(x5); + residual_parameters.push_back(x6); + residual_parameters.push_back(x7); + return AddResidualBlock(cost_function, loss_function, residual_parameters); +} + +ResidualBlock* ProblemImpl::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8) { + vector<double*> residual_parameters; + residual_parameters.push_back(x0); + residual_parameters.push_back(x1); + residual_parameters.push_back(x2); + residual_parameters.push_back(x3); + residual_parameters.push_back(x4); + residual_parameters.push_back(x5); + residual_parameters.push_back(x6); + residual_parameters.push_back(x7); + residual_parameters.push_back(x8); + return AddResidualBlock(cost_function, loss_function, 
residual_parameters); +} + +ResidualBlock* ProblemImpl::AddResidualBlock( + CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8, double* x9) { + vector<double*> residual_parameters; + residual_parameters.push_back(x0); + residual_parameters.push_back(x1); + residual_parameters.push_back(x2); + residual_parameters.push_back(x3); + residual_parameters.push_back(x4); + residual_parameters.push_back(x5); + residual_parameters.push_back(x6); + residual_parameters.push_back(x7); + residual_parameters.push_back(x8); + residual_parameters.push_back(x9); + return AddResidualBlock(cost_function, loss_function, residual_parameters); +} void ProblemImpl::AddParameterBlock(double* values, int size) { - InternalAddParameterBlock(values, - size, - ¶meter_block_map_, - &program_->parameter_blocks_); + InternalAddParameterBlock(values, size); } void ProblemImpl::AddParameterBlock( @@ -314,15 +425,83 @@ void ProblemImpl::AddParameterBlock( int size, LocalParameterization* local_parameterization) { ParameterBlock* parameter_block = - InternalAddParameterBlock(values, - size, - ¶meter_block_map_, - &program_->parameter_blocks_); + InternalAddParameterBlock(values, size); if (local_parameterization != NULL) { parameter_block->SetParameterization(local_parameterization); } } +// Delete a block from a vector of blocks, maintaining the indexing invariant. +// This is done in constant time by moving an element from the end of the +// vector over the element to remove, then popping the last element. It +// destroys the ordering in the interest of speed. +template<typename Block> +void ProblemImpl::DeleteBlockInVector(vector<Block*>* mutable_blocks, + Block* block_to_remove) { + CHECK_EQ((*mutable_blocks)[block_to_remove->index()], block_to_remove) + << "You found a Ceres bug! 
Block: " << block_to_remove->ToString(); + + // Prepare the to-be-moved block for the new, lower-in-index position by + // setting the index to the blocks final location. + Block* tmp = mutable_blocks->back(); + tmp->set_index(block_to_remove->index()); + + // Overwrite the to-be-deleted residual block with the one at the end. + (*mutable_blocks)[block_to_remove->index()] = tmp; + + DeleteBlock(block_to_remove); + + // The block is gone so shrink the vector of blocks accordingly. + mutable_blocks->pop_back(); +} + +void ProblemImpl::RemoveResidualBlock(ResidualBlock* residual_block) { + CHECK_NOTNULL(residual_block); + + // If needed, remove the parameter dependencies on this residual block. + if (options_.enable_fast_parameter_block_removal) { + const int num_parameter_blocks_for_residual = + residual_block->NumParameterBlocks(); + for (int i = 0; i < num_parameter_blocks_for_residual; ++i) { + residual_block->parameter_blocks()[i] + ->RemoveResidualBlock(residual_block); + } + } + DeleteBlockInVector(program_->mutable_residual_blocks(), residual_block); +} + +void ProblemImpl::RemoveParameterBlock(double* values) { + ParameterBlock* parameter_block = FindOrDie(parameter_block_map_, values); + + if (options_.enable_fast_parameter_block_removal) { + // Copy the dependent residuals from the parameter block because the set of + // dependents will change after each call to RemoveResidualBlock(). + vector<ResidualBlock*> residual_blocks_to_remove( + parameter_block->mutable_residual_blocks()->begin(), + parameter_block->mutable_residual_blocks()->end()); + for (int i = 0; i < residual_blocks_to_remove.size(); ++i) { + RemoveResidualBlock(residual_blocks_to_remove[i]); + } + } else { + // Scan all the residual blocks to remove ones that depend on the parameter + // block. Do the scan backwards since the vector changes while iterating. 
+ const int num_residual_blocks = NumResidualBlocks(); + for (int i = num_residual_blocks - 1; i >= 0; --i) { + ResidualBlock* residual_block = + (*(program_->mutable_residual_blocks()))[i]; + const int num_parameter_blocks = residual_block->NumParameterBlocks(); + for (int j = 0; j < num_parameter_blocks; ++j) { + if (residual_block->parameter_blocks()[j] == parameter_block) { + RemoveResidualBlock(residual_block); + // The parameter blocks are guaranteed unique. + break; + } + } + } + } + DeleteBlockInVector(program_->mutable_parameter_blocks(), parameter_block); +} + void ProblemImpl::SetParameterBlockConstant(double* values) { FindOrDie(parameter_block_map_, values)->SetConstant(); } @@ -338,6 +517,164 @@ void ProblemImpl::SetParameterization( ->SetParameterization(local_parameterization); } +bool ProblemImpl::Evaluate(const Problem::EvaluateOptions& evaluate_options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian) { + if (cost == NULL && + residuals == NULL && + gradient == NULL && + jacobian == NULL) { + LOG(INFO) << "Nothing to do."; + return true; + } + + // If the user supplied residual blocks, then use them, otherwise + // take the residual blocks from the underlying program. + Program program; + *program.mutable_residual_blocks() = + ((evaluate_options.residual_blocks.size() > 0) + ? evaluate_options.residual_blocks : program_->residual_blocks()); + + const vector<double*>& parameter_block_ptrs = + evaluate_options.parameter_blocks; + + vector<ParameterBlock*> variable_parameter_blocks; + vector<ParameterBlock*>& parameter_blocks = + *program.mutable_parameter_blocks(); + + if (parameter_block_ptrs.size() == 0) { + // The user did not provide any parameter blocks, so default to + // using all the parameter blocks in the order that they are in + // the underlying program object. + parameter_blocks = program_->parameter_blocks(); + } else { + // The user supplied a vector of parameter blocks. 
Using this list + // requires a number of steps. + + // 1. Convert double* into ParameterBlock* + parameter_blocks.resize(parameter_block_ptrs.size()); + for (int i = 0; i < parameter_block_ptrs.size(); ++i) { + parameter_blocks[i] = + FindOrDie(parameter_block_map_, parameter_block_ptrs[i]); + } + + // 2. The user may have only supplied a subset of parameter + // blocks, so identify the ones that are not supplied by the user + // and are NOT constant. These parameter blocks are stored in + // variable_parameter_blocks. + // + // To ensure that the parameter blocks are not included in the + // columns of the jacobian, we need to make sure that they are + // constant during evaluation and then make them variable again + // after we are done. + vector<ParameterBlock*> all_parameter_blocks(program_->parameter_blocks()); + vector<ParameterBlock*> included_parameter_blocks( + program.parameter_blocks()); + + vector<ParameterBlock*> excluded_parameter_blocks; + sort(all_parameter_blocks.begin(), all_parameter_blocks.end()); + sort(included_parameter_blocks.begin(), included_parameter_blocks.end()); + set_difference(all_parameter_blocks.begin(), + all_parameter_blocks.end(), + included_parameter_blocks.begin(), + included_parameter_blocks.end(), + back_inserter(excluded_parameter_blocks)); + + variable_parameter_blocks.reserve(excluded_parameter_blocks.size()); + for (int i = 0; i < excluded_parameter_blocks.size(); ++i) { + ParameterBlock* parameter_block = excluded_parameter_blocks[i]; + if (!parameter_block->IsConstant()) { + variable_parameter_blocks.push_back(parameter_block); + parameter_block->SetConstant(); + } + } + } + + // Setup the Parameter indices and offsets before an evaluator can + // be constructed and used. 
+ program.SetParameterOffsetsAndIndex(); + + Evaluator::Options evaluator_options; + + // Even though using SPARSE_NORMAL_CHOLESKY requires SuiteSparse or + // CXSparse, here it just being used for telling the evaluator to + // use a SparseRowCompressedMatrix for the jacobian. This is because + // the Evaluator decides the storage for the Jacobian based on the + // type of linear solver being used. + evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY; + evaluator_options.num_threads = evaluate_options.num_threads; + + string error; + scoped_ptr<Evaluator> evaluator( + Evaluator::Create(evaluator_options, &program, &error)); + if (evaluator.get() == NULL) { + LOG(ERROR) << "Unable to create an Evaluator object. " + << "Error: " << error + << "This is a Ceres bug; please contact the developers!"; + + // Make the parameter blocks that were temporarily marked + // constant, variable again. + for (int i = 0; i < variable_parameter_blocks.size(); ++i) { + variable_parameter_blocks[i]->SetVarying(); + } + return false; + } + + if (residuals !=NULL) { + residuals->resize(evaluator->NumResiduals()); + } + + if (gradient != NULL) { + gradient->resize(evaluator->NumEffectiveParameters()); + } + + scoped_ptr<CompressedRowSparseMatrix> tmp_jacobian; + if (jacobian != NULL) { + tmp_jacobian.reset( + down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian())); + } + + // Point the state pointers to the user state pointers. This is + // needed so that we can extract a parameter vector which is then + // passed to Evaluator::Evaluate. + program.SetParameterBlockStatePtrsToUserStatePtrs(); + + // Copy the value of the parameter blocks into a vector, since the + // Evaluate::Evaluate method needs its input as such. The previous + // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that + // these values are the ones corresponding to the actual state of + // the parameter blocks, rather than the temporary state pointer + // used for evaluation. 
+ Vector parameters(program.NumParameters()); + program.ParameterBlocksToStateVector(parameters.data()); + + double tmp_cost = 0; + bool status = evaluator->Evaluate(parameters.data(), + &tmp_cost, + residuals != NULL ? &(*residuals)[0] : NULL, + gradient != NULL ? &(*gradient)[0] : NULL, + tmp_jacobian.get()); + + // Make the parameter blocks that were temporarirly marked + // constant, variable again. + for (int i = 0; i < variable_parameter_blocks.size(); ++i) { + variable_parameter_blocks[i]->SetVarying(); + } + + if (status) { + if (cost != NULL) { + *cost = tmp_cost; + } + if (jacobian != NULL) { + tmp_jacobian->ToCRSMatrix(jacobian); + } + } + + return status; +} + int ProblemImpl::NumParameterBlocks() const { return program_->NumParameterBlocks(); } diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h index 2ca055448c3..ccc315de6b6 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h +++ b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h @@ -53,6 +53,7 @@ namespace ceres { class CostFunction; class LossFunction; class LocalParameterization; +struct CRSMatrix; namespace internal { @@ -93,14 +94,46 @@ class ProblemImpl { LossFunction* loss_function, double* x0, double* x1, double* x2, double* x3, double* x4, double* x5); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7); + ResidualBlockId AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8); + ResidualBlockId 
AddResidualBlock(CostFunction* cost_function, + LossFunction* loss_function, + double* x0, double* x1, double* x2, + double* x3, double* x4, double* x5, + double* x6, double* x7, double* x8, + double* x9); void AddParameterBlock(double* values, int size); void AddParameterBlock(double* values, int size, LocalParameterization* local_parameterization); + + void RemoveResidualBlock(ResidualBlock* residual_block); + void RemoveParameterBlock(double* values); + void SetParameterBlockConstant(double* values); void SetParameterBlockVariable(double* values); void SetParameterization(double* values, LocalParameterization* local_parameterization); + + bool Evaluate(const Problem::EvaluateOptions& options, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian); + int NumParameterBlocks() const; int NumParameters() const; int NumResidualBlocks() const; @@ -112,12 +145,41 @@ class ProblemImpl { const ParameterMap& parameter_map() const { return parameter_block_map_; } private: + ParameterBlock* InternalAddParameterBlock(double* values, int size); + + bool InternalEvaluate(Program* program, + double* cost, + vector<double>* residuals, + vector<double>* gradient, + CRSMatrix* jacobian); + + // Delete the arguments in question. These differ from the Remove* functions + // in that they do not clean up references to the block to delete; they + // merely delete them. + template<typename Block> + void DeleteBlockInVector(vector<Block*>* mutable_blocks, + Block* block_to_remove); + void DeleteBlock(ResidualBlock* residual_block); + void DeleteBlock(ParameterBlock* parameter_block); + const Problem::Options options_; // The mapping from user pointers to parameter blocks. map<double*, ParameterBlock*> parameter_block_map_; + // The actual parameter and residual blocks. internal::scoped_ptr<internal::Program> program_; + + // When removing residual and parameter blocks, cost/loss functions and + // parameterizations have ambiguous ownership. 
Instead of scanning the entire + // problem to see if the cost/loss/parameterization is shared with other + // residual or parameter blocks, buffer them until destruction. + // + // TODO(keir): See if it makes sense to use sets instead. + vector<CostFunction*> cost_functions_to_delete_; + vector<LossFunction*> loss_functions_to_delete_; + vector<LocalParameterization*> local_parameterizations_to_delete_; + CERES_DISALLOW_COPY_AND_ASSIGN(ProblemImpl); }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h b/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h index 6c48e7d7643..a19cdf8a86a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h +++ b/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h @@ -83,11 +83,14 @@ #include <omp.h> #endif +#include <map> +#include <vector> +#include "ceres/execution_summary.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/residual_block.h" -#include "ceres/internal/eigen.h" -#include "ceres/internal/scoped_ptr.h" namespace ceres { namespace internal { @@ -122,6 +125,12 @@ class ProgramEvaluator : public Evaluator { double* residuals, double* gradient, SparseMatrix* jacobian) { + ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_); + ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL + ? "Evaluator::Residual" + : "Evaluator::Jacobian", + &execution_summary_); + // The parameters are stateful, so set the state before evaluating. 
if (!program_->StateVectorToParameterBlocks(state)) { return false; @@ -129,7 +138,7 @@ class ProgramEvaluator : public Evaluator { if (residuals != NULL) { VectorRef(residuals, program_->NumResiduals()).setZero(); - } + } if (jacobian != NULL) { jacobian->SetZero(); @@ -138,6 +147,10 @@ class ProgramEvaluator : public Evaluator { // Each thread gets it's own cost and evaluate scratch space. for (int i = 0; i < options_.num_threads; ++i) { evaluate_scratch_[i].cost = 0.0; + if (gradient != NULL) { + VectorRef(evaluate_scratch_[i].gradient.get(), + program_->NumEffectiveParameters()).setZero(); + } } // This bool is used to disable the loop if an error is encountered @@ -262,6 +275,14 @@ class ProgramEvaluator : public Evaluator { return program_->NumResiduals(); } + virtual map<string, int> CallStatistics() const { + return execution_summary_.calls(); + } + + virtual map<string, double> TimeStatistics() const { + return execution_summary_.times(); + } + private: // Per-thread scratch space needed to evaluate and store each residual block. 
struct EvaluateScratch { @@ -327,6 +348,7 @@ class ProgramEvaluator : public Evaluator { scoped_array<EvaluatePreparer> evaluate_preparers_; scoped_array<EvaluateScratch> evaluate_scratch_; vector<int> residual_layout_; + ::ceres::internal::ExecutionSummary execution_summary_; }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc b/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc index bdb88b1dd97..7f789605e5f 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc @@ -49,12 +49,14 @@ namespace internal { ResidualBlock::ResidualBlock(const CostFunction* cost_function, const LossFunction* loss_function, - const vector<ParameterBlock*>& parameter_blocks) + const vector<ParameterBlock*>& parameter_blocks, + int index) : cost_function_(cost_function), loss_function_(loss_function), parameter_blocks_( new ParameterBlock* [ - cost_function->parameter_block_sizes().size()]) { + cost_function->parameter_block_sizes().size()]), + index_(index) { std::copy(parameter_blocks.begin(), parameter_blocks.end(), parameter_blocks_.get()); diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block.h b/extern/libmv/third_party/ceres/internal/ceres/residual_block.h index e0a06e78958..3921d1d4678 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/residual_block.h +++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block.h @@ -34,11 +34,13 @@ #ifndef CERES_INTERNAL_RESIDUAL_BLOCK_H_ #define CERES_INTERNAL_RESIDUAL_BLOCK_H_ +#include <string> #include <vector> #include "ceres/cost_function.h" #include "ceres/internal/port.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/stringprintf.h" #include "ceres/types.h" namespace ceres { @@ -64,9 +66,13 @@ class ParameterBlock; // loss functions, and parameter blocks. 
class ResidualBlock { public: + // Construct the residual block with the given cost/loss functions. Loss may + // be null. The index is the index of the residual block in the Program's + // residual_blocks array. ResidualBlock(const CostFunction* cost_function, const LossFunction* loss_function, - const vector<ParameterBlock*>& parameter_blocks); + const vector<ParameterBlock*>& parameter_blocks, + int index); // Evaluates the residual term, storing the scalar cost in *cost, the residual // components in *residuals, and the jacobians between the parameters and @@ -112,10 +118,23 @@ class ResidualBlock { // The minimum amount of scratch space needed to pass to Evaluate(). int NumScratchDoublesForEvaluate() const; + // This residual block's index in an array. + int index() const { return index_; } + void set_index(int index) { index_ = index; } + + string ToString() { + return StringPrintf("{residual block; index=%d}", index_); + } + private: const CostFunction* cost_function_; const LossFunction* loss_function_; scoped_array<ParameterBlock*> parameter_blocks_; + + // The index of the residual, typically in a Program. This is only to permit + // switching from a ResidualBlock* to an index in the Program's array, needed + // to do efficient removals. + int32 index_; }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc index 9442bb2a1c1..4d88a9f4f8a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc @@ -63,7 +63,8 @@ void InvalidateEvaluation(const ResidualBlock& block, // Utility routine to print an array of doubles to a string. If the // array pointer is NULL, it is treated as an array of zeros. 
-static void AppendArrayToString(const int size, const double* x, string* result) { +namespace { +void AppendArrayToString(const int size, const double* x, string* result) { for (int i = 0; i < size; ++i) { if (x == NULL) { StringAppendF(result, "Not Computed "); @@ -76,6 +77,7 @@ static void AppendArrayToString(const int size, const double* x, string* result) } } } +} // namespace string EvaluationToString(const ResidualBlock& block, double const* const* parameters, diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc index 2cbe78d133a..17537596c75 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc @@ -38,22 +38,21 @@ #endif // CERES_NO_CXSPARSE #include "Eigen/Dense" -#include "glog/logging.h" #include "ceres/block_random_access_dense_matrix.h" #include "ceres/block_random_access_matrix.h" #include "ceres/block_random_access_sparse_matrix.h" #include "ceres/block_sparse_matrix.h" #include "ceres/block_structure.h" #include "ceres/detect_structure.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/port.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/linear_solver.h" #include "ceres/schur_complement_solver.h" #include "ceres/suitesparse.h" #include "ceres/triplet_sparse_matrix.h" -#include "ceres/internal/eigen.h" -#include "ceres/internal/port.h" -#include "ceres/internal/scoped_ptr.h" #include "ceres/types.h" - +#include "ceres/wall_time.h" namespace ceres { namespace internal { @@ -63,43 +62,39 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl( const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double* x) { - const time_t start_time = time(NULL); + EventLogger event_logger("SchurComplementSolver::Solve"); + if (eliminator_.get() == NULL) { InitStorage(A->block_structure()); 
DetectStructure(*A->block_structure(), - options_.num_eliminate_blocks, + options_.elimination_groups[0], &options_.row_block_size, &options_.e_block_size, &options_.f_block_size); eliminator_.reset(CHECK_NOTNULL(SchurEliminatorBase::Create(options_))); - eliminator_->Init(options_.num_eliminate_blocks, A->block_structure()); + eliminator_->Init(options_.elimination_groups[0], A->block_structure()); }; - const time_t init_time = time(NULL); fill(x, x + A->num_cols(), 0.0); + event_logger.AddEvent("Setup"); LinearSolver::Summary summary; summary.num_iterations = 1; summary.termination_type = FAILURE; eliminator_->Eliminate(A, b, per_solve_options.D, lhs_.get(), rhs_.get()); - const time_t eliminate_time = time(NULL); + event_logger.AddEvent("Eliminate"); double* reduced_solution = x + A->num_cols() - lhs_->num_cols(); const bool status = SolveReducedLinearSystem(reduced_solution); - const time_t solve_time = time(NULL); + event_logger.AddEvent("ReducedSolve"); if (!status) { return summary; } eliminator_->BackSubstitute(A, b, per_solve_options.D, reduced_solution, x); - const time_t backsubstitute_time = time(NULL); summary.termination_type = TOLERANCE; - VLOG(2) << "time (sec) total: " << (backsubstitute_time - start_time) - << " init: " << (init_time - start_time) - << " eliminate: " << (eliminate_time - init_time) - << " solve: " << (solve_time - eliminate_time) - << " backsubstitute: " << (backsubstitute_time - solve_time); + event_logger.AddEvent("BackSubstitute"); return summary; } @@ -107,7 +102,7 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl( // complement. 
void DenseSchurComplementSolver::InitStorage( const CompressedRowBlockStructure* bs) { - const int num_eliminate_blocks = options().num_eliminate_blocks; + const int num_eliminate_blocks = options().elimination_groups[0]; const int num_col_blocks = bs->cols.size(); vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0); @@ -179,7 +174,7 @@ SparseSchurComplementSolver::~SparseSchurComplementSolver() { // initialize a BlockRandomAccessSparseMatrix object. void SparseSchurComplementSolver::InitStorage( const CompressedRowBlockStructure* bs) { - const int num_eliminate_blocks = options().num_eliminate_blocks; + const int num_eliminate_blocks = options().elimination_groups[0]; const int num_col_blocks = bs->cols.size(); const int num_row_blocks = bs->rows.size(); @@ -268,8 +263,6 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) { // CHOLMOD's sparse cholesky factorization routines. bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( double* solution) { - const time_t start_time = time(NULL); - TripletSparseMatrix* tsm = const_cast<TripletSparseMatrix*>( down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix()); @@ -286,11 +279,9 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( // The matrix is symmetric, and the upper triangular part of the // matrix contains the values. cholmod_lhs->stype = 1; - const time_t lhs_time = time(NULL); cholmod_dense* cholmod_rhs = ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows); - const time_t rhs_time = time(NULL); // Symbolic factorization is computed if we don't already have one handy. 
if (factor_ == NULL) { @@ -307,32 +298,22 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse( CHECK_NOTNULL(factor_); - const time_t symbolic_time = time(NULL); cholmod_dense* cholmod_solution = ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs); - const time_t solve_time = time(NULL); - ss_.Free(cholmod_lhs); cholmod_lhs = NULL; ss_.Free(cholmod_rhs); cholmod_rhs = NULL; if (cholmod_solution == NULL) { - LOG(ERROR) << "CHOLMOD solve failed."; + LOG(WARNING) << "CHOLMOD solve failed."; return false; } VectorRef(solution, num_rows) = VectorRef(static_cast<double*>(cholmod_solution->x), num_rows); ss_.Free(cholmod_solution); - const time_t final_time = time(NULL); - VLOG(2) << "time: " << (final_time - start_time) - << " lhs : " << (lhs_time - start_time) - << " rhs: " << (rhs_time - lhs_time) - << " analyze: " << (symbolic_time - rhs_time) - << " factor_and_solve: " << (solve_time - symbolic_time) - << " cleanup: " << (final_time - solve_time); return true; } #else diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h index ea1b3184c33..7e98f316255 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h @@ -33,6 +33,7 @@ #include <set> #include <utility> +#include <vector> #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h" @@ -97,13 +98,14 @@ class BlockSparseMatrixBase; // The two solvers can be instantiated by calling // LinearSolver::CreateLinearSolver with LinearSolver::Options::type // set to DENSE_SCHUR and SPARSE_SCHUR -// respectively. LinearSolver::Options::num_eliminate_blocks should be +// respectively. LinearSolver::Options::elimination_groups[0] should be // at least 1. 
class SchurComplementSolver : public BlockSparseMatrixBaseSolver { public: explicit SchurComplementSolver(const LinearSolver::Options& options) : options_(options) { - CHECK_GT(options.num_eliminate_blocks, 0); + CHECK_GT(options.elimination_groups.size(), 1); + CHECK_GT(options.elimination_groups[0], 0); } // LinearSolver methods diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h index 6120db9b009..339c44bc41c 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h @@ -50,7 +50,6 @@ #include <algorithm> #include <map> -#include <glog/logging.h> #include "Eigen/Dense" #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h" @@ -60,6 +59,7 @@ #include "ceres/stl_util.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" +#include "glog/logging.h" namespace ceres { namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc new file mode 100644 index 00000000000..33a666ed037 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.cc @@ -0,0 +1,145 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include "ceres/schur_jacobi_preconditioner.h" + +#include <utility> +#include <vector> +#include "Eigen/Dense" +#include "ceres/block_random_access_sparse_matrix.h" +#include "ceres/block_sparse_matrix.h" +#include "ceres/collections_port.h" +#include "ceres/detect_structure.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/linear_solver.h" +#include "ceres/schur_eliminator.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +SchurJacobiPreconditioner::SchurJacobiPreconditioner( + const CompressedRowBlockStructure& bs, + const Preconditioner::Options& options) + : options_(options) { + CHECK_GT(options_.elimination_groups.size(), 1); + CHECK_GT(options_.elimination_groups[0], 0); + const int num_blocks = bs.cols.size() - options_.elimination_groups[0]; + CHECK_GT(num_blocks, 0) + << "Jacobian should have atleast 1 f_block for " + << "SCHUR_JACOBI preconditioner."; + + block_size_.resize(num_blocks); + set<pair<int, int> > block_pairs; + + int num_block_diagonal_entries = 0; + for (int i = 0; i < num_blocks; ++i) { + block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size; + block_pairs.insert(make_pair(i, i)); + num_block_diagonal_entries += block_size_[i] * block_size_[i]; + } + + m_.reset(new BlockRandomAccessSparseMatrix(block_size_, block_pairs)); + InitEliminator(bs); +} + +SchurJacobiPreconditioner::~SchurJacobiPreconditioner() { +} + +// Initialize the SchurEliminator. 
+void SchurJacobiPreconditioner::InitEliminator( + const CompressedRowBlockStructure& bs) { + LinearSolver::Options eliminator_options; + + eliminator_options.elimination_groups = options_.elimination_groups; + eliminator_options.num_threads = options_.num_threads; + + DetectStructure(bs, options_.elimination_groups[0], + &eliminator_options.row_block_size, + &eliminator_options.e_block_size, + &eliminator_options.f_block_size); + + eliminator_.reset(SchurEliminatorBase::Create(eliminator_options)); + eliminator_->Init(options_.elimination_groups[0], &bs); +} + +// Update the values of the preconditioner matrix and factorize it. +bool SchurJacobiPreconditioner::Update(const BlockSparseMatrixBase& A, + const double* D) { + const int num_rows = m_->num_rows(); + CHECK_GT(num_rows, 0); + + // We need a dummy rhs vector and a dummy b vector since the Schur + // eliminator combines the computation of the reduced camera matrix + // with the computation of the right hand side of that linear + // system. + // + // TODO(sameeragarwal): Perhaps its worth refactoring the + // SchurEliminator::Eliminate function to allow NULL for the rhs. As + // of now it does not seem to be worth the effort. + Vector rhs = Vector::Zero(m_->num_rows()); + Vector b = Vector::Zero(A.num_rows()); + + // Compute a subset of the entries of the Schur complement. + eliminator_->Eliminate(&A, b.data(), D, m_.get(), rhs.data()); + return true; +} + +void SchurJacobiPreconditioner::RightMultiply(const double* x, + double* y) const { + CHECK_NOTNULL(x); + CHECK_NOTNULL(y); + + const double* lhs_values = + down_cast<BlockRandomAccessSparseMatrix*>(m_.get())->matrix()->values(); + + // This loop can be easily multi-threaded with OpenMP if need be. 
+ for (int i = 0; i < block_size_.size(); ++i) { + const int block_size = block_size_[i]; + ConstMatrixRef block(lhs_values, block_size, block_size); + + VectorRef(y, block_size) = + block + .selfadjointView<Eigen::Upper>() + .ldlt() + .solve(ConstVectorRef(x, block_size)); + + x += block_size; + y += block_size; + lhs_values += block_size * block_size; + } +} + +int SchurJacobiPreconditioner::num_rows() const { + return m_->num_rows(); +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h new file mode 100644 index 00000000000..3addd73abd2 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/schur_jacobi_preconditioner.h @@ -0,0 +1,110 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2013 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// +// Detailed descriptions of these preconditions beyond what is +// documented here can be found in +// +// Bundle Adjustment in the Large +// S. Agarwal, N. Snavely, S. Seitz & R. Szeliski, ECCV 2010 +// http://www.cs.washington.edu/homes/sagarwal/bal.pdf + +#ifndef CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_ +#define CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_ + +#include <set> +#include <vector> +#include <utility> +#include "ceres/collections_port.h" +#include "ceres/internal/macros.h" +#include "ceres/internal/scoped_ptr.h" +#include "ceres/preconditioner.h" + +namespace ceres { +namespace internal { + +class BlockRandomAccessSparseMatrix; +class BlockSparseMatrixBase; +struct CompressedRowBlockStructure; +class SchurEliminatorBase; + +// This class implements the SCHUR_JACOBI preconditioner for Structure +// from Motion/Bundle Adjustment problems. Full mathematical details +// can be found in +// +// Bundle Adjustment in the Large +// S. Agarwal, N. Snavely, S. Seitz & R. 
Szeliski, ECCV 2010 +// http://www.cs.washington.edu/homes/sagarwal/bal.pdf +// +// Example usage: +// +// Preconditioner::Options options; +// options.preconditioner_type = SCHUR_JACOBI; +// options.elimination_groups.push_back(num_points); +// options.elimination_groups.push_back(num_cameras); +// SchurJacobiPreconditioner preconditioner( +// *A.block_structure(), options); +// preconditioner.Update(A, NULL); +// preconditioner.RightMultiply(x, y); +// +class SchurJacobiPreconditioner : public Preconditioner { + public: + // Initialize the symbolic structure of the preconditioner. bs is + // the block structure of the linear system to be solved. It is used + // to determine the sparsity structure of the preconditioner matrix. + // + // It has the same structural requirement as other Schur complement + // based solvers. Please see schur_eliminator.h for more details. + SchurJacobiPreconditioner(const CompressedRowBlockStructure& bs, + const Preconditioner::Options& options); + virtual ~SchurJacobiPreconditioner(); + + // Preconditioner interface. + virtual bool Update(const BlockSparseMatrixBase& A, const double* D); + virtual void RightMultiply(const double* x, double* y) const; + virtual int num_rows() const; + + private: + void InitEliminator(const CompressedRowBlockStructure& bs); + + Preconditioner::Options options_; + + // Sizes of the blocks in the schur complement. + vector<int> block_size_; + scoped_ptr<SchurEliminatorBase> eliminator_; + + // Preconditioner matrix. 
+ scoped_ptr<BlockRandomAccessSparseMatrix> m_; + CERES_DISALLOW_COPY_AND_ASSIGN(SchurJacobiPreconditioner); +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_SCHUR_JACOBI_PRECONDITIONER_H_ diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver.cc b/extern/libmv/third_party/ceres/internal/ceres/solver.cc index 66ca93283a1..6436d2df2a7 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/solver.cc @@ -37,20 +37,41 @@ #include "ceres/program.h" #include "ceres/solver_impl.h" #include "ceres/stringprintf.h" +#include "ceres/wall_time.h" namespace ceres { +namespace { + +void StringifyOrdering(const vector<int>& ordering, string* report) { + if (ordering.size() == 0) { + internal::StringAppendF(report, "AUTOMATIC"); + return; + } + + for (int i = 0; i < ordering.size() - 1; ++i) { + internal::StringAppendF(report, "%d, ", ordering[i]); + } + internal::StringAppendF(report, "%d", ordering.back()); +} + +} // namespace + +Solver::Options::~Options() { + delete linear_solver_ordering; + delete inner_iteration_ordering; +} Solver::~Solver() {} -// TODO(sameeragarwal): Use subsecond timers. void Solver::Solve(const Solver::Options& options, Problem* problem, Solver::Summary* summary) { - time_t start_time_seconds = time(NULL); + double start_time_seconds = internal::WallTimeInSeconds(); internal::ProblemImpl* problem_impl = CHECK_NOTNULL(problem)->problem_impl_.get(); internal::SolverImpl::Solve(options, problem_impl, summary); - summary->total_time_in_seconds = time(NULL) - start_time_seconds; + summary->total_time_in_seconds = + internal::WallTimeInSeconds() - start_time_seconds; } void Solve(const Solver::Options& options, @@ -63,7 +84,8 @@ void Solve(const Solver::Options& options, Solver::Summary::Summary() // Invalid values for most fields, to ensure that we are not // accidentally reporting default values. 
- : termination_type(DID_NOT_RUN), + : minimizer_type(TRUST_REGION), + termination_type(DID_NOT_RUN), initial_cost(-1.0), final_cost(-1.0), fixed_cost(-1.0), @@ -73,6 +95,9 @@ Solver::Summary::Summary() minimizer_time_in_seconds(-1.0), postprocessor_time_in_seconds(-1.0), total_time_in_seconds(-1.0), + linear_solver_time_in_seconds(-1.0), + residual_evaluation_time_in_seconds(-1.0), + jacobian_evaluation_time_in_seconds(-1.0), num_parameter_blocks(-1), num_parameters(-1), num_residual_blocks(-1), @@ -81,8 +106,6 @@ Solver::Summary::Summary() num_parameters_reduced(-1), num_residual_blocks_reduced(-1), num_residuals_reduced(-1), - num_eliminate_blocks_given(-1), - num_eliminate_blocks_used(-1), num_threads_given(-1), num_threads_used(-1), num_linear_solver_threads_given(-1), @@ -90,9 +113,11 @@ Solver::Summary::Summary() linear_solver_type_given(SPARSE_NORMAL_CHOLESKY), linear_solver_type_used(SPARSE_NORMAL_CHOLESKY), preconditioner_type(IDENTITY), - ordering_type(NATURAL), trust_region_strategy_type(LEVENBERG_MARQUARDT), - sparse_linear_algebra_library(SUITE_SPARSE) { + inner_iterations(false), + sparse_linear_algebra_library(SUITE_SPARSE), + line_search_direction_type(LBFGS), + line_search_type(ARMIJO) { } string Solver::Summary::BriefReport() const { @@ -121,6 +146,8 @@ string Solver::Summary::BriefReport() const { return report; }; +using internal::StringAppendF; + string Solver::Summary::FullReport() const { string report = "\n" @@ -128,124 +155,210 @@ string Solver::Summary::FullReport() const { "-------------------\n"; if (termination_type == DID_NOT_RUN) { - internal::StringAppendF(&report, " Original\n"); - internal::StringAppendF(&report, "Parameter blocks % 10d\n", - num_parameter_blocks); - internal::StringAppendF(&report, "Parameters % 10d\n", - num_parameters); - internal::StringAppendF(&report, "Residual blocks % 10d\n", - num_residual_blocks); - internal::StringAppendF(&report, "Residuals % 10d\n\n", - num_residuals); + StringAppendF(&report, " 
Original\n"); + StringAppendF(&report, "Parameter blocks % 10d\n", + num_parameter_blocks); + StringAppendF(&report, "Parameters % 10d\n", + num_parameters); + StringAppendF(&report, "Residual blocks % 10d\n", + num_residual_blocks); + StringAppendF(&report, "Residuals % 10d\n\n", + num_residuals); } else { - internal::StringAppendF(&report, "%45s %21s\n", "Original", "Reduced"); - internal::StringAppendF(&report, "Parameter blocks % 25d% 25d\n", - num_parameter_blocks, num_parameter_blocks_reduced); - internal::StringAppendF(&report, "Parameters % 25d% 25d\n", - num_parameters, num_parameters_reduced); - internal::StringAppendF(&report, "Residual blocks % 25d% 25d\n", - num_residual_blocks, num_residual_blocks_reduced); - internal::StringAppendF(&report, "Residual % 25d% 25d\n\n", - num_residuals, num_residuals_reduced); + StringAppendF(&report, "%45s %21s\n", "Original", "Reduced"); + StringAppendF(&report, "Parameter blocks % 25d% 25d\n", + num_parameter_blocks, num_parameter_blocks_reduced); + StringAppendF(&report, "Parameters % 25d% 25d\n", + num_parameters, num_parameters_reduced); + StringAppendF(&report, "Residual blocks % 25d% 25d\n", + num_residual_blocks, num_residual_blocks_reduced); + StringAppendF(&report, "Residual % 25d% 25d\n", + num_residuals, num_residuals_reduced); } - internal::StringAppendF(&report, "%45s %21s\n", "Given", "Used"); - internal::StringAppendF(&report, "Linear solver %25s%25s\n", - LinearSolverTypeToString(linear_solver_type_given), - LinearSolverTypeToString(linear_solver_type_used)); + // TODO(sameeragarwal): Refactor this into separate functions. 
- if (linear_solver_type_given == CGNR || - linear_solver_type_given == ITERATIVE_SCHUR) { - internal::StringAppendF(&report, "Preconditioner %25s%25s\n", - PreconditionerTypeToString(preconditioner_type), - PreconditionerTypeToString(preconditioner_type)); - } else { - internal::StringAppendF(&report, "Preconditioner %25s%25s\n", - "N/A", "N/A"); - } + if (minimizer_type == TRUST_REGION) { + StringAppendF(&report, "\nMinimizer %19s\n", + "TRUST_REGION"); + if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY || + linear_solver_type_used == SPARSE_SCHUR || + (linear_solver_type_used == ITERATIVE_SCHUR && + (preconditioner_type == CLUSTER_JACOBI || + preconditioner_type == CLUSTER_TRIDIAGONAL))) { + StringAppendF(&report, "\nSparse Linear Algebra Library %15s\n", + SparseLinearAlgebraLibraryTypeToString( + sparse_linear_algebra_library)); + } - internal::StringAppendF(&report, "Ordering %25s%25s\n", - OrderingTypeToString(ordering_type), - OrderingTypeToString(ordering_type)); + StringAppendF(&report, "Trust Region Strategy %19s", + TrustRegionStrategyTypeToString( + trust_region_strategy_type)); + if (trust_region_strategy_type == DOGLEG) { + if (dogleg_type == TRADITIONAL_DOGLEG) { + StringAppendF(&report, " (TRADITIONAL)"); + } else { + StringAppendF(&report, " (SUBSPACE)"); + } + } + StringAppendF(&report, "\n"); + StringAppendF(&report, "\n"); + + StringAppendF(&report, "%45s %21s\n", "Given", "Used"); + StringAppendF(&report, "Linear solver %25s%25s\n", + LinearSolverTypeToString(linear_solver_type_given), + LinearSolverTypeToString(linear_solver_type_used)); - if (IsSchurType(linear_solver_type_given)) { - if (ordering_type == SCHUR) { - internal::StringAppendF(&report, "num_eliminate_blocks%25s% 25d\n", - "N/A", - num_eliminate_blocks_used); + if (linear_solver_type_given == CGNR || + linear_solver_type_given == ITERATIVE_SCHUR) { + StringAppendF(&report, "Preconditioner %25s%25s\n", + PreconditionerTypeToString(preconditioner_type), + 
PreconditionerTypeToString(preconditioner_type)); } else { - internal::StringAppendF(&report, "num_eliminate_blocks% 25d% 25d\n", - num_eliminate_blocks_given, - num_eliminate_blocks_used); + StringAppendF(&report, "Preconditioner %25s%25s\n", + "N/A", "N/A"); } - } - internal::StringAppendF(&report, "Threads: % 25d% 25d\n", - num_threads_given, num_threads_used); - internal::StringAppendF(&report, "Linear solver threads % 23d% 25d\n", - num_linear_solver_threads_given, - num_linear_solver_threads_used); - - if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY || - linear_solver_type_used == SPARSE_SCHUR || - (linear_solver_type_used == ITERATIVE_SCHUR && - (preconditioner_type == SCHUR_JACOBI || - preconditioner_type == CLUSTER_JACOBI || - preconditioner_type == CLUSTER_TRIDIAGONAL))) { - internal::StringAppendF(&report, "\nSparse Linear Algebra Library %15s\n", - SparseLinearAlgebraLibraryTypeToString( - sparse_linear_algebra_library)); - } + StringAppendF(&report, "Threads: % 25d% 25d\n", + num_threads_given, num_threads_used); + StringAppendF(&report, "Linear solver threads % 23d% 25d\n", + num_linear_solver_threads_given, + num_linear_solver_threads_used); + + if (IsSchurType(linear_solver_type_used)) { + string given; + StringifyOrdering(linear_solver_ordering_given, &given); + string used; + StringifyOrdering(linear_solver_ordering_used, &used); + StringAppendF(&report, + "Linear solver ordering %22s %24s\n", + given.c_str(), + used.c_str()); + } + + if (inner_iterations) { + string given; + StringifyOrdering(inner_iteration_ordering_given, &given); + string used; + StringifyOrdering(inner_iteration_ordering_used, &used); + StringAppendF(&report, + "Inner iteration ordering %20s %24s\n", + given.c_str(), + used.c_str()); + } + + if (termination_type == DID_NOT_RUN) { + CHECK(!error.empty()) + << "Solver terminated with DID_NOT_RUN but the solver did not " + << "return a reason. This is a Ceres error. 
Please report this " + << "to the Ceres team"; + StringAppendF(&report, "Termination: %20s\n", + "DID_NOT_RUN"); + StringAppendF(&report, "Reason: %s\n", error.c_str()); + return report; + } + + StringAppendF(&report, "\nCost:\n"); + StringAppendF(&report, "Initial % 30e\n", initial_cost); + if (termination_type != NUMERICAL_FAILURE && + termination_type != USER_ABORT) { + StringAppendF(&report, "Final % 30e\n", final_cost); + StringAppendF(&report, "Change % 30e\n", + initial_cost - final_cost); + } + + StringAppendF(&report, "\nNumber of iterations:\n"); + StringAppendF(&report, "Successful % 20d\n", + num_successful_steps); + StringAppendF(&report, "Unsuccessful % 20d\n", + num_unsuccessful_steps); + StringAppendF(&report, "Total % 20d\n", + num_successful_steps + num_unsuccessful_steps); + + StringAppendF(&report, "\nTime (in seconds):\n"); + StringAppendF(&report, "Preprocessor %25.3f\n", + preprocessor_time_in_seconds); + StringAppendF(&report, "\n Residual Evaluations %22.3f\n", + residual_evaluation_time_in_seconds); + StringAppendF(&report, " Jacobian Evaluations %22.3f\n", + jacobian_evaluation_time_in_seconds); + StringAppendF(&report, " Linear Solver %23.3f\n", + linear_solver_time_in_seconds); + StringAppendF(&report, "Minimizer %25.3f\n\n", + minimizer_time_in_seconds); + + StringAppendF(&report, "Postprocessor %24.3f\n", + postprocessor_time_in_seconds); - internal::StringAppendF(&report, "Trust Region Strategy %19s", - TrustRegionStrategyTypeToString( - trust_region_strategy_type)); - if (trust_region_strategy_type == DOGLEG) { - if (dogleg_type == TRADITIONAL_DOGLEG) { - internal::StringAppendF(&report, " (TRADITIONAL)"); + StringAppendF(&report, "Total %25.3f\n\n", + total_time_in_seconds); + + StringAppendF(&report, "Termination: %25s\n", + SolverTerminationTypeToString(termination_type)); + } else { + // LINE_SEARCH + StringAppendF(&report, "\nMinimizer %19s\n", "LINE_SEARCH"); + if (line_search_direction_type == LBFGS) { + 
StringAppendF(&report, "Line search direction %19s(%d)\n", + LineSearchDirectionTypeToString(line_search_direction_type), + max_lbfgs_rank); } else { - internal::StringAppendF(&report, " (SUBSPACE)"); + StringAppendF(&report, "Line search direction %19s\n", + LineSearchDirectionTypeToString( + line_search_direction_type)); } - } - internal::StringAppendF(&report, "\n"); + StringAppendF(&report, "Line search type %19s\n", + LineSearchTypeToString(line_search_type)); + StringAppendF(&report, "\n"); - if (termination_type == DID_NOT_RUN) { - CHECK(!error.empty()) - << "Solver terminated with DID_NOT_RUN but the solver did not " - << "return a reason. This is a Ceres error. Please report this " - << "to the Ceres team"; - internal::StringAppendF(&report, "Termination: %20s\n", - "DID_NOT_RUN"); - internal::StringAppendF(&report, "Reason: %s\n", error.c_str()); - return report; - } + StringAppendF(&report, "%45s %21s\n", "Given", "Used"); + StringAppendF(&report, "Threads: % 25d% 25d\n", + num_threads_given, num_threads_used); - internal::StringAppendF(&report, "\nCost:\n"); - internal::StringAppendF(&report, "Initial % 30e\n", initial_cost); - if (termination_type != NUMERICAL_FAILURE && termination_type != USER_ABORT) { - internal::StringAppendF(&report, "Final % 30e\n", final_cost); - internal::StringAppendF(&report, "Change % 30e\n", - initial_cost - final_cost); + if (termination_type == DID_NOT_RUN) { + CHECK(!error.empty()) + << "Solver terminated with DID_NOT_RUN but the solver did not " + << "return a reason. This is a Ceres error. 
Please report this " + << "to the Ceres team"; + StringAppendF(&report, "Termination: %20s\n", + "DID_NOT_RUN"); + StringAppendF(&report, "Reason: %s\n", error.c_str()); + return report; + } + + StringAppendF(&report, "\nCost:\n"); + StringAppendF(&report, "Initial % 30e\n", initial_cost); + if (termination_type != NUMERICAL_FAILURE && + termination_type != USER_ABORT) { + StringAppendF(&report, "Final % 30e\n", final_cost); + StringAppendF(&report, "Change % 30e\n", + initial_cost - final_cost); + } + + StringAppendF(&report, "\nNumber of iterations: % 20ld\n", + iterations.size() - 1); + + StringAppendF(&report, "\nTime (in seconds):\n"); + StringAppendF(&report, "Preprocessor %25.3f\n", + preprocessor_time_in_seconds); + StringAppendF(&report, "\n Residual Evaluations %22.3f\n", + residual_evaluation_time_in_seconds); + StringAppendF(&report, " Jacobian Evaluations %22.3f\n", + jacobian_evaluation_time_in_seconds); + StringAppendF(&report, "Minimizer %25.3f\n\n", + minimizer_time_in_seconds); + + StringAppendF(&report, "Postprocessor %24.3f\n", + postprocessor_time_in_seconds); + + StringAppendF(&report, "Total %25.3f\n\n", + total_time_in_seconds); + + StringAppendF(&report, "Termination: %25s\n", + SolverTerminationTypeToString(termination_type)); } - internal::StringAppendF(&report, "\nNumber of iterations:\n"); - internal::StringAppendF(&report, "Successful % 20d\n", - num_successful_steps); - internal::StringAppendF(&report, "Unsuccessful % 20d\n", - num_unsuccessful_steps); - internal::StringAppendF(&report, "Total % 20d\n", - num_successful_steps + num_unsuccessful_steps); - internal::StringAppendF(&report, "\nTime (in seconds):\n"); - internal::StringAppendF(&report, "Preprocessor % 25e\n", - preprocessor_time_in_seconds); - internal::StringAppendF(&report, "Minimizer % 25e\n", - minimizer_time_in_seconds); - internal::StringAppendF(&report, "Total % 25e\n", - total_time_in_seconds); - - internal::StringAppendF(&report, "Termination: %25s\n", - 
SolverTerminationTypeToString(termination_type)); return report; }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc index 8ef5b98e35f..5bcfdc6312f 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc @@ -33,21 +33,25 @@ #include <cstdio> #include <iostream> // NOLINT #include <numeric> +#include "ceres/coordinate_descent_minimizer.h" #include "ceres/evaluator.h" #include "ceres/gradient_checking_cost_function.h" #include "ceres/iteration_callback.h" #include "ceres/levenberg_marquardt_strategy.h" #include "ceres/linear_solver.h" +#include "ceres/line_search_minimizer.h" #include "ceres/map_util.h" #include "ceres/minimizer.h" +#include "ceres/ordered_groups.h" #include "ceres/parameter_block.h" +#include "ceres/parameter_block_ordering.h" #include "ceres/problem.h" #include "ceres/problem_impl.h" #include "ceres/program.h" #include "ceres/residual_block.h" -#include "ceres/schur_ordering.h" #include "ceres/stringprintf.h" #include "ceres/trust_region_minimizer.h" +#include "ceres/wall_time.h" namespace ceres { namespace internal { @@ -73,14 +77,24 @@ class StateUpdatingCallback : public IterationCallback { double* parameters_; }; +void SetSummaryFinalCost(Solver::Summary* summary) { + summary->final_cost = summary->initial_cost; + // We need the loop here, instead of just looking at the last + // iteration because the minimizer maybe making non-monotonic steps. + for (int i = 0; i < summary->iterations.size(); ++i) { + const IterationSummary& iteration_summary = summary->iterations[i]; + summary->final_cost = min(iteration_summary.cost, summary->final_cost); + } +} + // Callback for logging the state of the minimizer to STDERR or STDOUT // depending on the user's preferences and logging level. 
-class LoggingCallback : public IterationCallback { +class TrustRegionLoggingCallback : public IterationCallback { public: - explicit LoggingCallback(bool log_to_stdout) + explicit TrustRegionLoggingCallback(bool log_to_stdout) : log_to_stdout_(log_to_stdout) {} - ~LoggingCallback() {} + ~TrustRegionLoggingCallback() {} CallbackReturnType operator()(const IterationSummary& summary) { const char* kReportRowFormat = @@ -109,6 +123,42 @@ class LoggingCallback : public IterationCallback { const bool log_to_stdout_; }; +// Callback for logging the state of the minimizer to STDERR or STDOUT +// depending on the user's preferences and logging level. +class LineSearchLoggingCallback : public IterationCallback { + public: + explicit LineSearchLoggingCallback(bool log_to_stdout) + : log_to_stdout_(log_to_stdout) {} + + ~LineSearchLoggingCallback() {} + + CallbackReturnType operator()(const IterationSummary& summary) { + const char* kReportRowFormat = + "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e " + "s:% 3.2e e:% 3d it:% 3.2e tt:% 3.2e"; + string output = StringPrintf(kReportRowFormat, + summary.iteration, + summary.cost, + summary.cost_change, + summary.gradient_max_norm, + summary.step_norm, + summary.step_size, + summary.line_search_function_evaluations, + summary.iteration_time_in_seconds, + summary.cumulative_time_in_seconds); + if (log_to_stdout_) { + cout << output << endl; + } else { + VLOG(1) << output; + } + return SOLVER_CONTINUE; + } + + private: + const bool log_to_stdout_; +}; + + // Basic callback to record the execution of the solver to a file for // offline analysis. class FileLoggingCallback : public IterationCallback { @@ -137,14 +187,34 @@ class FileLoggingCallback : public IterationCallback { FILE* fptr_; }; +// Iterate over each of the groups in order of their priority and fill +// summary with their sizes. 
+void SummarizeOrdering(ParameterBlockOrdering* ordering, + vector<int>* summary) { + CHECK_NOTNULL(summary)->clear(); + if (ordering == NULL) { + return; + } + + const map<int, set<double*> >& group_to_elements = + ordering->group_to_elements(); + for (map<int, set<double*> >::const_iterator it = group_to_elements.begin(); + it != group_to_elements.end(); + ++it) { + summary->push_back(it->second.size()); + } +} + } // namespace -void SolverImpl::Minimize(const Solver::Options& options, - Program* program, - Evaluator* evaluator, - LinearSolver* linear_solver, - double* parameters, - Solver::Summary* summary) { +void SolverImpl::TrustRegionMinimize( + const Solver::Options& options, + Program* program, + CoordinateDescentMinimizer* inner_iteration_minimizer, + Evaluator* evaluator, + LinearSolver* linear_solver, + double* parameters, + Solver::Summary* summary) { Minimizer::Options minimizer_options(options); // TODO(sameeragarwal): Add support for logging the configuration @@ -156,7 +226,8 @@ void SolverImpl::Minimize(const Solver::Options& options, file_logging_callback.get()); } - LoggingCallback logging_callback(options.minimizer_progress_to_stdout); + TrustRegionLoggingCallback logging_callback( + options.minimizer_progress_to_stdout); if (options.logging_type != SILENT) { minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), &logging_callback); @@ -172,7 +243,9 @@ void SolverImpl::Minimize(const Solver::Options& options, minimizer_options.evaluator = evaluator; scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian()); + minimizer_options.jacobian = jacobian.get(); + minimizer_options.inner_iteration_minimizer = inner_iteration_minimizer; TrustRegionStrategy::Options trust_region_strategy_options; trust_region_strategy_options.linear_solver = linear_solver; @@ -189,78 +262,140 @@ void SolverImpl::Minimize(const Solver::Options& options, minimizer_options.trust_region_strategy = strategy.get(); TrustRegionMinimizer minimizer; - time_t 
minimizer_start_time = time(NULL); + double minimizer_start_time = WallTimeInSeconds(); minimizer.Minimize(minimizer_options, parameters, summary); - summary->minimizer_time_in_seconds = time(NULL) - minimizer_start_time; + summary->minimizer_time_in_seconds = + WallTimeInSeconds() - minimizer_start_time; } -void SolverImpl::Solve(const Solver::Options& original_options, - ProblemImpl* original_problem_impl, + +void SolverImpl::LineSearchMinimize( + const Solver::Options& options, + Program* program, + Evaluator* evaluator, + double* parameters, + Solver::Summary* summary) { + Minimizer::Options minimizer_options(options); + + // TODO(sameeragarwal): Add support for logging the configuration + // and more detailed stats. + scoped_ptr<IterationCallback> file_logging_callback; + if (!options.solver_log.empty()) { + file_logging_callback.reset(new FileLoggingCallback(options.solver_log)); + minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), + file_logging_callback.get()); + } + + LineSearchLoggingCallback logging_callback( + options.minimizer_progress_to_stdout); + if (options.logging_type != SILENT) { + minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), + &logging_callback); + } + + StateUpdatingCallback updating_callback(program, parameters); + if (options.update_state_every_iteration) { + // This must get pushed to the front of the callbacks so that it is run + // before any of the user callbacks. 
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(), + &updating_callback); + } + + minimizer_options.evaluator = evaluator; + + LineSearchMinimizer minimizer; + double minimizer_start_time = WallTimeInSeconds(); + minimizer.Minimize(minimizer_options, parameters, summary); + summary->minimizer_time_in_seconds = + WallTimeInSeconds() - minimizer_start_time; +} + +void SolverImpl::Solve(const Solver::Options& options, + ProblemImpl* problem_impl, Solver::Summary* summary) { - time_t solver_start_time = time(NULL); - Solver::Options options(original_options); + if (options.minimizer_type == TRUST_REGION) { + TrustRegionSolve(options, problem_impl, summary); + } else { + LineSearchSolve(options, problem_impl, summary); + } +} + +void SolverImpl::TrustRegionSolve(const Solver::Options& original_options, + ProblemImpl* original_problem_impl, + Solver::Summary* summary) { + EventLogger event_logger("TrustRegionSolve"); + double solver_start_time = WallTimeInSeconds(); + Program* original_program = original_problem_impl->mutable_program(); ProblemImpl* problem_impl = original_problem_impl; + // Reset the summary object to its default values. *CHECK_NOTNULL(summary) = Solver::Summary(); + summary->minimizer_type = TRUST_REGION; + summary->num_parameter_blocks = problem_impl->NumParameterBlocks(); + summary->num_parameters = problem_impl->NumParameters(); + summary->num_residual_blocks = problem_impl->NumResidualBlocks(); + summary->num_residuals = problem_impl->NumResiduals(); + + // Empty programs are usually a user error. 
+ if (summary->num_parameter_blocks == 0) { + summary->error = "Problem contains no parameter blocks."; + LOG(ERROR) << summary->error; + return; + } + + if (summary->num_residual_blocks == 0) { + summary->error = "Problem contains no residual blocks."; + LOG(ERROR) << summary->error; + return; + } + + SummarizeOrdering(original_options.linear_solver_ordering, + &(summary->linear_solver_ordering_given)); + + SummarizeOrdering(original_options.inner_iteration_ordering, + &(summary->inner_iteration_ordering_given)); + + Solver::Options options(original_options); + options.linear_solver_ordering = NULL; + options.inner_iteration_ordering = NULL; #ifndef CERES_USE_OPENMP if (options.num_threads > 1) { LOG(WARNING) << "OpenMP support is not compiled into this binary; " - << "only options.num_threads=1 is supported. Switching" + << "only options.num_threads=1 is supported. Switching " << "to single threaded mode."; options.num_threads = 1; } if (options.num_linear_solver_threads > 1) { LOG(WARNING) << "OpenMP support is not compiled into this binary; " - << "only options.num_linear_solver_threads=1 is supported. Switching" + << "only options.num_linear_solver_threads=1 is supported. 
Switching " << "to single threaded mode."; options.num_linear_solver_threads = 1; } #endif - summary->linear_solver_type_given = options.linear_solver_type; - summary->num_eliminate_blocks_given = original_options.num_eliminate_blocks; summary->num_threads_given = original_options.num_threads; - summary->num_linear_solver_threads_given = - original_options.num_linear_solver_threads; - summary->ordering_type = original_options.ordering_type; + summary->num_threads_used = options.num_threads; - summary->num_parameter_blocks = problem_impl->NumParameterBlocks(); - summary->num_parameters = problem_impl->NumParameters(); - summary->num_residual_blocks = problem_impl->NumResidualBlocks(); - summary->num_residuals = problem_impl->NumResiduals(); + if (options.lsqp_iterations_to_dump.size() > 0) { + LOG(WARNING) << "Dumping linear least squares problems to disk is" + " currently broken. Ignoring Solver::Options::lsqp_iterations_to_dump"; + } - summary->num_threads_used = options.num_threads; - summary->sparse_linear_algebra_library = - options.sparse_linear_algebra_library; - summary->trust_region_strategy_type = options.trust_region_strategy_type; - summary->dogleg_type = options.dogleg_type; + event_logger.AddEvent("Init"); - // Evaluate the initial cost, residual vector and the jacobian - // matrix if requested by the user. The initial cost needs to be - // computed on the original unpreprocessed problem, as it is used to - // determine the value of the "fixed" part of the objective function - // after the problem has undergone reduction. - Evaluator::Evaluate( - original_program, - options.num_threads, - &(summary->initial_cost), - options.return_initial_residuals ? &summary->initial_residuals : NULL, - options.return_initial_gradient ? &summary->initial_gradient : NULL, - options.return_initial_jacobian ? 
&summary->initial_jacobian : NULL); - original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + event_logger.AddEvent("SetParameterBlockPtrs"); // If the user requests gradient checking, construct a new // ProblemImpl by wrapping the CostFunctions of problem_impl inside // GradientCheckingCostFunction and replacing problem_impl with // gradient_checking_problem_impl. scoped_ptr<ProblemImpl> gradient_checking_problem_impl; - // Save the original problem impl so we don't use the gradient - // checking one when computing the residuals. if (options.check_gradients) { VLOG(1) << "Checking Gradients"; gradient_checking_problem_impl.reset( @@ -269,45 +404,344 @@ void SolverImpl::Solve(const Solver::Options& original_options, options.numeric_derivative_relative_step_size, options.gradient_check_relative_precision)); - // From here on, problem_impl will point to the GradientChecking version. + // From here on, problem_impl will point to the gradient checking + // version. problem_impl = gradient_checking_problem_impl.get(); } + if (original_options.linear_solver_ordering != NULL) { + if (!IsOrderingValid(original_options, problem_impl, &summary->error)) { + LOG(ERROR) << summary->error; + return; + } + event_logger.AddEvent("CheckOrdering"); + options.linear_solver_ordering = + new ParameterBlockOrdering(*original_options.linear_solver_ordering); + event_logger.AddEvent("CopyOrdering"); + } else { + options.linear_solver_ordering = new ParameterBlockOrdering; + const ProblemImpl::ParameterMap& parameter_map = + problem_impl->parameter_map(); + for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin(); + it != parameter_map.end(); + ++it) { + options.linear_solver_ordering->AddElementToGroup(it->first, 0); + } + event_logger.AddEvent("ConstructOrdering"); + } + // Create the three objects needed to minimize: the transformed program, the // evaluator, and the linear solver. 
- scoped_ptr<Program> reduced_program(CreateReducedProgram(&options, problem_impl, &summary->fixed_cost, &summary->error)); + + event_logger.AddEvent("CreateReducedProgram"); if (reduced_program == NULL) { return; } + SummarizeOrdering(options.linear_solver_ordering, + &(summary->linear_solver_ordering_used)); + summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks(); summary->num_parameters_reduced = reduced_program->NumParameters(); summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks(); summary->num_residuals_reduced = reduced_program->NumResiduals(); + if (summary->num_parameter_blocks_reduced == 0) { + summary->preprocessor_time_in_seconds = + WallTimeInSeconds() - solver_start_time; + + double post_process_start_time = WallTimeInSeconds(); + LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. " + << "No non-constant parameter blocks found."; + + summary->initial_cost = summary->fixed_cost; + summary->final_cost = summary->fixed_cost; + + // FUNCTION_TOLERANCE is the right convergence here, as we know + // that the objective function is constant and cannot be changed + // any further. + summary->termination_type = FUNCTION_TOLERANCE; + + // Ensure the program state is set to the user parameters on the way out. 
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + + summary->postprocessor_time_in_seconds = + WallTimeInSeconds() - post_process_start_time; + return; + } + scoped_ptr<LinearSolver> linear_solver(CreateLinearSolver(&options, &summary->error)); + event_logger.AddEvent("CreateLinearSolver"); + if (linear_solver == NULL) { + return; + } + + summary->linear_solver_type_given = original_options.linear_solver_type; summary->linear_solver_type_used = options.linear_solver_type; + summary->preconditioner_type = options.preconditioner_type; - summary->num_eliminate_blocks_used = options.num_eliminate_blocks; + + summary->num_linear_solver_threads_given = + original_options.num_linear_solver_threads; summary->num_linear_solver_threads_used = options.num_linear_solver_threads; - if (linear_solver == NULL) { + summary->sparse_linear_algebra_library = + options.sparse_linear_algebra_library; + + summary->trust_region_strategy_type = options.trust_region_strategy_type; + summary->dogleg_type = options.dogleg_type; + + // Only Schur types require the lexicographic reordering. + if (IsSchurType(options.linear_solver_type)) { + const int num_eliminate_blocks = + options.linear_solver_ordering + ->group_to_elements().begin() + ->second.size(); + if (!LexicographicallyOrderResidualBlocks(num_eliminate_blocks, + reduced_program.get(), + &summary->error)) { + return; + } + } + + scoped_ptr<Evaluator> evaluator(CreateEvaluator(options, + problem_impl->parameter_map(), + reduced_program.get(), + &summary->error)); + + event_logger.AddEvent("CreateEvaluator"); + + if (evaluator == NULL) { + return; + } + + scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer; + if (options.use_inner_iterations) { + if (reduced_program->parameter_blocks().size() < 2) { + LOG(WARNING) << "Reduced problem only contains one parameter block." 
+ << "Disabling inner iterations."; + } else { + inner_iteration_minimizer.reset( + CreateInnerIterationMinimizer(original_options, + *reduced_program, + problem_impl->parameter_map(), + summary)); + if (inner_iteration_minimizer == NULL) { + LOG(ERROR) << summary->error; + return; + } + } + } + + event_logger.AddEvent("CreateIIM"); + + // The optimizer works on contiguous parameter vectors; allocate some. + Vector parameters(reduced_program->NumParameters()); + + // Collect the discontiguous parameters into a contiguous state vector. + reduced_program->ParameterBlocksToStateVector(parameters.data()); + + Vector original_parameters = parameters; + + double minimizer_start_time = WallTimeInSeconds(); + summary->preprocessor_time_in_seconds = + minimizer_start_time - solver_start_time; + + // Run the optimization. + TrustRegionMinimize(options, + reduced_program.get(), + inner_iteration_minimizer.get(), + evaluator.get(), + linear_solver.get(), + parameters.data(), + summary); + event_logger.AddEvent("Minimize"); + + SetSummaryFinalCost(summary); + + // If the user aborted mid-optimization or the optimization + // terminated because of a numerical failure, then return without + // updating user state. + if (summary->termination_type == USER_ABORT || + summary->termination_type == NUMERICAL_FAILURE) { return; } - if (!MaybeReorderResidualBlocks(options, - reduced_program.get(), - &summary->error)) { + double post_process_start_time = WallTimeInSeconds(); + + // Push the contiguous optimized parameters back to the user's + // parameters. + reduced_program->StateVectorToParameterBlocks(parameters.data()); + reduced_program->CopyParameterBlockStateToUserState(); + + // Ensure the program state is set to the user parameters on the way + // out. 
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + + const map<string, double>& linear_solver_time_statistics = + linear_solver->TimeStatistics(); + summary->linear_solver_time_in_seconds = + FindWithDefault(linear_solver_time_statistics, + "LinearSolver::Solve", + 0.0); + + const map<string, double>& evaluator_time_statistics = + evaluator->TimeStatistics(); + + summary->residual_evaluation_time_in_seconds = + FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0); + summary->jacobian_evaluation_time_in_seconds = + FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0); + + // Stick a fork in it, we're done. + summary->postprocessor_time_in_seconds = + WallTimeInSeconds() - post_process_start_time; + event_logger.AddEvent("PostProcess"); +} + +void SolverImpl::LineSearchSolve(const Solver::Options& original_options, + ProblemImpl* original_problem_impl, + Solver::Summary* summary) { + double solver_start_time = WallTimeInSeconds(); + + Program* original_program = original_problem_impl->mutable_program(); + ProblemImpl* problem_impl = original_problem_impl; + + // Reset the summary object to its default values. + *CHECK_NOTNULL(summary) = Solver::Summary(); + + summary->minimizer_type = LINE_SEARCH; + summary->line_search_direction_type = + original_options.line_search_direction_type; + summary->max_lbfgs_rank = original_options.max_lbfgs_rank; + summary->line_search_type = original_options.line_search_type; + summary->num_parameter_blocks = problem_impl->NumParameterBlocks(); + summary->num_parameters = problem_impl->NumParameters(); + summary->num_residual_blocks = problem_impl->NumResidualBlocks(); + summary->num_residuals = problem_impl->NumResiduals(); + + // Empty programs are usually a user error. 
+ if (summary->num_parameter_blocks == 0) { + summary->error = "Problem contains no parameter blocks."; + LOG(ERROR) << summary->error; return; } - scoped_ptr<Evaluator> evaluator( - CreateEvaluator(options, reduced_program.get(), &summary->error)); + if (summary->num_residual_blocks == 0) { + summary->error = "Problem contains no residual blocks."; + LOG(ERROR) << summary->error; + return; + } + + Solver::Options options(original_options); + + // This ensures that we get a Block Jacobian Evaluator along with + // none of the Schur nonsense. This file will have to be extensively + // refactored to deal with the various bits of cleanups related to + // line search. + options.linear_solver_type = CGNR; + + options.linear_solver_ordering = NULL; + options.inner_iteration_ordering = NULL; + +#ifndef CERES_USE_OPENMP + if (options.num_threads > 1) { + LOG(WARNING) + << "OpenMP support is not compiled into this binary; " + << "only options.num_threads=1 is supported. Switching " + << "to single threaded mode."; + options.num_threads = 1; + } +#endif + + summary->num_threads_given = original_options.num_threads; + summary->num_threads_used = options.num_threads; + + if (original_options.linear_solver_ordering != NULL) { + if (!IsOrderingValid(original_options, problem_impl, &summary->error)) { + LOG(ERROR) << summary->error; + return; + } + options.linear_solver_ordering = + new ParameterBlockOrdering(*original_options.linear_solver_ordering); + } else { + options.linear_solver_ordering = new ParameterBlockOrdering; + const ProblemImpl::ParameterMap& parameter_map = + problem_impl->parameter_map(); + for (ProblemImpl::ParameterMap::const_iterator it = parameter_map.begin(); + it != parameter_map.end(); + ++it) { + options.linear_solver_ordering->AddElementToGroup(it->first, 0); + } + } + + original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + + // If the user requests gradient checking, construct a new + // ProblemImpl by wrapping the CostFunctions of 
problem_impl inside + // GradientCheckingCostFunction and replacing problem_impl with + // gradient_checking_problem_impl. + scoped_ptr<ProblemImpl> gradient_checking_problem_impl; + if (options.check_gradients) { + VLOG(1) << "Checking Gradients"; + gradient_checking_problem_impl.reset( + CreateGradientCheckingProblemImpl( + problem_impl, + options.numeric_derivative_relative_step_size, + options.gradient_check_relative_precision)); + + // From here on, problem_impl will point to the gradient checking + // version. + problem_impl = gradient_checking_problem_impl.get(); + } + + // Create the three objects needed to minimize: the transformed program, the + // evaluator, and the linear solver. + scoped_ptr<Program> reduced_program(CreateReducedProgram(&options, + problem_impl, + &summary->fixed_cost, + &summary->error)); + if (reduced_program == NULL) { + return; + } + + summary->num_parameter_blocks_reduced = reduced_program->NumParameterBlocks(); + summary->num_parameters_reduced = reduced_program->NumParameters(); + summary->num_residual_blocks_reduced = reduced_program->NumResidualBlocks(); + summary->num_residuals_reduced = reduced_program->NumResiduals(); + + if (summary->num_parameter_blocks_reduced == 0) { + summary->preprocessor_time_in_seconds = + WallTimeInSeconds() - solver_start_time; + + LOG(INFO) << "Terminating: FUNCTION_TOLERANCE reached. " + << "No non-constant parameter blocks found."; + + // FUNCTION_TOLERANCE is the right convergence here, as we know + // that the objective function is constant and cannot be changed + // any further. + summary->termination_type = FUNCTION_TOLERANCE; + + const double post_process_start_time = WallTimeInSeconds(); + + SetSummaryFinalCost(summary); + + // Ensure the program state is set to the user parameters on the way out. 
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + summary->postprocessor_time_in_seconds = + WallTimeInSeconds() - post_process_start_time; + return; + } + + scoped_ptr<Evaluator> evaluator(CreateEvaluator(options, + problem_impl->parameter_map(), + reduced_program.get(), + &summary->error)); if (evaluator == NULL) { return; } @@ -318,17 +752,18 @@ void SolverImpl::Solve(const Solver::Options& original_options, // Collect the discontiguous parameters into a contiguous state vector. reduced_program->ParameterBlocksToStateVector(parameters.data()); - time_t minimizer_start_time = time(NULL); + Vector original_parameters = parameters; + + const double minimizer_start_time = WallTimeInSeconds(); summary->preprocessor_time_in_seconds = minimizer_start_time - solver_start_time; // Run the optimization. - Minimize(options, - reduced_program.get(), - evaluator.get(), - linear_solver.get(), - parameters.data(), - summary); + LineSearchMinimize(options, + reduced_program.get(), + evaluator.get(), + parameters.data(), + summary); // If the user aborted mid-optimization or the optimization // terminated because of a numerical failure, then return without @@ -338,35 +773,100 @@ void SolverImpl::Solve(const Solver::Options& original_options, return; } - time_t post_process_start_time = time(NULL); + const double post_process_start_time = WallTimeInSeconds(); // Push the contiguous optimized parameters back to the user's parameters. reduced_program->StateVectorToParameterBlocks(parameters.data()); reduced_program->CopyParameterBlockStateToUserState(); - // Evaluate the final cost, residual vector and the jacobian - // matrix if requested by the user. - Evaluator::Evaluate( - original_program, - options.num_threads, - &summary->final_cost, - options.return_final_residuals ? &summary->final_residuals : NULL, - options.return_final_gradient ? &summary->final_gradient : NULL, - options.return_final_jacobian ? 
&summary->final_jacobian : NULL); + SetSummaryFinalCost(summary); // Ensure the program state is set to the user parameters on the way out. original_program->SetParameterBlockStatePtrsToUserStatePtrs(); + + const map<string, double>& evaluator_time_statistics = + evaluator->TimeStatistics(); + + summary->residual_evaluation_time_in_seconds = + FindWithDefault(evaluator_time_statistics, "Evaluator::Residual", 0.0); + summary->jacobian_evaluation_time_in_seconds = + FindWithDefault(evaluator_time_statistics, "Evaluator::Jacobian", 0.0); + // Stick a fork in it, we're done. - summary->postprocessor_time_in_seconds = time(NULL) - post_process_start_time; + summary->postprocessor_time_in_seconds = + WallTimeInSeconds() - post_process_start_time; +} + + +bool SolverImpl::IsOrderingValid(const Solver::Options& options, + const ProblemImpl* problem_impl, + string* error) { + if (options.linear_solver_ordering->NumElements() != + problem_impl->NumParameterBlocks()) { + *error = "Number of parameter blocks in user supplied ordering " + "does not match the number of parameter blocks in the problem"; + return false; + } + + const Program& program = problem_impl->program(); + const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks(); + for (vector<ParameterBlock*>::const_iterator it = parameter_blocks.begin(); + it != parameter_blocks.end(); + ++it) { + if (!options.linear_solver_ordering + ->IsMember(const_cast<double*>((*it)->user_state()))) { + *error = "Problem contains a parameter block that is not in " + "the user specified ordering."; + return false; + } + } + + if (IsSchurType(options.linear_solver_type) && + options.linear_solver_ordering->NumGroups() > 1) { + const vector<ResidualBlock*>& residual_blocks = program.residual_blocks(); + const set<double*>& e_blocks = + options.linear_solver_ordering->group_to_elements().begin()->second; + if (!IsParameterBlockSetIndependent(e_blocks, residual_blocks)) { + *error = "The user requested the use of a 
Schur type solver. " + "But the first elimination group in the ordering is not an " + "independent set."; + return false; + } + } + return true; +} + +bool SolverImpl::IsParameterBlockSetIndependent( + const set<double*>& parameter_block_ptrs, + const vector<ResidualBlock*>& residual_blocks) { + // Loop over each residual block and ensure that no two parameter + // blocks in the same residual block are part of + // parameter_block_ptrs as that would violate the assumption that it + // is an independent set in the Hessian matrix. + for (vector<ResidualBlock*>::const_iterator it = residual_blocks.begin(); + it != residual_blocks.end(); + ++it) { + ParameterBlock* const* parameter_blocks = (*it)->parameter_blocks(); + const int num_parameter_blocks = (*it)->NumParameterBlocks(); + int count = 0; + for (int i = 0; i < num_parameter_blocks; ++i) { + count += parameter_block_ptrs.count( + parameter_blocks[i]->mutable_user_state()); + } + if (count > 1) { + return false; + } + } + return true; } + // Strips varying parameters and residuals, maintaining order, and updating // num_eliminate_blocks. bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program, - int* num_eliminate_blocks, + ParameterBlockOrdering* ordering, double* fixed_cost, string* error) { - int original_num_eliminate_blocks = *num_eliminate_blocks; vector<ParameterBlock*>* parameter_blocks = program->mutable_parameter_blocks(); @@ -423,7 +923,7 @@ bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program, } // Filter out unused or fixed parameter blocks, and update - // num_eliminate_blocks as necessary. + // the ordering. 
{ vector<ParameterBlock*>* parameter_blocks = program->mutable_parameter_blocks(); @@ -432,8 +932,8 @@ bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program, ParameterBlock* parameter_block = (*parameter_blocks)[i]; if (parameter_block->index() == 1) { (*parameter_blocks)[j++] = parameter_block; - } else if (i < original_num_eliminate_blocks) { - (*num_eliminate_blocks)--; + } else { + ordering->Remove(parameter_block->mutable_user_state()); } } parameter_blocks->resize(j); @@ -451,70 +951,127 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options, ProblemImpl* problem_impl, double* fixed_cost, string* error) { + EventLogger event_logger("CreateReducedProgram"); + + CHECK_NOTNULL(options->linear_solver_ordering); Program* original_program = problem_impl->mutable_program(); scoped_ptr<Program> transformed_program(new Program(*original_program)); + event_logger.AddEvent("TransformedProgram"); - if (options->ordering_type == USER && - !ApplyUserOrdering(*problem_impl, - options->ordering, - transformed_program.get(), - error)) { - return NULL; - } - - if (options->ordering_type == SCHUR && options->num_eliminate_blocks != 0) { - *error = "Can't specify SCHUR ordering and num_eliminate_blocks " - "at the same time; SCHUR ordering determines " - "num_eliminate_blocks automatically."; - return NULL; - } - - if (options->ordering_type == SCHUR && options->ordering.size() != 0) { - *error = "Can't specify SCHUR ordering type and the ordering " - "vector at the same time; SCHUR ordering determines " - "a suitable parameter ordering automatically."; - return NULL; - } + ParameterBlockOrdering* linear_solver_ordering = + options->linear_solver_ordering; - int num_eliminate_blocks = options->num_eliminate_blocks; + const int min_group_id = + linear_solver_ordering->group_to_elements().begin()->first; + const int original_num_groups = linear_solver_ordering->NumGroups(); if (!RemoveFixedBlocksFromProgram(transformed_program.get(), - 
&num_eliminate_blocks, + linear_solver_ordering, fixed_cost, error)) { return NULL; } + event_logger.AddEvent("RemoveFixedBlocks"); + if (transformed_program->NumParameterBlocks() == 0) { + if (transformed_program->NumResidualBlocks() > 0) { + *error = "Zero parameter blocks but non-zero residual blocks" + " in the reduced program. Congratulations, you found a " + "Ceres bug! Please report this error to the developers."; + return NULL; + } + LOG(WARNING) << "No varying parameter blocks to optimize; " << "bailing early."; return transformed_program.release(); } - if (options->ordering_type == SCHUR) { + // If the user supplied an linear_solver_ordering with just one + // group, it is equivalent to the user supplying NULL as + // ordering. Ceres is completely free to choose the parameter block + // ordering as it sees fit. For Schur type solvers, this means that + // the user wishes for Ceres to identify the e_blocks, which we do + // by computing a maximal independent set. + if (original_num_groups == 1 && IsSchurType(options->linear_solver_type)) { vector<ParameterBlock*> schur_ordering; - num_eliminate_blocks = ComputeSchurOrdering(*transformed_program, - &schur_ordering); + const int num_eliminate_blocks = ComputeSchurOrdering(*transformed_program, + &schur_ordering); CHECK_EQ(schur_ordering.size(), transformed_program->NumParameterBlocks()) << "Congratulations, you found a Ceres bug! Please report this error " << "to the developers."; - // Replace the transformed program's ordering with the schur ordering. - swap(*transformed_program->mutable_parameter_blocks(), schur_ordering); + for (int i = 0; i < schur_ordering.size(); ++i) { + linear_solver_ordering->AddElementToGroup( + schur_ordering[i]->mutable_user_state(), + (i < num_eliminate_blocks) ? 0 : 1); + } } - options->num_eliminate_blocks = num_eliminate_blocks; - CHECK_GE(options->num_eliminate_blocks, 0) - << "Congratulations, you found a Ceres bug! 
Please report this error " - << "to the developers."; + event_logger.AddEvent("SchurOrdering"); + + if (!ApplyUserOrdering(problem_impl->parameter_map(), + linear_solver_ordering, + transformed_program.get(), + error)) { + return NULL; + } + event_logger.AddEvent("ApplyOrdering"); + + // If the user requested the use of a Schur type solver, and + // supplied a non-NULL linear_solver_ordering object with more than + // one elimination group, then it can happen that after all the + // parameter blocks which are fixed or unused have been removed from + // the program and the ordering, there are no more parameter blocks + // in the first elimination group. + // + // In such a case, the use of a Schur type solver is not possible, + // as they assume there is at least one e_block. Thus, we + // automatically switch to one of the other solvers, depending on + // the user's indicated preferences. + if (IsSchurType(options->linear_solver_type) && + original_num_groups > 1 && + linear_solver_ordering->GroupSize(min_group_id) == 0) { + string msg = "No e_blocks remaining. Switching from "; + if (options->linear_solver_type == SPARSE_SCHUR) { + options->linear_solver_type = SPARSE_NORMAL_CHOLESKY; + msg += "SPARSE_SCHUR to SPARSE_NORMAL_CHOLESKY."; + } else if (options->linear_solver_type == DENSE_SCHUR) { + // TODO(sameeragarwal): This is probably not a great choice. + // Ideally, we should have a DENSE_NORMAL_CHOLESKY, that can + // take a BlockSparseMatrix as input. + options->linear_solver_type = DENSE_QR; + msg += "DENSE_SCHUR to DENSE_QR."; + } else if (options->linear_solver_type == ITERATIVE_SCHUR) { + msg += StringPrintf("ITERATIVE_SCHUR with %s preconditioner " + "to CGNR with JACOBI preconditioner.", + PreconditionerTypeToString( + options->preconditioner_type)); + options->linear_solver_type = CGNR; + if (options->preconditioner_type != IDENTITY) { + // CGNR currently only supports the JACOBI preconditioner. 
+ options->preconditioner_type = JACOBI; + } + } - // Since the transformed program is the "active" program, and it is mutated, - // update the parameter offsets and indices. + LOG(WARNING) << msg; + } + + event_logger.AddEvent("AlternateSolver"); + + // Since the transformed program is the "active" program, and it is + // mutated, update the parameter offsets and indices. transformed_program->SetParameterOffsetsAndIndex(); + + event_logger.AddEvent("SetOffsets"); return transformed_program.release(); } LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, string* error) { + CHECK_NOTNULL(options); + CHECK_NOTNULL(options->linear_solver_ordering); + CHECK_NOTNULL(error); + if (options->trust_region_strategy_type == DOGLEG) { if (options->linear_solver_type == ITERATIVE_SCHUR || options->linear_solver_type == CGNR) { @@ -532,6 +1089,18 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, "SuiteSparse was not enabled when Ceres was built."; return NULL; } + + if (options->preconditioner_type == CLUSTER_JACOBI) { + *error = "CLUSTER_JACOBI preconditioner not suppored. Please build Ceres " + "with SuiteSparse support."; + return NULL; + } + + if (options->preconditioner_type == CLUSTER_TRIDIAGONAL) { + *error = "CLUSTER_TRIDIAGONAL preconditioner not suppored. 
Please build " + "Ceres with SuiteSparse support."; + return NULL; + } #endif #ifdef CERES_NO_CXSPARSE @@ -543,6 +1112,13 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, } #endif +#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) + if (options->linear_solver_type == SPARSE_SCHUR) { + *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor" + "CXSparse was enabled when Ceres was compiled."; + return NULL; + } +#endif if (options->linear_solver_max_num_iterations <= 0) { *error = "Solver::Options::linear_solver_max_num_iterations is 0."; @@ -568,52 +1144,8 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, linear_solver_options.preconditioner_type = options->preconditioner_type; linear_solver_options.sparse_linear_algebra_library = options->sparse_linear_algebra_library; - linear_solver_options.use_block_amd = options->use_block_amd; - -#ifdef CERES_NO_SUITESPARSE - if (linear_solver_options.preconditioner_type == SCHUR_JACOBI) { - *error = "SCHUR_JACOBI preconditioner not suppored. Please build Ceres " - "with SuiteSparse support."; - return NULL; - } - - if (linear_solver_options.preconditioner_type == CLUSTER_JACOBI) { - *error = "CLUSTER_JACOBI preconditioner not suppored. Please build Ceres " - "with SuiteSparse support."; - return NULL; - } - - if (linear_solver_options.preconditioner_type == CLUSTER_TRIDIAGONAL) { - *error = "CLUSTER_TRIDIAGONAL preconditioner not suppored. 
Please build " - "Ceres with SuiteSparse support."; - return NULL; - } -#endif linear_solver_options.num_threads = options->num_linear_solver_threads; - linear_solver_options.num_eliminate_blocks = - options->num_eliminate_blocks; - - if ((linear_solver_options.num_eliminate_blocks == 0) && - IsSchurType(linear_solver_options.type)) { -#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) - LOG(INFO) << "No elimination block remaining switching to DENSE_QR."; - linear_solver_options.type = DENSE_QR; -#else - LOG(INFO) << "No elimination block remaining " - << "switching to SPARSE_NORMAL_CHOLESKY."; - linear_solver_options.type = SPARSE_NORMAL_CHOLESKY; -#endif - } - -#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE) - if (linear_solver_options.type == SPARSE_SCHUR) { - *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor" - "CXSparse was enabled when Ceres was compiled."; - return NULL; - } -#endif - // The matrix used for storing the dense Schur complement has a // single lock guarding the whole matrix. Running the // SchurComplementSolver with multiple threads leads to maximum @@ -628,56 +1160,67 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options, << "switching to single-threaded."; linear_solver_options.num_threads = 1; } - - options->linear_solver_type = linear_solver_options.type; options->num_linear_solver_threads = linear_solver_options.num_threads; + linear_solver_options.use_block_amd = options->use_block_amd; + const map<int, set<double*> >& groups = + options->linear_solver_ordering->group_to_elements(); + for (map<int, set<double*> >::const_iterator it = groups.begin(); + it != groups.end(); + ++it) { + linear_solver_options.elimination_groups.push_back(it->second.size()); + } + // Schur type solvers, expect at least two elimination groups. If + // there is only one elimination group, then CreateReducedProgram + // guarantees that this group only contains e_blocks. 
Thus we add a + // dummy elimination group with zero blocks in it. + if (IsSchurType(linear_solver_options.type) && + linear_solver_options.elimination_groups.size() == 1) { + linear_solver_options.elimination_groups.push_back(0); + } + return LinearSolver::Create(linear_solver_options); } -bool SolverImpl::ApplyUserOrdering(const ProblemImpl& problem_impl, - vector<double*>& ordering, - Program* program, - string* error) { - if (ordering.size() != program->NumParameterBlocks()) { +bool SolverImpl::ApplyUserOrdering( + const ProblemImpl::ParameterMap& parameter_map, + const ParameterBlockOrdering* ordering, + Program* program, + string* error) { + if (ordering->NumElements() != program->NumParameterBlocks()) { *error = StringPrintf("User specified ordering does not have the same " "number of parameters as the problem. The problem" - "has %d blocks while the ordering has %ld blocks.", + "has %d blocks while the ordering has %d blocks.", program->NumParameterBlocks(), - ordering.size()); + ordering->NumElements()); return false; } - // Ensure that there are no duplicates in the user's ordering. - { - vector<double*> ordering_copy(ordering); - sort(ordering_copy.begin(), ordering_copy.end()); - if (unique(ordering_copy.begin(), ordering_copy.end()) - != ordering_copy.end()) { - *error = "User specified ordering contains duplicates."; - return false; - } - } - vector<ParameterBlock*>* parameter_blocks = program->mutable_parameter_blocks(); - - fill(parameter_blocks->begin(), - parameter_blocks->end(), - static_cast<ParameterBlock*>(NULL)); - - const ProblemImpl::ParameterMap& parameter_map = problem_impl.parameter_map(); - for (int i = 0; i < ordering.size(); ++i) { - ProblemImpl::ParameterMap::const_iterator it = - parameter_map.find(ordering[i]); - if (it == parameter_map.end()) { - *error = StringPrintf("User specified ordering contains a pointer " - "to a double that is not a parameter block in the " - "problem. 
The invalid double is at position %d " - " in options.ordering.", i); - return false; + parameter_blocks->clear(); + + const map<int, set<double*> >& groups = + ordering->group_to_elements(); + + for (map<int, set<double*> >::const_iterator group_it = groups.begin(); + group_it != groups.end(); + ++group_it) { + const set<double*>& group = group_it->second; + for (set<double*>::const_iterator parameter_block_ptr_it = group.begin(); + parameter_block_ptr_it != group.end(); + ++parameter_block_ptr_it) { + ProblemImpl::ParameterMap::const_iterator parameter_block_it = + parameter_map.find(*parameter_block_ptr_it); + if (parameter_block_it == parameter_map.end()) { + *error = StringPrintf("User specified ordering contains a pointer " + "to a double that is not a parameter block in " + "the problem. The invalid double is in group: %d", + group_it->first); + return false; + } + parameter_blocks->push_back(parameter_block_it->second); } - (*parameter_blocks)[i] = it->second; } return true; } @@ -704,36 +1247,31 @@ static int MinParameterBlock(const ResidualBlock* residual_block, // Reorder the residuals for program, if necessary, so that the residuals // involving each E block occur together. This is a necessary condition for the // Schur eliminator, which works on these "row blocks" in the jacobian. -bool SolverImpl::MaybeReorderResidualBlocks(const Solver::Options& options, - Program* program, - string* error) { - // Only Schur types require the lexicographic reordering. - if (!IsSchurType(options.linear_solver_type)) { - return true; - } - - CHECK_NE(0, options.num_eliminate_blocks) - << "Congratulations, you found a Ceres bug! Please report this error " - << "to the developers."; +bool SolverImpl::LexicographicallyOrderResidualBlocks( + const int num_eliminate_blocks, + Program* program, + string* error) { + CHECK_GE(num_eliminate_blocks, 1) + << "Congratulations, you found a Ceres bug! 
Please report this error " + << "to the developers."; // Create a histogram of the number of residuals for each E block. There is an // extra bucket at the end to catch all non-eliminated F blocks. - vector<int> residual_blocks_per_e_block(options.num_eliminate_blocks + 1); + vector<int> residual_blocks_per_e_block(num_eliminate_blocks + 1); vector<ResidualBlock*>* residual_blocks = program->mutable_residual_blocks(); vector<int> min_position_per_residual(residual_blocks->size()); for (int i = 0; i < residual_blocks->size(); ++i) { ResidualBlock* residual_block = (*residual_blocks)[i]; - int position = MinParameterBlock(residual_block, - options.num_eliminate_blocks); + int position = MinParameterBlock(residual_block, num_eliminate_blocks); min_position_per_residual[i] = position; - DCHECK_LE(position, options.num_eliminate_blocks); + DCHECK_LE(position, num_eliminate_blocks); residual_blocks_per_e_block[position]++; } // Run a cumulative sum on the histogram, to obtain offsets to the start of // each histogram bucket (where each bucket is for the residuals for that // E-block). - vector<int> offsets(options.num_eliminate_blocks + 1); + vector<int> offsets(num_eliminate_blocks + 1); std::partial_sum(residual_blocks_per_e_block.begin(), residual_blocks_per_e_block.end(), offsets.begin()); @@ -772,7 +1310,7 @@ bool SolverImpl::MaybeReorderResidualBlocks(const Solver::Options& options, // Sanity check #1: The difference in bucket offsets should match the // histogram sizes. - for (int i = 0; i < options.num_eliminate_blocks; ++i) { + for (int i = 0; i < num_eliminate_blocks; ++i) { CHECK_EQ(residual_blocks_per_e_block[i], offsets[i + 1] - offsets[i]) << "Congratulations, you found a Ceres bug! 
Please report this error " << "to the developers."; @@ -789,15 +1327,76 @@ bool SolverImpl::MaybeReorderResidualBlocks(const Solver::Options& options, return true; } -Evaluator* SolverImpl::CreateEvaluator(const Solver::Options& options, - Program* program, - string* error) { +Evaluator* SolverImpl::CreateEvaluator( + const Solver::Options& options, + const ProblemImpl::ParameterMap& parameter_map, + Program* program, + string* error) { Evaluator::Options evaluator_options; evaluator_options.linear_solver_type = options.linear_solver_type; - evaluator_options.num_eliminate_blocks = options.num_eliminate_blocks; + evaluator_options.num_eliminate_blocks = + (options.linear_solver_ordering->NumGroups() > 0 && + IsSchurType(options.linear_solver_type)) + ? (options.linear_solver_ordering + ->group_to_elements().begin() + ->second.size()) + : 0; evaluator_options.num_threads = options.num_threads; return Evaluator::Create(evaluator_options, program, error); } +CoordinateDescentMinimizer* SolverImpl::CreateInnerIterationMinimizer( + const Solver::Options& options, + const Program& program, + const ProblemImpl::ParameterMap& parameter_map, + Solver::Summary* summary) { + scoped_ptr<CoordinateDescentMinimizer> inner_iteration_minimizer( + new CoordinateDescentMinimizer); + scoped_ptr<ParameterBlockOrdering> inner_iteration_ordering; + ParameterBlockOrdering* ordering_ptr = NULL; + + if (options.inner_iteration_ordering == NULL) { + // Find a recursive decomposition of the Hessian matrix as a set + // of independent sets of decreasing size and invert it. This + // seems to work better in practice, i.e., Cameras before + // points. 
+ inner_iteration_ordering.reset(new ParameterBlockOrdering); + ComputeRecursiveIndependentSetOrdering(program, + inner_iteration_ordering.get()); + inner_iteration_ordering->Reverse(); + ordering_ptr = inner_iteration_ordering.get(); + } else { + const map<int, set<double*> >& group_to_elements = + options.inner_iteration_ordering->group_to_elements(); + + // Iterate over each group and verify that it is an independent + // set. + map<int, set<double*> >::const_iterator it = group_to_elements.begin(); + for ( ; it != group_to_elements.end(); ++it) { + if (!IsParameterBlockSetIndependent(it->second, + program.residual_blocks())) { + summary->error = + StringPrintf("The user-provided " + "parameter_blocks_for_inner_iterations does not " + "form an independent set. Group Id: %d", it->first); + return NULL; + } + } + ordering_ptr = options.inner_iteration_ordering; + } + + if (!inner_iteration_minimizer->Init(program, + parameter_map, + *ordering_ptr, + &summary->error)) { + return NULL; + } + + summary->inner_iterations = true; + SummarizeOrdering(ordering_ptr, &(summary->inner_iteration_ordering_used)); + + return inner_iteration_minimizer.release(); +} + } // namespace internal } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h index 11b44de6f42..c5f5efad3d7 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h +++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h @@ -31,17 +31,20 @@ #ifndef CERES_INTERNAL_SOLVER_IMPL_H_ #define CERES_INTERNAL_SOLVER_IMPL_H_ +#include <set> #include <string> #include <vector> #include "ceres/internal/port.h" +#include "ceres/ordered_groups.h" +#include "ceres/problem_impl.h" #include "ceres/solver.h" namespace ceres { namespace internal { +class CoordinateDescentMinimizer; class Evaluator; class LinearSolver; -class ProblemImpl; class Program; class SolverImpl { @@ -52,10 +55,19 @@ class SolverImpl 
{ ProblemImpl* problem_impl, Solver::Summary* summary); + static void TrustRegionSolve(const Solver::Options& options, + ProblemImpl* problem_impl, + Solver::Summary* summary); + + static void LineSearchSolve(const Solver::Options& options, + ProblemImpl* problem_impl, + Solver::Summary* summary); + // Create the transformed Program, which has all the fixed blocks // and residuals eliminated, and in the case of automatic schur // ordering, has the E blocks first in the resulting program, with // options.num_eliminate_blocks set appropriately. + // // If fixed_cost is not NULL, the residual blocks that are removed // are evaluated and the sum of their cost is returned in fixed_cost. static Program* CreateReducedProgram(Solver::Options* options, @@ -71,46 +83,73 @@ class SolverImpl { static LinearSolver* CreateLinearSolver(Solver::Options* options, string* error); - // Reorder the parameter blocks in program using the vector - // ordering. A return value of true indicates success and false - // indicates an error was encountered whose cause is logged to - // LOG(ERROR). - static bool ApplyUserOrdering(const ProblemImpl& problem_impl, - vector<double*>& ordering, + // Reorder the parameter blocks in program using the ordering. A + // return value of true indicates success and false indicates an + // error was encountered whose cause is logged to LOG(ERROR). + static bool ApplyUserOrdering(const ProblemImpl::ParameterMap& parameter_map, + const ParameterBlockOrdering* ordering, Program* program, string* error); + // Reorder the residuals for program, if necessary, so that the - // residuals involving each E block occur together. This is a - // necessary condition for the Schur eliminator, which works on - // these "row blocks" in the jacobian. - static bool MaybeReorderResidualBlocks(const Solver::Options& options, - Program* program, - string* error); + // residuals involving e block (i.e., the first num_eliminate_block + // parameter blocks) occur together. 
This is a necessary condition + // for the Schur eliminator. + static bool LexicographicallyOrderResidualBlocks( + const int num_eliminate_blocks, + Program* program, + string* error); // Create the appropriate evaluator for the transformed program. - static Evaluator* CreateEvaluator(const Solver::Options& options, - Program* program, - string* error); - - // Run the minimization for the given evaluator and configuration. - static void Minimize(const Solver::Options &options, - Program* program, - Evaluator* evaluator, - LinearSolver* linear_solver, - double* parameters, - Solver::Summary* summary); + static Evaluator* CreateEvaluator( + const Solver::Options& options, + const ProblemImpl::ParameterMap& parameter_map, + Program* program, + string* error); + + // Run the TrustRegionMinimizer for the given evaluator and configuration. + static void TrustRegionMinimize( + const Solver::Options &options, + Program* program, + CoordinateDescentMinimizer* inner_iteration_minimizer, + Evaluator* evaluator, + LinearSolver* linear_solver, + double* parameters, + Solver::Summary* summary); + + // Run the LineSearchMinimizer for the given evaluator and configuration. + static void LineSearchMinimize( + const Solver::Options &options, + Program* program, + Evaluator* evaluator, + double* parameters, + Solver::Summary* summary); // Remove the fixed or unused parameter blocks and residuals // depending only on fixed parameters from the problem. Also updates // num_eliminate_blocks, since removed parameters changes the point - // at which the eliminated blocks is valid. - // If fixed_cost is not NULL, the residual blocks that are removed - // are evaluated and the sum of their cost is returned in fixed_cost. + // at which the eliminated blocks is valid. If fixed_cost is not + // NULL, the residual blocks that are removed are evaluated and the + // sum of their cost is returned in fixed_cost. 
static bool RemoveFixedBlocksFromProgram(Program* program, - int* num_eliminate_blocks, + ParameterBlockOrdering* ordering, double* fixed_cost, string* error); + + static bool IsOrderingValid(const Solver::Options& options, + const ProblemImpl* problem_impl, + string* error); + + static bool IsParameterBlockSetIndependent( + const set<double*>& parameter_block_ptrs, + const vector<ResidualBlock*>& residual_blocks); + + static CoordinateDescentMinimizer* CreateInnerIterationMinimizer( + const Solver::Options& options, + const Program& program, + const ProblemImpl::ParameterMap& parameter_map, + Solver::Summary* summary); }; } // namespace internal diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc index 9e00b4402dc..dd05f0c6f41 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc @@ -39,12 +39,13 @@ #endif #include "ceres/compressed_row_sparse_matrix.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/scoped_ptr.h" #include "ceres/linear_solver.h" #include "ceres/suitesparse.h" #include "ceres/triplet_sparse_matrix.h" -#include "ceres/internal/eigen.h" -#include "ceres/internal/scoped_ptr.h" #include "ceres/types.h" +#include "ceres/wall_time.h" namespace ceres { namespace internal { @@ -103,6 +104,8 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse( const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double * x) { + EventLogger event_logger("SparseNormalCholeskySolver::CXSparse::Solve"); + LinearSolver::Summary summary; summary.num_iterations = 1; const int num_cols = A->num_cols(); @@ -129,25 +132,34 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse( // off of Jt to compute the Cholesky factorization of the normal // equations. 
cs_di* A2 = cs_transpose(&At, 1); - cs_di* AtA = cs_multiply(&At,A2); + cs_di* AtA = cs_multiply(&At, A2); cxsparse_.Free(A2); if (per_solve_options.D != NULL) { A->DeleteRows(num_cols); } + event_logger.AddEvent("Setup"); + // Compute symbolic factorization if not available. if (cxsparse_factor_ == NULL) { cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(AtA)); } + event_logger.AddEvent("Analysis"); + + // Solve the linear system. if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) { VectorRef(x, Atb.rows()) = Atb; summary.termination_type = TOLERANCE; } + event_logger.AddEvent("Solve"); + cxsparse_.Free(AtA); + + event_logger.AddEvent("Teardown"); return summary; } #else @@ -169,9 +181,9 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse( const double* b, const LinearSolver::PerSolveOptions& per_solve_options, double * x) { - const time_t start_time = time(NULL); - const int num_cols = A->num_cols(); + EventLogger event_logger("SparseNormalCholeskySolver::SuiteSparse::Solve"); + const int num_cols = A->num_cols(); LinearSolver::Summary summary; Vector Atb = Vector::Zero(num_cols); A->LeftMultiply(b, Atb.data()); @@ -189,7 +201,7 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse( CHECK_NOTNULL(lhs.get()); cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols); - const time_t init_time = time(NULL); + event_logger.AddEvent("Setup"); if (factor_ == NULL) { if (options_.use_block_amd) { @@ -206,11 +218,10 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse( } CHECK_NOTNULL(factor_); - - const time_t symbolic_time = time(NULL); + event_logger.AddEvent("Analysis"); cholmod_dense* sol = ss_.SolveCholesky(lhs.get(), factor_, rhs); - const time_t solve_time = time(NULL); + event_logger.AddEvent("Solve"); ss_.Free(rhs); rhs = NULL; @@ -228,12 +239,7 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse( 
summary.termination_type = TOLERANCE; } - const time_t cleanup_time = time(NULL); - VLOG(2) << "time (sec) total: " << (cleanup_time - start_time) - << " init: " << (init_time - start_time) - << " symbolic: " << (symbolic_time - init_time) - << " solve: " << (solve_time - symbolic_time) - << " cleanup: " << (cleanup_time - solve_time); + event_logger.AddEvent("Teardown"); return summary; } #else diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h index 40d9e0a0327..8d48096d4c6 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h +++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h @@ -35,8 +35,8 @@ #define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_ #include "ceres/cxsparse.h" -#include "ceres/linear_solver.h" #include "ceres/internal/macros.h" +#include "ceres/linear_solver.h" #include "ceres/suitesparse.h" namespace ceres { diff --git a/extern/libmv/third_party/ceres/internal/ceres/split.cc b/extern/libmv/third_party/ceres/internal/ceres/split.cc index c65c8a5bb5d..3edbc281340 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/split.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/split.cc @@ -28,10 +28,11 @@ // // Author: keir@google.com (Keir Mierle) +#include "ceres/split.h" + #include <string> #include <vector> #include <iterator> -#include "ceres/split.h" #include "ceres/internal/port.h" namespace ceres { diff --git a/extern/libmv/third_party/ceres/internal/ceres/split.h b/extern/libmv/third_party/ceres/internal/ceres/split.h index ec579e974da..4df48c3a7cd 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/split.h +++ b/extern/libmv/third_party/ceres/internal/ceres/split.h @@ -2,7 +2,7 @@ // Author: keir@google.com (Keir Mierle) #ifndef CERES_INTERNAL_SPLIT_H_ -#define VISION_OPTIMIZATION_LEAST_SQUARES_INTERNAL_SPLIT_H_ +#define 
CERES_INTERNAL_SPLIT_H_ #include <string> #include <vector> diff --git a/extern/libmv/third_party/ceres/internal/ceres/stl_util.h b/extern/libmv/third_party/ceres/internal/ceres/stl_util.h index a1a19e8b3ce..08f15ec8398 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/stl_util.h +++ b/extern/libmv/third_party/ceres/internal/ceres/stl_util.h @@ -31,6 +31,8 @@ #ifndef CERES_INTERNAL_STL_UTIL_H_ #define CERES_INTERNAL_STL_UTIL_H_ +#include <algorithm> + namespace ceres { // STLDeleteContainerPointers() @@ -53,6 +55,20 @@ void STLDeleteContainerPointers(ForwardIterator begin, } } +// Variant of STLDeleteContainerPointers which allows the container to +// contain duplicates. +template <class ForwardIterator> +void STLDeleteUniqueContainerPointers(ForwardIterator begin, + ForwardIterator end) { + sort(begin, end); + ForwardIterator new_end = unique(begin, end); + while (begin != new_end) { + ForwardIterator temp = begin; + ++begin; + delete *temp; + } +} + // STLDeleteElements() deletes all the elements in an STL container and clears // the container. 
This function is suitable for use with a vector, set, // hash_set, or any other STL container which defines sensible begin(), end(), diff --git a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc index 396a48b7d97..ce204674dce 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc @@ -28,13 +28,14 @@ // // Author: Sanjay Ghemawat +#include "ceres/stringprintf.h" + #include <cerrno> #include <cstdarg> // For va_list and related operations #include <cstdio> // MSVC requires this for _vsnprintf #include <string> #include <vector> -#include "ceres/stringprintf.h" #include "ceres/internal/port.h" namespace ceres { diff --git a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h index f2f907ab32d..cd1be142aed 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h +++ b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h @@ -65,17 +65,17 @@ namespace internal { // Return a C++ string. extern string StringPrintf(const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(1,2); + CERES_PRINTF_ATTRIBUTE(1, 2); // Store result into a supplied string and return it. extern const string& SStringPrintf(string* dst, const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(2,3); + CERES_PRINTF_ATTRIBUTE(2, 3); // Append result to a supplied string. extern void StringAppendF(string* dst, const char* format, ...) // Tell the compiler to do printf format string checking. - CERES_PRINTF_ATTRIBUTE(2,3); + CERES_PRINTF_ATTRIBUTE(2, 3); // Lower-level routine that takes a va_list and appends to a specified string. // All other routines are just convenience wrappers around it. 
diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc index cf3c48f84e6..d200aeb82f3 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc @@ -135,10 +135,11 @@ cholmod_factor* SuiteSparse::BlockAnalyzeCholesky( return AnalyzeCholeskyWithUserOrdering(A, ordering); } -cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A, - const vector<int>& ordering) { +cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering( + cholmod_sparse* A, + const vector<int>& ordering) { CHECK_EQ(ordering.size(), A->nrow); - cc_.nmethods = 1 ; + cc_.nmethods = 1; cc_.method[0].ordering = CHOLMOD_GIVEN; cholmod_factor* factor = cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_); diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h index eb691c0c0ed..3fe79080d5d 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h +++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h @@ -39,9 +39,9 @@ #include <string> #include <vector> -#include <glog/logging.h> -#include "cholmod.h" #include "ceres/internal/port.h" +#include "cholmod.h" +#include "glog/logging.h" namespace ceres { namespace internal { diff --git a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc index ed8677ea18a..a09f38ee24e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc @@ -71,7 +71,8 @@ TripletSparseMatrix::TripletSparseMatrix(int num_rows, } TripletSparseMatrix::TripletSparseMatrix(const TripletSparseMatrix& orig) - : num_rows_(orig.num_rows_), + : SparseMatrix(), + num_rows_(orig.num_rows_), 
num_cols_(orig.num_cols_), max_num_nonzeros_(orig.max_num_nonzeros_), num_nonzeros_(orig.num_nonzeros_), diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc index 76c4f8a7580..981c60a12e7 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc @@ -45,8 +45,10 @@ #include "ceres/internal/scoped_ptr.h" #include "ceres/linear_least_squares_problems.h" #include "ceres/sparse_matrix.h" +#include "ceres/stringprintf.h" #include "ceres/trust_region_strategy.h" #include "ceres/types.h" +#include "ceres/wall_time.h" #include "glog/logging.h" namespace ceres { @@ -56,28 +58,13 @@ namespace { const double kEpsilon = 1e-12; } // namespace -// Execute the list of IterationCallbacks sequentially. If any one of -// the callbacks does not return SOLVER_CONTINUE, then stop and return -// its status. -CallbackReturnType TrustRegionMinimizer::RunCallbacks( - const IterationSummary& iteration_summary) { - for (int i = 0; i < options_.callbacks.size(); ++i) { - const CallbackReturnType status = - (*options_.callbacks[i])(iteration_summary); - if (status != SOLVER_CONTINUE) { - return status; - } - } - return SOLVER_CONTINUE; -} - // Compute a scaling vector that is used to improve the conditioning // of the Jacobian. void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian, double* scale) const { jacobian.SquaredColumnNorm(scale); for (int i = 0; i < jacobian.num_cols(); ++i) { - scale[i] = 1.0 / (kEpsilon + sqrt(scale[i])); + scale[i] = 1.0 / (1.0 + sqrt(scale[i])); } } @@ -96,29 +83,19 @@ bool TrustRegionMinimizer::MaybeDumpLinearLeastSquaresProblem( // moved inside TrustRegionStrategy, its not clear how we dump the // regularization vector/matrix anymore. 
// - // Doing this right requires either an API change to the - // TrustRegionStrategy and/or how LinearLeastSquares problems are - // stored on disk. + // Also num_eliminate_blocks is not visible to the trust region + // minimizer either. // - // For now, we will just not dump the regularizer. - return (!binary_search(options_.lsqp_iterations_to_dump.begin(), - options_.lsqp_iterations_to_dump.end(), - iteration) || - DumpLinearLeastSquaresProblem(options_.lsqp_dump_directory, - iteration, - options_.lsqp_dump_format_type, - jacobian, - NULL, - residuals, - step, - options_.num_eliminate_blocks)); + // Both of these indicate that this is the wrong place for this + // code, and going forward this should needs fixing/refactoring. + return true; } void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, double* parameters, Solver::Summary* summary) { - time_t start_time = time(NULL); - time_t iteration_start_time = start_time; + double start_time = WallTimeInSeconds(); + double iteration_start_time = start_time; Init(options); summary->termination_type = NO_CONVERGENCE; @@ -149,7 +126,6 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.iteration = 0; iteration_summary.step_is_valid = false; iteration_summary.step_is_successful = false; - iteration_summary.cost = summary->initial_cost; iteration_summary.cost_change = 0.0; iteration_summary.gradient_max_norm = 0.0; iteration_summary.step_norm = 0.0; @@ -169,6 +145,9 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, return; } + summary->initial_cost = cost + summary->fixed_cost; + iteration_summary.cost = cost + summary->fixed_cost; + int num_consecutive_nonmonotonic_steps = 0; double minimum_cost = cost; double reference_cost = cost; @@ -189,45 +168,34 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // The initial gradient max_norm is bounded from below so that we do // not divide by zero. 
- const double gradient_max_norm_0 = + const double initial_gradient_max_norm = max(iteration_summary.gradient_max_norm, kEpsilon); const double absolute_gradient_tolerance = - options_.gradient_tolerance * gradient_max_norm_0; + options_.gradient_tolerance * initial_gradient_max_norm; if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) { summary->termination_type = GRADIENT_TOLERANCE; VLOG(1) << "Terminating: Gradient tolerance reached." << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / gradient_max_norm_0 + << iteration_summary.gradient_max_norm / initial_gradient_max_norm << " <= " << options_.gradient_tolerance; return; } iteration_summary.iteration_time_in_seconds = - time(NULL) - iteration_start_time; - iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time + - summary->preprocessor_time_in_seconds; + WallTimeInSeconds() - iteration_start_time; + iteration_summary.cumulative_time_in_seconds = + WallTimeInSeconds() - start_time + + summary->preprocessor_time_in_seconds; summary->iterations.push_back(iteration_summary); - // Call the various callbacks. 
- switch (RunCallbacks(iteration_summary)) { - case SOLVER_TERMINATE_SUCCESSFULLY: - summary->termination_type = USER_SUCCESS; - VLOG(1) << "Terminating: User callback returned USER_SUCCESS."; - return; - case SOLVER_ABORT: - summary->termination_type = USER_ABORT; - VLOG(1) << "Terminating: User callback returned USER_ABORT."; - return; - case SOLVER_CONTINUE: - break; - default: - LOG(FATAL) << "Unknown type of user callback status"; - } - int num_consecutive_invalid_steps = 0; while (true) { - iteration_start_time = time(NULL); + if (!RunCallbacks(options.callbacks, iteration_summary, summary)) { + return; + } + + iteration_start_time = WallTimeInSeconds(); if (iteration_summary.iteration >= options_.max_num_iterations) { summary->termination_type = NO_CONVERGENCE; VLOG(1) << "Terminating: Maximum number of iterations reached."; @@ -248,7 +216,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, iteration_summary.step_is_valid = false; iteration_summary.step_is_successful = false; - const time_t strategy_start_time = time(NULL); + const double strategy_start_time = WallTimeInSeconds(); TrustRegionStrategy::PerSolveOptions per_solve_options; per_solve_options.eta = options_.eta; TrustRegionStrategy::Summary strategy_summary = @@ -258,7 +226,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, trust_region_step.data()); iteration_summary.step_solver_time_in_seconds = - time(NULL) - strategy_start_time; + WallTimeInSeconds() - strategy_start_time; iteration_summary.linear_solver_iterations = strategy_summary.num_iterations; @@ -270,23 +238,24 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, << options.lsqp_dump_directory << "but failed."; } - double new_model_cost = 0.0; + double model_cost_change = 0.0; if (strategy_summary.termination_type != FAILURE) { - // new_model_cost = 1/2 |f + J * step|^2 - model_residuals = residuals; + // new_model_cost + // = 1/2 [f + J * step]^2 + // = 1/2 [ f'f + 
2f'J * step + step' * J' * J * step ] + // model_cost_change + // = cost - new_model_cost + // = f'f/2 - 1/2 [ f'f + 2f'J * step + step' * J' * J * step] + // = -f'J * step - step' * J' * J * step / 2 + model_residuals.setZero(); jacobian->RightMultiply(trust_region_step.data(), model_residuals.data()); - new_model_cost = model_residuals.squaredNorm() / 2.0; - - // In exact arithmetic, this would never be the case. But poorly - // conditioned matrices can give rise to situations where the - // new_model_cost can actually be larger than half the squared - // norm of the residual vector. We allow for small tolerance - // around cost and beyond that declare the step to be invalid. - if ((1.0 - new_model_cost / cost) < -kEpsilon) { + model_cost_change = -(residuals.dot(model_residuals) + + model_residuals.squaredNorm() / 2.0); + + if (model_cost_change < 0.0) { VLOG(1) << "Invalid step: current_cost: " << cost - << " new_model_cost " << new_model_cost - << " absolute difference " << (cost - new_model_cost) - << " relative difference " << (1.0 - new_model_cost/cost); + << " absolute difference " << model_cost_change + << " relative difference " << (model_cost_change / cost); } else { iteration_summary.step_is_valid = true; } @@ -299,10 +268,12 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, if (++num_consecutive_invalid_steps >= options_.max_num_consecutive_invalid_steps) { summary->termination_type = NUMERICAL_FAILURE; - LOG(WARNING) << "Terminating. Number of successive invalid steps more " - << "than " - << "Solver::Options::max_num_consecutive_invalid_steps: " - << options_.max_num_consecutive_invalid_steps; + summary->error = StringPrintf( + "Terminating. 
Number of successive invalid steps more " + "than Solver::Options::max_num_consecutive_invalid_steps: %d", + options_.max_num_consecutive_invalid_steps); + + LOG(WARNING) << summary->error; return; } @@ -311,7 +282,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // as an unsuccessful iteration. Since the various callbacks are // still executed, we are going to fill the iteration summary // with data that assumes a step of length zero and no progress. - iteration_summary.cost = cost; + iteration_summary.cost = cost + summary->fixed_cost; iteration_summary.cost_change = 0.0; iteration_summary.gradient_max_norm = summary->iterations.back().gradient_max_norm; @@ -322,51 +293,19 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, // The step is numerically valid, so now we can judge its quality. num_consecutive_invalid_steps = 0; - // We allow some slop around 0, and clamp the model_cost_change - // at kEpsilon * min(1.0, cost) from below. - // - // In exact arithmetic this should never be needed, as we are - // guaranteed to new_model_cost <= cost. However, due to various - // numerical issues, it is possible that new_model_cost is - // nearly equal to cost, and the difference is a small negative - // number. To make sure that the relative_decrease computation - // remains sane, as clamp the difference (cost - new_model_cost) - // from below at a small positive number. - // - // This number is the minimum of kEpsilon * (cost, 1.0), which - // ensures that it will never get too large in absolute value, - // while scaling down proportionally with the magnitude of the - // cost. This is important for problems where the minimum of the - // objective function is near zero. - const double model_cost_change = - max(kEpsilon * min(1.0, cost), cost - new_model_cost); - // Undo the Jacobian column scaling. 
delta = (trust_region_step.array() * scale.array()).matrix(); - iteration_summary.step_norm = delta.norm(); - - // Convergence based on parameter_tolerance. - const double step_size_tolerance = options_.parameter_tolerance * - (x_norm + options_.parameter_tolerance); - if (iteration_summary.step_norm <= step_size_tolerance) { - VLOG(1) << "Terminating. Parameter tolerance reached. " - << "relative step_norm: " - << iteration_summary.step_norm / - (x_norm + options_.parameter_tolerance) - << " <= " << options_.parameter_tolerance; - summary->termination_type = PARAMETER_TOLERANCE; - return; - } - if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) { summary->termination_type = NUMERICAL_FAILURE; - LOG(WARNING) << "Terminating. Failed to compute " - << "Plus(x, delta, x_plus_delta)."; + summary->error = + "Terminating. Failed to compute Plus(x, delta, x_plus_delta)."; + + LOG(WARNING) << summary->error; return; } // Try this step. - double new_cost; + double new_cost = numeric_limits<double>::max(); if (!evaluator->Evaluate(x_plus_delta.data(), &new_cost, NULL, NULL, NULL)) { @@ -375,6 +314,45 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, LOG(WARNING) << "Step failed to evaluate. " << "Treating it as step with infinite cost"; new_cost = numeric_limits<double>::max(); + } else { + // Check if performing an inner iteration will make it better. 
+ if (options.inner_iteration_minimizer != NULL) { + const double x_plus_delta_cost = new_cost; + Vector inner_iteration_x = x_plus_delta; + Solver::Summary inner_iteration_summary; + options.inner_iteration_minimizer->Minimize(options, + inner_iteration_x.data(), + &inner_iteration_summary); + if (!evaluator->Evaluate(inner_iteration_x.data(), + &new_cost, + NULL, NULL, NULL)) { + VLOG(2) << "Inner iteration failed."; + new_cost = x_plus_delta_cost; + } else { + x_plus_delta = inner_iteration_x; + // Boost the model_cost_change, since the inner iteration + // improvements are not accounted for by the trust region. + model_cost_change += x_plus_delta_cost - new_cost; + VLOG(2) << "Inner iteration succeeded; current cost: " << cost + << " x_plus_delta_cost: " << x_plus_delta_cost + << " new_cost: " << new_cost; + } + } + } + + iteration_summary.step_norm = (x - x_plus_delta).norm(); + + // Convergence based on parameter_tolerance. + const double step_size_tolerance = options_.parameter_tolerance * + (x_norm + options_.parameter_tolerance); + if (iteration_summary.step_norm <= step_size_tolerance) { + VLOG(1) << "Terminating. Parameter tolerance reached. " + << "relative step_norm: " + << iteration_summary.step_norm / + (x_norm + options_.parameter_tolerance) + << " <= " << options_.parameter_tolerance; + summary->termination_type = PARAMETER_TOLERANCE; + return; } VLOG(2) << "old cost: " << cost << " new cost: " << new_cost; @@ -421,6 +399,7 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, accumulated_candidate_model_cost_change += model_cost_change; accumulated_reference_model_cost_change += model_cost_change; if (relative_decrease <= options_.min_relative_decrease) { + iteration_summary.step_is_nonmonotonic = true; VLOG(2) << "Non-monotonic step! 
" << " relative_decrease: " << relative_decrease << " historical_relative_decrease: " @@ -443,7 +422,9 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, NULL, jacobian)) { summary->termination_type = NUMERICAL_FAILURE; - LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed."; + summary->error = + "Terminating: Residual and Jacobian evaluation failed."; + LOG(WARNING) << summary->error; return; } @@ -455,7 +436,8 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, summary->termination_type = GRADIENT_TOLERANCE; VLOG(1) << "Terminating: Gradient tolerance reached." << "Relative gradient max norm: " - << iteration_summary.gradient_max_norm / gradient_max_norm_0 + << (iteration_summary.gradient_max_norm / + initial_gradient_max_norm) << " <= " << options_.gradient_tolerance; return; } @@ -523,25 +505,11 @@ void TrustRegionMinimizer::Minimize(const Minimizer::Options& options, } iteration_summary.iteration_time_in_seconds = - time(NULL) - iteration_start_time; - iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time + - summary->preprocessor_time_in_seconds; + WallTimeInSeconds() - iteration_start_time; + iteration_summary.cumulative_time_in_seconds = + WallTimeInSeconds() - start_time + + summary->preprocessor_time_in_seconds; summary->iterations.push_back(iteration_summary); - - switch (RunCallbacks(iteration_summary)) { - case SOLVER_TERMINATE_SUCCESSFULLY: - summary->termination_type = USER_SUCCESS; - VLOG(1) << "Terminating: User callback returned USER_SUCCESS."; - return; - case SOLVER_ABORT: - summary->termination_type = USER_ABORT; - VLOG(1) << "Terminating: User callback returned USER_ABORT."; - return; - case SOLVER_CONTINUE: - break; - default: - LOG(FATAL) << "Unknown type of user callback status"; - } } } diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h index 
a4f5ba3674d..9a022843233 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h @@ -39,8 +39,7 @@ namespace ceres { namespace internal { // Generic trust region minimization algorithm. The heavy lifting is -// done by a TrustRegionStrategy object passed in as one of the -// arguments to the Minimize method. +// done by a TrustRegionStrategy object passed in as part of options. // // For example usage, see SolverImpl::Minimize. class TrustRegionMinimizer : public Minimizer { @@ -53,11 +52,10 @@ class TrustRegionMinimizer : public Minimizer { private: void Init(const Minimizer::Options& options); void EstimateScale(const SparseMatrix& jacobian, double* scale) const; - CallbackReturnType RunCallbacks(const IterationSummary& iteration_summary); - bool MaybeDumpLinearLeastSquaresProblem( const int iteration, - const SparseMatrix* jacobian, - const double* residuals, - const double* step) const; + bool MaybeDumpLinearLeastSquaresProblem(const int iteration, + const SparseMatrix* jacobian, + const double* residuals, + const double* step) const; Minimizer::Options options_; }; diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc index 89bc19d084b..c68269d0449 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc @@ -1,3 +1,35 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012, 2013 Google Inc. All rights reserved. 
+// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// keir@google.com (Keir Mierle) + #include "ceres/trust_region_strategy.h" #include "ceres/dogleg_strategy.h" #include "ceres/levenberg_marquardt_strategy.h" diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h index 391da97d5eb..f150594bbd2 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h +++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h @@ -52,7 +52,7 @@ class SparseMatrix; // radius to scale the damping term, which controls the step size, but // does not set a hard limit on its size. class TrustRegionStrategy { -public: + public: struct Options { Options() : trust_region_strategy_type(LEVENBERG_MARQUARDT), diff --git a/extern/libmv/third_party/ceres/internal/ceres/types.cc b/extern/libmv/third_party/ceres/internal/ceres/types.cc index 05e573ff6d5..2e19322cc76 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/types.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/types.cc @@ -28,15 +28,23 @@ // // Author: sameeragarwal@google.com (Sameer Agarwal) +#include <algorithm> +#include <cctype> #include <string> #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { #define CASESTR(x) case x: return #x +#define STRENUM(x) if (value == #x) { *type = x; return true;} -const char* LinearSolverTypeToString(LinearSolverType solver_type) { - switch (solver_type) { +static void UpperCase(string* input) { + std::transform(input->begin(), input->end(), input->begin(), ::toupper); +} + +const char* LinearSolverTypeToString(LinearSolverType type) { + switch (type) { CASESTR(DENSE_NORMAL_CHOLESKY); CASESTR(DENSE_QR); CASESTR(SPARSE_NORMAL_CHOLESKY); @@ -49,9 +57,20 @@ const char* LinearSolverTypeToString(LinearSolverType solver_type) { } } -const char* PreconditionerTypeToString( - PreconditionerType preconditioner_type) { - switch 
(preconditioner_type) { +bool StringToLinearSolverType(string value, LinearSolverType* type) { + UpperCase(&value); + STRENUM(DENSE_NORMAL_CHOLESKY); + STRENUM(DENSE_QR); + STRENUM(SPARSE_NORMAL_CHOLESKY); + STRENUM(DENSE_SCHUR); + STRENUM(SPARSE_SCHUR); + STRENUM(ITERATIVE_SCHUR); + STRENUM(CGNR); + return false; +} + +const char* PreconditionerTypeToString(PreconditionerType type) { + switch (type) { CASESTR(IDENTITY); CASESTR(JACOBI); CASESTR(SCHUR_JACOBI); @@ -62,9 +81,19 @@ const char* PreconditionerTypeToString( } } +bool StringToPreconditionerType(string value, PreconditionerType* type) { + UpperCase(&value); + STRENUM(IDENTITY); + STRENUM(JACOBI); + STRENUM(SCHUR_JACOBI); + STRENUM(CLUSTER_JACOBI); + STRENUM(CLUSTER_TRIDIAGONAL); + return false; +} + const char* SparseLinearAlgebraLibraryTypeToString( - SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) { - switch (sparse_linear_algebra_library_type) { + SparseLinearAlgebraLibraryType type) { + switch (type) { CASESTR(SUITE_SPARSE); CASESTR(CX_SPARSE); default: @@ -72,19 +101,121 @@ const char* SparseLinearAlgebraLibraryTypeToString( } } -const char* OrderingTypeToString(OrderingType ordering_type) { - switch (ordering_type) { - CASESTR(NATURAL); - CASESTR(USER); - CASESTR(SCHUR); + +bool StringToSparseLinearAlgebraLibraryType( + string value, + SparseLinearAlgebraLibraryType* type) { + UpperCase(&value); + STRENUM(SUITE_SPARSE); + STRENUM(CX_SPARSE); + return false; +} + +const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type) { + switch (type) { + CASESTR(LEVENBERG_MARQUARDT); + CASESTR(DOGLEG); + default: + return "UNKNOWN"; + } +} + +bool StringToTrustRegionStrategyType(string value, + TrustRegionStrategyType* type) { + UpperCase(&value); + STRENUM(LEVENBERG_MARQUARDT); + STRENUM(DOGLEG); + return false; +} + +const char* DoglegTypeToString(DoglegType type) { + switch (type) { + CASESTR(TRADITIONAL_DOGLEG); + CASESTR(SUBSPACE_DOGLEG); + default: + return "UNKNOWN"; + } 
+} + +bool StringToDoglegType(string value, DoglegType* type) { + UpperCase(&value); + STRENUM(TRADITIONAL_DOGLEG); + STRENUM(SUBSPACE_DOGLEG); + return false; +} + +const char* MinimizerTypeToString(MinimizerType type) { + switch (type) { + CASESTR(TRUST_REGION); + CASESTR(LINE_SEARCH); + default: + return "UNKNOWN"; + } +} + +bool StringToMinimizerType(string value, MinimizerType* type) { + UpperCase(&value); + STRENUM(TRUST_REGION); + STRENUM(LINE_SEARCH); + return false; +} + +const char* LineSearchDirectionTypeToString(LineSearchDirectionType type) { + switch (type) { + CASESTR(STEEPEST_DESCENT); + CASESTR(NONLINEAR_CONJUGATE_GRADIENT); + CASESTR(LBFGS); + default: + return "UNKNOWN"; + } +} + +bool StringToLineSearchDirectionType(string value, + LineSearchDirectionType* type) { + UpperCase(&value); + STRENUM(STEEPEST_DESCENT); + STRENUM(NONLINEAR_CONJUGATE_GRADIENT); + STRENUM(LBFGS); + return false; +} + +const char* LineSearchTypeToString(LineSearchType type) { + switch (type) { + CASESTR(ARMIJO); default: return "UNKNOWN"; } } -const char* SolverTerminationTypeToString( - SolverTerminationType termination_type) { - switch (termination_type) { +bool StringToLineSearchType(string value, LineSearchType* type) { + UpperCase(&value); + STRENUM(ARMIJO); + return false; +} + +const char* NonlinearConjugateGradientTypeToString( + NonlinearConjugateGradientType type) { + switch (type) { + CASESTR(FLETCHER_REEVES); + CASESTR(POLAK_RIBIRERE); + CASESTR(HESTENES_STIEFEL); + default: + return "UNKNOWN"; + } +} + +bool StringToNonlinearConjugateGradientType( + string value, + NonlinearConjugateGradientType* type) { + UpperCase(&value); + STRENUM(FLETCHER_REEVES); + STRENUM(POLAK_RIBIRERE); + STRENUM(HESTENES_STIEFEL); + return false; +} + +const char* SolverTerminationTypeToString(SolverTerminationType type) { + switch (type) { CASESTR(NO_CONVERGENCE); CASESTR(FUNCTION_TOLERANCE); CASESTR(GRADIENT_TOLERANCE); @@ -98,29 +229,20 @@ const char* 
SolverTerminationTypeToString( } } -#if 0 /* UNUSED */ -static const char* SparseLinearAlgebraTypeToString( - SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) { - switch (sparse_linear_algebra_library_type) { - CASESTR(CX_SPARSE); - CASESTR(SUITE_SPARSE); - default: - return "UNKNOWN"; - } -} -#endif - -const char* TrustRegionStrategyTypeToString( - TrustRegionStrategyType trust_region_strategy_type) { - switch (trust_region_strategy_type) { - CASESTR(LEVENBERG_MARQUARDT); - CASESTR(DOGLEG); +const char* LinearSolverTerminationTypeToString( + LinearSolverTerminationType type) { + switch (type) { + CASESTR(TOLERANCE); + CASESTR(MAX_ITERATIONS); + CASESTR(STAGNATION); + CASESTR(FAILURE); default: return "UNKNOWN"; } } #undef CASESTR +#undef STRENUM bool IsSchurType(LinearSolverType type) { return ((type == SPARSE_SCHUR) || @@ -128,4 +250,26 @@ bool IsSchurType(LinearSolverType type) { (type == ITERATIVE_SCHUR)); } +bool IsSparseLinearAlgebraLibraryTypeAvailable( + SparseLinearAlgebraLibraryType type) { + if (type == SUITE_SPARSE) { +#ifdef CERES_NO_SUITESPARSE + return false; +#else + return true; +#endif + } + + if (type == CX_SPARSE) { +#ifdef CERES_NO_CXSPARSE + return false; +#else + return true; +#endif + } + + LOG(WARNING) << "Unknown sparse linear algebra library " << type; + return false; +} + } // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility.cc b/extern/libmv/third_party/ceres/internal/ceres/visibility.cc index 564cc54493e..8e80fd121bb 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility.cc @@ -28,6 +28,8 @@ // // Author: kushalav@google.com (Avanish Kushal) +#include "ceres/visibility.h" + #include <cmath> #include <ctime> #include <algorithm> @@ -36,7 +38,6 @@ #include <utility> #include "ceres/block_structure.h" #include "ceres/collections_port.h" -#include "ceres/visibility.h" #include "ceres/graph.h" #include 
"glog/logging.h" diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility.h b/extern/libmv/third_party/ceres/internal/ceres/visibility.h index 692dd87201e..f29e3c6a0a8 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility.h +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility.h @@ -42,7 +42,7 @@ namespace ceres { namespace internal { -class CompressedRowBlockStructure; +struct CompressedRowBlockStructure; // Given a compressed row block structure, computes the set of // e_blocks "visible" to each f_block. If an e_block co-occurs with an diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc index 4caad03d7a1..a75d6f0c17e 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc @@ -65,17 +65,17 @@ static const double kSimilarityPenaltyWeight = 0.0; #ifndef CERES_NO_SUITESPARSE VisibilityBasedPreconditioner::VisibilityBasedPreconditioner( const CompressedRowBlockStructure& bs, - const LinearSolver::Options& options) + const Preconditioner::Options& options) : options_(options), num_blocks_(0), num_clusters_(0), factor_(NULL) { - CHECK_GT(options_.num_eliminate_blocks, 0); - CHECK(options_.preconditioner_type == SCHUR_JACOBI || - options_.preconditioner_type == CLUSTER_JACOBI || - options_.preconditioner_type == CLUSTER_TRIDIAGONAL) - << "Unknown preconditioner type: " << options_.preconditioner_type; - num_blocks_ = bs.cols.size() - options_.num_eliminate_blocks; + CHECK_GT(options_.elimination_groups.size(), 1); + CHECK_GT(options_.elimination_groups[0], 0); + CHECK(options_.type == CLUSTER_JACOBI || + options_.type == CLUSTER_TRIDIAGONAL) + << "Unknown preconditioner type: " << options_.type; + num_blocks_ = bs.cols.size() - options_.elimination_groups[0]; CHECK_GT(num_blocks_, 0) << 
"Jacobian should have atleast 1 f_block for " << "visibility based preconditioning."; @@ -83,14 +83,11 @@ VisibilityBasedPreconditioner::VisibilityBasedPreconditioner( // Vector of camera block sizes block_size_.resize(num_blocks_); for (int i = 0; i < num_blocks_; ++i) { - block_size_[i] = bs.cols[i + options_.num_eliminate_blocks].size; + block_size_[i] = bs.cols[i + options_.elimination_groups[0]].size; } const time_t start_time = time(NULL); - switch (options_.preconditioner_type) { - case SCHUR_JACOBI: - ComputeSchurJacobiSparsity(bs); - break; + switch (options_.type) { case CLUSTER_JACOBI: ComputeClusterJacobiSparsity(bs); break; @@ -130,24 +127,6 @@ VisibilityBasedPreconditioner::~VisibilityBasedPreconditioner() { } } -// Determine the sparsity structure of the SCHUR_JACOBI -// preconditioner. SCHUR_JACOBI is an extreme case of a visibility -// based preconditioner where each camera block corresponds to a -// cluster and there is no interaction between clusters. -void VisibilityBasedPreconditioner::ComputeSchurJacobiSparsity( - const CompressedRowBlockStructure& bs) { - num_clusters_ = num_blocks_; - cluster_membership_.resize(num_blocks_); - cluster_pairs_.clear(); - - // Each camea block is a member of its own cluster and the only - // cluster pairs are the self edges (i,i). - for (int i = 0; i < num_clusters_; ++i) { - cluster_membership_[i] = i; - cluster_pairs_.insert(make_pair(i, i)); - } -} - // Determine the sparsity structure of the CLUSTER_JACOBI // preconditioner. It clusters cameras using their scene // visibility. 
The clusters form the diagonal blocks of the @@ -155,7 +134,7 @@ void VisibilityBasedPreconditioner::ComputeSchurJacobiSparsity( void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity( const CompressedRowBlockStructure& bs) { vector<set<int> > visibility; - ComputeVisibility(bs, options_.num_eliminate_blocks, &visibility); + ComputeVisibility(bs, options_.elimination_groups[0], &visibility); CHECK_EQ(num_blocks_, visibility.size()); ClusterCameras(visibility); cluster_pairs_.clear(); @@ -173,7 +152,7 @@ void VisibilityBasedPreconditioner::ComputeClusterJacobiSparsity( void VisibilityBasedPreconditioner::ComputeClusterTridiagonalSparsity( const CompressedRowBlockStructure& bs) { vector<set<int> > visibility; - ComputeVisibility(bs, options_.num_eliminate_blocks, &visibility); + ComputeVisibility(bs, options_.elimination_groups[0], &visibility); CHECK_EQ(num_blocks_, visibility.size()); ClusterCameras(visibility); @@ -253,7 +232,7 @@ void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner( int r = 0; const int num_row_blocks = bs.rows.size(); - const int num_eliminate_blocks = options_.num_eliminate_blocks; + const int num_eliminate_blocks = options_.elimination_groups[0]; // Iterate over each row of the matrix. 
The block structure of the // matrix is assumed to be sorted in order of the e_blocks/point @@ -331,16 +310,16 @@ void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner( void VisibilityBasedPreconditioner::InitEliminator( const CompressedRowBlockStructure& bs) { LinearSolver::Options eliminator_options; - eliminator_options.num_eliminate_blocks = options_.num_eliminate_blocks; + eliminator_options.elimination_groups = options_.elimination_groups; eliminator_options.num_threads = options_.num_threads; - DetectStructure(bs, options_.num_eliminate_blocks, + DetectStructure(bs, options_.elimination_groups[0], &eliminator_options.row_block_size, &eliminator_options.e_block_size, &eliminator_options.f_block_size); eliminator_.reset(SchurEliminatorBase::Create(eliminator_options)); - eliminator_->Init(options_.num_eliminate_blocks, &bs); + eliminator_->Init(options_.elimination_groups[0], &bs); } // Update the values of the preconditioner matrix and factorize it. @@ -364,13 +343,13 @@ bool VisibilityBasedPreconditioner::Update(const BlockSparseMatrixBase& A, // Compute a subset of the entries of the Schur complement. eliminator_->Eliminate(&A, b.data(), D, m_.get(), rhs.data()); - // Try factorizing the matrix. For SCHUR_JACOBI and CLUSTER_JACOBI, - // this should always succeed modulo some numerical/conditioning - // problems. For CLUSTER_TRIDIAGONAL, in general the preconditioner - // matrix as constructed is not positive definite. However, we will - // go ahead and try factorizing it. If it works, great, otherwise we - // scale all the cells in the preconditioner corresponding to the - // edges in the degree-2 forest and that guarantees positive + // Try factorizing the matrix. For CLUSTER_JACOBI, this should + // always succeed modulo some numerical/conditioning problems. For + // CLUSTER_TRIDIAGONAL, in general the preconditioner matrix as + // constructed is not positive definite. However, we will go ahead + // and try factorizing it. 
If it works, great, otherwise we scale + // all the cells in the preconditioner corresponding to the edges in + // the degree-2 forest and that guarantees positive // definiteness. The proof of this fact can be found in Lemma 1 in // "Visibility Based Preconditioning for Bundle Adjustment". // @@ -380,10 +359,10 @@ bool VisibilityBasedPreconditioner::Update(const BlockSparseMatrixBase& A, // The scaling only affects the tri-diagonal case, since // ScaleOffDiagonalBlocks only pays attenion to the cells that - // belong to the edges of the degree-2 forest. In the SCHUR_JACOBI - // and the CLUSTER_JACOBI cases, the preconditioner is guaranteed to - // be positive semidefinite. - if (!status && options_.preconditioner_type == CLUSTER_TRIDIAGONAL) { + // belong to the edges of the degree-2 forest. In the CLUSTER_JACOBI + // case, the preconditioner is guaranteed to be positive + // semidefinite. + if (!status && options_.type == CLUSTER_TRIDIAGONAL) { VLOG(1) << "Unscaled factorization failed. Retrying with off-diagonal " << "scaling"; ScaleOffDiagonalCells(); diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h index 888c65eba3a..8a09c78d36a 100644 --- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h +++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h @@ -29,25 +29,19 @@ // Author: sameeragarwal@google.com (Sameer Agarwal) // // Preconditioners for linear systems that arise in Structure from -// Motion problems. VisibilityBasedPreconditioner implements three -// preconditioners: +// Motion problems. VisibilityBasedPreconditioner implements: // -// SCHUR_JACOBI // CLUSTER_JACOBI // CLUSTER_TRIDIAGONAL // // Detailed descriptions of these preconditions beyond what is // documented here can be found in // -// Bundle Adjustment in the Large -// S. Agarwal, N. Snavely, S. Seitz & R. 
Szeliski, ECCV 2010 -// http://www.cs.washington.edu/homes/sagarwal/bal.pdf -// // Visibility Based Preconditioning for Bundle Adjustment // A. Kushal & S. Agarwal, submitted to CVPR 2012 // http://www.cs.washington.edu/homes/sagarwal/vbp.pdf // -// The three preconditioners share enough code that its most efficient +// The two preconditioners share enough code that its most efficient // to implement them as part of the same code base. #ifndef CERES_INTERNAL_VISIBILITY_BASED_PRECONDITIONER_H_ @@ -58,36 +52,26 @@ #include <utility> #include "ceres/collections_port.h" #include "ceres/graph.h" -#include "ceres/linear_solver.h" -#include "ceres/linear_operator.h" -#include "ceres/suitesparse.h" #include "ceres/internal/macros.h" #include "ceres/internal/scoped_ptr.h" +#include "ceres/preconditioner.h" +#include "ceres/suitesparse.h" namespace ceres { namespace internal { class BlockRandomAccessSparseMatrix; class BlockSparseMatrixBase; -class CompressedRowBlockStructure; +struct CompressedRowBlockStructure; class SchurEliminatorBase; -// This class implements three preconditioners for Structure from -// Motion/Bundle Adjustment problems. The name +// This class implements visibility based preconditioners for +// Structure from Motion/Bundle Adjustment problems. The name // VisibilityBasedPreconditioner comes from the fact that the sparsity // structure of the preconditioner matrix is determined by analyzing // the visibility structure of the scene, i.e. which cameras see which // points. // -// Strictly speaking, SCHUR_JACOBI is not a visibility based -// preconditioner but it is an extreme case of CLUSTER_JACOBI, where -// every cluster contains exactly one camera block. Treating it as a -// special case of CLUSTER_JACOBI makes it easy to implement as part -// of the same code base with no significant loss of performance. -// -// In the following, we will only discuss CLUSTER_JACOBI and -// CLUSTER_TRIDIAGONAL. 
-// // The key idea of visibility based preconditioning is to identify // cameras that we expect have strong interactions, and then using the // entries in the Schur complement matrix corresponding to these @@ -130,15 +114,15 @@ class SchurEliminatorBase; // // LinearSolver::Options options; // options.preconditioner_type = CLUSTER_JACOBI; -// options.num_eliminate_blocks = num_points; +// options.elimination_groups.push_back(num_points); +// options.elimination_groups.push_back(num_cameras); // VisibilityBasedPreconditioner preconditioner( // *A.block_structure(), options); // preconditioner.Update(A, NULL); // preconditioner.RightMultiply(x, y); // - #ifndef CERES_NO_SUITESPARSE -class VisibilityBasedPreconditioner : public LinearOperator { +class VisibilityBasedPreconditioner : public Preconditioner { public: // Initialize the symbolic structure of the preconditioner. bs is // the block structure of the linear system to be solved. It is used @@ -146,48 +130,17 @@ class VisibilityBasedPreconditioner : public LinearOperator { // // It has the same structural requirement as other Schur complement // based solvers. Please see schur_eliminator.h for more details. - // - // LinearSolver::Options::num_eliminate_blocks should be set to the - // number of e_blocks in the block structure. - // - // TODO(sameeragarwal): The use of LinearSolver::Options should - // ultimately be replaced with Preconditioner::Options and some sort - // of preconditioner factory along the lines of - // LinearSolver::CreateLinearSolver. I will wait to do this till I - // create a general purpose block Jacobi preconditioner for general - // sparse problems along with a CGLS solver. 
VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs, - const LinearSolver::Options& options); + const Preconditioner::Options& options); virtual ~VisibilityBasedPreconditioner(); - // Update the numerical value of the preconditioner for the linear - // system: - // - // | A | x = |b| - // |diag(D)| |0| - // - // for some vector b. It is important that the matrix A have the - // same block structure as the one used to construct this object. - // - // D can be NULL, in which case its interpreted as a diagonal matrix - // of size zero. - bool Update(const BlockSparseMatrixBase& A, const double* D); - - - // LinearOperator interface. Since the operator is symmetric, - // LeftMultiply and num_cols are just calls to RightMultiply and - // num_rows respectively. Update() must be called before - // RightMultiply can be called. + // Preconditioner interface + virtual bool Update(const BlockSparseMatrixBase& A, const double* D); virtual void RightMultiply(const double* x, double* y) const; - virtual void LeftMultiply(const double* x, double* y) const { - RightMultiply(x, y); - } virtual int num_rows() const; - virtual int num_cols() const { return num_rows(); } friend class VisibilityBasedPreconditionerTest; private: - void ComputeSchurJacobiSparsity(const CompressedRowBlockStructure& bs); void ComputeClusterJacobiSparsity(const CompressedRowBlockStructure& bs); void ComputeClusterTridiagonalSparsity(const CompressedRowBlockStructure& bs); void InitStorage(const CompressedRowBlockStructure& bs); @@ -207,7 +160,7 @@ class VisibilityBasedPreconditioner : public LinearOperator { bool IsBlockPairInPreconditioner(int block1, int block2) const; bool IsBlockPairOffDiagonal(int block1, int block2) const; - LinearSolver::Options options_; + Preconditioner::Options options_; // Number of parameter blocks in the schur complement. 
int num_blocks_; @@ -249,10 +202,10 @@ class VisibilityBasedPreconditioner : public LinearOperator { #else // SuiteSparse // If SuiteSparse is not compiled in, the preconditioner is not // available. -class VisibilityBasedPreconditioner : public LinearOperator { +class VisibilityBasedPreconditioner : public Preconditioner { public: VisibilityBasedPreconditioner(const CompressedRowBlockStructure& bs, - const LinearSolver::Options& options) { + const Preconditioner::Options& options) { LOG(FATAL) << "Visibility based preconditioning is not available. Please " "build Ceres with SuiteSparse."; } diff --git a/extern/libmv/third_party/ceres/internal/ceres/wall_time.cc b/extern/libmv/third_party/ceres/internal/ceres/wall_time.cc new file mode 100644 index 00000000000..e63d20c0ab1 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/wall_time.cc @@ -0,0 +1,96 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: strandmark@google.com (Petter Strandmark) + +#include "ceres/wall_time.h" + +#ifdef CERES_USE_OPENMP +#include <omp.h> +#else +#include <ctime> +#endif + +#ifdef _WIN32 +#include <windows.h> +#else +#include <sys/time.h> +#endif + +namespace ceres { +namespace internal { + +double WallTimeInSeconds() { +#ifdef CERES_USE_OPENMP + return omp_get_wtime(); +#else +#ifdef _WIN32 + return static_cast<double>(std::time(NULL)); +#else + timeval time_val; + gettimeofday(&time_val, NULL); + return (time_val.tv_sec + time_val.tv_usec * 1e-6); +#endif +#endif +} + +EventLogger::EventLogger(const string& logger_name) + : start_time_(WallTimeInSeconds()), + last_event_time_(start_time_), + events_("") { + StringAppendF(&events_, + "\n%s\n Delta Cumulative\n", + logger_name.c_str()); +} + +EventLogger::~EventLogger() { + if (VLOG_IS_ON(3)) { + AddEvent("Total"); + VLOG(2) << "\n" << events_ << "\n"; + } +} + +void EventLogger::AddEvent(const string& event_name) { + if (!VLOG_IS_ON(3)) { + return; + } + + const double current_time = WallTimeInSeconds(); + const double relative_time_delta = current_time - last_event_time_; + const double absolute_time_delta = current_time - 
start_time_; + last_event_time_ = current_time; + + StringAppendF(&events_, + " %25s : %10.5f %10.5f\n", + event_name.c_str(), + relative_time_delta, + absolute_time_delta); +} + +} // namespace internal +} // namespace ceres diff --git a/extern/libmv/third_party/ceres/internal/ceres/wall_time.h b/extern/libmv/third_party/ceres/internal/ceres/wall_time.h new file mode 100644 index 00000000000..45f65ca1aa5 --- /dev/null +++ b/extern/libmv/third_party/ceres/internal/ceres/wall_time.h @@ -0,0 +1,88 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2012 Google Inc. All rights reserved. +// http://code.google.com/p/ceres-solver/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: strandmark@google.com (Petter Strandmark) + +#ifndef CERES_INTERNAL_WALL_TIME_H_ +#define CERES_INTERNAL_WALL_TIME_H_ + +#include <map> + +#include "ceres/internal/port.h" +#include "ceres/stringprintf.h" +#include "glog/logging.h" + +namespace ceres { +namespace internal { + +// Returns time, in seconds, from some arbitrary starting point. If +// OpenMP is available then the high precision openmp_get_wtime() +// function is used. Otherwise on unixes, gettimeofday is used. The +// granularity is in seconds on windows systems. +double WallTimeInSeconds(); + +// Log a series of events, recording for each event the time elapsed +// since the last event and since the creation of the object. +// +// The information is output to VLOG(3) upon destruction. A +// name::Total event is added as the final event right before +// destruction. 
+// +// Example usage: +// +// void Foo() { +// EventLogger event_logger("Foo"); +// Bar1(); +// event_logger.AddEvent("Bar1") +// Bar2(); +// event_logger.AddEvent("Bar2") +// Bar3(); +// } +// +// Will produce output that looks like +// +// Foo +// Bar1: time1 time1 +// Bar2: time2 time1 + time2; +// Total: time3 time1 + time2 + time3; +class EventLogger { + public: + explicit EventLogger(const string& logger_name); + ~EventLogger(); + void AddEvent(const string& event_name); + + private: + const double start_time_; + double last_event_time_; + string events_; +}; + +} // namespace internal +} // namespace ceres + +#endif // CERES_INTERNAL_WALL_TIME_H_ diff --git a/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch b/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch deleted file mode 100644 index c01a17c7992..00000000000 --- a/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch +++ /dev/null @@ -1,13 +0,0 @@ -diff --git a/internal/ceres/collections_port.h b/internal/ceres/collections_port.h -index a356cc0..c2fce90 100644 ---- a/internal/ceres/collections_port.h -+++ b/internal/ceres/collections_port.h -@@ -77,7 +77,7 @@ struct HashMap : std::tr1::unordered_map<K, V> {}; - template<typename K> - struct HashSet : std::tr1::unordered_set<K> {}; - --#ifdef _WIN32 -+#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__) - #define GG_LONGLONG(x) x##I64 - #define GG_ULONGLONG(x) x##UI64 - #else diff --git a/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch b/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch deleted file mode 100644 index f3200fb8e0a..00000000000 --- a/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch +++ /dev/null @@ -1,50 +0,0 @@ -diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc -index aedfc74..0f95e89 100644 ---- a/internal/ceres/block_random_access_dense_matrix.cc -+++ 
b/internal/ceres/block_random_access_dense_matrix.cc -@@ -28,12 +28,12 @@ - // - // Author: sameeragarwal@google.com (Sameer Agarwal) - -+#include "glog/logging.h" - #include "ceres/block_random_access_dense_matrix.h" - - #include <vector> - #include "ceres/internal/eigen.h" - #include "ceres/internal/scoped_ptr.h" --#include "glog/logging.h" - - namespace ceres { - namespace internal { -diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc -index f789436..9ed62ce 100644 ---- a/internal/ceres/block_random_access_sparse_matrix.cc -+++ b/internal/ceres/block_random_access_sparse_matrix.cc -@@ -28,6 +28,7 @@ - // - // Author: sameeragarwal@google.com (Sameer Agarwal) - -+#include "glog/logging.h" - #include "ceres/block_random_access_sparse_matrix.h" - - #include <algorithm> -@@ -39,7 +40,6 @@ - #include "ceres/mutex.h" - #include "ceres/triplet_sparse_matrix.h" - #include "ceres/types.h" --#include "glog/logging.h" - - namespace ceres { - namespace internal { -diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc -index b9224d8..2cbe78d 100644 ---- a/internal/ceres/schur_complement_solver.cc -+++ b/internal/ceres/schur_complement_solver.cc -@@ -38,6 +38,7 @@ - #endif // CERES_NO_CXSPARSE - - #include "Eigen/Dense" -+#include "glog/logging.h" - #include "ceres/block_random_access_dense_matrix.h" - #include "ceres/block_random_access_matrix.h" - #include "ceres/block_random_access_sparse_matrix.h" diff --git a/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch b/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch deleted file mode 100644 index 03f1c500d9a..00000000000 --- a/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch +++ /dev/null @@ -1,199 +0,0 @@ -diff --git a/internal/ceres/file.cc b/internal/ceres/file.cc -index 387f359..6fe7557 100644 ---- a/internal/ceres/file.cc -+++ 
b/internal/ceres/file.cc -@@ -31,6 +31,7 @@ - // Really simple file IO. - - #include <cstdio> -+#include "file.h" - #include "glog/logging.h" - - namespace ceres { -diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc -index 3e3bcd0..a91e254 100644 ---- a/internal/ceres/linear_least_squares_problems.cc -+++ b/internal/ceres/linear_least_squares_problems.cc -@@ -573,13 +573,13 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3() { - return problem; - } - --bool DumpLinearLeastSquaresProblemToConsole(const string& directory, -- int iteration, -- const SparseMatrix* A, -- const double* D, -- const double* b, -- const double* x, -- int num_eliminate_blocks) { -+static bool DumpLinearLeastSquaresProblemToConsole(const string& directory, -+ int iteration, -+ const SparseMatrix* A, -+ const double* D, -+ const double* b, -+ const double* x, -+ int num_eliminate_blocks) { - CHECK_NOTNULL(A); - Matrix AA; - A->ToDenseMatrix(&AA); -@@ -601,13 +601,13 @@ bool DumpLinearLeastSquaresProblemToConsole(const string& directory, - }; - - #ifndef CERES_NO_PROTOCOL_BUFFERS --bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, -- int iteration, -- const SparseMatrix* A, -- const double* D, -- const double* b, -- const double* x, -- int num_eliminate_blocks) { -+static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, -+ int iteration, -+ const SparseMatrix* A, -+ const double* D, -+ const double* b, -+ const double* x, -+ int num_eliminate_blocks) { - CHECK_NOTNULL(A); - LinearLeastSquaresProblemProto lsqp; - A->ToProto(lsqp.mutable_a()); -@@ -641,13 +641,13 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, - return true; - } - #else --bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, -- int iteration, -- const SparseMatrix* A, -- const double* D, -- const double* b, -- const double* x, -- int num_eliminate_blocks) { 
-+static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, -+ int iteration, -+ const SparseMatrix* A, -+ const double* D, -+ const double* b, -+ const double* x, -+ int num_eliminate_blocks) { - LOG(ERROR) << "Dumping least squares problems is only " - << "supported when Ceres is compiled with " - << "protocol buffer support."; -@@ -655,9 +655,9 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory, - } - #endif - --void WriteArrayToFileOrDie(const string& filename, -- const double* x, -- const int size) { -+static void WriteArrayToFileOrDie(const string& filename, -+ const double* x, -+ const int size) { - CHECK_NOTNULL(x); - VLOG(2) << "Writing array to: " << filename; - FILE* fptr = fopen(filename.c_str(), "w"); -@@ -668,13 +668,13 @@ void WriteArrayToFileOrDie(const string& filename, - fclose(fptr); - } - --bool DumpLinearLeastSquaresProblemToTextFile(const string& directory, -- int iteration, -- const SparseMatrix* A, -- const double* D, -- const double* b, -- const double* x, -- int num_eliminate_blocks) { -+static bool DumpLinearLeastSquaresProblemToTextFile(const string& directory, -+ int iteration, -+ const SparseMatrix* A, -+ const double* D, -+ const double* b, -+ const double* x, -+ int num_eliminate_blocks) { - CHECK_NOTNULL(A); - string format_string = JoinPath(directory, - "lm_iteration_%03d"); -diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc -index ff18e21..9442bb2 100644 ---- a/internal/ceres/residual_block_utils.cc -+++ b/internal/ceres/residual_block_utils.cc -@@ -63,7 +63,7 @@ void InvalidateEvaluation(const ResidualBlock& block, - - // Utility routine to print an array of doubles to a string. If the - // array pointer is NULL, it is treated as an array of zeros. 
--void AppendArrayToString(const int size, const double* x, string* result) { -+static void AppendArrayToString(const int size, const double* x, string* result) { - for (int i = 0; i < size; ++i) { - if (x == NULL) { - StringAppendF(result, "Not Computed "); -diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc -index 2802a75..8ef5b98 100644 ---- a/internal/ceres/solver_impl.cc -+++ b/internal/ceres/solver_impl.cc -@@ -685,8 +685,8 @@ bool SolverImpl::ApplyUserOrdering(const ProblemImpl& problem_impl, - // Find the minimum index of any parameter block to the given residual. - // Parameter blocks that have indices greater than num_eliminate_blocks are - // considered to have an index equal to num_eliminate_blocks. --int MinParameterBlock(const ResidualBlock* residual_block, -- int num_eliminate_blocks) { -+static int MinParameterBlock(const ResidualBlock* residual_block, -+ int num_eliminate_blocks) { - int min_parameter_block_position = num_eliminate_blocks; - for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) { - ParameterBlock* parameter_block = residual_block->parameter_blocks()[i]; -diff --git a/internal/ceres/split.cc b/internal/ceres/split.cc -index 4fa1bd4..c65c8a5 100644 ---- a/internal/ceres/split.cc -+++ b/internal/ceres/split.cc -@@ -31,6 +31,7 @@ - #include <string> - #include <vector> - #include <iterator> -+#include "ceres/split.h" - #include "ceres/internal/port.h" - - namespace ceres { -diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc -index c0f3522..396a48b 100644 ---- a/internal/ceres/stringprintf.cc -+++ b/internal/ceres/stringprintf.cc -@@ -34,6 +34,7 @@ - #include <string> - #include <vector> - -+#include "ceres/stringprintf.h" - #include "ceres/internal/port.h" - - namespace ceres { -diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc -index 2e950c5..05e573f 100644 ---- a/internal/ceres/types.cc -+++ b/internal/ceres/types.cc -@@ -98,7 +98,8 @@ const char* 
SolverTerminationTypeToString( - } - } - --const char* SparseLinearAlgebraTypeToString( -+#if 0 /* UNUSED */ -+static const char* SparseLinearAlgebraTypeToString( - SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) { - switch (sparse_linear_algebra_library_type) { - CASESTR(CX_SPARSE); -@@ -107,6 +108,7 @@ const char* SparseLinearAlgebraTypeToString( - return "UNKNOWN"; - } - } -+#endif - - const char* TrustRegionStrategyTypeToString( - TrustRegionStrategyType trust_region_strategy_type) { -diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc -index 9d80654..564cc54 100644 ---- a/internal/ceres/visibility.cc -+++ b/internal/ceres/visibility.cc -@@ -36,6 +36,7 @@ - #include <utility> - #include "ceres/block_structure.h" - #include "ceres/collections_port.h" -+#include "ceres/visibility.h" - #include "ceres/graph.h" - #include "glog/logging.h" - diff --git a/extern/libmv/third_party/ceres/patches/series b/extern/libmv/third_party/ceres/patches/series index a6874318923..e69de29bb2d 100644 --- a/extern/libmv/third_party/ceres/patches/series +++ b/extern/libmv/third_party/ceres/patches/series @@ -1,3 +0,0 @@ -collections_port.h.mingw.patch -msvc_glog_fix.patch -no_previous_declaration_fix.patch
\ No newline at end of file |