git.blender.org/blender.git

author     Tamito Kajiyama <rd6t-kjym@asahi-net.or.jp>  2012-09-23 22:50:56 +0400
committer  Tamito Kajiyama <rd6t-kjym@asahi-net.or.jp>  2012-09-23 22:50:56 +0400
commit     a42ba82f638e481d7fd3c3ed2ba05c331ef6717e (patch)
tree       81a2b9221799707b49b74a5df8bb3ff964ba78bd /extern
parent     7afbdff1b6c348227e652e1c3071ab7ba7c91c44 (diff)
parent     a73dd3476e7d180d3320afc04d218ce22f2f3bfc (diff)
Merged changes in the trunk up to revision 50829.

Conflicts resolved:
  source/blender/blenloader/intern/readfile.c
  source/blender/render/intern/source/convertblender.c
  source/blender/render/intern/source/pipeline.c

Also addressed code inconsistencies due to changes in trunk revisions 50628 (color management with OCIO) and 50806 (UV project material). OCIO-related changes are marked OCIO_TODO, as in some other files modified in revision 50628.
Diffstat (limited to 'extern')
-rw-r--r--  extern/libmv/CMakeLists.txt | 13
-rw-r--r--  extern/libmv/libmv-capi.cpp | 4
-rw-r--r--  extern/libmv/libmv-capi.h | 8
-rw-r--r--  extern/libmv/libmv/multiview/euclidean_resection.cc | 31
-rw-r--r--  extern/libmv/libmv/multiview/euclidean_resection.h | 3
-rw-r--r--  extern/libmv/libmv/multiview/fundamental.cc | 4
-rw-r--r--  extern/libmv/libmv/multiview/homography.cc | 2
-rw-r--r--  extern/libmv/libmv/numeric/levenberg_marquardt.h | 10
-rw-r--r--  extern/libmv/libmv/simple_pipeline/detect.cc | 2
-rw-r--r--  extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc | 1
-rw-r--r--  extern/libmv/libmv/simple_pipeline/intersect.cc | 1
-rw-r--r--  extern/libmv/libmv/simple_pipeline/pipeline.cc | 4
-rw-r--r--  extern/libmv/libmv/simple_pipeline/resect.cc | 5
-rw-r--r--  extern/libmv/libmv/tracking/track_region.cc | 213
-rw-r--r--  extern/libmv/libmv/tracking/track_region.h | 6
-rw-r--r--  extern/libmv/third_party/ceres/CMakeLists.txt | 48
-rw-r--r--  extern/libmv/third_party/ceres/ChangeLog | 608
-rw-r--r--  extern/libmv/third_party/ceres/SConscript | 6
-rwxr-xr-x  extern/libmv/third_party/ceres/bundle.sh | 55
-rw-r--r--  extern/libmv/third_party/ceres/files.txt | 22
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h | 4
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/cost_function.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/crs_matrix.h | 65
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/fpclassify.h | 88
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h | 3
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/internal/macros.h | 31
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h | 71
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/internal/port.h | 6
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/iteration_callback.h | 36
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/jet.h | 163
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/loss_function.h | 79
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h | 6
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/problem.h | 6
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/rotation.h | 62
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/solver.h | 229
-rw-r--r--  extern/libmv/third_party/ceres/include/ceres/types.h | 68
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/array_utils.cc (renamed from extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.h) | 54
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/array_utils.h | 65
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.cc | 16
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.h | 16
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc | 9
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_jacobian_writer.cc | 5
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_random_access_matrix.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc | 6
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h | 8
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.cc | 8
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.h | 8
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/block_structure.cc | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc | 8
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/collections_port.h | 50
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc | 18
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.cc | 28
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h | 28
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/conditioned_cost_function.cc | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc | 20
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/corrector.cc | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc | 130
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/cxsparse.h | 90
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc | 86
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.h | 95
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc | 6
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/detect_structure.cc | 14
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/detect_structure.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc | 691
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h | 163
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/evaluator.cc | 82
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/evaluator.h | 31
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/file.cc | 5
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py | 186
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc | 10
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/graph.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc | 23
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h | 10
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc | 11
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.cc | 574
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc | 144
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h | 86
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc | 101
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/linear_solver.cc | 30
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/linear_solver.h | 26
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/local_parameterization.cc | 5
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/loss_function.cc | 64
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/matrix_proto.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/minimizer.h | 68
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/mutex.h | 98
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/normal_prior.cc | 3
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/parameter_block.h | 19
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc | 184
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h | 65
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc | 9
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/problem_impl.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/program.cc | 77
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/program.h | 21
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h | 84
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/random.h | 29
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/residual_block.cc | 7
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc | 41
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.h | 9
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc | 3
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc | 159
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h | 38
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h | 38
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc | 18
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/solver.cc | 52
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc | 296
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/solver_impl.h | 14
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/sparse_matrix.h | 2
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc | 166
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h | 36
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/split.cc | 1
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/split.h | 21
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc | 1
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/stringprintf.h | 14
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc | 157
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/suitesparse.h | 82
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc | 10
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.h | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc | 550
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h | 67
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc | 27
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h | 148
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/types.cc | 35
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/visibility.cc | 4
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc | 36
-rw-r--r--  extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h | 14
-rw-r--r--  extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch | 12
-rw-r--r--  extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch | 50
-rw-r--r--  extern/libmv/third_party/ceres/patches/msvc_isfinite.patch | 15
-rw-r--r--  extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch | 199
-rw-r--r--  extern/libmv/third_party/ceres/patches/series | 4
138 files changed, 6185 insertions, 1817 deletions
diff --git a/extern/libmv/CMakeLists.txt b/extern/libmv/CMakeLists.txt
index 60cd84d89d4..602372f6ff7 100644
--- a/extern/libmv/CMakeLists.txt
+++ b/extern/libmv/CMakeLists.txt
@@ -40,19 +40,6 @@ set(INC_SYS
${ZLIB_INCLUDE_DIRS}
)
-
-# XXX - FIXME
-# this is a momentary hack to find unwind.h in 10.6.sdk
-if(APPLE)
- if(${CMAKE_OSX_DEPLOYMENT_TARGET} STREQUAL "10.6")
- list(APPEND INC_SYS
- ${CMAKE_OSX_SYSROOT}/Developer/usr/llvm-gcc-4.2/lib/gcc/i686-apple-darwin10/4.2.1/include
- )
- endif()
-endif()
-# XXX - END
-
-
set(SRC
libmv-capi.cpp
libmv/image/array_nd.cc
diff --git a/extern/libmv/libmv-capi.cpp b/extern/libmv/libmv-capi.cpp
index 3d3b7398c9b..0f73234f49c 100644
--- a/extern/libmv/libmv-capi.cpp
+++ b/extern/libmv/libmv-capi.cpp
@@ -523,7 +523,7 @@ int libmv_refineParametersAreValid(int parameters) {
LIBMV_REFINE_RADIAL_DISTORTION_K1));
}
-void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
+static void libmv_solveRefineIntrinsics(libmv::Tracks *tracks, libmv::CameraIntrinsics *intrinsics,
libmv::EuclideanReconstruction *reconstruction, int refine_intrinsics,
reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
{
@@ -1027,7 +1027,7 @@ void libmv_InvertIntrinsics(double focal_length, double principal_x, double prin
/* ************ point clouds ************ */
-void libmvTransformToMat4(libmv::Mat3 &R, libmv::Vec3 &S, libmv::Vec3 &t, double M[4][4])
+static void libmvTransformToMat4(libmv::Mat3 &R, libmv::Vec3 &S, libmv::Vec3 &t, double M[4][4])
{
for (int j = 0; j < 3; ++j)
for (int k = 0; k < 3; ++k)
diff --git a/extern/libmv/libmv-capi.h b/extern/libmv/libmv-capi.h
index fc3b6f94f62..5253ba76a21 100644
--- a/extern/libmv/libmv-capi.h
+++ b/extern/libmv/libmv-capi.h
@@ -147,14 +147,14 @@ void libmv_CameraIntrinsicsDistortFloat(struct libmv_CameraIntrinsics *libmvIntr
/* dsitortion */
void libmv_undistortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
- unsigned char *src, unsigned char *dst, int width, int height, int channels);
+ unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_undistortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
- float *src, float *dst, int width, int height, int channels);
+ float *src, float *dst, int width, int height, float overscan, int channels);
void libmv_distortByte(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
- unsigned char *src, unsigned char *dst, int width, int height, int channels);
+ unsigned char *src, unsigned char *dst, int width, int height, float overscan, int channels);
void libmv_distortFloat(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
- float *src, float *dst, int width, int height, int channels);
+ float *src, float *dst, int width, int height, float overscan, int channels);
/* utils */
void libmv_applyCameraIntrinsics(double focal_length, double principal_x, double principal_y, double k1, double k2, double k3,
diff --git a/extern/libmv/libmv/multiview/euclidean_resection.cc b/extern/libmv/libmv/multiview/euclidean_resection.cc
index 92862515d7e..6cad4cac049 100644
--- a/extern/libmv/libmv/multiview/euclidean_resection.cc
+++ b/extern/libmv/libmv/multiview/euclidean_resection.cc
@@ -351,9 +351,9 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
// Selects 4 virtual control points using mean and PCA.
-void SelectControlPoints(const Mat3X &X_world,
- Mat *X_centered,
- Mat34 *X_control_points) {
+static void SelectControlPoints(const Mat3X &X_world,
+ Mat *X_centered,
+ Mat34 *X_control_points) {
size_t num_points = X_world.cols();
// The first virtual control point, C0, is the centroid.
@@ -377,9 +377,9 @@ void SelectControlPoints(const Mat3X &X_world,
}
// Computes the barycentric coordinates for all real points
-void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
- const Mat34 &X_control_points,
- Mat4X *alphas) {
+static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
+ const Mat34 &X_control_points,
+ Mat4X *alphas) {
size_t num_points = X_world_centered.cols();
Mat3 C2 ;
for (size_t c = 1; c < 4; c++) {
@@ -398,7 +398,7 @@ void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
}
// Estimates the coordinates of all real points in the camera coordinate frame
-void ComputePointsCoordinatesInCameraFrame(
+static void ComputePointsCoordinatesInCameraFrame(
const Mat4X &alphas,
const Vec4 &betas,
const Eigen::Matrix<double, 12, 12> &U,
@@ -535,7 +535,16 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
vector<Vec3> ts(3);
Vec rmse(3);
- // TODO(julien): Document where the "1e-3" magical constant comes from below.
+ // At one point this threshold was 1e-3, and caused no end of problems in
+ // Blender by causing frames to not resect when they would have worked fine.
+ // When the resect failed, the projective followup is run leading to worse
+ // results, and often the dreaded "flipping" where objects get flipped
+ // between frames. Instead, disable the check for now, always succeeding. The
+ // ultimate check is always reprojection error anyway.
+ //
+ // TODO(keir): Decide if setting this to infinity, effectively disabling the
+ // check, is the right approach. So far this seems the case.
+ double kSuccessThreshold = std::numeric_limits<double>::max();
// Find the first possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
@@ -548,7 +557,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Eigen::JacobiSVD<Mat> svd_of_l4(l_6x4,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec4 b4 = svd_of_l4.solve(rho);
- if ((l_6x4 * b4).isApprox(rho, 1e-3)) {
+ if ((l_6x4 * b4).isApprox(rho, kSuccessThreshold)) {
if (b4(0) < 0) {
b4 = -b4;
}
@@ -574,7 +583,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Vec3 b3 = svdOfL3.solve(rho);
VLOG(2) << " rho = " << rho;
VLOG(2) << " l_6x3 * b3 = " << l_6x3 * b3;
- if ((l_6x3 * b3).isApprox(rho, 1e-3)) {
+ if ((l_6x3 * b3).isApprox(rho, kSuccessThreshold)) {
if (b3(0) < 0) {
betas(0) = std::sqrt(-b3(0));
betas(1) = (b3(2) < 0) ? std::sqrt(-b3(2)) : 0;
@@ -605,7 +614,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
Eigen::JacobiSVD<Mat> svdOfL5(l_6x5,
Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec5 b5 = svdOfL5.solve(rho);
- if ((l_6x5 * b5).isApprox(rho, 1e-3)) {
+ if ((l_6x5 * b5).isApprox(rho, kSuccessThreshold)) {
if (b5(0) < 0) {
betas(0) = std::sqrt(-b5(0));
if (b5(2) < 0) {
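
The comment block above explains why the old 1e-3 tolerance is replaced with an effectively unlimited one. As an illustrative aside (not part of the patch), Eigen's a.isApprox(b, prec) tests (a - b).squaredNorm() <= prec^2 * min(a.squaredNorm(), b.squaredNorm()), so setting the precision to the largest finite double makes the beta checks pass for any finite, nonzero vectors and leaves reprojection error as the real accept/reject criterion:

    #include <Eigen/Dense>
    #include <iostream>
    #include <limits>

    int main() {
      Eigen::VectorXd rho(3), candidate(3);
      rho << 1.0, 2.0, 3.0;
      candidate << 10.0, 20.0, 30.0;  // Deliberately far from rho.

      const double kSuccessThreshold = std::numeric_limits<double>::max();
      std::cout << candidate.isApprox(rho, 1e-3) << "\n";               // 0: old check rejects
      std::cout << candidate.isApprox(rho, kSuccessThreshold) << "\n";  // 1: relaxed check accepts
      return 0;
    }
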
diff --git a/extern/libmv/libmv/multiview/euclidean_resection.h b/extern/libmv/libmv/multiview/euclidean_resection.h
index 08fa3d90bd3..f20513d3fbe 100644
--- a/extern/libmv/libmv/multiview/euclidean_resection.h
+++ b/extern/libmv/libmv/multiview/euclidean_resection.h
@@ -29,6 +29,9 @@ namespace euclidean_resection {
enum ResectionMethod {
RESECTION_ANSAR_DANIILIDIS,
+
+ // The "EPnP" algorithm by Lepetit et al.
+ // http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
RESECTION_EPNP,
};
diff --git a/extern/libmv/libmv/multiview/fundamental.cc b/extern/libmv/libmv/multiview/fundamental.cc
index 7a6b4a08439..80f155e804d 100644
--- a/extern/libmv/libmv/multiview/fundamental.cc
+++ b/extern/libmv/libmv/multiview/fundamental.cc
@@ -28,7 +28,7 @@
namespace libmv {
-void EliminateRow(const Mat34 &P, int row, Mat *X) {
+static void EliminateRow(const Mat34 &P, int row, Mat *X) {
X->resize(2, 4);
int first_row = (row + 1) % 3;
@@ -69,7 +69,7 @@ void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
// HZ 11.1 pag.279 (x1 = x, x2 = x')
// http://www.cs.unc.edu/~marc/tutorial/node54.html
-double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
+static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
diff --git a/extern/libmv/libmv/multiview/homography.cc b/extern/libmv/libmv/multiview/homography.cc
index 366392f3923..b5c483998d8 100644
--- a/extern/libmv/libmv/multiview/homography.cc
+++ b/extern/libmv/libmv/multiview/homography.cc
@@ -40,7 +40,7 @@ namespace libmv {
* (a-x1*g)*y1 + (b-x1*h)*y2 + c-x1 = |0|
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
-bool Homography2DFromCorrespondencesLinearEuc(
+static bool Homography2DFromCorrespondencesLinearEuc(
const Mat &x1,
const Mat &x2,
Mat3 *H,
diff --git a/extern/libmv/libmv/numeric/levenberg_marquardt.h b/extern/libmv/libmv/numeric/levenberg_marquardt.h
index 4473b72f156..a7877e0270b 100644
--- a/extern/libmv/libmv/numeric/levenberg_marquardt.h
+++ b/extern/libmv/libmv/numeric/levenberg_marquardt.h
@@ -124,11 +124,11 @@ class LevenbergMarquardt {
Parameters dx, x_new;
int i;
for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
- VLOG(1) << "iteration: " << i;
- VLOG(1) << "||f(x)||: " << f_(x).norm();
- VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
- VLOG(1) << "u: " << u;
- VLOG(1) << "v: " << v;
+ VLOG(3) << "iteration: " << i;
+ VLOG(3) << "||f(x)||: " << f_(x).norm();
+ VLOG(3) << "max(g): " << g.array().abs().maxCoeff();
+ VLOG(3) << "u: " << u;
+ VLOG(3) << "v: " << v;
AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
Solver solver(A_augmented);
diff --git a/extern/libmv/libmv/simple_pipeline/detect.cc b/extern/libmv/libmv/simple_pipeline/detect.cc
index 8a093dadeca..9e3edf32d71 100644
--- a/extern/libmv/libmv/simple_pipeline/detect.cc
+++ b/extern/libmv/libmv/simple_pipeline/detect.cc
@@ -35,7 +35,7 @@ namespace libmv {
typedef unsigned int uint;
-int featurecmp(const void *a_v, const void *b_v)
+static int featurecmp(const void *a_v, const void *b_v)
{
Feature *a = (Feature*)a_v;
Feature *b = (Feature*)b_v;
diff --git a/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
index 77fe2a530c4..9c06d1ef4e6 100644
--- a/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
+++ b/extern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
@@ -24,6 +24,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/levenberg_marquardt.h"
#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/initialize_reconstruction.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
diff --git a/extern/libmv/libmv/simple_pipeline/intersect.cc b/extern/libmv/libmv/simple_pipeline/intersect.cc
index b1518e04651..0c2f744dc1c 100644
--- a/extern/libmv/libmv/simple_pipeline/intersect.cc
+++ b/extern/libmv/libmv/simple_pipeline/intersect.cc
@@ -26,6 +26,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/intersect.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
diff --git a/extern/libmv/libmv/simple_pipeline/pipeline.cc b/extern/libmv/libmv/simple_pipeline/pipeline.cc
index 2459d059114..4773a70d69b 100644
--- a/extern/libmv/libmv/simple_pipeline/pipeline.cc
+++ b/extern/libmv/libmv/simple_pipeline/pipeline.cc
@@ -242,9 +242,9 @@ void InternalCompleteReconstruction(
(double)tot_resects/(max_image));
if (PipelineRoutines::Resect(reconstructed_markers, reconstruction, true)) {
num_resects++;
- LG << "Ran Resect() for image " << image;
+ LG << "Ran final Resect() for image " << image;
} else {
- LG << "Failed Resect() for image " << image;
+ LG << "Failed final Resect() for image " << image;
}
}
}
diff --git a/extern/libmv/libmv/simple_pipeline/resect.cc b/extern/libmv/libmv/simple_pipeline/resect.cc
index b30d959b512..8737182dd43 100644
--- a/extern/libmv/libmv/simple_pipeline/resect.cc
+++ b/extern/libmv/libmv/simple_pipeline/resect.cc
@@ -27,6 +27,7 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/resect.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
@@ -107,6 +108,10 @@ bool EuclideanResect(const vector<Marker> &markers,
// printf("Resection for image %d failed\n", markers[0].image);
LG << "Resection for image " << markers[0].image << " failed;"
<< " trying fallback projective resection.";
+
+ LG << "No fallback; failing resection for " << markers[0].image;
+ return false;
+
if (!final_pass) return false;
// Euclidean resection failed. Fall back to projective resection, which is
// less reliable but better conditioned when there are many points.
diff --git a/extern/libmv/libmv/tracking/track_region.cc b/extern/libmv/libmv/tracking/track_region.cc
index f52919b2a61..472d58a1547 100644
--- a/extern/libmv/libmv/tracking/track_region.cc
+++ b/extern/libmv/libmv/tracking/track_region.cc
@@ -18,7 +18,7 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
//
-// Author: mierle@google.com (Keir Mierle)
+// Author: mierle@gmail.com (Keir Mierle)
//
// TODO(keir): While this tracking code works rather well, it has some
// outragous inefficiencies. There is probably a 5-10x speedup to be had if a
@@ -41,6 +41,85 @@
#include "libmv/multiview/homography.h"
#include "libmv/numeric/numeric.h"
+// Expand the Jet functionality of Ceres to allow mixed numeric/autodiff.
+//
+// TODO(keir): Push this (or something similar) into upstream Ceres.
+namespace ceres {
+
+// A jet traits class to make it easier to work with mixed auto / numeric diff.
+template<typename T>
+struct JetOps {
+ static bool IsScalar() {
+ return true;
+ }
+ static T GetScalar(const T& t) {
+ return t;
+ }
+ static void SetScalar(const T& scalar, T* t) {
+ *t = scalar;
+ }
+ static void ScaleDerivative(double scale_by, T *value) {
+ // For double, there is no derivative to scale.
+ }
+};
+
+template<typename T, int N>
+struct JetOps<Jet<T, N> > {
+ static bool IsScalar() {
+ return false;
+ }
+ static T GetScalar(const Jet<T, N>& t) {
+ return t.a;
+ }
+ static void SetScalar(const T& scalar, Jet<T, N>* t) {
+ t->a = scalar;
+ }
+ static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
+ value->v *= scale_by;
+ }
+};
+
+template<typename FunctionType, int kNumArgs, typename ArgumentType>
+struct Chain {
+ static ArgumentType Rule(const FunctionType &f,
+ const FunctionType dfdx[kNumArgs],
+ const ArgumentType x[kNumArgs]) {
+ // In the default case of scalars, there's nothing to do since there are no
+ // derivatives to propagate.
+ return f;
+ }
+};
+
+// XXX Add documentation here!
+template<typename FunctionType, int kNumArgs, typename T, int N>
+struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
+ static Jet<T, N> Rule(const FunctionType &f,
+ const FunctionType dfdx[kNumArgs],
+ const Jet<T, N> x[kNumArgs]) {
+ // x is itself a function of another variable ("z"); what this function
+ // needs to return is "f", but with the derivative with respect to z
+ // attached to the jet. So combine the derivative part of x's jets to form
+ // a Jacobian matrix between x and z (i.e. dx/dz).
+ Eigen::Matrix<T, kNumArgs, N> dxdz;
+ for (int i = 0; i < kNumArgs; ++i) {
+ dxdz.row(i) = x[i].v.transpose();
+ }
+
+ // Map the input gradient dfdx into an Eigen row vector.
+ Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
+ vector_dfdx(dfdx, 1, kNumArgs);
+
+ // Now apply the chain rule to obtain df/dz. Combine the derivative with
+ // the scalar part to obtain f with full derivative information.
+ Jet<T, N> jet_f;
+ jet_f.a = f;
+ jet_f.v = vector_dfdx.template cast<T>() * dxdz; // Also known as dfdz.
+ return jet_f;
+ }
+};
+
+} // namespace ceres
+
namespace libmv {
using ceres::Jet;
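
The JetOps/Chain helpers added above exist so that a value and gradient obtained numerically (for example, a bilinear image sample, as done later in this file) can be folded back into Ceres automatic differentiation. A minimal usage sketch follows; it assumes the JetOps/Chain definitions from the hunk above are visible in the same translation unit, and the 1-D table lookup with a forward-difference slope is purely illustrative, not code from this patch:

    #include <vector>
    #include "ceres/jet.h"

    // Sample a 1-D table at a (possibly Jet-valued) coordinate x. The value and
    // slope are computed numerically at the scalar part of x, then Chain::Rule
    // re-attaches the derivative so autodiff through x keeps working.
    template <typename T>
    T SampleTable(const std::vector<double>& table, const T& x) {
      const double xs = ceres::JetOps<T>::GetScalar(x);
      int i = static_cast<int>(xs);
      if (i < 0) i = 0;
      if (i + 1 >= static_cast<int>(table.size())) i = static_cast<int>(table.size()) - 2;

      const double f = table[i];                           // Numeric value.
      const double dfdx[1] = { table[i + 1] - table[i] };  // Forward-difference slope.
      const T args[1] = { x };
      return ceres::Chain<double, 1, T>::Rule(f, dfdx, args);
    }
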
@@ -57,6 +136,7 @@ TrackRegionOptions::TrackRegionOptions()
sigma(0.9),
num_extra_points(0),
regularization_coefficient(0.0),
+ minimum_corner_shift_tolerance_pixels(0.005),
image1_mask(NULL) {
}
@@ -108,45 +188,93 @@ static T SampleWithDerivative(const FloatImage &image_and_gradient,
}
template<typename Warp>
-class BoundaryCheckingCallback : public ceres::IterationCallback {
+class TerminationCheckingCallback : public ceres::IterationCallback {
public:
- BoundaryCheckingCallback(const FloatImage& image2,
- const Warp &warp,
- const double *x1, const double *y1)
- : image2_(image2), warp_(warp), x1_(x1), y1_(y1) {}
+ TerminationCheckingCallback(const TrackRegionOptions &options,
+ const FloatImage& image2,
+ const Warp &warp,
+ const double *x1, const double *y1)
+ : options_(options), image2_(image2), warp_(warp), x1_(x1), y1_(y1),
+ have_last_successful_step_(false) {}
virtual ceres::CallbackReturnType operator()(
const ceres::IterationSummary& summary) {
+ // If the step wasn't successful, there's nothing to do.
+ if (!summary.step_is_successful) {
+ return ceres::SOLVER_CONTINUE;
+ }
// Warp the original 4 points with the current warp into image2.
double x2[4];
double y2[4];
for (int i = 0; i < 4; ++i) {
warp_.Forward(warp_.parameters, x1_[i], y1_[i], x2 + i, y2 + i);
}
- // Enusre they are all in bounds.
+ // Ensure the corners are all in bounds.
if (!AllInBounds(image2_, x2, y2)) {
+ LG << "Successful step fell outside of the pattern bounds; aborting.";
return ceres::SOLVER_ABORT;
}
+
+ // Ensure the minimizer is making large enough shifts to bother continuing.
+ // Ideally, this check would happen on the parameters themselves which
+ // Ceres supports directly; however, the mapping from parameter change
+ // magnitude to corner movement in pixels is not a simple norm. Hence, the
+ // need for a stateful callback which tracks the last successful set of
+ // parameters (and the position of the projected patch corners).
+ if (have_last_successful_step_) {
+ // Compute the maximum shift of any corner in pixels since the last
+ // successful iteration.
+ double max_change_pixels = 0;
+ for (int i = 0; i < 4; ++i) {
+ double dx = x2[i] - x2_last_successful_[i];
+ double dy = y2[i] - y2_last_successful_[i];
+ double change_pixels = dx*dx + dy*dy;
+ if (change_pixels > max_change_pixels) {
+ max_change_pixels = change_pixels;
+ }
+ }
+ max_change_pixels = sqrt(max_change_pixels);
+ LG << "Max patch corner shift is " << max_change_pixels;
+
+ // Bail if the shift is too small.
+ if (max_change_pixels < options_.minimum_corner_shift_tolerance_pixels) {
+ LG << "Max patch corner shift is " << max_change_pixels
+ << " from the last iteration; returning success.";
+ return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
+ }
+ }
+
+ // Save the projected corners for checking on the next successful iteration.
+ for (int i = 0; i < 4; ++i) {
+ x2_last_successful_[i] = x2[i];
+ y2_last_successful_[i] = y2[i];
+ }
+ have_last_successful_step_ = true;
return ceres::SOLVER_CONTINUE;
}
private:
+ const TrackRegionOptions &options_;
const FloatImage &image2_;
const Warp &warp_;
const double *x1_;
const double *y1_;
+
+ bool have_last_successful_step_;
+ double x2_last_successful_[4];
+ double y2_last_successful_[4];
};
template<typename Warp>
class PixelDifferenceCostFunctor {
public:
PixelDifferenceCostFunctor(const TrackRegionOptions &options,
- const FloatImage &image_and_gradient1,
- const FloatImage &image_and_gradient2,
- const Mat3 &canonical_to_image1,
- int num_samples_x,
- int num_samples_y,
- const Warp &warp)
+ const FloatImage &image_and_gradient1,
+ const FloatImage &image_and_gradient2,
+ const Mat3 &canonical_to_image1,
+ int num_samples_x,
+ int num_samples_y,
+ const Warp &warp)
: options_(options),
image_and_gradient1_(image_and_gradient1),
image_and_gradient2_(image_and_gradient2),
@@ -1044,6 +1172,9 @@ void CreateBrutePattern(const double *x1, const double *y1,
// correlation. Instead, this is a dumb implementation. Surprisingly, it is
// fast enough in practice.
//
+// Returns true if any alignment was found, and false if the projected pattern
+// is zero sized.
+//
// TODO(keir): The normalization is less effective for the brute force search
// than it is with the Ceres solver. It's unclear if this is a bug or due to
// the original frame being too different from the reprojected reference in the
@@ -1054,7 +1185,7 @@ void CreateBrutePattern(const double *x1, const double *y1,
// totally different warping interface, since access to more than a the source
// and current destination frame is necessary.
template<typename Warp>
-void BruteTranslationOnlyInitialize(const FloatImage &image1,
+bool BruteTranslationOnlyInitialize(const FloatImage &image1,
const FloatImage *image1_mask,
const FloatImage &image2,
const int num_extra_points,
@@ -1100,6 +1231,7 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
int best_c = -1;
int w = pattern.cols();
int h = pattern.rows();
+
for (int r = 0; r < (image2.Height() - h); ++r) {
for (int c = 0; c < (image2.Width() - w); ++c) {
// Compute the weighted sum of absolute differences, Eigen style. Note
@@ -1124,8 +1256,12 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
}
}
}
- CHECK_NE(best_r, -1);
- CHECK_NE(best_c, -1);
+
+ // This mean the effective pattern area is zero. This check could go earlier,
+ // but this is less code.
+ if (best_r == -1 || best_c == -1) {
+ return false;
+ }
LG << "Brute force translation found a shift. "
<< "best_c: " << best_c << ", best_r: " << best_r << ", "
@@ -1140,6 +1276,7 @@ void BruteTranslationOnlyInitialize(const FloatImage &image1,
x2[i] += best_c - origin_x;
y2[i] += best_r - origin_y;
}
+ return true;
}
} // namespace
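
The hunks above also make BruteTranslationOnlyInitialize report failure instead of CHECK-failing. For orientation, the search it performs is essentially a sliding weighted sum-of-absolute-differences; the following is a stripped-down, illustrative sketch only (single channel, no mask, no intensity normalization, hypothetical names), not code from the patch:

    #include <Eigen/Dense>
    #include <limits>

    static bool BruteTranslationSearch(const Eigen::MatrixXf& image2,
                                       const Eigen::MatrixXf& pattern,
                                       int* best_r, int* best_c) {
      const int h = pattern.rows();
      const int w = pattern.cols();
      float best_sad = std::numeric_limits<float>::max();
      *best_r = -1;
      *best_c = -1;
      for (int r = 0; r + h <= image2.rows(); ++r) {
        for (int c = 0; c + w <= image2.cols(); ++c) {
          // Weighted SAD in the real code; plain SAD here.
          const float sad = (image2.block(r, c, h, w) - pattern).cwiseAbs().sum();
          if (sad < best_sad) {
            best_sad = sad;
            *best_r = r;
            *best_c = c;
          }
        }
      }
      // False mirrors the new failure return added in this hunk: no candidate
      // offset was ever evaluated, so there is nothing to initialize from.
      return *best_r != -1 && *best_c != -1;
    }
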
@@ -1191,12 +1328,19 @@ void TemplatedTrackRegion(const FloatImage &image1,
if (SearchAreaTooBigForDescent(image2, x2, y2) &&
options.use_brute_initialization) {
LG << "Running brute initialization...";
- BruteTranslationOnlyInitialize<Warp>(image_and_gradient1,
- options.image1_mask,
- image2,
- options.num_extra_points,
- options.use_normalized_intensities,
- x1, y1, x2, y2);
+ bool found_any_alignment = BruteTranslationOnlyInitialize<Warp>(
+ image_and_gradient1,
+ options.image1_mask,
+ image2,
+ options.num_extra_points,
+ options.use_normalized_intensities,
+ x1, y1, x2, y2);
+ if (!found_any_alignment) {
+ LG << "Brute failed to find an alignment; pattern too small. "
+ << "Failing entire track operation.";
+ result->termination = TrackRegionResult::INSUFFICIENT_PATTERN_AREA;
+ return;
+ }
for (int i = 0; i < 4; ++i) {
LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); brute ("
<< x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i])
@@ -1260,14 +1404,15 @@ void TemplatedTrackRegion(const FloatImage &image1,
// Configure the solve.
ceres::Solver::Options solver_options;
- solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
solver_options.max_num_iterations = options.max_iterations;
solver_options.update_state_every_iteration = true;
solver_options.parameter_tolerance = 1e-16;
solver_options.function_tolerance = 1e-16;
- // Prevent the corners from going outside the destination image.
- BoundaryCheckingCallback<Warp> callback(image2, warp, x1, y1);
+ // Prevent the corners from going outside the destination image and
+ // terminate if the optimizer is making tiny moves (converged).
+ TerminationCheckingCallback<Warp> callback(options, image2, warp, x1, y1);
solver_options.callbacks.push_back(&callback);
// Run the solve.
@@ -1290,11 +1435,21 @@ void TemplatedTrackRegion(const FloatImage &image1,
// TODO(keir): Update the result statistics.
// TODO(keir): Add a normalize-cross-correlation variant.
- CHECK_NE(summary.termination_type, ceres::USER_ABORT) << "Libmv bug.";
if (summary.termination_type == ceres::USER_ABORT) {
result->termination = TrackRegionResult::FELL_OUT_OF_BOUNDS;
return;
}
+
+ // This happens when the minimum corner shift tolerance is reached. Due to
+ // how the tolerance is computed this can't be done by Ceres. So return the
+ // same termination enum as Ceres, even though this is slightly different
+ // than Ceres's parameter tolerance, which operates on the raw parameter
+ // values rather than the pixel shifts of the patch corners.
+ if (summary.termination_type == ceres::USER_SUCCESS) {
+ result->termination = TrackRegionResult::PARAMETER_TOLERANCE;
+ return;
+ }
+
#define HANDLE_TERMINATION(termination_enum) \
if (summary.termination_type == ceres::termination_enum) { \
result->termination = TrackRegionResult::termination_enum; \
@@ -1377,11 +1532,11 @@ bool SamplePlanarPatch(const FloatImage &image,
image_position(0),
&(*patch)(r, c, 0));
if (mask) {
- float maskValue = SampleLinear(*mask, image_position(1),
- image_position(0), 0);
+ float mask_value = SampleLinear(*mask, image_position(1),
+ image_position(0), 0);
for (int d = 0; d < image.Depth(); d++)
- (*patch)(r, c, d) *= maskValue;
+ (*patch)(r, c, d) *= mask_value;
}
}
}
diff --git a/extern/libmv/libmv/tracking/track_region.h b/extern/libmv/libmv/tracking/track_region.h
index 22ecfc54a15..cd7ee0aa2ba 100644
--- a/extern/libmv/libmv/tracking/track_region.h
+++ b/extern/libmv/libmv/tracking/track_region.h
@@ -90,6 +90,11 @@ struct TrackRegionOptions {
// If zero, no regularization is used.
double regularization_coefficient;
+ // If the maximum shift of any patch corner between successful iterations of
+ // the solver is less than this amount, then the tracking is declared
+ // successful. The solver termination becomes PARAMETER_TOLERANCE.
+ double minimum_corner_shift_tolerance_pixels;
+
// If non-null, this is used as the pattern mask. It should match the size of
// image1, even though only values inside the image1 quad are examined. The
// values must be in the range 0.0 to 0.1.
@@ -111,6 +116,7 @@ struct TrackRegionResult {
DESTINATION_OUT_OF_BOUNDS,
FELL_OUT_OF_BOUNDS,
INSUFFICIENT_CORRELATION,
+ INSUFFICIENT_PATTERN_AREA,
CONFIGURATION_ERROR,
};
Termination termination;
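
Taken together with the track_region.cc changes, the new option and termination values are meant to be consumed by callers roughly as follows. This is a hypothetical sketch; the actual TrackRegion call is elided since its full signature is not part of this hunk, and only the fields and enum values touched by this commit are assumed:

    #include "libmv/tracking/track_region.h"

    // Hypothetical helper, not part of the patch: configure the new convergence
    // option and interpret the termination values added in this commit.
    void ConfigureAndInterpret(libmv::TrackRegionOptions* options,
                               libmv::TrackRegionResult* result) {
      // New in this patch: declare success once no patch corner moves more than
      // this many pixels between successful solver iterations.
      options->minimum_corner_shift_tolerance_pixels = 0.005;

      // ... run the tracker (libmv::TrackRegion, see track_region.h) ...

      if (result->termination == libmv::TrackRegionResult::PARAMETER_TOLERANCE) {
        // Reached via the new TerminationCheckingCallback: corner shifts fell
        // below the tolerance, so the track is treated as converged.
      } else if (result->termination ==
                 libmv::TrackRegionResult::INSUFFICIENT_PATTERN_AREA) {
        // New failure mode: brute-force initialization found a zero-sized
        // projected pattern, so the whole track attempt was abandoned.
      }
    }
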
diff --git a/extern/libmv/third_party/ceres/CMakeLists.txt b/extern/libmv/third_party/ceres/CMakeLists.txt
index e6a9e430c47..0ce8e273f4a 100644
--- a/extern/libmv/third_party/ceres/CMakeLists.txt
+++ b/extern/libmv/third_party/ceres/CMakeLists.txt
@@ -38,6 +38,7 @@ set(INC_SYS
)
set(SRC
+ internal/ceres/array_utils.cc
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_jacobian_writer.cc
internal/ceres/block_jacobi_preconditioner.cc
@@ -53,16 +54,19 @@ set(SRC
internal/ceres/conditioned_cost_function.cc
internal/ceres/conjugate_gradients_solver.cc
internal/ceres/corrector.cc
+ internal/ceres/cxsparse.cc
+ internal/ceres/dense_normal_cholesky_solver.cc
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_sparse_matrix.cc
internal/ceres/detect_structure.cc
+ internal/ceres/dogleg_strategy.cc
internal/ceres/evaluator.cc
internal/ceres/file.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/implicit_schur_complement.cc
internal/ceres/iterative_schur_complement_solver.cc
- internal/ceres/levenberg_marquardt.cc
+ internal/ceres/levenberg_marquardt_strategy.cc
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_operator.cc
internal/ceres/linear_solver.cc
@@ -70,6 +74,7 @@ set(SRC
internal/ceres/loss_function.cc
internal/ceres/normal_prior.cc
internal/ceres/partitioned_matrix_view.cc
+ internal/ceres/polynomial_solver.cc
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/program.cc
@@ -88,6 +93,8 @@ set(SRC
internal/ceres/stringprintf.cc
internal/ceres/suitesparse.cc
internal/ceres/triplet_sparse_matrix.cc
+ internal/ceres/trust_region_minimizer.cc
+ internal/ceres/trust_region_strategy.cc
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility.cc
@@ -96,6 +103,8 @@ set(SRC
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
+ include/ceres/crs_matrix.h
+ include/ceres/fpclassify.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
@@ -114,6 +123,7 @@ set(SRC
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/types.h
+ internal/ceres/array_utils.h
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.h
internal/ceres/block_jacobi_preconditioner.h
@@ -131,10 +141,13 @@ set(SRC
internal/ceres/compressed_row_sparse_matrix.h
internal/ceres/conjugate_gradients_solver.h
internal/ceres/corrector.h
+ internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
+ internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.h
+ internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.h
internal/ceres/file.h
internal/ceres/gradient_checking_cost_function.h
@@ -143,7 +156,7 @@ set(SRC
internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.h
- internal/ceres/levenberg_marquardt.h
+ internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.h
internal/ceres/linear_solver.h
@@ -153,6 +166,7 @@ set(SRC
internal/ceres/mutex.h
internal/ceres/parameter_block.h
internal/ceres/partitioned_matrix_view.h
+ internal/ceres/polynomial_solver.h
internal/ceres/problem_impl.h
internal/ceres/program_evaluator.h
internal/ceres/program.h
@@ -168,10 +182,13 @@ set(SRC
internal/ceres/solver_impl.h
internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.h
+ internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.h
internal/ceres/suitesparse.h
internal/ceres/triplet_sparse_matrix.h
+ internal/ceres/trust_region_minimizer.h
+ internal/ceres/trust_region_strategy.h
internal/ceres/visibility_based_preconditioner.h
internal/ceres/visibility.h
)
@@ -203,7 +220,7 @@ if(WIN32)
if(NOT MINGW)
list(APPEND INC
- third_party/msinttypes
+ ../msinttypes
)
endif()
else()
@@ -214,11 +231,30 @@ endif()
add_definitions(
-DCERES_HAVE_PTHREAD
- -D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
- -D"CERES_HASH_NAMESPACE_END=}}"
-DCERES_NO_SUITESPARSE
- -DCERES_DONT_HAVE_PROTOCOL_BUFFERS
+ -DCERES_NO_CXSPARSE
+ -DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
)
+if(MSVC10)
+ add_definitions(
+ -D"CERES_HASH_NAMESPACE_START=namespace std {"
+ -D"CERES_HASH_NAMESPACE_END=}"
+ )
+else()
+ add_definitions(
+ -D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
+ -D"CERES_HASH_NAMESPACE_END=}}"
+ )
+endif()
+
+if(APPLE)
+ if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5")
+ add_definitions(
+ -DCERES_NO_TR1
+ )
+ endif()
+endif()
+
blender_add_lib(extern_ceres "${SRC}" "${INC}" "${INC_SYS}")
diff --git a/extern/libmv/third_party/ceres/ChangeLog b/extern/libmv/third_party/ceres/ChangeLog
index 6e919658f13..8b84328cf98 100644
--- a/extern/libmv/third_party/ceres/ChangeLog
+++ b/extern/libmv/third_party/ceres/ChangeLog
@@ -1,324 +1,524 @@
-commit ca72152362ae1f4b9928c012e74b4d49d094a4ca
-Merge: d297f8d 0a04199
-Author: Keir Mierle <mierle@gmail.com>
-Date: Wed May 9 13:10:59 2012 -0700
+commit 552f9f85bba89f00ca307bc18fbda1dff23bd0e4
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Fri Aug 31 07:27:22 2012 -0700
- Merge branch 'master' into windows
+ Various minor bug fixes to the solver logic.
+
+ 1. CostFunction returning false is handled better.
+ If only the cost is being evaluated, it is possible to
+ use the false value as an infinite value signal/outside
+ a region of validity. This allows a weak form of constraint
+ handling. Useful for example in handling infinities.
+
+ 2. Changed the way how the slop around zero when model_cost
+ is larger than the current cost. Relative instead of absolute
+ tolerances are used. The same logic is propagated how the
+ corresponding clamping of the model_cost is done.
+
+ 3. Fixed a minor indexing bug in nist.cc.
+
+ 4. Some minor logging fixes to nist.cc to make it more
+ compatible with the rest of ceres.
+
+ Together these changes, take the successful solve count from
+ 41/54 to 46/54 and eliminate all NUMERICAL_FAILURE problems.
+
+ Change-Id: If94170ea4731af5b243805c0200963dd31aa94a7
-commit 0a04199ef279cc9ea97f665fed8e7fae717813c3
-Merge: fdeb577 f2571f1
-Author: Keir Mierle <mierle@gmail.com>
-Date: Wed May 9 12:54:56 2012 -0700
+commit 0b776b5cc9634d3b88d623905b96006f7647ce3e
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Aug 30 15:26:17 2012 -0700
- Merge branch 'master' of https://code.google.com/p/ceres-solver
+ Update docs.
+
+ Change-Id: I69d50bcd37aed3bea2190ca614f023e83172901b
-commit fdeb5772cc5eeebca4d776d220d80cc91b6d0f74
-Author: Keir Mierle <mierle@gmail.com>
-Date: Wed May 9 07:38:07 2012 -0700
+commit 2d7176ad7c8fb7238ca8abd6de73415d95877494
+Author: Petter Strandmark <petter.strandmark@gmail.com>
+Date: Thu Aug 30 19:51:24 2012 -0700
- Support varying numbers of residuals in autodiff.
+ max_consecutive_nonmonotonic_steps should be int
- This commit modifies the only function in autodiff that takes a
- templated number of outputs (i.e. residuals) and makes that
- template parameter a normal parameter. With that change, it
- is a trivial matter to support a dynamic number of residuals.
+ Found via Visual Studio warning.
- The API for dynamic residuals is to pass a fake number of
- residuals as the second template argument to
- AutoDiffCostFunction, and to pass the real number of
- parameters as a second constructor argument.
+ Change-Id: Id2cd7de562dfc8cd35df5d5f5220dd2d7350eb2c
-commit da3e0563cc12e08e7b3e0fbf11d9cc8cfe9658aa
+commit 1a89bcc94e88933f89b20427a45bc40cdd23c056
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed May 9 11:57:47 2012 -0700
+Date: Thu Aug 30 15:26:17 2012 -0700
- Typo corrections in the documentation from Bing
+ Better reporting on the NIST problems.
+
+ Change-Id: I7cf774ec3242c0612dbe52fc233c3fc6cff3f031
-commit aa9526d8e8fb34c23d63e3af5bf9239b0c4ea603
+commit ea11704857a1e4a735e096896e4d775d83981499
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue May 8 21:22:09 2012 -0700
+Date: Wed Aug 29 18:18:48 2012 -0700
- Share search paths across various library searches.
- Fix typos in glog search.
- Split the error messages for include and lib.
- Enable building of tests by default.
- Made building on homebrew installations a bit better.
- Remove temporary variables for glog and gflags.
+ Basic harness for testing NIST problems.
+
+ Change-Id: I5baaa24dbf0506ceedf4a9be4ed17c84974d71a1
-commit f2571f186850ed3dd316236ac4be488979df7d30
+commit 98bf14d2b95386c2c4a6c29154637943dae4c36c
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed May 9 11:57:47 2012 -0700
+Date: Thu Aug 30 10:26:44 2012 -0700
- Typo corrections in the documentation from Bing
+ Miscellaneous fixes.
+
+ Change-Id: I521e11f2d20bf24960bbc6b5dab4ec8bb1503d23
-commit 8f7f11ff7d07737435428a2620c52419cf99f98e
-Merge: e6c17c4 eaccbb3
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed May 9 11:34:15 2012 -0700
+commit 1e3cbd9a4442cdd8fda43a7fb452f19dac8c74af
+Author: Petter Strandmark <strandmark@google.com>
+Date: Wed Aug 29 09:39:56 2012 -0700
- Merge branch 'master' of https://code.google.com/p/ceres-solver
+ Caching the symbolic Cholesky factorization when using CXSparse
+
+ Average factorization times for bundle adjustment test problem:
+ SuiteSparse: 0.2794 s.
+ CXSparse: 0.4039 s.
+ CXSparse cached: 0.2399 s.
+
+ CXSparse will still be slower, though, because it has to compute
+ the transpose and J^T * J.
+
+ Change-Id: If9cdaa3dd520bee84b56e5fd4953b56a93db6bde
-commit e6c17c4c9d9307218f6f739cea39bc2d87733d4d
+commit 8b64140878ccd1e183d3715c38942a81fdecefde
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue May 8 21:22:09 2012 -0700
+Date: Wed Aug 29 05:41:22 2012 -0700
- Share search paths across various library searches.
- Fix typos in glog search.
- Split the error messages for include and lib.
- Enable building of tests by default.
- Made building on homebrew installations a bit better.
- Remove temporary variables for glog and gflags.
+ Documentation update
+
+ Change-Id: I271a0422e7f6f42bcfd1dc6b5dc10c7a18f6a179
-commit eaccbb345614c0d24c5e21fa931f470cfda874df
-Author: Keir Mierle <mierle@gmail.com>
-Date: Wed May 9 05:31:29 2012 -0700
+commit a5353acd85a9fd19370b3d74035d87b0f0bac230
+Author: Petter Strandmark <petter.strandmark@gmail.com>
+Date: Tue Aug 28 18:16:41 2012 -0700
- Remove unused template parameter from VariadicEvaluate.
+ Adding gflags include to test_util.cc
+
+ test_util seems to need gflags.
+
+ Change-Id: I0c4757960f8ac69ad599c138aea58e3c88a4ea28
-commit 82f4b88c34b0b2cf85064e5fc20e374e978b2e3b
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 21:05:28 2012 -0700
+commit 87ca1b2ba28ec512752bbcf5fc994ce1434eb765
+Author: Petter Strandmark <petter.strandmark@gmail.com>
+Date: Tue Aug 28 18:05:20 2012 -0700
- Extend support writing linear least squares problems to disk.
+ Changing random.h to use cstdlib for Windows compability.
- 1. Make the mechanism for writing problems to disk, generic and
- controllable using an enum DumpType visible in the API.
+ As discussed with Sameer today.
- 2. Instead of single file containing protocol buffers, now matrices can
- be written in a matlab/octave friendly format. This is now the default.
+ Change-Id: If3d0284830c6591c71cc77b8400cafb45c0da61f
+
+commit aeb00a07323808a0a1816e733ad18a87d5109ea3
+Author: Petter Strandmark <strandmark@google.com>
+Date: Mon Aug 27 22:22:57 2012 -0700
+
+ Removing gomp for Visual Studio
- 3. The support for writing problems to disk is moved into
- linear_least_squares_problem.cc/h
+ Linking currently fails in Visual Studio due to a missing library
+ "gomp.lib". This is not needed in Visual Studio. OpenMP works
+ without it.
+
+ Change-Id: I39e204a8dd4f1b7425df7d4b222d86a8bb961432
+
+commit 6f362464ba99b800494d2f15c27768a342ddaa68
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Tue Aug 28 01:03:38 2012 +0200
+
+ Add some tests for DoglegStrategy.
- 4. SparseMatrix now has a ToTextFile virtual method which is
- implemented by each of its subclasses to write a (i,j,s) triplets.
+ Not necessarily a complete set.
- 5. Minor changes to simple_bundle_adjuster to enable logging at startup.
+ Change-Id: I14eb3a38c6fe976c8212f3934655411b6d1e0aa4
-commit d297f8d3d3f5025c24752f0f4c1ec2469a769f99
-Merge: 7e74d81 f8bd7fa
-Author: Keir Mierle <mierle@gmail.com>
-Date: Tue May 8 05:39:56 2012 -0700
+commit 122cf836a6dc9726489ce2fbecc6143bddc1caaf
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Fri Aug 24 16:28:27 2012 -0700
- Merge branch 'master' into windows
+ Documentation update.
+
+ Change-Id: I0a3c5ae4bc981a8f5bdd5a8905f923dc5f09a024
-commit f8bd7fa9aa9dbf64b6165606630287cf8cf21194
+commit 69081719f73da8de2935774a42d237837a91952a
Author: Keir Mierle <mierle@gmail.com>
-Date: Tue May 8 05:39:32 2012 -0700
+Date: Mon Aug 27 13:28:56 2012 -0700
- Small tweaks to the block jacobi preconditioner.
+ Remove unnecessary overload for hash<>
+
+ The overload for pointers in hash tables was applied in normal
+ usage of schur_ordering.cc. However, the tests did not include the
+ overload since they only included collections_port.h. As a result,
+ the routines in schur_ordering.cc were using a different hash
+ function than that inside the tests.
+
+ The fix is to remove the specialization. If this breaks one of the
+ compiler configurations, we will find a workaround at that time.
+
+ Change-Id: Idbf60415d5e2aec0c865b514ad0c577d21b91405
-commit 7e74d81ad57a159f14110eb5348b3bc7990b8bd4
-Merge: ecd7c8d e2a6cdc
-Author: Keir Mierle <mierle@gmail.com>
-Date: Mon May 7 07:02:49 2012 -0700
+commit 1762420b6ed76b1c4d30b913b2cac1927b666534
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Wed Aug 22 10:01:31 2012 -0700
- Merge branch 'master' into windows
+ Update changelog.
+
+ Change-Id: Idf5af69d5a9dbe35f58e30a8afcbfcd29bb7ebfe
-commit e2a6cdc0816af9d0c77933f5017f137da3d52a35
+commit 976ab7aca908309b8282cb40bc080ca859136854
Author: Keir Mierle <mierle@gmail.com>
-Date: Mon May 7 06:39:56 2012 -0700
+Date: Thu Aug 23 18:21:36 2012 -0700
- Address some of the comments on CGNR patch
+ Remove Google-era vestigial unit test.
- - Rename BlockDiagonalPreconditioner to BlockJacobiPreconditioner
- - Include the diagonal in the block jacobi preconditioner.
- - Better flag help for eta.
- - Enable test for CGNR
- - Rename CONJUGATE_GRADIENTS to CGNR.
- - etc.
+ Change-Id: Ia7a295a5c759a17c1675a3055d287d3e40e9e0fe
-commit 1b95dc580aa5d89be021c0915e26df83f18013bb
-Merge: 211812a 7646039
+commit 6ad6257de0e2152ac5e77dc003758de45187d6ea
Author: Keir Mierle <mierle@gmail.com>
-Date: Mon May 7 04:34:10 2012 -0700
+Date: Wed Aug 22 11:10:31 2012 -0700
- Merge branch 'master' of https://code.google.com/p/ceres-solver
+ Add a workaround for an Android NDK compiler bug.
+
+ On certain NDK build configurations, one of the innermost
+ parts of the Schur eliminator would get compiled
+ incorrectly. The compiler changed a -= to a +=.
+
+ The normal Ceres unit tests caught the problem; however,
+ since it is not possible to build the tests with the NDK
+ (only with the standalone toolchain) this was difficult to
+ track down. Finding the issue involved pasting the schur
+ eliminator unit test inside of solver_impl.cc and other such
+ hacks.
+
+ Change-Id: Ie91bb545d74fe39f0c8cbd1a6eb69ee4d8b25fb2
-commit 211812a57360d2011cbcfd115cd55e0eb73600db
-Author: Keir Mierle <mierle@gmail.com>
-Date: Mon May 7 04:33:50 2012 -0700
+commit aecb2dc92b4aa7f3bf77a1ac918e62953602392b
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Wed Aug 22 10:08:17 2012 -0700
- Better error handling in bundle_adjuster.cc
+ Fix relative path bug in bibtex call.
+
+ Change-Id: I0d31786564320a6831259bcdf4c75a6b665c43ad
-commit 7646039ad9672b267495f5b31925473ad3022ac8
+commit 1e2892009e591804df6286caebd5c960e7e3b099
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 22:02:19 2012 -0700
+Date: Tue Aug 21 18:00:54 2012 -0700
- Kashif's corrections to the docs
+ Update Summary::FullReport to report dogleg type.
+
+ Change-Id: I0b4be8d7486c1c4b36b299693b3fe8b0d3426537
-commit 0d2d34148d10c5c7e924b3ca82ad2b237573ef64
+commit 295ade1122a86b83e1ea605d5ca394f315874717
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 21:16:03 2012 -0700
+Date: Wed Aug 22 06:51:22 2012 -0700
- glog minimum version requirements
+ Fix Eigen3 Row/Column Major storage issue.
- Building Ceres requires version 0.3.1 or better of glog.
- Fedora 16 ships with a busted version 0.3.
+ Eigen3 does not allow column vectors to be stored in row-major
+ format. NumericDiffCostFunction by default stores its Jacobian
+ matrices in row-major format. This works fine if the residual
+ contains more than one variable. But if the residual block
+ depends on one variable and has more than one residuals, the
+ resulting Jacobian matrix is a column matrix in row-major format
+ resulting in a compile time error.
- issue 15 contains the gory details.
+ The fix is to check the template parameters and switch to column-major
+ storage as needed.
- Added a note to the build documentation to this effect.
+ Thanks to Lena Gieseke for reporting this.
+
+ Change-Id: Icc51c5b38e1f3609e0e1ecb3c4e4a02aecd72c3b
-commit 39efc5ec4b64b8f5a2c5a3dbacdbc45421221547
-Author: Keir Mierle <mierle@gmail.com>
-Date: Sun May 6 16:09:52 2012 -0700
+commit 9ad27e8e9fb1bbd2054e2f6ae37623e01428f1c0
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Tue Aug 21 09:56:30 2012 +0200
- Fix tests broken by the CGNR change.
+ Add one uninstall target to remove all installed files
+
+ Change-Id: Ifcf89a6c27b25f28403d95a50e29c093a525298f
-commit 3faa08b7f7c4ac73661c6a15a6824c12080dfcb1
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 16:08:22 2012 -0700
+commit 0c3a748ee49e04fe334f8f5a433649d18003d550
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Tue Aug 21 14:44:59 2012 +0200
- Formatting fixed based on Keir's comments and extended the tests
+ Allow equal lower and upper bound for diagonal scaling.
+
+ This way, setting the lower and upper bound both to 1.0, one can disable
+ the automatic trust region scaling.
+
+ Change-Id: Ifa317a6911b813a89c1cf7fdfde25af603705319
-commit 4f21c68409bc478c431a9b6aedf9e5cfdf11d2f3
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 15:33:47 2012 -0700
+commit 3d644b76adefac6475b91dc53c3ae5e01c4f4d66
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Thu Aug 16 17:33:21 2012 +0200
- Fix the struct weak ordering used by independent set ordering, tests for it
+ Install headers, libraries and pdf
+
+ Headers are installed in ${CMAKE_INSTALL_PREFIX}/include/ceres
+ Libraries are installed in ${CMAKE_INSTALL_PREFIX}/lib
+ pdf is installed in ${CMAKE_INSTALL_PREFIX}/share/ceres/docs
+
+ Change-Id: Ic175f2c2f5fa86820a1e8c64c2ed171f4a302a68
-commit 887b156b917ccd4c172484452b059d33ea45f4f0
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Sun May 6 15:14:47 2012 -0700
+commit d2fb5adea4d8c2aeb43c4289c6976798a54d3cf1
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Fri Aug 17 10:11:02 2012 +0200
- fix he degree ordering routine
+ Configure gerrit hook at CMake time
+
+ If the source directory is a clone, at CMake time the commit-msg hook gets
+ downloaded and installed in the right location.
+
+ Change-Id: I5fee17d050ca22d8b92a49fdcc2a1cd6659f209b
-commit ecd7c8df2af19404dc394b36bbe96e9db3bce840
-Author: Keir Mierle <mierle@gmail.com>
-Date: Sun May 6 00:09:41 2012 -0700
+commit 73166098fc4b1072adc30321c666188a3909c43c
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Mon Aug 20 15:40:41 2012 +0200
- First step towards windows compatibilty
+ Add one CMake option to build the examples.
+
+ Currently the examples are always built. For external projects, it is useful
+ not to compile the examples.
- This adds some small changes to Ceres to make it mostly
- compile on Windows. There are still issues with the
- hash map use in schur_ordering.cc but I will fix those
- shortly.
+ Change-Id: I41d3bde19c7e742818e60f78222d39c43992ca8b
-commit f7898fba1b92f0e996571b5bfa22a37f5e3644de
+commit 86d4f1ba41ef14eb1b6b61a7936af83387b35eb2
Author: Keir Mierle <mierle@gmail.com>
-Date: Sat May 5 20:55:08 2012 -0700
+Date: Mon Aug 20 11:52:04 2012 -0700
- Add a general sparse iterative solver: CGNR
+ Add missing return statement.
- This adds a new LinearOperator which implements symmetric
- products of a matrix, and a new CGNR solver to leverage
- CG to directly solve the normal equations. This also
- includes a block diagonal preconditioner. In experiments
- on problem-16, the non-preconditioned version is about
- 1/5 the speed of SPARSE_SCHUR, and the preconditioned
- version using block cholesky is about 20% slower than
- SPARSE_SCHUR.
+ Change-Id: I5eaf718318e27040e3c97e32ee46cf0a11176a37
-commit 0a359d6198d257776a8831c3eb98f64ee91cf836
+commit 51eb229da34187a4e8ce73ed9cc0e731998bb2be
Author: Keir Mierle <mierle@gmail.com>
-Date: Sat May 5 20:33:46 2012 -0700
+Date: Mon Aug 20 11:46:12 2012 -0700
- Comment formatting.
+ Add Program::ToString() to aid debugging.
+
+ Change-Id: I0ab37ed2fe0947ca87a152919d4e7dc9b56dedc6
-commit db4ec9312bb2f1ca7b2337812f6bad6cdd75b227
+commit bcc7100635e2047dc2b77df19a4ded8a6ab4d4b9
Author: Keir Mierle <mierle@gmail.com>
-Date: Sat May 5 20:33:16 2012 -0700
+Date: Mon Aug 20 11:45:04 2012 -0700
- Comment formatting
+ Ignore minted.sty.
+
+ Change-Id: I2467a6f801812b9007b51bf14b00757f026e4322
-commit f10163aaf3e57f52551bcd60bbdae873890a49dd
+commit 9705a736dd3d6fbead0d8a6ff77102c69bbcdc08
Author: Keir Mierle <mierle@gmail.com>
-Date: Fri May 4 21:33:53 2012 -0700
+Date: Mon Aug 20 11:24:05 2012 -0700
- Warn about disabled schur specializations.
+ Add ParameterBlock::ToString() to aid debugging.
- This commit brought to you from 30,000ft.
+ Change-Id: Id3f5cb27b855c536dd65a986f345bd8eb2799dfa
-commit ad7b2b4aaf3ccc51f2b854febd53a9df54686cfe
-Author: Keir Mierle <mierle@gmail.com>
-Date: Fri May 4 20:15:28 2012 -0700
+commit 0c714a70e6123ceb68e5cfcd3cfbee0d09deb1db
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Aug 20 11:18:16 2012 -0700
+
+ Fix blanks before private in loss_function.h
+
+ Change-Id: I068bed6431bc7c9b7958af391655df61499000b2
+
+commit 51cf7cbe3bac45c6807c2703a2fc3175d76a1b47
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Mon Aug 20 20:10:20 2012 +0200
+
+ Add the two-dimensional subspace search to DoglegStrategy
+
+ Change-Id: I5163744c100cdf07dd93343d0734ffe0e80364f3
- Add vim swapfiles to .gitignore
+commit ad1f7b772e559a911ac3a3b078b0aee1836fe785
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Mon Aug 20 11:10:34 2012 -0700
+
+ Add ArcTanLoss, TolerantLoss and ComposedLossFunction.
+
+ Based on work by James Roseborough.
+
+ Change-Id: Idc4e0b099028f67702bfc7fe3e43dbd96b6f9256
-commit 6447219826bf6e47b0c99d9ff0eaf5e2ba573d79
+commit 05292bf8fc5208b86b4a13544615b584f6efa936
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 21:53:07 2012 -0700
+Date: Mon Aug 20 07:40:45 2012 -0700
+
+ Add a TrustRegionStrategy::Summary object.
+
+ Change-Id: I7caee35a3408ee4a0ec16ba407410d822929340d
- 1. Changes the tutorial to refer to BriefReport.
- 2. Some of the enums have commas at the end.
- 3. Fix a bug in the default value of circle_fit.cc in the examples.
+commit b12b906c4d21c3949f0dce62c4c0d083c8edecf1
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Wed Aug 15 16:27:38 2012 +0200
-commit 30c5f93c7f88dec49f76168663372772e06f17f5
+ Add one option to generate the PDF from CMake at build time
+
+ Make sure pygmentize is installed
+
+ Change-Id: I068ba45c33a8e96acc906a464b12d10d58b3e231
+
+commit b9f15a59361c609ffc4a328aea9be3d265b5da81
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 10:44:43 2012 -0700
+Date: Sat Aug 18 13:06:19 2012 -0700
+
+ Add a dense Cholesky factorization based linear solver.
+
+ For problems with a small number of variables, but a large
+ number of residuals, it is sometimes beneficial to use the
+ Cholesky factorization on the normal equations, instead of
+ the dense QR factorization of the Jacobian, even though QR
+ is the numerically better-behaved choice.
+
+ Change-Id: I3506b006195754018deec964e6e190b7e8c9ac8f
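The trade-off mentioned above, written out in standard least-squares notation (illustration only, not part of the commit). For a Jacobian J of size m x n with residual vector f and m much larger than n, both solvers address the same subproblem:

    \[
      \min_{\Delta x} \lVert J\,\Delta x + f \rVert^2
      \;\Longleftrightarrow\;
      J^{\top} J\,\Delta x = -J^{\top} f .
    \]

Cholesky works on the small n x n matrix J^T J, which is cheap once m >> n, but it squares the condition number, \(\kappa(J^{\top}J) = \kappa(J)^2\); that is why the dense QR factorization of J remains the numerically safer default.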
- Rework the glog and gtest path checking to be consistent with the rest of the file and disable the dashboard support enabled by the earlier ctesting related patch.
+commit b3fa009435acf476cd373052e62988f6437970b1
+Author: Arnaud Gelas <arnaudgelas@gmail.com>
+Date: Fri Aug 17 10:31:41 2012 +0200
-commit f10b033eb4aca77919987bc551d16d8a88b10110
-Merge: cc38774 e0a52a9
+ Set CMAKE_*_OUTPUT_DIRECTORY
+
+ Gather
+ * all executables in ${CMAKE_BINARY_DIR}/bin
+ * all libraries (static and dynamic) in ${CMAKE_BINARY_DIR}/lib
+
+ Change-Id: Ibc2fa1adfb6f0aea65d66d570259b79546bf3b07
+
+commit 1b8a4d5d11671ed83cf6077e363dd95333f08ef8
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 08:45:20 2012 -0700
+Date: Fri Aug 17 16:49:11 2012 -0700
+
+ Fix a minor bug in detect_structure logging.
+
+ Change-Id: I117f7745e4c67595b3ff9244cde82b5b5b34ee4b
- Merge branch 'ctest'
+commit 31c1e784ab2cb9294c6e05414cf06aae2b3766de
+Author: Keir Mierle <mierle@gmail.com>
+Date: Fri Aug 17 16:16:32 2012 -0700
+
+ Minor cleanups.
+
+ Change-Id: Ida4866997deeaa1bc2cebd6b69313a05ac82e457
-commit e0a52a993394e73bc7f7db8d520728926feab83e
+commit e83f7879a8b21c6976e116958caf35bcdcf41cb0
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 08:43:34 2012 -0700
+Date: Fri Aug 17 15:34:42 2012 -0700
- Arnaus Gelas' patch to add better path searching for gflags and glog
+ Fix SuiteSparse3 UFConfig.h detection really.
+
+ Change-Id: Id187102e755b7d778dff4363f22f9a4697ed12dd
-commit a9b8e815e1c026599734510399b10f4cf014c9cd
+commit 96f25dc57658d296ee6b6633818b4f1e51d7d587
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 08:41:52 2012 -0700
+Date: Fri Aug 17 15:34:42 2012 -0700
+
+ Fix SuiteSparse3 UFConfig.h detection.
+
+ Change-Id: Ia59aefdb0ad7f713f76ed79692f2db4fa2821e5b
+
+commit c497bd6cd9aa944f518aa491d3bc645851ff9594
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Fri Aug 17 14:40:13 2012 +0200
- Arnaus Gelas' patch to add .gitignore
+ Add UFconfig and/or SuiteSparse_config test to CMakeLists.txt
+
+ SuiteSparse 4 requires linking to libsuitesparseconfig.a.
+ Both SuiteSparse 3 and SuiteSparse 4 require an additional header
+ (either UFconfig.h or SuiteSparse_config.h) that is not found if it is
+ in a separate path. Therefore, add explicit checks.
+
+ Change-Id: I699902b5db4f1b7f17134b5a54f9aa681445e294
-commit a0cefc3347c32b2065053bbaff4f34d11529d931
+commit 383c04f4236d92801c7c674892814362dedf7ad6
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Thu May 3 08:38:33 2012 -0700
+Date: Fri Aug 17 10:14:04 2012 -0700
- Arnaus Gelas' patch to move to Ctest
+ Fix QuaternionToAngleAxis to ensure rotations are between -pi and pi.
+
+ Thanks to Guoxuan Zhang for reporting this.
+
+ Change-Id: I2831ca3a04d5dc6467849c290461adbe23faaea3
-commit cc38774d74e287704915282425fbd16818a72ec3
-Author: Keir Mierle <mierle@gmail.com>
-Date: Thu May 3 01:27:50 2012 -0700
+commit dd2b17d7dd9750801ba4720bdece2062e59b7ae3
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Aug 16 19:34:57 2012 -0700
- Clarify ProgramEvaluator comments.
+ CERES_DONT_HAVE_PROTOCOL_BUFFERS -> CERES_NO_PROTOCOL_BUFFERS.
+
+ Change-Id: I6c9f50e4c006faf4e75a8f417455db18357f3187
-commit 017c9530df557863f78212fb5ccd02814baa9fa8
+commit 8b4cb7aa2c74a0da62c638b2023566aa242af995
Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Wed May 2 08:21:59 2012 -0700
+Date: Thu Aug 16 19:26:55 2012 -0700
- Mac OS X build instructions are much simpler, as homebrew takes care of gflags when glog is brought in. Also CMAKE does not need any flags to do the default thing
+ Fix sparse linear algebra library logging in Summary::FullReport.
+
+ Change-Id: Id2c902dc86c00954fde7749c7b4a67dd94215a31
-commit 92d5ab5f8ae6fe355c30b606a5f230415ee0494b
-Author: Keir Mierle <mierle@gmail.com>
-Date: Tue May 1 18:33:08 2012 -0700
+commit 47d26bcd3b38b5ff53b34768c33b499d47b26bd0
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Thu Aug 16 00:23:38 2012 +0200
- Link BLAS explicitly on non-Mac platforms
+ Do not implicitly negate the step in the TrustRegionMinimizer.
- Fixes issue #3.
+ In the TrustRegionMinimizer, the step is currently implicitly negated.
+ This is done so that the linearized residual is |r - J*step|^2, which
+ corresponds to J*step = r, so neither J nor r have to be modified.
+ However, it leads to the rather unintuitive situation that the strategy
+ returns a step in positive gradient direction, which you would expect to
+ increase the function value. One way to address this is to rename the "step" parameter in
+ the strategy to "negative_step" and document it.
+ This patch instead moves the negation inside the strategy, just around
+ the linear solver call, so that it is done in a local context and easier
+ to document.
+
+ Change-Id: Idb258149a01f61c64e22128ea221c5a30cd89c89
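Restating the sign convention from the commit message above as equations (editorial sketch using its notation, where r is the current residual and J the Jacobian). Before the change the strategy solved

    \[
      \min_{\text{step}} \lVert r - J\,\text{step} \rVert^2
      \;\Longrightarrow\; J\,\text{step} \approx r,
      \qquad x \leftarrow x - \text{step},
    \]

and after it the negation sits inside the strategy, around the linear solver call, so the returned quantity is a genuine increment:

    \[
      \min_{\Delta x} \lVert J\,\Delta x + r \rVert^2
      \;\Longrightarrow\; J\,\Delta x \approx -r,
      \qquad x \leftarrow x + \Delta x .
    \]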
-commit df3e54eb4a6b001b7f0560a2da73a5bd7f18615e
-Author: Keir Mierle <mierle@gmail.com>
-Date: Tue May 1 18:22:51 2012 -0700
+commit 51da590c8457e6664f76fe9813425a0c71351497
+Author: Markus Moll <markus.moll@esat.kuleuven.be>
+Date: Fri Aug 17 12:56:09 2012 +0200
+
+ Remove tmp file
+
+ Change-Id: I07496fafae7b0c5c12cc26ae336e0db3b5592735
+
+commit 7006a1f2b1701b8d89b8d1525fc0101943802221
+Author: Sameer Agarwal <sameeragarwal@google.com>
+Date: Thu Aug 16 18:04:22 2012 -0700
- Fix link order of CHOLMOD
+ Correct example code in Powell's function example.
+
+ Thanks to Petter Strandmark for pointing this out.
- This was working by accident due to dynamic linking. Fixes issue #2.
+ Change-Id: I967632235dccdb481396e94904bb911c9a1efe1e
-commit f477a3835329e2b48eb20c34c631a480b0f0d5bf
+commit 57a44b27bc6fc95b4e70fdc25c25c9925a2072a0
Author: Keir Mierle <mierle@gmail.com>
-Date: Tue May 1 18:10:48 2012 -0700
+Date: Thu Aug 16 17:04:50 2012 -0700
- Fix Eigen search paths
+ Remove unnecessary flags in NDK build.
- Fixes issue #1 on http://code.google.com/p/ceres-solver.
+ Change-Id: Ib5b4d0b7f2d898671252734978c789b8171d96a8
-commit 17fbc8ebb894c1d22bb3b0b02ea1394b580120f8
-Author: Sameer Agarwal <sameeragarwal@google.com>
-Date: Tue May 1 00:21:19 2012 -0700
+commit f21bee247251a8b2e836c215a84c4668c31d75cd
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu Aug 16 16:27:10 2012 -0700
- Minor changes to the documentation. Formatting, and typos.
+ Fix for fpclassify.h NDK porting work.
+
+ Change-Id: I69df1b4caf2941ed96a53e35e43ec54073f84f59
-commit 8ebb0730388045570f22b89fe8672c860cd2ad1b
+commit 8ceb02cb75b66602de44a35e413225386cb21c27
Author: Keir Mierle <mierle@gmail.com>
-Date: Mon Apr 30 23:09:08 2012 -0700
+Date: Thu Aug 16 14:23:47 2012 -0700
- Initial commit of Ceres Solver.
+ Add Android NDK build files.
+
+ This adds an Android.mk build that builds a Ceres static library
+ suitable for embedding in larger Android applications. This is
+ useful when needing to build Ceres without GPL'd components, since
+ the standalone toolchain (needed for the CMake Android build) does
+ not work with STLPort.
+
+ Change-Id: I8d857237f6f82658741017d161b2e31d9a20e5a7
diff --git a/extern/libmv/third_party/ceres/SConscript b/extern/libmv/third_party/ceres/SConscript
index c629fa00176..6d0d2cd5c40 100644
--- a/extern/libmv/third_party/ceres/SConscript
+++ b/extern/libmv/third_party/ceres/SConscript
@@ -20,9 +20,13 @@ defs.append('CERES_HAVE_PTHREAD')
defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {')
defs.append('CERES_HASH_NAMESPACE_END=}}')
defs.append('CERES_NO_SUITESPARSE')
-defs.append('CERES_DONT_HAVE_PROTOCOL_BUFFERS')
+defs.append('CERES_NO_CXSPARSE')
+defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
+if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
+ defs.append('CERES_NO_TR1')
+
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
# work around broken hashtable in 10.5 SDK
diff --git a/extern/libmv/third_party/ceres/bundle.sh b/extern/libmv/third_party/ceres/bundle.sh
index 99aaadd8d87..902fce5f398 100755
--- a/extern/libmv/third_party/ceres/bundle.sh
+++ b/extern/libmv/third_party/ceres/bundle.sh
@@ -1,6 +1,5 @@
#!/bin/sh
-if false; then
if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
echo Proceeding as requested by command line ...
else
@@ -8,16 +7,22 @@ else
exit 1
fi
-repo="https://code.google.com/p/ceres-solver/"
-branch="windows"
+repo="https://ceres-solver.googlesource.com/ceres-solver"
+branch="master"
+tag="1.3.0"
tmp=`mktemp -d`
+checkout="$tmp/ceres"
-GIT="git --git-dir $tmp/ceres/.git --work-tree $tmp/ceres"
+GIT="git --git-dir $tmp/ceres/.git --work-tree $checkout"
-git clone $repo $tmp/ceres
+git clone $repo $checkout
if [ $branch != "master" ]; then
$GIT checkout -t remotes/origin/$branch
+else
+ if [ "x$tag" != "x" ]; then
+ $GIT checkout $tag
+ fi
fi
$GIT log -n 50 > ChangeLog
@@ -37,8 +42,6 @@ done
rm -rf $tmp
-fi
-
sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t/' | grep -v -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d`
generated_sources=`find ./include ./internal -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//#\t\t/' | grep -E 'schur_eliminator_[0-9]_[0-9]_[0-9d].cc' | sort -d`
headers=`find ./include ./internal -type f -iname '*.h' | sed -r 's/^\.\//\t/' | sort -d`
@@ -142,7 +145,7 @@ if(WIN32)
if(NOT MINGW)
list(APPEND INC
- third_party/msinttypes
+ ../msinttypes
)
endif()
else()
@@ -153,13 +156,32 @@ endif()
add_definitions(
-DCERES_HAVE_PTHREAD
- -D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
- -D"CERES_HASH_NAMESPACE_END=}}"
-DCERES_NO_SUITESPARSE
- -DCERES_DONT_HAVE_PROTOCOL_BUFFERS
+ -DCERES_NO_CXSPARSE
+ -DCERES_NO_PROTOCOL_BUFFERS
-DCERES_RESTRICT_SCHUR_SPECIALIZATION
)
+if(MSVC10)
+ add_definitions(
+ -D"CERES_HASH_NAMESPACE_START=namespace std {"
+ -D"CERES_HASH_NAMESPACE_END=}"
+ )
+else()
+ add_definitions(
+ -D"CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {"
+ -D"CERES_HASH_NAMESPACE_END=}}"
+ )
+endif()
+
+if(APPLE)
+ if(CMAKE_OSX_DEPLOYMENT_TARGET STREQUAL "10.5")
+ add_definitions(
+ -DCERES_NO_TR1
+ )
+ endif()
+endif()
+
blender_add_lib(extern_ceres "\${SRC}" "\${INC}" "\${INC_SYS}")
EOF
@@ -186,11 +208,20 @@ defs.append('CERES_HAVE_PTHREAD')
defs.append('CERES_HASH_NAMESPACE_START=namespace std { namespace tr1 {')
defs.append('CERES_HASH_NAMESPACE_END=}}')
defs.append('CERES_NO_SUITESPARSE')
-defs.append('CERES_DONT_HAVE_PROTOCOL_BUFFERS')
+defs.append('CERES_NO_CXSPARSE')
+defs.append('CERES_NO_PROTOCOL_BUFFERS')
defs.append('CERES_RESTRICT_SCHUR_SPECIALIZATION')
+if 'Mac OS X 10.5' in env['MACOSX_SDK_CHECK']:
+ defs.append('CERES_NO_TR1')
+
incs = '. ../../ ../../../Eigen3 ./include ./internal ../gflags'
+# work around broken hashtable in 10.5 SDK
+if env['OURPLATFORM'] == 'darwin' and env['WITH_BF_BOOST']:
+ incs += ' ' + env['BF_BOOST_INC']
+ defs.append('CERES_HASH_BOOST')
+
if env['OURPLATFORM'] in ('win32-vc', 'win32-mingw', 'linuxcross', 'win64-vc', 'win64-mingw'):
if env['OURPLATFORM'] in ('win32-vc', 'win64-vc'):
incs += ' ../msinttypes'
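The CERES_HASH_NAMESPACE_START/END defines generated above exist so that hash specializations can be opened in the right namespace per toolchain (std:: for MSVC10, std::tr1:: elsewhere). A hypothetical illustration of how such a define pair is consumed (the key type and specialization below are made up; the real specializations live inside Ceres):

    // Expands to "namespace std {" under MSVC10, or to
    // "namespace std { namespace tr1 {" on the other toolchains.
    CERES_HASH_NAMESPACE_START

    template<>
    struct hash<MyKey> {                        // MyKey is a placeholder type
      size_t operator()(const MyKey& k) const;  // hash function declared here
    };

    // Closes whichever namespace(s) were opened above.
    CERES_HASH_NAMESPACE_END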
diff --git a/extern/libmv/third_party/ceres/files.txt b/extern/libmv/third_party/ceres/files.txt
index e9d7f585260..55083572977 100644
--- a/extern/libmv/third_party/ceres/files.txt
+++ b/extern/libmv/third_party/ceres/files.txt
@@ -2,6 +2,8 @@ include/ceres/autodiff_cost_function.h
include/ceres/ceres.h
include/ceres/conditioned_cost_function.h
include/ceres/cost_function.h
+include/ceres/crs_matrix.h
+include/ceres/fpclassify.h
include/ceres/internal/autodiff.h
include/ceres/internal/eigen.h
include/ceres/internal/fixed_array.h
@@ -20,6 +22,8 @@ include/ceres/rotation.h
include/ceres/sized_cost_function.h
include/ceres/solver.h
include/ceres/types.h
+internal/ceres/array_utils.cc
+internal/ceres/array_utils.h
internal/ceres/block_evaluate_preparer.cc
internal/ceres/block_evaluate_preparer.h
internal/ceres/block_jacobian_writer.cc
@@ -52,13 +56,19 @@ internal/ceres/conjugate_gradients_solver.cc
internal/ceres/conjugate_gradients_solver.h
internal/ceres/corrector.cc
internal/ceres/corrector.h
+internal/ceres/cxsparse.cc
+internal/ceres/cxsparse.h
internal/ceres/dense_jacobian_writer.h
+internal/ceres/dense_normal_cholesky_solver.cc
+internal/ceres/dense_normal_cholesky_solver.h
internal/ceres/dense_qr_solver.cc
internal/ceres/dense_qr_solver.h
internal/ceres/dense_sparse_matrix.cc
internal/ceres/dense_sparse_matrix.h
internal/ceres/detect_structure.cc
internal/ceres/detect_structure.h
+internal/ceres/dogleg_strategy.cc
+internal/ceres/dogleg_strategy.h
internal/ceres/evaluator.cc
internal/ceres/evaluator.h
internal/ceres/file.cc
@@ -79,6 +89,7 @@ internal/ceres/generated/schur_eliminator_4_4_3.cc
internal/ceres/generated/schur_eliminator_4_4_4.cc
internal/ceres/generated/schur_eliminator_4_4_d.cc
internal/ceres/generated/schur_eliminator_d_d_d.cc
+internal/ceres/generate_eliminator_specialization.py
internal/ceres/gradient_checking_cost_function.cc
internal/ceres/gradient_checking_cost_function.h
internal/ceres/graph_algorithms.h
@@ -88,8 +99,8 @@ internal/ceres/implicit_schur_complement.h
internal/ceres/integral_types.h
internal/ceres/iterative_schur_complement_solver.cc
internal/ceres/iterative_schur_complement_solver.h
-internal/ceres/levenberg_marquardt.cc
-internal/ceres/levenberg_marquardt.h
+internal/ceres/levenberg_marquardt_strategy.cc
+internal/ceres/levenberg_marquardt_strategy.h
internal/ceres/linear_least_squares_problems.cc
internal/ceres/linear_least_squares_problems.h
internal/ceres/linear_operator.cc
@@ -106,6 +117,8 @@ internal/ceres/normal_prior.cc
internal/ceres/parameter_block.h
internal/ceres/partitioned_matrix_view.cc
internal/ceres/partitioned_matrix_view.h
+internal/ceres/polynomial_solver.cc
+internal/ceres/polynomial_solver.h
internal/ceres/problem.cc
internal/ceres/problem_impl.cc
internal/ceres/problem_impl.h
@@ -136,6 +149,7 @@ internal/ceres/sparse_matrix.h
internal/ceres/sparse_normal_cholesky_solver.cc
internal/ceres/sparse_normal_cholesky_solver.h
internal/ceres/split.cc
+internal/ceres/split.h
internal/ceres/stl_util.h
internal/ceres/stringprintf.cc
internal/ceres/stringprintf.h
@@ -143,6 +157,10 @@ internal/ceres/suitesparse.cc
internal/ceres/suitesparse.h
internal/ceres/triplet_sparse_matrix.cc
internal/ceres/triplet_sparse_matrix.h
+internal/ceres/trust_region_minimizer.cc
+internal/ceres/trust_region_minimizer.h
+internal/ceres/trust_region_strategy.cc
+internal/ceres/trust_region_strategy.h
internal/ceres/types.cc
internal/ceres/visibility_based_preconditioner.cc
internal/ceres/visibility_based_preconditioner.h
diff --git a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h
index e86d6993864..da9ee2c7993 100644
--- a/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h
+++ b/extern/libmv/third_party/ceres/include/ceres/autodiff_cost_function.h
@@ -163,7 +163,7 @@ class AutoDiffCostFunction :
explicit AutoDiffCostFunction(CostFunctor* functor)
: functor_(functor) {
CHECK_NE(M, DYNAMIC) << "Can't run the fixed-size constructor if the "
- << "number of residuals is set to ceres::DYNAMIC.";
+ << "number of residuals is set to ceres::DYNAMIC.";
}
// Takes ownership of functor. Ignores the template-provided number of
@@ -174,7 +174,7 @@ class AutoDiffCostFunction :
AutoDiffCostFunction(CostFunctor* functor, int num_residuals)
: functor_(functor) {
CHECK_EQ(M, DYNAMIC) << "Can't run the dynamic-size constructor if the "
- << "number of residuals is not ceres::DYNAMIC.";
+ << "number of residuals is not ceres::DYNAMIC.";
SizedCostFunction<M, N0, N1, N2, N3, N4, N5>::set_num_residuals(num_residuals);
}
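The two CHECK statements above guard the fixed-size and the dynamic-size constructor. A small usage sketch contrasting them (the functor and the counts are invented for illustration and are not part of the patch):

    #include <vector>
    #include "ceres/autodiff_cost_function.h"

    // Hypothetical functor: residuals[i] = x - targets[i], so the number of
    // residuals equals targets.size().
    struct FitToTargets {
      explicit FitToTargets(const std::vector<double>& t) : targets(t) {}
      template <typename T>
      bool operator()(const T* const x, T* residuals) const {
        for (size_t i = 0; i < targets.size(); ++i) {
          residuals[i] = x[0] - T(targets[i]);
        }
        return true;
      }
      std::vector<double> targets;
    };

    std::vector<double> targets(7, 1.0);

    // Fixed-size constructor: the residual count (7) is part of the type and
    // must not be ceres::DYNAMIC (the CHECK_NE above).
    ceres::CostFunction* fixed =
        new ceres::AutoDiffCostFunction<FitToTargets, 7, 1>(
            new FitToTargets(targets));

    // Dynamic-size constructor: M is ceres::DYNAMIC and the run-time count is
    // passed as the second argument (the CHECK_EQ above).
    ceres::CostFunction* dynamic =
        new ceres::AutoDiffCostFunction<FitToTargets, ceres::DYNAMIC, 1>(
            new FitToTargets(targets), targets.size());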
diff --git a/extern/libmv/third_party/ceres/include/ceres/cost_function.h b/extern/libmv/third_party/ceres/include/ceres/cost_function.h
index 84403d90636..9b010f78f9d 100644
--- a/extern/libmv/third_party/ceres/include/ceres/cost_function.h
+++ b/extern/libmv/third_party/ceres/include/ceres/cost_function.h
@@ -119,7 +119,7 @@ class CostFunction {
// number of outputs (residuals).
vector<int16> parameter_block_sizes_;
int num_residuals_;
- DISALLOW_COPY_AND_ASSIGN(CostFunction);
+ CERES_DISALLOW_COPY_AND_ASSIGN(CostFunction);
};
} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h b/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h
new file mode 100644
index 00000000000..c9fe8f78b7c
--- /dev/null
+++ b/extern/libmv/third_party/ceres/include/ceres/crs_matrix.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_PUBLIC_CRS_MATRIX_H_
+#define CERES_PUBLIC_CRS_MATRIX_H_
+
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// A compressed row sparse matrix used primarily for communicating the
+// Jacobian matrix to the user.
+struct CRSMatrix {
+ CRSMatrix() : num_rows(0), num_cols(0) {}
+
+ int num_rows;
+ int num_cols;
+
+ // A compressed row matrix stores its contents in three arrays.
+ // The non-zero column indices of the i^th row are given by
+ //
+ // cols[rows[i]] ... cols[rows[i + 1] - 1]
+ //
+ // and the corresponding values by
+ //
+ // values[rows[i]] ... values[rows[i + 1] - 1]
+ //
+ // Thus, rows is a vector of size num_rows + 1, and cols and values
+ // have as many entries as there are non-zeros in the matrix.
+ vector<int> cols;
+ vector<int> rows;
+ vector<double> values;
+};
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_CRS_MATRIX_H_
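A worked example of the layout (editorial sketch, not part of the header, following standard compressed-row conventions): the 3x4 matrix with non-zeros 1, 2, 3, 4 at positions (0,0), (1,1), (1,2), (2,3) is stored as shown below.

    #include "ceres/crs_matrix.h"

    ceres::CRSMatrix MakeExample() {
      ceres::CRSMatrix m;
      m.num_rows = 3;
      m.num_cols = 4;
      const int    row_ptr[] = {0, 1, 3, 4};           // num_rows + 1 row pointers
      const int    col_idx[] = {0, 1, 2, 3};           // column index of each non-zero
      const double vals[]    = {1.0, 2.0, 3.0, 4.0};   // values, stored row by row
      m.rows.assign(row_ptr, row_ptr + 4);
      m.cols.assign(col_idx, col_idx + 4);
      m.values.assign(vals, vals + 4);
      // Row 1 (the middle row) occupies cols[rows[1]] .. cols[rows[2] - 1],
      // i.e. columns {1, 2} with values {2.0, 3.0}.
      return m;
    }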
diff --git a/extern/libmv/third_party/ceres/include/ceres/fpclassify.h b/extern/libmv/third_party/ceres/include/ceres/fpclassify.h
new file mode 100644
index 00000000000..5a9ea1599d2
--- /dev/null
+++ b/extern/libmv/third_party/ceres/include/ceres/fpclassify.h
@@ -0,0 +1,88 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keir@google.com (Keir Mierle)
+//
+// Portable floating point classification. The names are picked such that they
+// do not collide with macros. For example, "isnan" in C99 is a macro and hence
+// does not respect namespaces.
+//
+// TODO(keir): Finish porting!
+
+#ifndef CERES_PUBLIC_FPCLASSIFY_H_
+#define CERES_PUBLIC_FPCLASSIFY_H_
+
+#if defined(_MSC_VER)
+#include <float.h>
+#endif
+
+namespace ceres {
+
+#if defined(_MSC_VER)
+inline bool IsFinite (double x) { return _finite(x); }
+inline bool IsInfinite(double x) { return !_finite(x) && !_isnan(x); }
+inline bool IsNaN (double x) { return _isnan(x); }
+inline bool IsNormal (double x) {
+ int classification = _fpclass(x);
+ return classification == _FPCLASS_NN ||
+ classification == _FPCLASS_PN;
+}
+#elif defined(ANDROID)
+
+// On Android when using the GNU STL, the C++ fpclassify functions are not
+// available. Strictly speaking, the std functions are not standard until
+// C++11. Instead use the C99 macros on Android.
+inline bool IsNaN (double x) { return isnan(x); }
+inline bool IsNormal (double x) { return isnormal(x); }
+
+// On Android NDK r6, when using STLPort, the isinf and isfinite functions are
+// not available, so reimplement them.
+# if defined(_STLPORT_VERSION)
+inline bool IsInfinite(double x) {
+ return x == std::numeric_limits<double>::infinity() ||
+ x == -std::numeric_limits<double>::infinity();
+}
+inline bool IsFinite(double x) {
+ return !isnan(x) && !IsInfinite(x);
+}
+# else
+inline bool IsFinite (double x) { return isfinite(x); }
+inline bool IsInfinite(double x) { return isinf(x); }
+# endif // defined(_STLPORT_VERSION)
+#else
+// These definitions are for the normal Unix suspects.
+// TODO(keir): Test the "else" with more platforms.
+inline bool IsFinite (double x) { return std::isfinite(x); }
+inline bool IsInfinite(double x) { return std::isinf(x); }
+inline bool IsNaN (double x) { return std::isnan(x); }
+inline bool IsNormal (double x) { return std::isnormal(x); }
+#endif
+
+} // namespace ceres
+
+#endif // CERES_PUBLIC_FPCLASSIFY_H_
diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h
index 84617c4fa06..ce777d22dc7 100644
--- a/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h
+++ b/extern/libmv/third_party/ceres/include/ceres/internal/fixed_array.h
@@ -34,6 +34,8 @@
#include <cstddef>
#include <glog/logging.h>
+#include "Eigen/Core"
+#include "ceres/internal/macros.h"
#include "ceres/internal/manual_constructor.h"
namespace ceres {
@@ -136,7 +138,6 @@ class FixedArray {
// and T must be the same, otherwise callers' assumptions about use
// of this code will be broken.
struct InnerContainer {
- EIGEN_MAKE_ALIGNED_OPERATOR_NEW
T element;
};
diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h
index 0cfd773bcca..83ec31193e7 100644
--- a/extern/libmv/third_party/ceres/include/ceres/internal/macros.h
+++ b/extern/libmv/third_party/ceres/include/ceres/internal/macros.h
@@ -43,11 +43,13 @@
//
// For disallowing only assign or copy, write the code directly, but declare
// the intend in a comment, for example:
-// void operator=(const TypeName&); // DISALLOW_ASSIGN
-// Note, that most uses of DISALLOW_ASSIGN and DISALLOW_COPY are broken
-// semantically, one should either use disallow both or neither. Try to
-// avoid these in new code.
-#define DISALLOW_COPY_AND_ASSIGN(TypeName) \
+//
+// void operator=(const TypeName&); // _DISALLOW_ASSIGN
+
+// Note, that most uses of CERES_DISALLOW_ASSIGN and CERES_DISALLOW_COPY
+// are broken semantically, one should either use disallow both or
+// neither. Try to avoid these in new code.
+#define CERES_DISALLOW_COPY_AND_ASSIGN(TypeName) \
TypeName(const TypeName&); \
void operator=(const TypeName&)
@@ -57,9 +59,9 @@
// This should be used in the private: declarations for a class
// that wants to prevent anyone from instantiating it. This is
// especially useful for classes containing only static methods.
-#define DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
+#define CERES_DISALLOW_IMPLICIT_CONSTRUCTORS(TypeName) \
TypeName(); \
- DISALLOW_COPY_AND_ASSIGN(TypeName)
+ CERES_DISALLOW_COPY_AND_ASSIGN(TypeName)
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
@@ -151,4 +153,19 @@ char (&ArraySizeHelper(const T (&array)[N]))[N];
#define MUST_USE_RESULT
#endif
+// Platform independent macros to get aligned memory allocations.
+// For example
+//
+// MyFoo my_foo CERES_ALIGN_ATTRIBUTE(16);
+//
+// Gives us an instance of MyFoo which is aligned at a 16 byte
+// boundary.
+#if defined(_MSC_VER)
+#define CERES_ALIGN_ATTRIBUTE(n) __declspec(align(n))
+#define CERES_ALIGN_OF(T) __alignof(T)
+#elif defined(__GNUC__)
+#define CERES_ALIGN_ATTRIBUTE(n) __attribute__((aligned(n)))
+#define CERES_ALIGN_OF(T) __alignof(T)
+#endif
+
#endif // CERES_PUBLIC_INTERNAL_MACROS_H_
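A minimal sketch of how the renamed copy-disabling macro is meant to be used (the class below is hypothetical, not Ceres code):

    class Buffer {
     public:
      Buffer() {}

     private:
      // Declares, but never defines, the copy constructor and assignment
      // operator; copying a Buffer from outside the class fails to compile.
      CERES_DISALLOW_COPY_AND_ASSIGN(Buffer);
    };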
diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h b/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h
index a1d1f444e36..174d35ee2bd 100644
--- a/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h
+++ b/extern/libmv/third_party/ceres/include/ceres/internal/manual_constructor.h
@@ -45,60 +45,49 @@
namespace ceres {
namespace internal {
-// ------- Define ALIGNED_CHAR_ARRAY --------------------------------
+// ------- Define CERES_ALIGNED_CHAR_ARRAY --------------------------------
-#ifndef ALIGNED_CHAR_ARRAY
+#ifndef CERES_ALIGNED_CHAR_ARRAY
// Because MSVC and older GCCs require that the argument to their alignment
// construct to be a literal constant integer, we use a template instantiated
// at all the possible powers of two.
template<int alignment, int size> struct AlignType { };
template<int size> struct AlignType<0, size> { typedef char result[size]; };
-#if defined(_MSC_VER)
-#define BASE_PORT_H_ALIGN_ATTRIBUTE(X) __declspec(align(X))
-#define BASE_PORT_H_ALIGN_OF(T) __alignof(T)
-#elif defined(__GNUC__)
-#define BASE_PORT_H_ALIGN_ATTRIBUTE(X) __attribute__((aligned(X)))
-#define BASE_PORT_H_ALIGN_OF(T) __alignof__(T)
-#endif
-#if defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
+#if !defined(CERES_ALIGN_ATTRIBUTE)
+#define CERES_ALIGNED_CHAR_ARRAY you_must_define_CERES_ALIGNED_CHAR_ARRAY_for_your_compiler
+#else // !defined(CERES_ALIGN_ATTRIBUTE)
-#define BASE_PORT_H_ALIGNTYPE_TEMPLATE(X) \
+#define CERES_ALIGN_TYPE_TEMPLATE(X) \
template<int size> struct AlignType<X, size> { \
- typedef BASE_PORT_H_ALIGN_ATTRIBUTE(X) char result[size]; \
- }
-
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(1);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(2);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(4);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(8);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(16);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(32);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(64);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(128);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(256);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(512);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(1024);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(2048);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(4096);
-BASE_PORT_H_ALIGNTYPE_TEMPLATE(8192);
+ typedef CERES_ALIGN_ATTRIBUTE(X) char result[size]; \
+ }
+
+CERES_ALIGN_TYPE_TEMPLATE(1);
+CERES_ALIGN_TYPE_TEMPLATE(2);
+CERES_ALIGN_TYPE_TEMPLATE(4);
+CERES_ALIGN_TYPE_TEMPLATE(8);
+CERES_ALIGN_TYPE_TEMPLATE(16);
+CERES_ALIGN_TYPE_TEMPLATE(32);
+CERES_ALIGN_TYPE_TEMPLATE(64);
+CERES_ALIGN_TYPE_TEMPLATE(128);
+CERES_ALIGN_TYPE_TEMPLATE(256);
+CERES_ALIGN_TYPE_TEMPLATE(512);
+CERES_ALIGN_TYPE_TEMPLATE(1024);
+CERES_ALIGN_TYPE_TEMPLATE(2048);
+CERES_ALIGN_TYPE_TEMPLATE(4096);
+CERES_ALIGN_TYPE_TEMPLATE(8192);
// Any larger and MSVC++ will complain.
-#define ALIGNED_CHAR_ARRAY(T, Size) \
- typename AlignType<BASE_PORT_H_ALIGN_OF(T), sizeof(T) * Size>::result
+#undef CERES_ALIGN_TYPE_TEMPLATE
-#undef BASE_PORT_H_ALIGNTYPE_TEMPLATE
-#undef BASE_PORT_H_ALIGN_ATTRIBUTE
+#define CERES_ALIGNED_CHAR_ARRAY(T, Size) \
+ typename AlignType<CERES_ALIGN_OF(T), sizeof(T) * Size>::result
-#else // defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
-#define ALIGNED_CHAR_ARRAY you_must_define_ALIGNED_CHAR_ARRAY_for_your_compiler
-#endif // defined(BASE_PORT_H_ALIGN_ATTRIBUTE)
+#endif // !defined(CERES_ALIGN_ATTRIBUTE)
-#undef BASE_PORT_H_ALIGNTYPE_TEMPLATE
-#undef BASE_PORT_H_ALIGN_ATTRIBUTE
-
-#endif // ALIGNED_CHAR_ARRAY
+#endif // CERES_ALIGNED_CHAR_ARRAY
template <typename Type>
class ManualConstructor {
@@ -203,10 +192,10 @@ class ManualConstructor {
}
private:
- ALIGNED_CHAR_ARRAY(Type, 1) space_;
+ CERES_ALIGNED_CHAR_ARRAY(Type, 1) space_;
};
-#undef ALIGNED_CHAR_ARRAY
+#undef CERES_ALIGNED_CHAR_ARRAY
} // namespace internal
} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/include/ceres/internal/port.h b/extern/libmv/third_party/ceres/include/ceres/internal/port.h
index 9a3e5cced58..a9fe247cef5 100644
--- a/extern/libmv/third_party/ceres/include/ceres/internal/port.h
+++ b/extern/libmv/third_party/ceres/include/ceres/internal/port.h
@@ -31,6 +31,8 @@
#ifndef CERES_PUBLIC_INTERNAL_PORT_H_
#define CERES_PUBLIC_INTERNAL_PORT_H_
+#include <string>
+
namespace ceres {
// It is unfortunate that this import of the entire standard namespace is
@@ -39,6 +41,10 @@ namespace ceres {
// things outside of the Ceres optimization package.
using namespace std;
+// This is necessary to properly handle the case that there is a different
+// "string" implementation in the global namespace.
+using std::string;
+
} // namespace ceres
#endif // CERES_PUBLIC_INTERNAL_PORT_H_
diff --git a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h
index 88da992d0c5..29157d380f2 100644
--- a/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h
+++ b/extern/libmv/third_party/ceres/include/ceres/iteration_callback.h
@@ -28,8 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
-// When an iteration callback is specified, Ceres calls the callback after each
-// optimizer step and pass it an IterationSummary object, defined below.
+// When an iteration callback is specified, Ceres calls the callback
+// after each minimizer step (if the minimizer has not converged) and
+// passes it an IterationSummary object, defined below.
#ifndef CERES_PUBLIC_ITERATION_CALLBACK_H_
#define CERES_PUBLIC_ITERATION_CALLBACK_H_
@@ -44,7 +45,15 @@ struct IterationSummary {
// Current iteration number.
int32 iteration;
+ // Step was numerically valid, i.e., all values are finite and the
+ // step reduces the value of the linearized model.
+ //
+ // Note: step_is_valid is false when iteration = 0.
+ bool step_is_valid;
+
// Whether or not the algorithm made progress in this iteration.
+ //
+ // Note: step_is_successful is false when iteration = 0.
bool step_is_successful;
// Value of the objective function.
@@ -66,9 +75,10 @@ struct IterationSummary {
// cost and the change in the cost of the linearized approximation.
double relative_decrease;
- // Value of the regularization parameter for Levenberg-Marquardt
- // algorithm at the end of the current iteration.
- double mu;
+ // Size of the trust region at the end of the current iteration. For
+ // the Levenberg-Marquardt algorithm, the regularization parameter
+ // mu = 1.0 / trust_region_radius.
+ double trust_region_radius;
// For the inexact step Levenberg-Marquardt algorithm, this is the
// relative accuracy with which the Newton(LM) step is solved. This
@@ -81,13 +91,15 @@ struct IterationSummary {
// Newton step.
int linear_solver_iterations;
- // TODO(sameeragarwal): Change to use a higher precision timer using
- // clock_gettime.
- // Time (in seconds) spent inside the linear least squares solver.
- int iteration_time_sec;
+ // Time (in seconds) spent inside the minimizer loop in the current
+ // iteration.
+ double iteration_time_in_seconds;
+
+ // Time (in seconds) spent inside the trust region step solver.
+ double step_solver_time_in_seconds;
- // Time (in seconds) spent inside the linear least squares solver.
- int linear_solver_time_sec;
+ // Time (in seconds) since the user called Solve().
+ double cumulative_time_in_seconds;
};
// Interface for specifying callbacks that are executed at the end of
@@ -133,7 +145,7 @@ struct IterationSummary {
// summary.gradient_max_norm,
// summary.step_norm,
// summary.relative_decrease,
-// summary.mu,
+// summary.trust_region_radius,
// summary.eta,
// summary.linear_solver_iterations);
// if (log_to_stdout_) {
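A compact callback sketch against the renamed fields (editorial example; it assumes CallbackReturnType and SOLVER_CONTINUE from the bundled types.h, with the callback registered through Solver::Options::callbacks):

    #include <cstdio>
    #include "ceres/iteration_callback.h"

    class ConsoleLogger : public ceres::IterationCallback {
     public:
      virtual ceres::CallbackReturnType operator()(
          const ceres::IterationSummary& summary) {
        std::printf("iter %3d  cost %.6e  trust region %.3e  total %.3fs\n",
                    summary.iteration,
                    summary.cost,
                    summary.trust_region_radius,
                    summary.cumulative_time_in_seconds);
        return ceres::SOLVER_CONTINUE;
      }
    };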
diff --git a/extern/libmv/third_party/ceres/include/ceres/jet.h b/extern/libmv/third_party/ceres/include/ceres/jet.h
index a37870210f1..96e2256fd02 100644
--- a/extern/libmv/third_party/ceres/include/ceres/jet.h
+++ b/extern/libmv/third_party/ceres/include/ceres/jet.h
@@ -162,16 +162,7 @@
#include <string>
#include "Eigen/Core"
-
-// Visual Studio 2012 or older version
-#if defined(_MSC_VER) && _MSC_VER <= 1700
-namespace std {
-inline bool isfinite(double x) { return _finite(x); }
-inline bool isinf (double x) { return !_finite(x) && !_isnan(x); }
-inline bool isnan (double x) { return _isnan(x); }
-inline bool isnormal(double x) { return _finite(x) && x != 0.0; }
-} // namespace std
-#endif
+#include "ceres/fpclassify.h"
namespace ceres {
@@ -184,7 +175,9 @@ struct Jet {
// (where T is a Jet<T, N>). This usually only happens in opt mode. Note that
// the C++ standard mandates that e.g. default constructed doubles are
// initialized to 0.0; see sections 8.5 of the C++03 standard.
- Jet() : a() {}
+ Jet() : a() {
+ v.setZero();
+ }
// Constructor from scalar: a + 0.
explicit Jet(const T& value) {
@@ -199,18 +192,6 @@ struct Jet {
v[k] = T(1.0);
}
- /*
-
- // Construct from an array where the first element is the scalar.
- // This is templated to support converting from other data types.
- template<typename D>
- Jet(const D* scalar_and_derivatives) {
- a = T(scalar_and_derivatives[0]);
- v = Eigen::Map<const Eigen::Matrix<D, N, 1> >(
- scalar_and_derivatives + 1, N).cast<T>();
- }
- */
-
// Compound operators
Jet<T, N>& operator+=(const Jet<T, N> &y) {
*this = *this + y;
@@ -232,8 +213,25 @@ struct Jet {
return *this;
}
- T a; // The scalar part.
- Eigen::Matrix<T, N, 1> v; // The infinitesimal part.
+ // The scalar part.
+ T a;
+
+ // The infinitesimal part.
+ //
+ // Note the Eigen::DontAlign bit is needed here because this object
+ // gets allocated on the stack and as part of other arrays and
+ // structs. Forcing the right alignment there is the source of much
+ // pain and suffering. Even if that works, passing Jets around to
+ // functions by value has problems because the C++ ABI does not
+ // guarantee alignment for function arguments.
+ //
+ // Setting the DontAlign bit prevents Eigen from using SSE for the
+ // various operations on Jets. This is a small performance penalty
+ // since the AutoDiff code will still expose much of the code as
+ // statically sized loops to the compiler. But given the subtle
+ // issues that arise due to alignment, especially when dealing with
+ // multiple platforms, it seems to be a trade off worth making.
+ Eigen::Matrix<T, N, 1, Eigen::DontAlign> v;
};
// Unary +
@@ -411,10 +409,6 @@ inline double cos (double x) { return std::cos(x); }
inline double acos (double x) { return std::acos(x); }
inline double sin (double x) { return std::sin(x); }
inline double asin (double x) { return std::asin(x); }
-inline bool isfinite(double x) { return std::isfinite(x); }
-inline bool isinf (double x) { return std::isinf(x); }
-inline bool isnan (double x) { return std::isnan(x); }
-inline bool isnormal(double x) { return std::isnormal(x); }
inline double pow (double x, double y) { return std::pow(x, y); }
inline double atan2(double y, double x) { return std::atan2(y, x); }
@@ -492,22 +486,23 @@ Jet<T, N> asin(const Jet<T, N>& f) {
}
// Jet Classification. It is not clear what the appropriate semantics are for
-// these classifications. This picks that isfinite and isnormal are "all"
-// operations, i.e. all elements of the jet must be finite for the jet itself to
-// be finite (or normal). For isnan and isinf, the answer is less clear. This
-// takes a "any" approach for isnan and isinf such that if any part of a jet is
-// nan or inf, then the entire jet is nan or inf. This leads to strange
-// situations like a jet can be both isinf and isnan, but in practice the "any"
-// semantics are the most useful for e.g. checking that derivatives are sane.
+// these classifications. This picks that IsFinite and isnormal are "all"
+// operations, i.e. all elements of the jet must be finite for the jet itself
+// to be finite (or normal). For IsNaN and IsInfinite, the answer is less
+// clear. This takes a "any" approach for IsNaN and IsInfinite such that if any
+// part of a jet is nan or inf, then the entire jet is nan or inf. This leads
+// to strange situations like a jet can be both IsInfinite and IsNaN, but in
+// practice the "any" semantics are the most useful for e.g. checking that
+// derivatives are sane.
// The jet is finite if all parts of the jet are finite.
template <typename T, int N> inline
-bool isfinite(const Jet<T, N>& f) {
- if (!isfinite(f.a)) {
+bool IsFinite(const Jet<T, N>& f) {
+ if (!IsFinite(f.a)) {
return false;
}
for (int i = 0; i < N; ++i) {
- if (!isfinite(f.v[i])) {
+ if (!IsFinite(f.v[i])) {
return false;
}
}
@@ -516,12 +511,12 @@ bool isfinite(const Jet<T, N>& f) {
// The jet is infinite if any part of the jet is infinite.
template <typename T, int N> inline
-bool isinf(const Jet<T, N>& f) {
- if (isinf(f.a)) {
+bool IsInfinite(const Jet<T, N>& f) {
+ if (IsInfinite(f.a)) {
return true;
}
for (int i = 0; i < N; i++) {
- if (isinf(f.v[i])) {
+ if (IsInfinite(f.v[i])) {
return true;
}
}
@@ -530,12 +525,12 @@ bool isinf(const Jet<T, N>& f) {
// The jet is NaN if any part of the jet is NaN.
template <typename T, int N> inline
-bool isnan(const Jet<T, N>& f) {
- if (isnan(f.a)) {
+bool IsNaN(const Jet<T, N>& f) {
+ if (IsNaN(f.a)) {
return true;
}
for (int i = 0; i < N; ++i) {
- if (isnan(f.v[i])) {
+ if (IsNaN(f.v[i])) {
return true;
}
}
@@ -544,12 +539,12 @@ bool isnan(const Jet<T, N>& f) {
// The jet is normal if all parts of the jet are normal.
template <typename T, int N> inline
-bool isnormal(const Jet<T, N>& f) {
- if (!isnormal(f.a)) {
+bool IsNormal(const Jet<T, N>& f) {
+ if (!IsNormal(f.a)) {
return false;
}
for (int i = 0; i < N; ++i) {
- if (!isnormal(f.v[i])) {
+ if (!IsNormal(f.v[i])) {
return false;
}
}
@@ -650,78 +645,6 @@ inline std::ostream &operator<<(std::ostream &s, const Jet<T, N>& z) {
return s << "[" << z.a << " ; " << z.v.transpose() << "]";
}
-// A jet traits class to make it easier to work with mixed auto / numeric diff.
-template<typename T>
-struct JetOps {
- static bool IsScalar() {
- return true;
- }
- static T GetScalar(const T& t) {
- return t;
- }
- static void SetScalar(const T& scalar, T* t) {
- *t = scalar;
- }
- static void ScaleDerivative(double scale_by, T *value) {
- // For double, there is no derivative to scale.
- }
-};
-
-template<typename T, int N>
-struct JetOps<Jet<T, N> > {
- static bool IsScalar() {
- return false;
- }
- static T GetScalar(const Jet<T, N>& t) {
- return t.a;
- }
- static void SetScalar(const T& scalar, Jet<T, N>* t) {
- t->a = scalar;
- }
- static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
- value->v *= scale_by;
- }
-};
-
-template<typename FunctionType, int kNumArgs, typename ArgumentType>
-struct Chain {
- static ArgumentType Rule(const FunctionType &f,
- const FunctionType dfdx[kNumArgs],
- const ArgumentType x[kNumArgs]) {
- // In the default case of scalars, there's nothing to do since there are no
- // derivatives to propagate.
- return f;
- }
-};
-
-// XXX Add documentation here!
-template<typename FunctionType, int kNumArgs, typename T, int N>
-struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
- static Jet<T, N> Rule(const FunctionType &f,
- const FunctionType dfdx[kNumArgs],
- const Jet<T, N> x[kNumArgs]) {
- // x is itself a function of another variable ("z"); what this function
- // needs to return is "f", but with the derivative with respect to z
- // attached to the jet. So combine the derivative part of x's jets to form
- // a Jacobian matrix between x and z (i.e. dx/dz).
- Eigen::Matrix<T, kNumArgs, N> dxdz;
- for (int i = 0; i < kNumArgs; ++i) {
- dxdz.row(i) = x[i].v.transpose();
- }
-
- // Map the input gradient dfdx into an Eigen row vector.
- Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
- vector_dfdx(dfdx, 1, kNumArgs);
-
- // Now apply the chain rule to obtain df/dz. Combine the derivative with
- // the scalar part to obtain f with full derivative information.
- Jet<T, N> jet_f;
- jet_f.a = f;
- jet_f.v = vector_dfdx.template cast<T>() * dxdz; // Also known as dfdz.
- return jet_f;
- }
-};
-
} // namespace ceres
namespace Eigen {
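A small sketch of Jet arithmetic and of the "any"/"all" classification semantics described above (illustration only, not part of the patch):

    #include <limits>
    #include "ceres/jet.h"

    void JetDemo() {
      typedef ceres::Jet<double, 2> J;
      J x(3.0, 0);             // value 3, derivative slot 0
      J y(4.0, 1);             // value 4, derivative slot 1
      J f = x * y + sin(x);    // the ceres::sin overload propagates derivatives

      // f.a    == 12 + sin(3)
      // f.v[0] == 4 + cos(3)   (df/dx)
      // f.v[1] == 3            (df/dy)

      f.v[1] = std::numeric_limits<double>::quiet_NaN();
      bool any_nan    = ceres::IsNaN(f);     // true: one NaN part poisons the jet
      bool all_finite = ceres::IsFinite(f);  // false: every part must be finite
      (void)any_nan;
      (void)all_finite;
    }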
diff --git a/extern/libmv/third_party/ceres/include/ceres/loss_function.h b/extern/libmv/third_party/ceres/include/ceres/loss_function.h
index 81add02cdee..0c0ceaaecd0 100644
--- a/extern/libmv/third_party/ceres/include/ceres/loss_function.h
+++ b/extern/libmv/third_party/ceres/include/ceres/loss_function.h
@@ -175,6 +175,7 @@ class HuberLoss : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) { }
virtual void Evaluate(double, double*) const;
+
private:
const double a_;
// b = a^2.
@@ -190,6 +191,7 @@ class SoftLOneLoss : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
+
private:
// b = a^2.
const double b_;
@@ -206,6 +208,7 @@ class CauchyLoss : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) { }
virtual void Evaluate(double, double*) const;
+
private:
// b = a^2.
const double b_;
@@ -213,6 +216,78 @@ class CauchyLoss : public LossFunction {
const double c_;
};
+// Loss that is capped beyond a certain level using the arc-tangent function.
+// The scaling parameter 'a' determines the level where falloff occurs.
+// For costs much smaller than 'a', the loss function is linear and behaves like
+// TrivialLoss, and for values much larger than 'a' the value asymptotically
+// approaches the constant value of a * PI / 2.
+//
+// rho(s) = a atan(s / a).
+//
+// At s = 0: rho = [0, 1, 0].
+class ArctanLoss : public LossFunction {
+ public:
+ explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) { }
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ const double a_;
+ // b = 1 / a^2.
+ const double b_;
+};
+
+// Loss function that maps to approximately zero cost in a range around the
+// origin, and reverts to linear in error (quadratic in cost) beyond this range.
+// The tolerance parameter 'a' sets the nominal point at which the
+// transition occurs, and the transition size parameter 'b' sets the nominal
+// distance over which most of the transition occurs. Both a and b must be
+// greater than zero, and typically b will be set to a fraction of a.
+// The slope rho'[s] varies smoothly from about 0 at s <= a - b to
+// about 1 at s >= a + b.
+//
+// The term is computed as:
+//
+// rho(s) = b log(1 + exp((s - a) / b)) - c0.
+//
+// where c0 is chosen so that rho(0) == 0
+//
+// c0 = b log(1 + exp(-a / b))
+//
+// This has the following useful properties:
+//
+// rho(s) == 0 for s = 0
+// rho'(s) ~= 0 for s << a - b
+// rho'(s) ~= 1 for s >> a + b
+// rho''(s) > 0 for all s
+//
+// In addition, all derivatives are continuous, and the curvature is
+// concentrated in the range a - b to a + b.
+//
+// At s = 0: rho = [0, ~0, ~0].
+class TolerantLoss : public LossFunction {
+ public:
+ explicit TolerantLoss(double a, double b);
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ const double a_, b_, c_;
+};
+
+// Composition of two loss functions. The error is the result of first
+// evaluating g followed by f to yield the composition f(g(s)).
+// The loss functions must not be NULL.
+class ComposedLoss : public LossFunction {
+ public:
+ explicit ComposedLoss(const LossFunction* f, Ownership ownership_f,
+ const LossFunction* g, Ownership ownership_g);
+ virtual ~ComposedLoss();
+ virtual void Evaluate(double, double*) const;
+
+ private:
+ internal::scoped_ptr<const LossFunction> f_, g_;
+ const Ownership ownership_f_, ownership_g_;
+};
+
// The discussion above has to do with length scaling: it affects the space
// in which s is measured. Sometimes you want to simply scale the output
// value of the robustifier. For example, you might want to weight
@@ -249,7 +324,7 @@ class ScaledLoss : public LossFunction {
internal::scoped_ptr<const LossFunction> rho_;
const double a_;
const Ownership ownership_;
- DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
+ CERES_DISALLOW_COPY_AND_ASSIGN(ScaledLoss);
};
// Sometimes after the optimization problem has been constructed, we
@@ -314,7 +389,7 @@ class LossFunctionWrapper : public LossFunction {
private:
internal::scoped_ptr<const LossFunction> rho_;
Ownership ownership_;
- DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
+ CERES_DISALLOW_COPY_AND_ASSIGN(LossFunctionWrapper);
};
} // namespace ceres
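A usage sketch of the newly added loss functions (the problem, cost function and parameter values are hypothetical; TAKE_OWNERSHIP comes from the bundled types.h):

    #include "ceres/loss_function.h"

    // Cap the influence of outliers with ArctanLoss, then robustify the result
    // once more with HuberLoss: the composition evaluates f(g(s)).
    ceres::LossFunction* robust_loss = new ceres::ComposedLoss(
        new ceres::HuberLoss(1.0), ceres::TAKE_OWNERSHIP,    // f, applied second
        new ceres::ArctanLoss(3.0), ceres::TAKE_OWNERSHIP);  // g, applied first

    // problem.AddResidualBlock(cost_function, robust_loss, x);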
diff --git a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h
index bbaefca5b6c..8544e44d0bc 100644
--- a/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h
+++ b/extern/libmv/third_party/ceres/include/ceres/numeric_diff_cost_function.h
@@ -93,11 +93,13 @@ struct Differencer {
using Eigen::Map;
using Eigen::Matrix;
using Eigen::RowMajor;
+ using Eigen::ColMajor;
typedef Matrix<double, num_residuals, 1> ResidualVector;
typedef Matrix<double, parameter_block_size, 1> ParameterVector;
- typedef Matrix<double, num_residuals, parameter_block_size, RowMajor>
- JacobianMatrix;
+ typedef Matrix<double, num_residuals, parameter_block_size,
+ (parameter_block_size == 1 &&
+ num_residuals > 1) ? ColMajor : RowMajor> JacobianMatrix;
Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
num_residuals,
diff --git a/extern/libmv/third_party/ceres/include/ceres/problem.h b/extern/libmv/third_party/ceres/include/ceres/problem.h
index 0ca61006bdb..2b08c6723e8 100644
--- a/extern/libmv/third_party/ceres/include/ceres/problem.h
+++ b/extern/libmv/third_party/ceres/include/ceres/problem.h
@@ -50,13 +50,13 @@ namespace ceres {
class CostFunction;
class LossFunction;
class LocalParameterization;
+class Solver;
namespace internal {
class Preprocessor;
class ProblemImpl;
class ParameterBlock;
class ResidualBlock;
-class SolverImpl;
} // namespace internal
// A ResidualBlockId is a handle clients can use to delete residual
@@ -255,9 +255,9 @@ class Problem {
int NumResiduals() const;
private:
- friend class internal::SolverImpl;
+ friend class Solver;
internal::scoped_ptr<internal::ProblemImpl> problem_impl_;
- DISALLOW_COPY_AND_ASSIGN(Problem);
+ CERES_DISALLOW_COPY_AND_ASSIGN(Problem);
};
} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/include/ceres/rotation.h b/extern/libmv/third_party/ceres/include/ceres/rotation.h
index e4227e78b9a..0d8a390d5d1 100644
--- a/extern/libmv/third_party/ceres/include/ceres/rotation.h
+++ b/extern/libmv/third_party/ceres/include/ceres/rotation.h
@@ -47,6 +47,7 @@
#include <algorithm>
#include <cmath>
+#include "glog/logging.h"
namespace ceres {
@@ -145,18 +146,11 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]);
// --- IMPLEMENTATION
-// Duplicate rather than decorate every use of cmath with _USE_MATH_CONSTANTS.
-// Necessitated by Windows.
-#ifndef M_PI
-#define M_PI 3.14159265358979323846
-#define CERES_NEED_M_PI_UNDEF
-#endif
-
template<typename T>
inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
- const T &a0 = angle_axis[0];
- const T &a1 = angle_axis[1];
- const T &a2 = angle_axis[2];
+ const T& a0 = angle_axis[0];
+ const T& a1 = angle_axis[1];
+ const T& a2 = angle_axis[2];
const T theta_squared = a0 * a0 + a1 * a1 + a2 * a2;
// For points not at the origin, the full conversion is numerically stable.
@@ -183,16 +177,35 @@ inline void AngleAxisToQuaternion(const T* angle_axis, T* quaternion) {
template<typename T>
inline void QuaternionToAngleAxis(const T* quaternion, T* angle_axis) {
- const T &q1 = quaternion[1];
- const T &q2 = quaternion[2];
- const T &q3 = quaternion[3];
- const T sin_squared = q1 * q1 + q2 * q2 + q3 * q3;
+ const T& q1 = quaternion[1];
+ const T& q2 = quaternion[2];
+ const T& q3 = quaternion[3];
+ const T sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;
// For quaternions representing non-zero rotation, the conversion
// is numerically stable.
- if (sin_squared > T(0.0)) {
- const T sin_theta = sqrt(sin_squared);
- const T k = T(2.0) * atan2(sin_theta, quaternion[0]) / sin_theta;
+ if (sin_squared_theta > T(0.0)) {
+ const T sin_theta = sqrt(sin_squared_theta);
+ const T& cos_theta = quaternion[0];
+
+ // If cos_theta is negative, theta is greater than pi/2, which
+ // means that the angle for the angle_axis vector, which is 2 * theta,
+ // would be greater than pi.
+ //
+ // While this will result in the correct rotation, it does not
+ // result in a normalized angle-axis vector.
+ //
+ // In that case we observe that 2 * theta ~ 2 * theta - 2 * pi,
+ // which is equivalent to saying
+ //
+ // theta - pi = atan(sin(theta - pi), cos(theta - pi))
+ // = atan(-sin(theta), -cos(theta))
+ //
+ const T two_theta =
+ T(2.0) * ((cos_theta < 0.0)
+ ? atan2(-sin_theta, -cos_theta)
+ : atan2(sin_theta, cos_theta));
+ const T k = two_theta / sin_theta;
angle_axis[0] = q1 * k;
angle_axis[1] = q2 * k;
angle_axis[2] = q3 * k;
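The comment's normalization argument, written out (editorial restatement using the comment's convention that theta is the half-angle, so quaternion[0] = cos(theta)):

    \[
      2\theta = 2\,\operatorname{atan2}(\sin\theta,\ \cos\theta),
      \qquad
      \cos\theta < 0 \;\Rightarrow\;
      2\theta - 2\pi = 2\,\operatorname{atan2}(-\sin\theta,\ -\cos\theta) \in (-\pi,\ 0],
    \]

so the angle carried by the returned angle-axis vector always lies in (-pi, pi].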
@@ -259,7 +272,7 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
// Case 2: theta ~ 0, means sin(theta) ~ theta to a good
// approximation.
- if (costheta > 0) {
+ if (costheta > 0.0) {
const T kHalf = T(0.5);
for (int i = 0; i < 3; ++i) {
angle_axis[i] *= kHalf;
@@ -284,8 +297,8 @@ inline void RotationMatrixToAngleAxis(const T * R, T * angle_axis) {
// angle_axis[i] should be positive, otherwise negative.
for (int i = 0; i < 3; ++i) {
angle_axis[i] = theta * sqrt((R[i*4] - costheta) * inv_one_minus_costheta);
- if (((sintheta < 0) && (angle_axis[i] > 0)) ||
- ((sintheta > 0) && (angle_axis[i] < 0))) {
+ if (((sintheta < 0.0) && (angle_axis[i] > 0.0)) ||
+ ((sintheta > 0.0) && (angle_axis[i] < 0.0))) {
angle_axis[i] = -angle_axis[i];
}
}
@@ -334,7 +347,8 @@ template <typename T>
inline void EulerAnglesToRotationMatrix(const T* euler,
const int row_stride,
T* R) {
- const T degrees_to_radians(M_PI / 180.0);
+ const double kPi = 3.14159265358979323846;
+ const T degrees_to_radians(kPi / 180.0);
const T pitch(euler[0] * degrees_to_radians);
const T roll(euler[1] * degrees_to_radians);
@@ -517,10 +531,4 @@ void AngleAxisRotatePoint(const T angle_axis[3], const T pt[3], T result[3]) {
} // namespace ceres
-// Clean define pollution.
-#ifdef CERES_NEED_M_PI_UNDEF
-#undef CERES_NEED_M_PI_UNDEF
-#undef M_PI
-#endif
-
#endif // CERES_PUBLIC_ROTATION_H_
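
A minimal standalone sketch of the normalization branch added to QuaternionToAngleAxis above, using plain doubles (the helper name and the test quaternion are illustrative, not part of the patch): when the scalar part cos_theta is negative, folding through atan2(-sin, -cos) keeps the recovered angle in [-pi, pi].

    #include <cmath>
    #include <cstdio>

    // Standalone copy of the branch added above; q = [w, x, y, z], unit norm.
    static void QuaternionToAngleAxisSketch(const double q[4], double aa[3]) {
      const double q1 = q[1], q2 = q[2], q3 = q[3];
      const double sin_squared_theta = q1 * q1 + q2 * q2 + q3 * q3;
      if (sin_squared_theta > 0.0) {
        const double sin_theta = std::sqrt(sin_squared_theta);
        const double cos_theta = q[0];
        // Folding through atan2(-sin, -cos) when cos_theta < 0 keeps the
        // total angle 2 * theta inside [-pi, pi].
        const double two_theta = 2.0 * (cos_theta < 0.0
                                            ? std::atan2(-sin_theta, -cos_theta)
                                            : std::atan2(sin_theta, cos_theta));
        const double k = two_theta / sin_theta;
        aa[0] = q1 * k; aa[1] = q2 * k; aa[2] = q3 * k;
      } else {
        // Near-zero rotation: 2 * theta / sin_theta ~ 2.
        aa[0] = 2.0 * q1; aa[1] = 2.0 * q2; aa[2] = 2.0 * q3;
      }
    }

    int main() {
      const double kPi = 3.14159265358979323846;
      // Rotation of 3*pi/2 about +z; the scalar part cos(3*pi/4) is negative.
      const double q[4] = {std::cos(0.75 * kPi), 0.0, 0.0, std::sin(0.75 * kPi)};
      double aa[3];
      QuaternionToAngleAxisSketch(q, aa);
      std::printf("%f %f %f\n", aa[0], aa[1], aa[2]);  // ~ 0 0 -1.570796
      return 0;
    }
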
diff --git a/extern/libmv/third_party/ceres/include/ceres/solver.h b/extern/libmv/third_party/ceres/include/ceres/solver.h
index bd669272023..31d5e8d7987 100644
--- a/extern/libmv/third_party/ceres/include/ceres/solver.h
+++ b/extern/libmv/third_party/ceres/include/ceres/solver.h
@@ -34,10 +34,10 @@
#include <cmath>
#include <string>
#include <vector>
-
-#include "ceres/iteration_callback.h"
+#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
+#include "ceres/iteration_callback.h"
#include "ceres/types.h"
namespace ceres {
@@ -57,24 +57,47 @@ class Solver {
struct Options {
// Default constructor that sets up a generic sparse problem.
Options() {
- minimizer_type = LEVENBERG_MARQUARDT;
+ trust_region_strategy_type = LEVENBERG_MARQUARDT;
+ dogleg_type = TRADITIONAL_DOGLEG;
+ use_nonmonotonic_steps = false;
+ max_consecutive_nonmonotonic_steps = 5;
max_num_iterations = 50;
- max_solver_time_sec = 1.0e9;
+ max_solver_time_in_seconds = 1e9;
num_threads = 1;
- tau = 1e-4;
+ initial_trust_region_radius = 1e4;
+ max_trust_region_radius = 1e16;
+ min_trust_region_radius = 1e-32;
min_relative_decrease = 1e-3;
+ lm_min_diagonal = 1e-6;
+ lm_max_diagonal = 1e32;
+ max_num_consecutive_invalid_steps = 5;
function_tolerance = 1e-6;
gradient_tolerance = 1e-10;
parameter_tolerance = 1e-8;
-#ifndef CERES_NO_SUITESPARSE
- linear_solver_type = SPARSE_NORMAL_CHOLESKY;
-#else
+
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
linear_solver_type = DENSE_QR;
-#endif // CERES_NO_SUITESPARSE
+#else
+ linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+#endif
+
preconditioner_type = JACOBI;
+
+ sparse_linear_algebra_library = SUITE_SPARSE;
+#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
+ sparse_linear_algebra_library = CX_SPARSE;
+#endif
+
num_linear_solver_threads = 1;
num_eliminate_blocks = 0;
ordering_type = NATURAL;
+
+#if defined(CERES_NO_SUITESPARSE)
+ use_block_amd = false;
+#else
+ use_block_amd = true;
+#endif
+
linear_solver_min_num_iterations = 1;
linear_solver_max_num_iterations = 500;
eta = 1e-1;
@@ -82,10 +105,13 @@ class Solver {
logging_type = PER_MINIMIZER_ITERATION;
minimizer_progress_to_stdout = false;
return_initial_residuals = false;
+ return_initial_gradient = false;
+ return_initial_jacobian = false;
return_final_residuals = false;
+ return_final_gradient = false;
+ return_final_jacobian = false;
lsqp_dump_directory = "/tmp";
lsqp_dump_format_type = TEXTFILE;
- crash_and_dump_lsqp_on_failure = false;
check_gradients = false;
gradient_check_relative_precision = 1e-8;
numeric_derivative_relative_step_size = 1e-6;
@@ -94,27 +120,78 @@ class Solver {
// Minimizer options ----------------------------------------
- MinimizerType minimizer_type;
+ TrustRegionStrategyType trust_region_strategy_type;
+
+ // Type of dogleg strategy to use.
+ DoglegType dogleg_type;
+
+ // The classical trust region methods are descent methods, in that
+ // they only accept a point if it strictly reduces the value of
+ // the objective function.
+ //
+ // Relaxing this requirement allows the algorithm to be more
+ // efficient in the long term at the cost of some local increase
+ // in the value of the objective function.
+ //
+ // This is because allowing for non-decreasing objective function
+  // values in a principled manner allows the algorithm to "jump over
+ // boulders" as the method is not restricted to move into narrow
+ // valleys while preserving its convergence properties.
+ //
+ // Setting use_nonmonotonic_steps to true enables the
+ // non-monotonic trust region algorithm as described by Conn,
+ // Gould & Toint in "Trust Region Methods", Section 10.1.
+ //
+ // The parameter max_consecutive_nonmonotonic_steps controls the
+ // window size used by the step selection algorithm to accept
+ // non-monotonic steps.
+ //
+ // Even though the value of the objective function may be larger
+ // than the minimum value encountered over the course of the
+ // optimization, the final parameters returned to the user are the
+ // ones corresponding to the minimum cost over all iterations.
+ bool use_nonmonotonic_steps;
+ int max_consecutive_nonmonotonic_steps;
// Maximum number of iterations for the minimizer to run for.
int max_num_iterations;
// Maximum time for which the minimizer should run for.
- double max_solver_time_sec;
+ double max_solver_time_in_seconds;
// Number of threads used by Ceres for evaluating the cost and
// jacobians.
int num_threads;
- // For Levenberg-Marquardt, the initial value for the
- // regularizer. This is the inversely related to the size of the
- // initial trust region.
- double tau;
+ // Trust region minimizer settings.
+ double initial_trust_region_radius;
+ double max_trust_region_radius;
+
+ // Minimizer terminates when the trust region radius becomes
+ // smaller than this value.
+ double min_trust_region_radius;
- // For trust region methods, this is lower threshold for the
- // relative decrease before a step is accepted.
+ // Lower bound for the relative decrease before a step is
+ // accepted.
double min_relative_decrease;
+  // For the Levenberg-Marquardt algorithm, the scaled diagonal of
+ // the normal equations J'J is used to control the size of the
+ // trust region. Extremely small and large values along the
+ // diagonal can make this regularization scheme
+  // fail. lm_min_diagonal and lm_max_diagonal clamp the values of
+ // diag(J'J) from above and below. In the normal course of
+ // operation, the user should not have to modify these parameters.
+ double lm_min_diagonal;
+ double lm_max_diagonal;
+
+ // Sometimes due to numerical conditioning problems or linear
+ // solver flakiness, the trust region strategy may return a
+ // numerically invalid step that can be fixed by reducing the
+ // trust region size. So the TrustRegionMinimizer allows for a few
+ // successive invalid steps before it declares NUMERICAL_FAILURE.
+ int max_num_consecutive_invalid_steps;
+
// Minimizer terminates when
//
// (new_cost - old_cost) < function_tolerance * old_cost;
@@ -141,6 +218,12 @@ class Solver {
// Type of preconditioner to use with the iterative linear solvers.
PreconditionerType preconditioner_type;
+ // Ceres supports using multiple sparse linear algebra libraries
+ // for sparse matrix ordering and factorizations. Currently,
+ // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
+ // whether they are linked into Ceres at build time.
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+
// Number of threads used by Ceres to solve the Newton
// step. Currently only the SPARSE_SCHUR solver is capable of
// using this setting.
@@ -170,6 +253,19 @@ class Solver {
// non-empty.
vector<double*> ordering;
+ // By virtue of the modeling layer in Ceres being block oriented,
+ // all the matrices used by Ceres are also block oriented. When
+ // doing sparse direct factorization of these matrices (for
+ // SPARSE_NORMAL_CHOLESKY, SPARSE_SCHUR and ITERATIVE in
+ // conjunction with CLUSTER_TRIDIAGONAL AND CLUSTER_JACOBI
+  // conjunction with CLUSTER_TRIDIAGONAL and CLUSTER_JACOBI
+ // either be run on the block or the scalar form of these matrices.
+ // Running it on the block form exposes more of the super-nodal
+ // structure of the matrix to the factorization routines. Setting
+ // this parameter to true runs the ordering algorithms in block
+ // form. Currently this option only makes sense with
+ // sparse_linear_algebra_library = SUITE_SPARSE.
+ bool use_block_amd;
// Minimum number of iterations for which the linear solver should
// run, even if the convergence criterion is satisfied.
@@ -206,7 +302,12 @@ class Solver {
bool minimizer_progress_to_stdout;
bool return_initial_residuals;
+ bool return_initial_gradient;
+ bool return_initial_jacobian;
+
bool return_final_residuals;
+ bool return_final_gradient;
+ bool return_final_jacobian;
// List of iterations at which the optimizer should dump the
// linear least squares problem to disk. Useful for testing and
@@ -217,15 +318,6 @@ class Solver {
string lsqp_dump_directory;
DumpFormatType lsqp_dump_format_type;
- // Dump the linear least squares problem to disk if the minimizer
- // fails due to NUMERICAL_FAILURE and crash the process. This flag
- // is useful for generating debugging information. The problem is
- // dumped in a file whose name is determined by
- // Solver::Options::lsqp_dump_format.
- //
- // Note: This requires a version of Ceres built with protocol buffers.
- bool crash_and_dump_lsqp_on_failure;
-
// Finite differences options ----------------------------------------------
// Check all jacobians computed by each residual block with finite
@@ -273,16 +365,25 @@ class Solver {
bool update_state_every_iteration;
// Callbacks that are executed at the end of each iteration of the
- // Minimizer. They are executed in the order that they are
- // specified in this vector. By default, parameter blocks are
- // updated only at the end of the optimization, i.e when the
- // Minimizer terminates. This behaviour is controlled by
+ // Minimizer. An iteration may terminate midway, either due to
+ // numerical failures or because one of the convergence tests has
+ // been satisfied. In this case none of the callbacks are
+ // executed.
+
+ // Callbacks are executed in the order that they are specified in
+ // this vector. By default, parameter blocks are updated only at
+  // the end of the optimization, i.e. when the Minimizer
+ // terminates. This behaviour is controlled by
   // update_state_every_iteration. If the user wishes to have access
   // to the updated parameter blocks when his/her callbacks are
// executed, then set update_state_every_iteration to true.
//
// The solver does NOT take ownership of these pointers.
vector<IterationCallback*> callbacks;
+
+ // If non-empty, a summary of the execution of the solver is
+ // recorded to this file.
+ string solver_log;
};
struct Summary {
@@ -313,20 +414,74 @@ class Solver {
// blocks that they depend on were fixed.
double fixed_cost;
- // Residuals before and after the optimization. Each vector
- // contains problem.NumResiduals() elements. Residuals are in the
- // same order in which they were added to the problem object when
- // constructing this problem.
+ // Vectors of residuals before and after the optimization. The
+ // entries of these vectors are in the order in which
+ // ResidualBlocks were added to the Problem object.
+ //
+ // Whether the residual vectors are populated with values is
+ // controlled by Solver::Options::return_initial_residuals and
+ // Solver::Options::return_final_residuals respectively.
vector<double> initial_residuals;
vector<double> final_residuals;
+ // Gradient vectors, before and after the optimization. The rows
+ // are in the same order in which the ParameterBlocks were added
+ // to the Problem object.
+ //
+ // NOTE: Since AddResidualBlock adds ParameterBlocks to the
+ // Problem automatically if they do not already exist, if you wish
+ // to have explicit control over the ordering of the vectors, then
+ // use Problem::AddParameterBlock to explicitly add the
+ // ParameterBlocks in the order desired.
+ //
+ // Whether the vectors are populated with values is controlled by
+ // Solver::Options::return_initial_gradient and
+ // Solver::Options::return_final_gradient respectively.
+ vector<double> initial_gradient;
+ vector<double> final_gradient;
+
+ // Jacobian matrices before and after the optimization. The rows
+ // of these matrices are in the same order in which the
+ // ResidualBlocks were added to the Problem object. The columns
+ // are in the same order in which the ParameterBlocks were added
+ // to the Problem object.
+ //
+ // NOTE: Since AddResidualBlock adds ParameterBlocks to the
+ // Problem automatically if they do not already exist, if you wish
+ // to have explicit control over the column ordering of the
+ // matrix, then use Problem::AddParameterBlock to explicitly add
+ // the ParameterBlocks in the order desired.
+ //
+ // The Jacobian matrices are stored as compressed row sparse
+ // matrices. Please see ceres/crs_matrix.h for more details of the
+ // format.
+ //
+  // Whether the Jacobian matrices are populated with values is
+ // controlled by Solver::Options::return_initial_jacobian and
+ // Solver::Options::return_final_jacobian respectively.
+ CRSMatrix initial_jacobian;
+ CRSMatrix final_jacobian;
+
vector<IterationSummary> iterations;
int num_successful_steps;
int num_unsuccessful_steps;
+ // When the user calls Solve, before the actual optimization
+ // occurs, Ceres performs a number of preprocessing steps. These
+ // include error checks, memory allocations, and reorderings. This
+ // time is accounted for as preprocessing time.
double preprocessor_time_in_seconds;
+
+ // Time spent in the TrustRegionMinimizer.
double minimizer_time_in_seconds;
+
+ // After the Minimizer is finished, some time is spent in
+ // re-evaluating residuals etc. This time is accounted for in the
+ // postprocessor time.
+ double postprocessor_time_in_seconds;
+
+  // Sum total of all time spent inside Ceres when Solve is called.
double total_time_in_seconds;
// Preprocessor summary.
@@ -354,6 +509,10 @@ class Solver {
PreconditionerType preconditioner_type;
OrderingType ordering_type;
+
+ TrustRegionStrategyType trust_region_strategy_type;
+ DoglegType dogleg_type;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
};
// Once a least squares problem has been built, this function takes
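
A hedged usage sketch of the Solver::Options fields introduced above; it assumes a fully built ceres::Problem and the usual "ceres/ceres.h" umbrella header, and is not part of this patch:

    #include "ceres/ceres.h"  // assumed umbrella header for Problem/Solver/Solve

    void SolveWithTrustRegionOptions(ceres::Problem* problem) {
      ceres::Solver::Options options;
      options.trust_region_strategy_type = ceres::LEVENBERG_MARQUARDT;
      options.use_nonmonotonic_steps = true;             // new in this version
      options.max_consecutive_nonmonotonic_steps = 5;
      options.initial_trust_region_radius = 1e4;         // replaces tau
      options.max_solver_time_in_seconds = 30.0;         // renamed field
      options.return_final_residuals = true;
      options.return_final_jacobian = true;              // filled as a CRSMatrix

      ceres::Solver::Summary summary;
      ceres::Solve(options, problem, &summary);
      // summary.final_jacobian and summary.final_residuals are now populated.
    }
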
diff --git a/extern/libmv/third_party/ceres/include/ceres/types.h b/extern/libmv/third_party/ceres/include/ceres/types.h
index a30c79029ac..3980885b53c 100644
--- a/extern/libmv/third_party/ceres/include/ceres/types.h
+++ b/extern/libmv/third_party/ceres/include/ceres/types.h
@@ -59,14 +59,18 @@ enum LinearSolverType {
// normal equations A'A x = A'b. They are direct solvers and do not
// assume any special problem structure.
- // Solve the normal equations using a sparse cholesky solver; based
- // on CHOLMOD.
- SPARSE_NORMAL_CHOLESKY,
+ // Solve the normal equations using a dense Cholesky solver; based
+ // on Eigen.
+ DENSE_NORMAL_CHOLESKY,
// Solve the normal equations using a dense QR solver; based on
// Eigen.
DENSE_QR,
+ // Solve the normal equations using a sparse cholesky solver; requires
+ // SuiteSparse or CXSparse.
+ SPARSE_NORMAL_CHOLESKY,
+
// Specialized solvers, specific to problems with a generalized
   // bi-partite structure.
@@ -110,6 +114,15 @@ enum PreconditionerType {
CLUSTER_TRIDIAGONAL
};
+enum SparseLinearAlgebraLibraryType {
+ // High performance sparse Cholesky factorization and approximate
+ // minimum degree ordering.
+ SUITE_SPARSE,
+
+  // A lightweight replacement for SuiteSparse.
+ CX_SPARSE
+};
+
enum LinearSolverTerminationType {
// Termination criterion was met. For factorization based solvers
// the tolerance is assumed to be zero. Any user provided values are
@@ -149,8 +162,47 @@ enum LoggingType {
PER_MINIMIZER_ITERATION
};
-enum MinimizerType {
- LEVENBERG_MARQUARDT
+// Ceres supports different strategies for computing the trust region
+// step.
+enum TrustRegionStrategyType {
+ // The default trust region strategy is to use the step computation
+ // used in the Levenberg-Marquardt algorithm. For more details see
+ // levenberg_marquardt_strategy.h
+ LEVENBERG_MARQUARDT,
+
+ // Powell's dogleg algorithm interpolates between the Cauchy point
+ // and the Gauss-Newton step. It is particularly useful if the
+ // LEVENBERG_MARQUARDT algorithm is making a large number of
+ // unsuccessful steps. For more details see dogleg_strategy.h.
+ //
+ // NOTES:
+ //
+ // 1. This strategy has not been experimented with or tested as
+ // extensively as LEVENBERG_MARQUARDT, and therefore it should be
+ // considered EXPERIMENTAL for now.
+ //
+ // 2. For now this strategy should only be used with exact
+ // factorization based linear solvers, i.e., SPARSE_SCHUR,
+ // DENSE_SCHUR, DENSE_QR and SPARSE_NORMAL_CHOLESKY.
+ DOGLEG
+};
+
+// Ceres supports two different dogleg strategies.
+// The "traditional" dogleg method by Powell and the
+// "subspace" method described in
+// R. H. Byrd, R. B. Schnabel, and G. A. Shultz,
+// "Approximate solution of the trust region problem by minimization
+// over two-dimensional subspaces", Mathematical Programming,
+// 40 (1988), pp. 247--263
+enum DoglegType {
+ // The traditional approach constructs a dogleg path
+ // consisting of two line segments and finds the furthest
+ // point on that path that is still inside the trust region.
+ TRADITIONAL_DOGLEG,
+
+ // The subspace approach finds the exact minimum of the model
+ // constrained to the subspace spanned by the dogleg path.
+ SUBSPACE_DOGLEG
};
enum SolverTerminationType {
@@ -246,11 +298,15 @@ enum DimensionType {
const char* LinearSolverTypeToString(LinearSolverType type);
const char* PreconditionerTypeToString(PreconditionerType type);
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType type);
const char* LinearSolverTerminationTypeToString(
LinearSolverTerminationType type);
const char* OrderingTypeToString(OrderingType type);
const char* SolverTerminationTypeToString(SolverTerminationType type);
-
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType type);
+const char* TrustRegionStrategyTypeToString(TrustRegionStrategyType type);
bool IsSchurType(LinearSolverType type);
} // namespace ceres
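
A short illustrative sketch of the new enums and the ToString helper declared above (the function and variable names here are made up for the example):

    #include <cstdio>
    #include "ceres/types.h"  // TrustRegionStrategyType, DoglegType, *ToString

    void PrintStrategyChoice() {
      // DOGLEG is marked experimental above and should be paired with an
      // exact factorization based linear solver (e.g. DENSE_QR).
      ceres::TrustRegionStrategyType strategy = ceres::DOGLEG;
      ceres::DoglegType dogleg = ceres::SUBSPACE_DOGLEG;
      std::printf("strategy: %s\n",
                  ceres::TrustRegionStrategyTypeToString(strategy));
      // In real use these would be assigned to Solver::Options fields:
      // options.trust_region_strategy_type / options.dogleg_type.
      (void)dogleg;
    }
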
diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.h b/extern/libmv/third_party/ceres/internal/ceres/array_utils.cc
index d00bb9095be..673baa4f70f 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/array_utils.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// Copyright 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
@@ -27,39 +27,41 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-//
-// Implmentation of Levenberg Marquardt algorithm based on "Methods for
-// Nonlinear Least Squares" by K. Madsen, H.B. Nielsen and
-// O. Tingleff. Available to download from
-//
-// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
-//
-#ifndef CERES_INTERNAL_LEVENBERG_MARQUARDT_H_
-#define CERES_INTERNAL_LEVENBERG_MARQUARDT_H_
+#include "ceres/array_utils.h"
-#include "ceres/minimizer.h"
-#include "ceres/solver.h"
+#include <cmath>
+#include <cstddef>
+#include "ceres/fpclassify.h"
namespace ceres {
namespace internal {
-class Evaluator;
-class LinearSolver;
+// It is a near impossibility that user code generates this exact
+// value in normal operation, thus we will use it to fill arrays
+// before passing them to user code. If on return an element of the
+// array still contains this value, we will assume that the user code
+// did not write to that memory location.
+const double kImpossibleValue = 1e302;
-class LevenbergMarquardt : public Minimizer {
- public:
- virtual ~LevenbergMarquardt();
+bool IsArrayValid(const int size, const double* x) {
+ if (x != NULL) {
+ for (int i = 0; i < size; ++i) {
+ if (!IsFinite(x[i]) || (x[i] == kImpossibleValue)) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
- virtual void Minimize(const Minimizer::Options& options,
- Evaluator* evaluator,
- LinearSolver* linear_solver,
- const double* initial_parameters,
- double* final_parameters,
- Solver::Summary* summary);
-};
+void InvalidateArray(const int size, double* x) {
+ if (x != NULL) {
+ for (int i = 0; i < size; ++i) {
+ x[i] = kImpossibleValue;
+ }
+ }
+}
} // namespace internal
} // namespace ceres
-
-#endif // CERES_INTERNAL_LEVENBERG_MARQUARDT_H_
diff --git a/extern/libmv/third_party/ceres/internal/ceres/array_utils.h b/extern/libmv/third_party/ceres/internal/ceres/array_utils.h
new file mode 100644
index 00000000000..99cc8d8ebbf
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/array_utils.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Utility routines for validating arrays.
+//
+// These are useful for detecting two common classes of errors.
+//
+// 1. Uninitialized memory - where the user for some reason did not
+// compute part of an array, but the code expects it.
+//
+// 2. Numerical failure while computing the cost/residual/jacobian,
+// e.g. NaN, infinities etc. This is particularly useful since the
+// automatic differentiation code does computations that are not
+// evident to the user and can silently generate hard to debug errors.
+
+#ifndef CERES_INTERNAL_ARRAY_UTILS_H_
+#define CERES_INTERNAL_ARRAY_UTILS_H_
+
+#include "ceres/internal/port.h"
+
+namespace ceres {
+namespace internal {
+
+// Fill the array x with an impossible value that the user code is
+// never expected to compute.
+void InvalidateArray(int size, double* x);
+
+// Check if all the entries of the array x are valid, i.e. all the
+// values in the array should be finite and none of them should be
+// equal to the "impossible" value used by InvalidateArray.
+bool IsArrayValid(int size, const double* x);
+
+extern const double kImpossibleValue;
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_ARRAY_UTILS_H_
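
A rough self-contained sketch of how the helpers above are meant to be used; the functions are re-implemented locally (with C++11 std::isfinite standing in for ceres::IsFinite) purely so the snippet compiles on its own:

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    // Local stand-ins mirroring the functions added in array_utils.cc above.
    static const double kImpossibleValue = 1e302;

    static void InvalidateArray(int size, double* x) {
      if (x != NULL)
        for (int i = 0; i < size; ++i) x[i] = kImpossibleValue;
    }

    static bool IsArrayValid(int size, const double* x) {
      if (x != NULL)
        for (int i = 0; i < size; ++i)
          if (!std::isfinite(x[i]) || x[i] == kImpossibleValue) return false;
      return true;
    }

    int main() {
      double residuals[3];
      InvalidateArray(3, residuals);
      // Simulate a cost function that forgot to write residuals[2].
      residuals[0] = 1.0;
      residuals[1] = 2.0;
      std::printf("valid: %d\n", IsArrayValid(3, residuals));  // prints 0
      return 0;
    }
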
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.cc b/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.cc
index 05e63eb560b..9edc4fa23bd 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.cc
@@ -40,16 +40,26 @@
namespace ceres {
namespace internal {
-void BlockEvaluatePreparer::Init(int** jacobian_layout) {
+void BlockEvaluatePreparer::Init(int const* const* jacobian_layout,
+ int max_derivatives_per_residual_block) {
jacobian_layout_ = jacobian_layout;
+ scratch_evaluate_preparer_.Init(max_derivatives_per_residual_block);
}
// Point the jacobian blocks directly into the block sparse matrix.
void BlockEvaluatePreparer::Prepare(const ResidualBlock* residual_block,
int residual_block_index,
SparseMatrix* jacobian,
- double** jacobians) const {
- CHECK(jacobian != NULL);
+ double** jacobians) {
+ // If the overall jacobian is not available, use the scratch space.
+ if (jacobian == NULL) {
+ scratch_evaluate_preparer_.Prepare(residual_block,
+ residual_block_index,
+ jacobian,
+ jacobians);
+ return;
+ }
+
double* jacobian_values =
down_cast<BlockSparseMatrix*>(jacobian)->mutable_values();
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.h b/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.h
index a7869311e6e..354acc031f4 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_evaluate_preparer.h
@@ -36,6 +36,8 @@
#ifndef CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
#define CERES_INTERNAL_BLOCK_EVALUATE_PREPARER_H_
+#include "ceres/scratch_evaluate_preparer.h"
+
namespace ceres {
namespace internal {
@@ -47,18 +49,26 @@ class BlockEvaluatePreparer {
// Using Init() instead of a constructor allows for allocating this structure
// with new[]. This is because C++ doesn't allow passing arguments to objects
// constructed with new[] (as opposed to plain 'new').
- void Init(int** jacobian_layout);
+ void Init(int const* const* jacobian_layout,
+ int max_derivatives_per_residual_block);
// EvaluatePreparer interface
- // Point the jacobian blocks directly into the block sparse matrix.
+ // Point the jacobian blocks directly into the block sparse matrix, if
+ // jacobian is non-null. Otherwise, uses an internal per-thread buffer to
+ // store the jacobians temporarily.
void Prepare(const ResidualBlock* residual_block,
int residual_block_index,
SparseMatrix* jacobian,
- double** jacobians) const;
+ double** jacobians);
private:
int const* const* jacobian_layout_;
+
+ // For the case that the overall jacobian is not available, but the
+ // individual jacobians are requested, use a pass-through scratch evaluate
+ // preparer.
+ ScratchEvaluatePreparer scratch_evaluate_preparer_;
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc
index 1a5001f9c71..474c37f7ca4 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_jacobi_preconditioner.cc
@@ -40,11 +40,10 @@
namespace ceres {
namespace internal {
-BlockJacobiPreconditioner::BlockJacobiPreconditioner(
- const LinearOperator& A)
- : block_structure_(
- *(down_cast<const BlockSparseMatrix*>(&A)->block_structure())),
- num_rows_(A.num_rows()) {
+BlockJacobiPreconditioner::BlockJacobiPreconditioner(const LinearOperator& A)
+ : num_rows_(A.num_rows()),
+ block_structure_(
+ *(down_cast<const BlockSparseMatrix*>(&A)->block_structure())) {
// Calculate the amount of storage needed.
int storage_needed = 0;
for (int c = 0; c < block_structure_.cols.size(); ++c) {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_jacobian_writer.cc b/extern/libmv/third_party/ceres/internal/ceres/block_jacobian_writer.cc
index 52a58bb43a6..f90c350cc80 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_jacobian_writer.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_jacobian_writer.cc
@@ -136,9 +136,12 @@ BlockJacobianWriter::BlockJacobianWriter(const Evaluator::Options& options,
// makes the final Write() a nop.
BlockEvaluatePreparer* BlockJacobianWriter::CreateEvaluatePreparers(
int num_threads) {
+ int max_derivatives_per_residual_block =
+ program_->MaxDerivativesPerResidualBlock();
+
BlockEvaluatePreparer* preparers = new BlockEvaluatePreparer[num_threads];
for (int i = 0; i < num_threads; i++) {
- preparers[i].Init(&jacobian_layout_[0]);
+ preparers[i].Init(&jacobian_layout_[0], max_derivatives_per_residual_block);
}
return preparers;
}
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc
index 2afaf5e2ea2..0f95e8932b8 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.cc
@@ -28,10 +28,10 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "glog/logging.h"
#include "ceres/block_random_access_dense_matrix.h"
#include <vector>
-#include <glog/logging.h>
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.h
index 3a0096209f7..9f27a4c30f3 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_dense_matrix.h
@@ -89,7 +89,7 @@ class BlockRandomAccessDenseMatrix : public BlockRandomAccessMatrix {
vector<int> block_layout_;
scoped_array<double> values_;
- DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessDenseMatrix);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_matrix.h
index f398af3be87..b76cb78b160 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_matrix.h
@@ -77,7 +77,7 @@ namespace internal {
//
// if (cell != NULL) {
// MatrixRef m(cell->values, row_stride, col_stride);
-// MutexLock l(&cell->m);
+// CeresMutexLock l(&cell->m);
// m.block(row, col, row_block_size, col_block_size) = ...
// }
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc
index c496fcd13de..9ed62ce948b 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.cc
@@ -28,17 +28,17 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
+#include "glog/logging.h"
#include "ceres/block_random_access_sparse_matrix.h"
#include <algorithm>
#include <set>
#include <utility>
#include <vector>
-#include <glog/logging.h>
-#include "ceres/mutex.h"
-#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/mutex.h"
+#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h
index 12613c3daa0..48a00437cf6 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_random_access_sparse_matrix.h
@@ -38,6 +38,7 @@
#include "ceres/block_random_access_matrix.h"
#include "ceres/collections_port.h"
#include "ceres/triplet_sparse_matrix.h"
+#include "ceres/integral_types.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
@@ -84,11 +85,11 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
TripletSparseMatrix* mutable_matrix() { return tsm_.get(); }
private:
- long int IntPairToLong(int a, int b) {
+ int64 IntPairToLong(int a, int b) {
return a * kMaxRowBlocks + b;
}
- const int kMaxRowBlocks;
+ const int64 kMaxRowBlocks;
// row/column block sizes.
const vector<int> blocks_;
@@ -100,7 +101,8 @@ class BlockRandomAccessSparseMatrix : public BlockRandomAccessMatrix {
// The underlying matrix object which actually stores the cells.
scoped_ptr<TripletSparseMatrix> tsm_;
- DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
+ friend class BlockRandomAccessSparseMatrixTest;
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockRandomAccessSparseMatrix);
};
} // namespace internal
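
The switch from long int to int64 above matters on platforms where long is 32 bits; a small sketch of the overflow the wider key avoids (the kMaxRowBlocks value is assumed here for illustration only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const std::int64_t kMaxRowBlocks = 10 * 1000 * 1000;  // illustrative value
      const int row = 300, col = 7;
      // With a 32-bit "long int" the product 300 * 10000000 would overflow;
      // promoting through int64 keeps the packed key exact.
      const std::int64_t key = row * kMaxRowBlocks + col;
      std::printf("%lld\n", static_cast<long long>(key));  // 3000000007
      return 0;
    }
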
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.cc
index 7dd395e2975..dbe5ec93ef0 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.cc
@@ -33,11 +33,11 @@
#include <cstddef>
#include <algorithm>
#include <vector>
-#include <glog/logging.h>
#include "ceres/block_structure.h"
+#include "ceres/internal/eigen.h"
#include "ceres/matrix_proto.h"
#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -81,7 +81,7 @@ BlockSparseMatrix::BlockSparseMatrix(
CHECK_NOTNULL(values_.get());
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
BlockSparseMatrix::BlockSparseMatrix(const SparseMatrixProto& outer_proto) {
CHECK(outer_proto.has_block_matrix());
@@ -244,7 +244,7 @@ const CompressedRowBlockStructure* BlockSparseMatrix::block_structure()
return block_structure_.get();
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
void BlockSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
outer_proto->Clear();
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.h
index f71446e8f58..513d398c54d 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_sparse_matrix.h
@@ -74,7 +74,7 @@ class BlockSparseMatrixBase : public SparseMatrix {
virtual const double* RowBlockValues(int row_block_index) const = 0;
private:
- DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrixBase);
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrixBase);
};
// This class implements the SparseMatrix interface for storing and
@@ -96,7 +96,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
explicit BlockSparseMatrix(CompressedRowBlockStructure* block_structure);
// Construct a block sparse matrix from a protocol buffer.
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit BlockSparseMatrix(const SparseMatrixProto& proto);
#endif
@@ -110,7 +110,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
@@ -135,7 +135,7 @@ class BlockSparseMatrix : public BlockSparseMatrixBase {
int num_nonzeros_;
scoped_array<double> values_;
scoped_ptr<CompressedRowBlockStructure> block_structure_;
- DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
+ CERES_DISALLOW_COPY_AND_ASSIGN(BlockSparseMatrix);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/block_structure.cc b/extern/libmv/third_party/ceres/internal/ceres/block_structure.cc
index 5add4f3b94d..e61131192af 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/block_structure.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/block_structure.cc
@@ -38,7 +38,7 @@ bool CellLessThan(const Cell& lhs, const Cell& rhs) {
return (lhs.block_id < rhs.block_id);
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
void ProtoToBlockStructure(const BlockStructureProto &proto,
CompressedRowBlockStructure *block_structure) {
// Decode the column blocks.
diff --git a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc
index 53190ada6fc..d0dc1e670c2 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/canonical_views_clustering.cc
@@ -31,11 +31,11 @@
#include "ceres/canonical_views_clustering.h"
-#include <glog/logging.h>
-#include "ceres/graph.h"
#include "ceres/collections_port.h"
-#include "ceres/map_util.h"
+#include "ceres/graph.h"
#include "ceres/internal/macros.h"
+#include "ceres/map_util.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -75,7 +75,7 @@ class CanonicalViewsClustering {
IntMap view_to_canonical_view_;
// Maps a view to its similarity to its current cluster center.
HashMap<int, double> view_to_canonical_view_similarity_;
- DISALLOW_COPY_AND_ASSIGN(CanonicalViewsClustering);
+ CERES_DISALLOW_COPY_AND_ASSIGN(CanonicalViewsClustering);
};
void ComputeCanonicalViewsClustering(
diff --git a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h
index dd36f99006b..877b4c4ceea 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/cgnr_solver.h
@@ -57,7 +57,7 @@ class CgnrSolver : public LinearSolver {
private:
const LinearSolver::Options options_;
scoped_ptr<BlockJacobiPreconditioner> jacobi_preconditioner_;
- DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
+ CERES_DISALLOW_COPY_AND_ASSIGN(CgnrSolver);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h
index 9dff0efe245..c2fce9033cd 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/collections_port.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/collections_port.h
@@ -33,31 +33,49 @@
#ifndef CERES_INTERNAL_COLLECTIONS_PORT_H_
#define CERES_INTERNAL_COLLECTIONS_PORT_H_
-#ifdef CERES_HASH_BOOST
-#include <boost/tr1/unordered_map.hpp>
-#include <boost/tr1/unordered_set.hpp>
+#if defined(CERES_NO_TR1)
+# include <map>
+# include <set>
#else
-#if defined(_MSC_VER) && _MSC_VER <= 1700
-#include <unordered_map>
-#include <unordered_set>
-#else
-#include <tr1/unordered_map>
-#include <tr1/unordered_set>
-#endif
+# if defined(_MSC_VER) && _MSC_VER <= 1600
+# include <unordered_map>
+# include <unordered_set>
+# else
+# include <tr1/unordered_map>
+# include <tr1/unordered_set>
+# endif
#endif
-
#include <utility>
#include "ceres/integral_types.h"
#include "ceres/internal/port.h"
+// Some systems don't have access to TR1. In that case, substitute the hash
+// map/set with normal map/set. The price to pay is slightly slower speed for
+// some operations.
+#if defined(CERES_NO_TR1)
+
namespace ceres {
namespace internal {
template<typename K, typename V>
-struct HashMap : tr1::unordered_map<K, V> {};
+struct HashMap : map<K, V> {};
template<typename K>
-struct HashSet : tr1::unordered_set<K> {};
+struct HashSet : set<K> {};
+
+} // namespace internal
+} // namespace ceres
+
+#else
+
+namespace ceres {
+namespace internal {
+
+template<typename K, typename V>
+struct HashMap : std::tr1::unordered_map<K, V> {};
+
+template<typename K>
+struct HashSet : std::tr1::unordered_set<K> {};
#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
#define GG_LONGLONG(x) x##I64
@@ -124,11 +142,7 @@ CERES_HASH_NAMESPACE_START
// Hasher for STL pairs. Requires hashers for both members to be defined.
template<typename T>
-#ifdef CERES_HASH_BOOST
-struct hash {
-#else
struct hash<pair<T, T> > {
-#endif
size_t operator()(const pair<T, T>& p) const {
size_t h1 = hash<T>()(p.first);
size_t h2 = hash<T>()(p.second);
@@ -148,4 +162,6 @@ struct hash<pair<T, T> > {
CERES_HASH_NAMESPACE_END
+#endif // CERES_NO_TR1
+
#endif // CERES_INTERNAL_COLLECTIONS_PORT_H_
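
A brief hedged illustration of what the CERES_NO_TR1 fallback above buys inside the Ceres internals: code written against the HashMap/HashSet aliases compiles unchanged whichever backend is selected, only the expected lookup cost changes (amortized O(1) with TR1, O(log n) with the std::map/std::set fallback).

    #include "ceres/collections_port.h"  // internal header shown above

    // Count the number of distinct ids; identical source with either backend.
    int CountUnique(const int* ids, int n) {
      ceres::internal::HashSet<int> seen;
      for (int i = 0; i < n; ++i) {
        seen.insert(ids[i]);
      }
      return static_cast<int>(seen.size());
    }
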
diff --git a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc
index aa883b7d353..912c4845441 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_jacobian_writer.cc
@@ -130,8 +130,24 @@ SparseMatrix* CompressedRowJacobianWriter::CreateJacobian() const {
}
row_pos += num_residuals;
}
-
CHECK_EQ(num_jacobian_nonzeros, rows[total_num_residuals]);
+
+ // Populate the row and column block vectors for use by block
+ // oriented ordering algorithms. This is useful when
+ // Solver::Options::use_block_amd = true.
+ const vector<ParameterBlock*>& parameter_blocks = program_->parameter_blocks();
+ vector<int>& col_blocks = *(jacobian->mutable_col_blocks());
+ col_blocks.resize(parameter_blocks.size());
+ for (int i = 0; i < parameter_blocks.size(); ++i) {
+ col_blocks[i] = parameter_blocks[i]->LocalSize();
+ }
+
+ vector<int>& row_blocks = *(jacobian->mutable_row_blocks());
+ row_blocks.resize(residual_blocks.size());
+ for (int i = 0; i < residual_blocks.size(); ++i) {
+ row_blocks[i] = residual_blocks[i]->NumResiduals();
+ }
+
return jacobian;
}
diff --git a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.cc
index 95edf5396af..1b61468aaae 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.cc
@@ -32,15 +32,22 @@
#include <algorithm>
#include <vector>
-#include "ceres/matrix_proto.h"
+#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
+#include "ceres/matrix_proto.h"
namespace ceres {
namespace internal {
namespace {
// Helper functor used by the constructor for reordering the contents
-// of a TripletSparseMatrix.
+// of a TripletSparseMatrix. This comparator assumes that there are no
+// duplicates in the pair of arrays rows and cols, i.e., there are no
+// indices i and j (not equal to each other) s.t.
+//
+// rows[i] == rows[j] && cols[i] == cols[j]
+//
+// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
RowColLessThan(const int* rows, const int* cols)
: rows(rows), cols(cols) {
@@ -128,7 +135,7 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(
CHECK_EQ(num_nonzeros(), m.num_nonzeros());
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
CompressedRowSparseMatrix::CompressedRowSparseMatrix(
const SparseMatrixProto& outer_proto) {
CHECK(outer_proto.has_compressed_row_matrix());
@@ -241,7 +248,7 @@ void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
}
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
void CompressedRowSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
CHECK_NOTNULL(outer_proto);
@@ -330,5 +337,18 @@ void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
}
}
+void CompressedRowSparseMatrix::ToCRSMatrix(CRSMatrix* matrix) const {
+ matrix->num_rows = num_rows();
+ matrix->num_cols = num_cols();
+
+ matrix->rows.resize(matrix->num_rows + 1);
+ matrix->cols.resize(num_nonzeros());
+ matrix->values.resize(num_nonzeros());
+
+ copy(rows_.get(), rows_.get() + matrix->num_rows + 1, matrix->rows.begin());
+ copy(cols_.get(), cols_.get() + num_nonzeros(), matrix->cols.begin());
+ copy(values_.get(), values_.get() + num_nonzeros(), matrix->values.begin());
+}
+
} // namespace internal
} // namespace ceres
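
For reference, a hedged sketch of walking the CRSMatrix produced by ToCRSMatrix above (the same layout used by Summary::final_jacobian): rows holds num_rows + 1 offsets into cols and values.

    #include <cstdio>
    #include "ceres/crs_matrix.h"

    // Print every structural nonzero of a compressed row sparse matrix.
    void PrintCRS(const ceres::CRSMatrix& m) {
      for (int row = 0; row < m.num_rows; ++row) {
        for (int idx = m.rows[row]; idx < m.rows[row + 1]; ++idx) {
          std::printf("(%d, %d) = %g\n", row, m.cols[idx], m.values[idx]);
        }
      }
    }
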
diff --git a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h
index 9a39d28e111..6a9d82842e5 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/compressed_row_sparse_matrix.h
@@ -31,14 +31,19 @@
#ifndef CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
#define CERES_INTERNAL_COMPRESSED_ROW_SPARSE_MATRIX_H_
+#include <vector>
#include <glog/logging.h>
#include "ceres/sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/macros.h"
+#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
+
+class CRSMatrix;
+
namespace internal {
class SparseMatrixProto;
@@ -52,7 +57,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
//
// We assume that m does not have any repeated entries.
explicit CompressedRowSparseMatrix(const TripletSparseMatrix& m);
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit CompressedRowSparseMatrix(const SparseMatrixProto& proto);
#endif
@@ -85,7 +90,7 @@ class CompressedRowSparseMatrix : public SparseMatrix {
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
@@ -103,6 +108,8 @@ class CompressedRowSparseMatrix : public SparseMatrix {
// have the same number of columns as this matrix.
void AppendRows(const CompressedRowSparseMatrix& m);
+ void ToCRSMatrix(CRSMatrix* matrix) const;
+
// Low level access methods that expose the structure of the matrix.
const int* cols() const { return cols_.get(); }
int* mutable_cols() { return cols_.get(); }
@@ -110,6 +117,12 @@ class CompressedRowSparseMatrix : public SparseMatrix {
const int* rows() const { return rows_.get(); }
int* mutable_rows() { return rows_.get(); }
+ const vector<int>& row_blocks() const { return row_blocks_; }
+ vector<int>* mutable_row_blocks() { return &row_blocks_; }
+
+ const vector<int>& col_blocks() const { return col_blocks_; }
+ vector<int>* mutable_col_blocks() { return &col_blocks_; }
+
private:
scoped_array<int> cols_;
scoped_array<int> rows_;
@@ -117,10 +130,17 @@ class CompressedRowSparseMatrix : public SparseMatrix {
int num_rows_;
int num_cols_;
-
int max_num_nonzeros_;
- DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
+ // If the matrix has an underlying block structure, then it can also
+  // carry with it row and column block sizes. This is auxiliary and
+ // optional information for use by algorithms operating on the
+ // matrix. The class itself does not make use of this information in
+ // any way.
+ vector<int> row_blocks_;
+ vector<int> col_blocks_;
+
+ CERES_DISALLOW_COPY_AND_ASSIGN(CompressedRowSparseMatrix);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/conditioned_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/conditioned_cost_function.cc
index ca80bfb9c9d..7322790f717 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/conditioned_cost_function.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/conditioned_cost_function.cc
@@ -34,10 +34,10 @@
#include <cstddef>
-#include <glog/logging.h>
-#include "ceres/stl_util.h"
#include "ceres/internal/eigen.h"
+#include "ceres/stl_util.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc
index 75f9e043fa5..ae8e8774709 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.cc
@@ -41,18 +41,18 @@
#include <cmath>
#include <cstddef>
-#include <glog/logging.h>
-#include "ceres/linear_operator.h"
+#include "ceres/fpclassify.h"
#include "ceres/internal/eigen.h"
+#include "ceres/linear_operator.h"
#include "ceres/types.h"
-#include "ceres/jet.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
namespace {
bool IsZeroOrInfinity(double x) {
- return ((x == 0.0) || (isinf(x)));
+ return ((x == 0.0) || (IsInfinite(x)));
}
// Constant used in the MATLAB implementation ~ 2 * eps.
@@ -115,7 +115,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
for (summary.num_iterations = 1;
summary.num_iterations < options_.max_num_iterations;
++summary.num_iterations) {
- VLOG(2) << "cg iteration " << summary.num_iterations;
+ VLOG(3) << "cg iteration " << summary.num_iterations;
// Apply preconditioner
if (per_solve_options.preconditioner != NULL) {
@@ -151,14 +151,14 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
A->RightMultiply(p.data(), q.data());
double pq = p.dot(q);
- if ((pq <= 0) || isinf(pq)) {
+ if ((pq <= 0) || IsInfinite(pq)) {
LOG(ERROR) << "Numerical failure. pq = " << pq;
summary.termination_type = FAILURE;
break;
}
double alpha = rho / pq;
- if (isinf(alpha)) {
+ if (IsInfinite(alpha)) {
LOG(ERROR) << "Numerical failure. alpha " << alpha;
summary.termination_type = FAILURE;
break;
@@ -202,13 +202,13 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// 1. Stephen G. Nash & Ariela Sofer, Assessing A Search
// Direction Within A Truncated Newton Method, Operation
// Research Letters 9(1990) 219-221.
- //
+ //
// 2. Stephen G. Nash, A Survey of Truncated Newton Methods,
// Journal of Computational and Applied Mathematics,
// 124(1-2), 45-59, 2000.
//
double zeta = summary.num_iterations * (Q1 - Q0) / Q1;
- VLOG(2) << "Q termination: zeta " << zeta
+ VLOG(3) << "Q termination: zeta " << zeta
<< " " << per_solve_options.q_tolerance;
if (zeta < per_solve_options.q_tolerance) {
summary.termination_type = TOLERANCE;
@@ -218,7 +218,7 @@ LinearSolver::Summary ConjugateGradientsSolver::Solve(
// Residual based termination.
norm_r = r. norm();
- VLOG(2) << "R termination: norm_r " << norm_r
+ VLOG(3) << "R termination: norm_r " << norm_r
<< " " << tol_r;
if (norm_r <= tol_r) {
summary.termination_type = TOLERANCE;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.h b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.h
index 57f99e31db7..b8dfa56b526 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/conjugate_gradients_solver.h
@@ -65,7 +65,7 @@ class ConjugateGradientsSolver : public LinearSolver {
private:
const LinearSolver::Options options_;
- DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
+ CERES_DISALLOW_COPY_AND_ASSIGN(ConjugateGradientsSolver);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc
index 4ca2c6f6c86..eff4dff8566 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/corrector.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/corrector.cc
@@ -32,8 +32,8 @@
#include <cstddef>
#include <cmath>
-#include <glog/logging.h>
#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc
new file mode 100644
index 00000000000..ca36ce07614
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.cc
@@ -0,0 +1,130 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "ceres/cxsparse.h"
+
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/triplet_sparse_matrix.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+CXSparse::CXSparse() : scratch_size_(0), scratch_(NULL) {
+}
+
+CXSparse::~CXSparse() {
+ if (scratch_size_ > 0) {
+ cs_free(scratch_);
+ }
+}
+
+bool CXSparse::SolveCholesky(cs_di* A,
+ cs_dis* symbolic_factorization,
+ double* b) {
+ // Make sure we have enough scratch space available.
+ if (scratch_size_ < A->n) {
+ if (scratch_size_ > 0) {
+ cs_free(scratch_);
+ }
+ scratch_ = reinterpret_cast<CS_ENTRY*>(cs_malloc(A->n, sizeof(CS_ENTRY)));
+ }
+
+ // Solve using Cholesky factorization
+ csn* numeric_factorization = cs_chol(A, symbolic_factorization);
+ if (numeric_factorization == NULL) {
+ LOG(WARNING) << "Cholesky factorization failed.";
+ return false;
+ }
+
+  // When the Cholesky factorization succeeds, these methods are guaranteed to
+  // succeed as well. In the comments below, "x" refers to the scratch space.
+ //
+ // Set x = P * b.
+ cs_ipvec(symbolic_factorization->pinv, b, scratch_, A->n);
+
+ // Set x = L \ x.
+ cs_lsolve(numeric_factorization->L, scratch_);
+
+ // Set x = L' \ x.
+ cs_ltsolve(numeric_factorization->L, scratch_);
+
+ // Set b = P' * x.
+ cs_pvec(symbolic_factorization->pinv, scratch_, b, A->n);
+
+ // Free Cholesky factorization.
+ cs_nfree(numeric_factorization);
+ return true;
+}
+
+cs_dis* CXSparse::AnalyzeCholesky(cs_di* A) {
+ // order = 1 for Cholesky factorization.
+ return cs_schol(1, A);
+}
+
+cs_di CXSparse::CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A) {
+ cs_di At;
+ At.m = A->num_cols();
+ At.n = A->num_rows();
+ At.nz = -1;
+ At.nzmax = A->num_nonzeros();
+ At.p = A->mutable_rows();
+ At.i = A->mutable_cols();
+ At.x = A->mutable_values();
+ return At;
+}
+
+cs_di* CXSparse::CreateSparseMatrix(TripletSparseMatrix* tsm) {
+ cs_di_sparse tsm_wrapper;
+  tsm_wrapper.nzmax = tsm->num_nonzeros();
+  tsm_wrapper.nz = tsm->num_nonzeros();
+ tsm_wrapper.m = tsm->num_rows();
+ tsm_wrapper.n = tsm->num_cols();
+ tsm_wrapper.p = tsm->mutable_cols();
+ tsm_wrapper.i = tsm->mutable_rows();
+ tsm_wrapper.x = tsm->mutable_values();
+
+ return cs_compress(&tsm_wrapper);
+}
+
+void CXSparse::Free(cs_di* factor) {
+ cs_free(factor);
+}
+
+void CXSparse::Free(cs_dis* factor) {
+ cs_sfree(factor);
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_CXSPARSE
diff --git a/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h
new file mode 100644
index 00000000000..d3b64fcd1a5
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/cxsparse.h
@@ -0,0 +1,90 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: strandmark@google.com (Petter Strandmark)
+
+#ifndef CERES_INTERNAL_CXSPARSE_H_
+#define CERES_INTERNAL_CXSPARSE_H_
+
+#ifndef CERES_NO_CXSPARSE
+
+#include "cs.h"
+
+namespace ceres {
+namespace internal {
+
+class CompressedRowSparseMatrix;
+class TripletSparseMatrix;
+
+// This object provides access to solving linear systems using Cholesky
+// factorization with a known symbolic factorization. This features does not
+// explicity exist in CXSparse. The methods in the class are nonstatic because
+// the class manages internal scratch space.
+class CXSparse {
+ public:
+ CXSparse();
+ ~CXSparse();
+
+ // Solves a symmetric linear system A * x = b using Cholesky factorization.
+ // A - The system matrix.
+ // symbolic_factorization - The symbolic factorization of A. This is obtained
+ // from AnalyzeCholesky.
+ // b - The right hand side of the linear equation. This
+ // array will also receive the solution.
+ // Returns false if Cholesky factorization of A fails.
+ bool SolveCholesky(cs_di* A, cs_dis* symbolic_factorization, double* b);
+
+ // Creates a sparse matrix from a compressed-column form. No memory is
+ // allocated or copied; the structure A is filled out with info from the
+ // argument.
+ cs_di CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);
+
+ // Creates a new matrix from a triplet form. Deallocate the returned matrix
+ // with Free. May return NULL if the compression or allocation fails.
+ cs_di* CreateSparseMatrix(TripletSparseMatrix* A);
+
+ // Computes a symbolic factorization of A that can be used in SolveCholesky.
+ // The returned matrix should be deallocated with Free when not used anymore.
+ cs_dis* AnalyzeCholesky(cs_di* A);
+
+ // Deallocates the memory of a matrix obtained from AnalyzeCholesky.
+ void Free(cs_di* factor);
+ void Free(cs_dis* factor);
+
+ private:
+ // Cached scratch space
+ CS_ENTRY* scratch_;
+ int scratch_size_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_NO_CXSPARSE
+
+#endif // CERES_INTERNAL_CXSPARSE_H_
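
To make the API above concrete, here is a minimal usage sketch (illustrative only, not part of this patch; the function name SolveWithCXSparse and the assumption that `triplet` already holds a symmetric positive definite system are mine):

    #include "ceres/cxsparse.h"
    #include "ceres/triplet_sparse_matrix.h"

    // Solve A x = b once, where A is given in triplet form and b is overwritten
    // with the solution. Returns false if compression or factorization fails.
    bool SolveWithCXSparse(ceres::internal::TripletSparseMatrix* triplet,
                           double* rhs_and_solution) {
      ceres::internal::CXSparse cxsparse;

      // Compress the triplet matrix into CXSparse's column-compressed form.
      cs_di* A = cxsparse.CreateSparseMatrix(triplet);
      if (A == NULL) {
        return false;
      }

      // The symbolic factorization depends only on the sparsity pattern, so it
      // could be computed once and reused across repeated solves.
      cs_dis* symbolic = cxsparse.AnalyzeCholesky(A);

      // Numeric factorization plus the triangular solves; rhs_and_solution is
      // overwritten with the solution on success.
      const bool ok = cxsparse.SolveCholesky(A, symbolic, rhs_and_solution);

      cxsparse.Free(symbolic);
      cxsparse.Free(A);
      return ok;
    }
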
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc
new file mode 100644
index 00000000000..f6bb99abf63
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.cc
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dense_normal_cholesky_solver.h"
+
+#include <cstddef>
+
+#include "Eigen/Dense"
+#include "ceres/dense_sparse_matrix.h"
+#include "ceres/linear_solver.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+DenseNormalCholeskySolver::DenseNormalCholeskySolver(
+ const LinearSolver::Options& options)
+ : options_(options) {}
+
+LinearSolver::Summary DenseNormalCholeskySolver::SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x) {
+ const int num_rows = A->num_rows();
+ const int num_cols = A->num_cols();
+
+ ConstAlignedMatrixRef Aref = A->matrix();
+ Matrix lhs(num_cols, num_cols);
+ lhs.setZero();
+
+ // lhs += A'A
+ //
+ // Using rankUpdate instead of GEMM exposes the fact that it is the
+ // same matrix being multiplied with itself and that the product is
+ // symmetric.
+ lhs.selfadjointView<Eigen::Upper>().rankUpdate(Aref.transpose());
+
+ // rhs = A'b
+ Vector rhs = Aref.transpose() * ConstVectorRef(b, num_rows);
+
+ if (per_solve_options.D != NULL) {
+ ConstVectorRef D(per_solve_options.D, num_cols);
+ lhs += D.array().square().matrix().asDiagonal();
+ }
+
+ VectorRef(x, num_cols) =
+ lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
+
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ summary.termination_type = TOLERANCE;
+ return summary;
+}
+
+} // namespace internal
+} // namespace ceres
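
For readers less familiar with the Eigen idioms in SolveImpl, the following standalone sketch (not Ceres code; the toy matrix, right hand side and regularization values are invented) reproduces the same construction of the normal equations and the LDLT solve:

    #include <iostream>
    #include "Eigen/Dense"

    int main() {
      const int num_rows = 4, num_cols = 2;
      Eigen::MatrixXd A(num_rows, num_cols);
      A << 1, 0,
           0, 1,
           1, 1,
           2, 1;
      Eigen::VectorXd b(num_rows);
      b << 1, 2, 3, 4;
      const Eigen::VectorXd D = Eigen::VectorXd::Constant(num_cols, 0.1);

      // lhs = A'A, accumulated on the upper triangle via a symmetric rank update.
      Eigen::MatrixXd lhs = Eigen::MatrixXd::Zero(num_cols, num_cols);
      lhs.selfadjointView<Eigen::Upper>().rankUpdate(A.transpose());

      // Regularization: solving [A; diag(D)] x = [b; 0] adds D_i^2 to the diagonal.
      lhs += D.array().square().matrix().asDiagonal();

      // rhs = A'b, then x = (A'A + diag(D)^2)^-1 A'b via LDLT.
      const Eigen::VectorXd rhs = A.transpose() * b;
      const Eigen::VectorXd x =
          lhs.selfadjointView<Eigen::Upper>().ldlt().solve(rhs);
      std::cout << "x = " << x.transpose() << "\n";
      return 0;
    }
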
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.h b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.h
new file mode 100644
index 00000000000..de47740583d
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_normal_cholesky_solver.h
@@ -0,0 +1,95 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Solve dense rectangular systems Ax = b by forming the normal
+// equations and solving them using the Cholesky factorization.
+
+#ifndef CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+#define CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/internal/macros.h"
+
+namespace ceres {
+namespace internal {
+
+class DenseSparseMatrix;
+
+// This class implements the LinearSolver interface for solving
+// rectangular/unsymmetric (well constrained) linear systems of the
+// form
+//
+// Ax = b
+//
+// Since there does not usually exist a solution that satisfies these
+// equations, the solver instead solves the linear least squares
+// problem
+//
+// min_x |Ax - b|^2
+//
+// Setting the gradient of the above optimization problem to zero
+// gives us the normal equations
+//
+// A'Ax = A'b
+//
+// A'A is a positive definite matrix (hopefully), and the resulting
+// linear system can be solved using Cholesky factorization.
+//
+// If the PerSolveOptions struct has a non-null array D, then the
+// augmented/regularized linear system
+//
+// [ A ]x = [b]
+// [ diag(D) ] [0]
+//
+// is solved.
+//
+// This class uses the LDLT factorization routines from the Eigen
+// library. This solver always returns a solution, it is the user's
+// responsibility to judge if the solution is good enough for their
+// purposes.
+class DenseNormalCholeskySolver: public DenseSparseMatrixSolver {
+ public:
+ explicit DenseNormalCholeskySolver(const LinearSolver::Options& options);
+
+ private:
+ virtual LinearSolver::Summary SolveImpl(
+ DenseSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double* x);
+
+ const LinearSolver::Options options_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseNormalCholeskySolver);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DENSE_NORMAL_CHOLESKY_SOLVER_H_
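
Connecting the comment above to the implementation: minimizing ||A x - b||^2 + ||diag(D) x||^2 gives the normal equations (A^T A + diag(D)^2) x = A^T b, which is why SolveImpl adds D.array().square() to the diagonal of the left hand side before the LDLT solve.
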
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc
index 328505404d7..2b329ee0e9c 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.cc
@@ -33,8 +33,8 @@
#include <cstddef>
#include "Eigen/Dense"
+#include "ceres/dense_sparse_matrix.h"
#include "ceres/linear_solver.h"
-#include "ceres/triplet_sparse_matrix.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
@@ -62,7 +62,7 @@ LinearSolver::Summary DenseQRSolver::SolveImpl(
}
// rhs = [b;0] to account for the additional rows in the lhs.
- Vector rhs(num_rows + ((per_solve_options.D !=NULL) ? num_cols : 0));
+ Vector rhs(num_rows + ((per_solve_options.D != NULL) ? num_cols : 0));
rhs.setZero();
rhs.head(num_rows) = ConstVectorRef(b, num_rows);
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h
index 990c8d445eb..dd683a8c4ea 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_qr_solver.h
@@ -28,7 +28,7 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
-// Solve dense rectangular systems Ax = b using the QR factoriztion.
+// Solve dense rectangular systems Ax = b using the QR factorization.
#ifndef CERES_INTERNAL_DENSE_QR_SOLVER_H_
#define CERES_INTERNAL_DENSE_QR_SOLVER_H_
@@ -90,7 +90,7 @@ class DenseQRSolver: public DenseSparseMatrixSolver {
double* x);
const LinearSolver::Options options_;
- DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseQRSolver);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc
index 5d392ba6c3b..86730cc101b 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.cc
@@ -67,7 +67,7 @@ DenseSparseMatrix::DenseSparseMatrix(const Matrix& m)
has_diagonal_reserved_(false) {
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
DenseSparseMatrix::DenseSparseMatrix(const SparseMatrixProto& outer_proto)
: m_(Eigen::MatrixXd::Zero(
outer_proto.dense_matrix().num_rows(),
@@ -108,7 +108,7 @@ void DenseSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
*dense_matrix = m_;
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
void DenseSparseMatrix::ToProto(SparseMatrixProto* outer_proto) const {
CHECK(!has_diagonal_appended_) << "Not supported.";
outer_proto->Clear();
@@ -183,7 +183,7 @@ void DenseSparseMatrix::ToTextFile(FILE* file) const {
CHECK_NOTNULL(file);
const int active_rows =
(has_diagonal_reserved_ && !has_diagonal_appended_)
- ? (m_.rows() - m_.cols())
+ ? (m_.rows() - m_.cols())
: m_.rows();
for (int r = 0; r < active_rows; ++r) {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h
index 416c2143c2c..e7ad14d0ee6 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/dense_sparse_matrix.h
@@ -52,7 +52,7 @@ class DenseSparseMatrix : public SparseMatrix {
// m. This assumes that m does not have any repeated entries.
explicit DenseSparseMatrix(const TripletSparseMatrix& m);
explicit DenseSparseMatrix(const Matrix& m);
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit DenseSparseMatrix(const SparseMatrixProto& proto);
#endif
@@ -67,7 +67,7 @@ class DenseSparseMatrix : public SparseMatrix {
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto* proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/detect_structure.cc b/extern/libmv/third_party/ceres/internal/ceres/detect_structure.cc
index e9755043bab..ea5bf2e9690 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/detect_structure.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/detect_structure.cc
@@ -28,9 +28,9 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include <glog/logging.h>
#include "ceres/detect_structure.h"
#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -60,10 +60,10 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
*row_block_size = row.block.size;
} else if (*row_block_size != Eigen::Dynamic &&
*row_block_size != row.block.size) {
- *row_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic row block size because the block size changed from "
<< *row_block_size << " to "
<< row.block.size;
+ *row_block_size = Eigen::Dynamic;
}
@@ -71,10 +71,10 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
*e_block_size = bs.cols[e_block_id].size;
} else if (*e_block_size != Eigen::Dynamic &&
*e_block_size != bs.cols[e_block_id].size) {
- *e_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic e block size because the block size changed from "
<< *e_block_size << " to "
<< bs.cols[e_block_id].size;
+ *e_block_size = Eigen::Dynamic;
}
if (*f_block_size == 0) {
@@ -85,11 +85,11 @@ void DetectStructure(const CompressedRowBlockStructure& bs,
} else if (*f_block_size != Eigen::Dynamic) {
for (int c = 1; c < row.cells.size(); ++c) {
if (*f_block_size != bs.cols[row.cells[c].block_id].size) {
- *f_block_size = Eigen::Dynamic;
VLOG(2) << "Dynamic f block size because the block size "
- << "changed from " << *f_block_size << " to "
- << bs.cols[row.cells[c].block_id].size;
- break;
+ << "changed from " << *f_block_size << " to "
+ << bs.cols[row.cells[c].block_id].size;
+ *f_block_size = Eigen::Dynamic;
+ break;
}
}
}
diff --git a/extern/libmv/third_party/ceres/internal/ceres/detect_structure.h b/extern/libmv/third_party/ceres/internal/ceres/detect_structure.h
index 8af4f236690..5f8e1b4ff46 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/detect_structure.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/detect_structure.h
@@ -49,7 +49,7 @@ namespace internal {
// is known as compile time.
//
// For more details about e_blocks and f_blocks, see
-// schur_complement.h. This information is used to initialized an
+// schur_eliminator.h. This information is used to initialize an
// appropriate template specialization of SchurEliminator.
void DetectStructure(const CompressedRowBlockStructure& bs,
const int num_eliminate_blocks,
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc
new file mode 100644
index 00000000000..668fa54b8b8
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.cc
@@ -0,0 +1,691 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/dogleg_strategy.h"
+
+#include <cmath>
+#include "Eigen/Dense"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/polynomial_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+const double kMaxMu = 1.0;
+const double kMinMu = 1e-8;
+}
+
+DoglegStrategy::DoglegStrategy(const TrustRegionStrategy::Options& options)
+ : linear_solver_(options.linear_solver),
+ radius_(options.initial_radius),
+ max_radius_(options.max_radius),
+ min_diagonal_(options.lm_min_diagonal),
+ max_diagonal_(options.lm_max_diagonal),
+ mu_(kMinMu),
+ min_mu_(kMinMu),
+ max_mu_(kMaxMu),
+ mu_increase_factor_(10.0),
+ increase_threshold_(0.75),
+ decrease_threshold_(0.25),
+ dogleg_step_norm_(0.0),
+ reuse_(false),
+ dogleg_type_(options.dogleg_type) {
+ CHECK_NOTNULL(linear_solver_);
+ CHECK_GT(min_diagonal_, 0.0);
+ CHECK_LE(min_diagonal_, max_diagonal_);
+ CHECK_GT(max_radius_, 0.0);
+}
+
+// If the reuse_ flag is not set, then the Cauchy point (scaled
+// gradient) and the new Gauss-Newton step are computed from
+// scratch. The Dogleg step is then computed as interpolation of these
+// two vectors.
+TrustRegionStrategy::Summary DoglegStrategy::ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) {
+ CHECK_NOTNULL(jacobian);
+ CHECK_NOTNULL(residuals);
+ CHECK_NOTNULL(step);
+
+ const int n = jacobian->num_cols();
+ if (reuse_) {
+ // Gauss-Newton and gradient vectors are always available; only a
+ // new interpolant needs to be computed. For the subspace case,
+ // the subspace and the two-dimensional model are also still valid.
+ switch(dogleg_type_) {
+ case TRADITIONAL_DOGLEG:
+ ComputeTraditionalDoglegStep(step);
+ break;
+
+ case SUBSPACE_DOGLEG:
+ ComputeSubspaceDoglegStep(step);
+ break;
+ }
+ TrustRegionStrategy::Summary summary;
+ summary.num_iterations = 0;
+ summary.termination_type = TOLERANCE;
+ return summary;
+ }
+
+ reuse_ = true;
+ // Check that we have the storage needed to hold the various
+ // temporary vectors.
+ if (diagonal_.rows() != n) {
+ diagonal_.resize(n, 1);
+ gradient_.resize(n, 1);
+ gauss_newton_step_.resize(n, 1);
+ }
+
+ // Vector used to form the diagonal matrix that is used to
+ // regularize the Gauss-Newton solve and that defines the
+ // elliptical trust region
+ //
+ // || D * step || <= radius_ .
+ //
+ jacobian->SquaredColumnNorm(diagonal_.data());
+ for (int i = 0; i < n; ++i) {
+ diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
+ }
+ diagonal_ = diagonal_.array().sqrt();
+
+ ComputeGradient(jacobian, residuals);
+ ComputeCauchyPoint(jacobian);
+
+ LinearSolver::Summary linear_solver_summary =
+ ComputeGaussNewtonStep(jacobian, residuals);
+
+ TrustRegionStrategy::Summary summary;
+ summary.residual_norm = linear_solver_summary.residual_norm;
+ summary.num_iterations = linear_solver_summary.num_iterations;
+ summary.termination_type = linear_solver_summary.termination_type;
+
+ if (linear_solver_summary.termination_type != FAILURE) {
+ switch(dogleg_type_) {
+ // Interpolate the Cauchy point and the Gauss-Newton step.
+ case TRADITIONAL_DOGLEG:
+ ComputeTraditionalDoglegStep(step);
+ break;
+
+ // Find the minimum in the subspace defined by the
+ // Cauchy point and the (Gauss-)Newton step.
+ case SUBSPACE_DOGLEG:
+ if (!ComputeSubspaceModel(jacobian)) {
+ summary.termination_type = FAILURE;
+ break;
+ }
+ ComputeSubspaceDoglegStep(step);
+ break;
+ }
+ }
+
+ return summary;
+}
+
+// The trust region is assumed to be elliptical with the
+// diagonal scaling matrix D defined by sqrt(diagonal_).
+// It is implemented by substituting step' = D * step.
+// The trust region for step' is spherical.
+// The gradient, the Gauss-Newton step, the Cauchy point,
+// and all calculations involving the Jacobian have to
+// be adjusted accordingly.
+void DoglegStrategy::ComputeGradient(
+ SparseMatrix* jacobian,
+ const double* residuals) {
+ gradient_.setZero();
+ jacobian->LeftMultiply(residuals, gradient_.data());
+ gradient_.array() /= diagonal_.array();
+}
+
+// The Cauchy point is the global minimizer of the quadratic model
+// along the one-dimensional subspace spanned by the gradient.
+void DoglegStrategy::ComputeCauchyPoint(SparseMatrix* jacobian) {
+ // alpha * -gradient is the Cauchy point.
+ Vector Jg(jacobian->num_rows());
+ Jg.setZero();
+ // The Jacobian is scaled implicitly by computing J * (D^-1 * (D^-1 * g))
+ // instead of (J * D^-1) * (D^-1 * g).
+ Vector scaled_gradient =
+ (gradient_.array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(scaled_gradient.data(), Jg.data());
+ alpha_ = gradient_.squaredNorm() / Jg.squaredNorm();
+}
+
+// The dogleg step is defined as the intersection of the trust region
+// boundary with the piecewise linear path from the origin to the Cauchy
+// point and then from there to the Gauss-Newton point (global minimizer
+// of the model function). The Gauss-Newton point is taken if it lies
+// within the trust region.
+void DoglegStrategy::ComputeTraditionalDoglegStep(double* dogleg) {
+ VectorRef dogleg_step(dogleg, gradient_.rows());
+
+ // Case 1. The Gauss-Newton step lies inside the trust region, and
+ // is therefore the optimal solution to the trust-region problem.
+ const double gradient_norm = gradient_.norm();
+ const double gauss_newton_norm = gauss_newton_step_.norm();
+ if (gauss_newton_norm <= radius_) {
+ dogleg_step = gauss_newton_step_;
+ dogleg_step_norm_ = gauss_newton_norm;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 2. The Cauchy point and the Gauss-Newton steps lie outside
+ // the trust region. Rescale the Cauchy point to the trust region
+ // and return.
+ if (gradient_norm * alpha_ >= radius_) {
+ dogleg_step = -(radius_ / gradient_norm) * gradient_;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Cauchy step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // Case 3. The Cauchy point is inside the trust region and the
+ // Gauss-Newton step is outside. Compute the line joining the two
+ // points and the point on it which intersects the trust region
+ // boundary.
+
+ // a = alpha * -gradient
+ // b = gauss_newton_step
+ const double b_dot_a = -alpha_ * gradient_.dot(gauss_newton_step_);
+ const double a_squared_norm = pow(alpha_ * gradient_norm, 2.0);
+ const double b_minus_a_squared_norm =
+ a_squared_norm - 2 * b_dot_a + pow(gauss_newton_norm, 2);
+
+ // c = a' (b - a)
+ // = alpha * -gradient' gauss_newton_step - alpha^2 |gradient|^2
+ const double c = b_dot_a - a_squared_norm;
+ const double d = sqrt(c * c + b_minus_a_squared_norm *
+ (pow(radius_, 2.0) - a_squared_norm));
+
+ double beta =
+ (c <= 0)
+ ? (d - c) / b_minus_a_squared_norm
+ : (radius_ * radius_ - a_squared_norm) / (d + c);
+ dogleg_step = (-alpha_ * (1.0 - beta)) * gradient_
+ + beta * gauss_newton_step_;
+ dogleg_step_norm_ = dogleg_step.norm();
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+}
+
+// The subspace method finds the minimum of the two-dimensional problem
+//
+// min. 1/2 x' B' H B x + g' B x
+// s.t. || B x ||^2 <= r^2
+//
+// where r is the trust region radius and B is the matrix with unit columns
+// spanning the subspace defined by the steepest descent and Newton direction.
+// This subspace by definition includes the Gauss-Newton point, which is
+// therefore taken if it lies within the trust region.
+void DoglegStrategy::ComputeSubspaceDoglegStep(double* dogleg) {
+ VectorRef dogleg_step(dogleg, gradient_.rows());
+
+ // The Gauss-Newton point is inside the trust region if |GN| <= radius_.
+ // This test is valid even though radius_ is a length in the two-dimensional
+ // subspace while gauss_newton_step_ is expressed in the (scaled)
+ // higher dimensional original space. This is because
+ //
+ // 1. gauss_newton_step_ by definition lies in the subspace, and
+ // 2. the subspace basis is orthonormal.
+ //
+ // As a consequence, the norm of the gauss_newton_step_ in the subspace is
+ // the same as its norm in the original space.
+ const double gauss_newton_norm = gauss_newton_step_.norm();
+ if (gauss_newton_norm <= radius_) {
+ dogleg_step = gauss_newton_step_;
+ dogleg_step_norm_ = gauss_newton_norm;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "GaussNewton step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ // The optimum lies on the boundary of the trust region. The above problem
+ // therefore becomes
+ //
+ // min. 1/2 x^T B^T H B x + g^T B x
+ // s.t. || B x ||^2 = r^2
+ //
+ // Notice the equality in the constraint.
+ //
+ // This can be solved by forming the Lagrangian, solving for x(y), where
+ // y is the Lagrange multiplier, using the gradient of the objective, and
+ // putting x(y) back into the constraint. This results in a fourth order
+ // polynomial in y, which can be solved using e.g. the companion matrix.
+ // See the description of MakePolynomialForBoundaryConstrainedProblem for
+ // details. The result is up to four real roots y*, not all of which
+ // correspond to feasible points. The feasible points x(y*) have to be
+ // tested for optimality.
+
+ if (subspace_is_one_dimensional_) {
+ // The subspace is one-dimensional, so both the gradient and
+ // the Gauss-Newton step point in the same direction.
+ // In this case, we move along the gradient until we reach the trust
+ // region boundary.
+ dogleg_step = -(radius_ / gradient_.norm()) * gradient_;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg subspace step size (1D): " << dogleg_step_norm_
+ << " radius: " << radius_;
+ return;
+ }
+
+ Vector2d minimum(0.0, 0.0);
+ if (!FindMinimumOnTrustRegionBoundary(&minimum)) {
+ // For the positive semi-definite case, a traditional dogleg step
+ // is taken in this case.
+ LOG(WARNING) << "Failed to compute polynomial roots. "
+ << "Taking traditional dogleg step instead.";
+ ComputeTraditionalDoglegStep(dogleg);
+ return;
+ }
+
+ // Test first order optimality at the minimum.
+ // The first order KKT conditions state that the minimum x*
+ // has to satisfy either || x* ||^2 < r^2 (i.e. has to lie within
+ // the trust region), or
+ //
+ // (B x* + g) + y x* = 0
+ //
+ // for some positive scalar y.
+ // Here, as it is already known that the minimum lies on the boundary, the
+ // latter condition is tested. To allow for small imprecisions, we test if
+ // the angle between (B x* + g) and -x* is smaller than acos(0.99).
+ // The exact value of the cosine is arbitrary but should be close to 1.
+ //
+ // This condition should not be violated. If it is, the minimum was not
+ // correctly determined.
+ const double kCosineThreshold = 0.99;
+ const Vector2d grad_minimum = subspace_B_ * minimum + subspace_g_;
+ const double cosine_angle = -minimum.dot(grad_minimum) /
+ (minimum.norm() * grad_minimum.norm());
+ if (cosine_angle < kCosineThreshold) {
+ LOG(WARNING) << "First order optimality seems to be violated "
+ << "in the subspace method!\n"
+ << "Cosine of angle between x and B x + g is "
+ << cosine_angle << ".\n"
+ << "Taking a regular dogleg step instead.\n"
+ << "Please consider filing a bug report if this "
+ << "happens frequently or consistently.\n";
+ ComputeTraditionalDoglegStep(dogleg);
+ return;
+ }
+
+ // Create the full step from the optimal 2d solution.
+ dogleg_step = subspace_basis_ * minimum;
+ dogleg_step_norm_ = radius_;
+ dogleg_step.array() /= diagonal_.array();
+ VLOG(3) << "Dogleg subspace step size: " << dogleg_step_norm_
+ << " radius: " << radius_;
+}
+
+// Build the polynomial that defines the optimal Lagrange multipliers.
+// Let the Lagrangian be
+//
+// L(x, y) = 0.5 x^T B x + x^T g + y (0.5 x^T x - 0.5 r^2). (1)
+//
+// Stationary points of the Lagrangian are given by
+//
+// 0 = d L(x, y) / dx = Bx + g + y x (2)
+// 0 = d L(x, y) / dy = 0.5 x^T x - 0.5 r^2 (3)
+//
+// For any given y, we can solve (2) for x as
+//
+// x(y) = -(B + y I)^-1 g . (4)
+//
+// As B + y I is 2x2, we form the inverse explicitly:
+//
+// (B + y I)^-1 = (1 / det(B + y I)) adj(B + y I) (5)
+//
+// where adj() denotes adjugation. This should be safe, as B is positive
+// semi-definite and y is necessarily positive, so (B + y I) is indeed
+// invertible.
+// Plugging (5) into (4) and the result into (3), then dividing by 0.5 we
+// obtain
+//
+// 0 = (1 / det(B + y I))^2 g^T adj(B + y I)^T adj(B + y I) g - r^2
+// (6)
+//
+// or
+//
+// det(B + y I)^2 r^2 = g^T adj(B + y I)^T adj(B + y I) g (7a)
+// = g^T adj(B)^T adj(B) g
+// + 2 y g^T adj(B)^T g + y^2 g^T g (7b)
+//
+// as
+//
+// adj(B + y I) = adj(B) + y I = adj(B)^T + y I . (8)
+//
+// The left hand side can be expressed explicitly using
+//
+// det(B + y I) = det(B) + y tr(B) + y^2 . (9)
+//
+// So (7) is a polynomial in y of degree four.
+// Bringing everything back to the left hand side, the coefficients can
+// be read off as
+//
+// y^4 r^2
+// + y^3 2 r^2 tr(B)
+// + y^2 (r^2 tr(B)^2 + 2 r^2 det(B) - g^T g)
+// + y^1 (2 r^2 det(B) tr(B) - 2 g^T adj(B)^T g)
+// + y^0 (r^2 det(B)^2 - g^T adj(B)^T adj(B) g)
+//
+Vector DoglegStrategy::MakePolynomialForBoundaryConstrainedProblem() const {
+ const double detB = subspace_B_.determinant();
+ const double trB = subspace_B_.trace();
+ const double r2 = radius_ * radius_;
+ Matrix2d B_adj;
+ B_adj << subspace_B_(1,1) , -subspace_B_(0,1),
+ -subspace_B_(1,0) , subspace_B_(0,0);
+
+ Vector polynomial(5);
+ polynomial(0) = r2;
+ polynomial(1) = 2.0 * r2 * trB;
+ polynomial(2) = r2 * ( trB * trB + 2.0 * detB ) - subspace_g_.squaredNorm();
+ polynomial(3) = -2.0 * ( subspace_g_.transpose() * B_adj * subspace_g_
+ - r2 * detB * trB );
+ polynomial(4) = r2 * detB * detB - (B_adj * subspace_g_).squaredNorm();
+
+ return polynomial;
+}
+
+// Given a Lagrange multiplier y that corresponds to a stationary point
+// of the Lagrangian L(x, y), compute the corresponding x from the
+// equation
+//
+// 0 = d L(x, y) / dx
+// = B * x + g + y * x
+// = (B + y * I) * x + g
+//
+DoglegStrategy::Vector2d DoglegStrategy::ComputeSubspaceStepFromRoot(
+ double y) const {
+ const Matrix2d B_i = subspace_B_ + y * Matrix2d::Identity();
+ return -B_i.partialPivLu().solve(subspace_g_);
+}
+
+// This function evaluates the quadratic model at a point x in the
+// subspace spanned by subspace_basis_.
+double DoglegStrategy::EvaluateSubspaceModel(const Vector2d& x) const {
+ return 0.5 * x.dot(subspace_B_ * x) + subspace_g_.dot(x);
+}
+
+// This function attempts to solve the boundary-constrained subspace problem
+//
+// min. 1/2 x^T B^T H B x + g^T B x
+// s.t. || B x ||^2 = r^2
+//
+// where B is an orthonormal subspace basis and r is the trust-region radius.
+//
+// This is done by finding the roots of a fourth degree polynomial. If the
+// root finding fails, the function returns false and minimum will be set
+// to (0, 0). If it succeeds, true is returned.
+//
+// In the failure case, another step should be taken, such as the traditional
+// dogleg step.
+bool DoglegStrategy::FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const {
+ CHECK_NOTNULL(minimum);
+
+ // Return (0, 0) in all error cases.
+ minimum->setZero();
+
+ // Create the fourth-degree polynomial that is a necessary condition for
+ // optimality.
+ const Vector polynomial = MakePolynomialForBoundaryConstrainedProblem();
+
+ // Find the real parts y_i of its roots (not only the real roots).
+ Vector roots_real;
+ if (!FindPolynomialRoots(polynomial, &roots_real, NULL)) {
+ // Failed to find the roots of the polynomial, i.e. the candidate
+ // solutions of the constrained problem. Report this back to the caller.
+ return false;
+ }
+
+ // For each root y, compute B x(y) and check for feasibility.
+ // Notice that there should always be four roots, as the leading term of
+ // the polynomial is r^2 and therefore non-zero. However, as some roots
+ // may be complex, the real parts are not necessarily unique.
+ double minimum_value = std::numeric_limits<double>::max();
+ bool valid_root_found = false;
+ for (int i = 0; i < roots_real.size(); ++i) {
+ const Vector2d x_i = ComputeSubspaceStepFromRoot(roots_real(i));
+
+ // Not all roots correspond to points on the trust region boundary.
+ // There are at most four candidate solutions. As we are interested
+ // in the minimum, it is safe to consider all of them after projecting
+ // them onto the trust region boundary.
+ if (x_i.norm() > 0) {
+ const double f_i = EvaluateSubspaceModel((radius_ / x_i.norm()) * x_i);
+ valid_root_found = true;
+ if (f_i < minimum_value) {
+ minimum_value = f_i;
+ *minimum = x_i;
+ }
+ }
+ }
+
+ return valid_root_found;
+}
+
+LinearSolver::Summary DoglegStrategy::ComputeGaussNewtonStep(
+ SparseMatrix* jacobian,
+ const double* residuals) {
+ const int n = jacobian->num_cols();
+ LinearSolver::Summary linear_solver_summary;
+ linear_solver_summary.termination_type = FAILURE;
+
+ // The Jacobian matrix is often quite poorly conditioned. Thus it is
+ // necessary to add a diagonal matrix at the bottom to prevent the
+ // linear solver from failing.
+ //
+ // We do this by computing the same diagonal matrix as the one used
+ // by Levenberg-Marquardt (other choices are possible), and scaling
+ // it by a small constant (independent of the trust region radius).
+ //
+ // If the solve fails, the multiplier to the diagonal is increased
+ // up to max_mu_ by a factor of mu_increase_factor_ every time. If
+ // the linear solver is still not successful, the strategy returns
+ // with FAILURE.
+ //
+ // Next time when a new Gauss-Newton step is requested, the
+ // multiplier starts out from the last successful solve.
+ //
+ // When a step is declared successful, the multiplier is decreased
+ // by half of mu_increase_factor_.
+
+ while (mu_ < max_mu_) {
+ // Dogleg, as far as I (sameeragarwal) understand it, requires a
+ // reasonably good estimate of the Gauss-Newton step. This means
+ // that we need to solve the normal equations more or less
+ // exactly. This is reflected in the values of the tolerances set
+ // below.
+ //
+ // For now, this strategy should only be used with exact
+ // factorization based solvers, for which these tolerances are
+ // automatically satisfied.
+ //
+ // The right way to combine inexact solves with trust region
+ // methods is to use Steihaug's method.
+ LinearSolver::PerSolveOptions solve_options;
+ solve_options.q_tolerance = 0.0;
+ solve_options.r_tolerance = 0.0;
+
+ lm_diagonal_ = diagonal_ * std::sqrt(mu_);
+ solve_options.D = lm_diagonal_.data();
+
+ // As in the LevenbergMarquardtStrategy, solve Jy = r instead
+ // of Jx = -r and later set x = -y to avoid having to modify
+ // either jacobian or residuals.
+ InvalidateArray(n, gauss_newton_step_.data());
+ linear_solver_summary = linear_solver_->Solve(jacobian,
+ residuals,
+ solve_options,
+ gauss_newton_step_.data());
+
+ if (linear_solver_summary.termination_type == FAILURE ||
+ !IsArrayValid(n, gauss_newton_step_.data())) {
+ mu_ *= mu_increase_factor_;
+ VLOG(2) << "Increasing mu " << mu_;
+ linear_solver_summary.termination_type = FAILURE;
+ continue;
+ }
+ break;
+ }
+
+ if (linear_solver_summary.termination_type != FAILURE) {
+ // The scaled Gauss-Newton step is D * GN:
+ //
+ // - (D^-1 J^T J D^-1)^-1 (D^-1 g)
+ // = - D (J^T J)^-1 D D^-1 g
+ // = D -(J^T J)^-1 g
+ //
+ gauss_newton_step_.array() *= -diagonal_.array();
+ }
+
+ return linear_solver_summary;
+}
+
+void DoglegStrategy::StepAccepted(double step_quality) {
+ CHECK_GT(step_quality, 0.0);
+
+ if (step_quality < decrease_threshold_) {
+ radius_ *= 0.5;
+ }
+
+ if (step_quality > increase_threshold_) {
+ radius_ = max(radius_, 3.0 * dogleg_step_norm_);
+ }
+
+ // Reduce the regularization multiplier, in the hope that whatever
+ // was causing the rank deficiency has gone away and we can return
+ // to doing a pure Gauss-Newton solve.
+ mu_ = max(min_mu_, 2.0 * mu_ / mu_increase_factor_);
+ reuse_ = false;
+}
+
+void DoglegStrategy::StepRejected(double step_quality) {
+ radius_ *= 0.5;
+ reuse_ = true;
+}
+
+void DoglegStrategy::StepIsInvalid() {
+ mu_ *= mu_increase_factor_;
+ reuse_ = false;
+}
+
+double DoglegStrategy::Radius() const {
+ return radius_;
+}
+
+bool DoglegStrategy::ComputeSubspaceModel(SparseMatrix* jacobian) {
+ // Compute an orthogonal basis for the subspace using QR decomposition.
+ Matrix basis_vectors(jacobian->num_cols(), 2);
+ basis_vectors.col(0) = gradient_;
+ basis_vectors.col(1) = gauss_newton_step_;
+ Eigen::ColPivHouseholderQR<Matrix> basis_qr(basis_vectors);
+
+ switch (basis_qr.rank()) {
+ case 0:
+ // This should never happen, as it implies that both the gradient
+ // and the Gauss-Newton step are zero. In this case, the minimizer should
+ // have stopped due to the gradient being too small.
+ LOG(ERROR) << "Rank of subspace basis is 0. "
+ << "This means that the gradient at the current iterate is "
+ << "zero but the optimization has not been terminated. "
+ << "You may have found a bug in Ceres.";
+ return false;
+
+ case 1:
+ // Gradient and Gauss-Newton step coincide, so we lie on one of the
+ // major axes of the quadratic problem. In this case, we simply move
+ // along the gradient until we reach the trust region boundary.
+ subspace_is_one_dimensional_ = true;
+ return true;
+
+ case 2:
+ subspace_is_one_dimensional_ = false;
+ break;
+
+ default:
+ LOG(ERROR) << "Rank of the subspace basis matrix is reported to be "
+ << "greater than 2. As the matrix contains only two "
+ << "columns this cannot be true and is indicative of "
+ << "a bug.";
+ return false;
+ }
+
+ // The subspace is two-dimensional, so compute the subspace model.
+ // Given the basis U, this is
+ //
+ // subspace_g_ = g_scaled^T U
+ //
+ // and
+ //
+ // subspace_B_ = U^T (J_scaled^T J_scaled) U
+ //
+ // As J_scaled = J * D^-1, the latter becomes
+ //
+ // subspace_B_ = ((U^T D^-1) J^T) (J (D^-1 U))
+ // = (J (D^-1 U))^T (J (D^-1 U))
+
+ subspace_basis_ =
+ basis_qr.householderQ() * Matrix::Identity(jacobian->num_cols(), 2);
+
+ subspace_g_ = subspace_basis_.transpose() * gradient_;
+
+ Eigen::Matrix<double, 2, Eigen::Dynamic, Eigen::RowMajor>
+ Jb(2, jacobian->num_rows());
+ Jb.setZero();
+
+ Vector tmp;
+ tmp = (subspace_basis_.col(0).array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(tmp.data(), Jb.row(0).data());
+ tmp = (subspace_basis_.col(1).array() / diagonal_.array()).matrix();
+ jacobian->RightMultiply(tmp.data(), Jb.row(1).data());
+
+ subspace_B_ = Jb * Jb.transpose();
+
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
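
To illustrate the "Case 3" geometry in ComputeTraditionalDoglegStep, here is a standalone sketch (not Ceres code; the vectors, alpha and radius are made-up values, and the elliptical scaling by diagonal_ is omitted) that intersects the segment from the Cauchy point to the Gauss-Newton point with a spherical trust region, using the same numerically stable root of the quadratic ||a + beta (b - a)||^2 = r^2:

    #include <cmath>
    #include <iostream>
    #include "Eigen/Dense"

    int main() {
      const Eigen::Vector3d gradient(1.0, 2.0, -1.0);
      const Eigen::Vector3d gauss_newton_step(-3.0, -1.0, 2.0);
      const double alpha = 0.2;   // Cauchy point is a = -alpha * gradient.
      const double radius = 2.0;  // Cauchy point inside, Gauss-Newton outside.

      const Eigen::Vector3d a = -alpha * gradient;
      const Eigen::Vector3d b = gauss_newton_step;

      const double b_dot_a = a.dot(b);
      const double a_squared_norm = a.squaredNorm();
      const double b_minus_a_squared_norm = (b - a).squaredNorm();
      const double c = b_dot_a - a_squared_norm;  // c = a'(b - a)
      const double d = std::sqrt(c * c + b_minus_a_squared_norm *
                                 (radius * radius - a_squared_norm));

      // Choosing the branch by the sign of c avoids cancellation.
      const double beta = (c <= 0.0)
          ? (d - c) / b_minus_a_squared_norm
          : (radius * radius - a_squared_norm) / (d + c);

      const Eigen::Vector3d dogleg = a + beta * (b - a);
      std::cout << "beta = " << beta << ", |dogleg| = " << dogleg.norm()
                << " (should equal radius = " << radius << ")\n";
      return 0;
    }
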
diff --git a/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h
new file mode 100644
index 00000000000..bff1689aa4a
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/dogleg_strategy.h
@@ -0,0 +1,163 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_DOGLEG_STRATEGY_H_
+#define CERES_INTERNAL_DOGLEG_STRATEGY_H_
+
+#include "ceres/linear_solver.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Dogleg step computation and trust region sizing strategy based on
+// on "Methods for Nonlinear Least Squares" by K. Madsen, H.B. Nielsen
+// and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// One minor modification is that instead of computing the pure
+// Gauss-Newton step, we compute a regularized version of it. This is
+// because the Jacobian is often rank-deficient and in such cases
+// using a direct solver leads to numerical failure.
+//
+// If SUBSPACE is passed as the type argument to the constructor, the
+// DoglegStrategy follows the approach by Shultz, Schnabel, Byrd.
+// This finds the exact optimum over the two-dimensional subspace
+// spanned by the two Dogleg vectors.
+class DoglegStrategy : public TrustRegionStrategy {
+ public:
+ DoglegStrategy(const TrustRegionStrategy::Options& options);
+ virtual ~DoglegStrategy() {}
+
+ // TrustRegionStrategy interface
+ virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step);
+ virtual void StepAccepted(double step_quality);
+ virtual void StepRejected(double step_quality);
+ virtual void StepIsInvalid();
+
+ virtual double Radius() const;
+
+ // These functions are predominantly for testing.
+ Vector gradient() const { return gradient_; }
+ Vector gauss_newton_step() const { return gauss_newton_step_; }
+ Matrix subspace_basis() const { return subspace_basis_; }
+ Vector subspace_g() const { return subspace_g_; }
+ Matrix subspace_B() const { return subspace_B_; }
+
+ private:
+ typedef Eigen::Matrix<double, 2, 1, Eigen::DontAlign> Vector2d;
+ typedef Eigen::Matrix<double, 2, 2, Eigen::DontAlign> Matrix2d;
+
+ LinearSolver::Summary ComputeGaussNewtonStep(SparseMatrix* jacobian,
+ const double* residuals);
+ void ComputeCauchyPoint(SparseMatrix* jacobian);
+ void ComputeGradient(SparseMatrix* jacobian, const double* residuals);
+ void ComputeTraditionalDoglegStep(double* step);
+ bool ComputeSubspaceModel(SparseMatrix* jacobian);
+ void ComputeSubspaceDoglegStep(double* step);
+
+ bool FindMinimumOnTrustRegionBoundary(Vector2d* minimum) const;
+ Vector MakePolynomialForBoundaryConstrainedProblem() const;
+ Vector2d ComputeSubspaceStepFromRoot(double lambda) const;
+ double EvaluateSubspaceModel(const Vector2d& x) const;
+
+ LinearSolver* linear_solver_;
+ double radius_;
+ const double max_radius_;
+
+ const double min_diagonal_;
+ const double max_diagonal_;
+
+ // mu is used to scale the diagonal matrix used to make the
+ // Gauss-Newton solve full rank. In each solve, the strategy starts
+ // out with mu = min_mu, and tries values up to max_mu. If the user
+ // reports an invalid step, the value of mu_ is increased so that
+ // the next solve starts with a stronger regularization.
+ //
+ // If a successful step is reported, then the value of mu_ is
+ // decreased with a lower bound of min_mu_.
+ double mu_;
+ const double min_mu_;
+ const double max_mu_;
+ const double mu_increase_factor_;
+ const double increase_threshold_;
+ const double decrease_threshold_;
+
+ Vector diagonal_; // sqrt(diag(J^T J))
+ Vector lm_diagonal_;
+
+ Vector gradient_;
+ Vector gauss_newton_step_;
+
+ // cauchy_step = alpha * gradient
+ double alpha_;
+ double dogleg_step_norm_;
+
+ // When ComputeStep is called, reuse_ indicates whether the
+ // Gauss-Newton and Cauchy steps from the last call to ComputeStep
+ // can be reused or not.
+ //
+ // If the user called StepAccepted, then it is expected that the
+ // user has recomputed the Jacobian matrix, so a new Gauss-Newton
+ // solve is needed and reuse is set to false.
+ //
+ // If the user called StepRejected, then it is expected that the
+ // user wants to solve the trust region problem with the same matrix
+ // but a different trust region radius and the Gauss-Newton and
+ // Cauchy steps can be reused to compute the Dogleg, thus reuse is
+ // set to true.
+ //
+ // If the user called StepIsInvalid, then there was a numerical
+ // problem with the step computed in the last call to ComputeStep,
+ // and the regularization used to do the Gauss-Newton solve is
+ // increased and a new solve should be done when ComputeStep is
+ // called again, thus reuse is set to false.
+ bool reuse_;
+
+ // The dogleg type determines how the minimum of the local
+ // quadratic model is found.
+ DoglegType dogleg_type_;
+
+ // If the type is SUBSPACE_DOGLEG, the two-dimensional
+ // model 1/2 x^T B x + g^T x has to be computed and stored.
+ bool subspace_is_one_dimensional_;
+ Matrix subspace_basis_;
+ Vector2d subspace_g_;
+ Matrix2d subspace_B_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_DOGLEG_STRATEGY_H_
diff --git a/extern/libmv/third_party/ceres/internal/ceres/evaluator.cc b/extern/libmv/third_party/ceres/internal/ceres/evaluator.cc
index ea05aefec8c..a3ce6f04bd4 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/evaluator.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/evaluator.cc
@@ -28,14 +28,18 @@
//
// Author: keir@google.com (Keir Mierle)
-#include <glog/logging.h>
-#include "ceres/evaluator.h"
+#include <vector>
#include "ceres/block_evaluate_preparer.h"
#include "ceres/block_jacobian_writer.h"
#include "ceres/compressed_row_jacobian_writer.h"
-#include "ceres/scratch_evaluate_preparer.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/crs_matrix.h"
#include "ceres/dense_jacobian_writer.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
#include "ceres/program_evaluator.h"
+#include "ceres/scratch_evaluate_preparer.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -47,6 +51,7 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
string* error) {
switch (options.linear_solver_type) {
case DENSE_QR:
+ case DENSE_NORMAL_CHOLESKY:
return new ProgramEvaluator<ScratchEvaluatePreparer,
DenseJacobianWriter>(options,
program);
@@ -67,5 +72,76 @@ Evaluator* Evaluator::Create(const Evaluator::Options& options,
}
}
+bool Evaluator::Evaluate(Program* program,
+ int num_threads,
+ double* cost,
+ vector<double>* residuals,
+ vector<double>* gradient,
+ CRSMatrix* output_jacobian) {
+ CHECK_GE(num_threads, 1)
+ << "This is a Ceres bug; please contact the developers!";
+ CHECK_NOTNULL(cost);
+
+ // Setup the Parameter indices and offsets before an evaluator can
+ // be constructed and used.
+ program->SetParameterOffsetsAndIndex();
+
+ Evaluator::Options evaluator_options;
+ evaluator_options.linear_solver_type = SPARSE_NORMAL_CHOLESKY;
+ evaluator_options.num_threads = num_threads;
+
+ string error;
+ scoped_ptr<Evaluator> evaluator(
+ Evaluator::Create(evaluator_options, program, &error));
+ if (evaluator.get() == NULL) {
+ LOG(ERROR) << "Unable to create an Evaluator object. "
+ << "Error: " << error
+ << "This is a Ceres bug; please contact the developers!";
+ return false;
+ }
+
+ if (residuals != NULL) {
+ residuals->resize(evaluator->NumResiduals());
+ }
+
+ if (gradient != NULL) {
+ gradient->resize(evaluator->NumEffectiveParameters());
+ }
+
+ scoped_ptr<CompressedRowSparseMatrix> jacobian;
+ if (output_jacobian != NULL) {
+ jacobian.reset(
+ down_cast<CompressedRowSparseMatrix*>(evaluator->CreateJacobian()));
+ }
+
+ // Point the state pointers to the user state pointers. This is
+ // needed so that we can extract a parameter vector which is then
+ // passed to Evaluator::Evaluate.
+ program->SetParameterBlockStatePtrsToUserStatePtrs();
+
+ // Copy the value of the parameter blocks into a vector, since the
+ // Evaluate::Evaluate method needs its input as such. The previous
+ // call to SetParameterBlockStatePtrsToUserStatePtrs ensures that
+ // these values are the ones corresponding to the actual state of
+ // the parameter blocks, rather than the temporary state pointer
+ // used for evaluation.
+ Vector parameters(program->NumParameters());
+ program->ParameterBlocksToStateVector(parameters.data());
+
+ if (!evaluator->Evaluate(parameters.data(),
+ cost,
+ residuals != NULL ? &(*residuals)[0] : NULL,
+ gradient != NULL ? &(*gradient)[0] : NULL,
+ jacobian.get())) {
+ return false;
+ }
+
+ if (output_jacobian != NULL) {
+ jacobian->ToCRSMatrix(output_jacobian);
+ }
+
+ return true;
+}
+
} // namespace internal
} // namespace ceres
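
As a small consumer-side illustration of the Jacobian returned through ToCRSMatrix above, the helper below (not part of Ceres; CRSToDense is an invented name) expands a CRSMatrix into a dense Eigen matrix, assuming the usual compressed-row layout in which rows holds num_rows + 1 offsets into cols and values:

    #include "Eigen/Dense"
    #include "ceres/crs_matrix.h"

    Eigen::MatrixXd CRSToDense(const ceres::CRSMatrix& m) {
      Eigen::MatrixXd dense = Eigen::MatrixXd::Zero(m.num_rows, m.num_cols);
      for (int r = 0; r < m.num_rows; ++r) {
        // Entries of row r live in the half-open range [rows[r], rows[r + 1]).
        for (int idx = m.rows[r]; idx < m.rows[r + 1]; ++idx) {
          dense(r, m.cols[idx]) = m.values[idx];
        }
      }
      return dense;
    }
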
diff --git a/extern/libmv/third_party/ceres/internal/ceres/evaluator.h b/extern/libmv/third_party/ceres/internal/ceres/evaluator.h
index adefdd26660..6aa30d7b739 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/evaluator.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/evaluator.h
@@ -33,10 +33,14 @@
#define CERES_INTERNAL_EVALUATOR_H_
#include <string>
+#include <vector>
#include "ceres/internal/port.h"
#include "ceres/types.h"
namespace ceres {
+
+class CRSMatrix;
+
namespace internal {
class Program;
@@ -65,6 +69,32 @@ class Evaluator {
Program* program,
string* error);
+
+ // This is used for computing the cost, residual and Jacobian for
+ // returning to the user. For actually solving the optimization
+ // problem, the optimization algorithm uses the ProgramEvaluator
+ // objects directly.
+ //
+ // The residuals, gradient and jacobian pointers can be NULL, in
+ // which case they will not be evaluated. cost cannot be NULL.
+ //
+ // The parallelism of the evaluator is controlled by num_threads; it
+ // should be at least 1.
+ //
+ // Note that this function does not take a parameter vector as
+ // input. The parameter blocks are evaluated on the values contained
+ // in the arrays pointed to by their user_state pointers.
+ //
+ // Also worth noting is that this function mutates program by
+ // calling Program::SetParameterOffsetsAndIndex() on it so that an
+ // evaluator object can be constructed.
+ static bool Evaluate(Program* program,
+ int num_threads,
+ double* cost,
+ vector<double>* residuals,
+ vector<double>* gradient,
+ CRSMatrix* jacobian);
+
// Build and return a sparse matrix for storing and working with the Jacobian
// of the objective function. The jacobian has dimensions
// NumEffectiveParameters() by NumParameters(), and is typically extremely
@@ -95,6 +125,7 @@ class Evaluator {
virtual bool Evaluate(const double* state,
double* cost,
double* residuals,
+ double* gradient,
SparseMatrix* jacobian) = 0;
// Make a change delta (of size NumEffectiveParameters()) to state (of size
diff --git a/extern/libmv/third_party/ceres/internal/ceres/file.cc b/extern/libmv/third_party/ceres/internal/ceres/file.cc
index 5fc9d220861..6fe7557246d 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/file.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/file.cc
@@ -31,7 +31,8 @@
// Really simple file IO.
#include <cstdio>
-#include <glog/logging.h>
+#include "file.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -48,7 +49,7 @@ void WriteStringToFileOrDie(const string &data, const string &filename) {
}
void ReadFileToStringOrDie(const string &filename, string *data) {
- FILE* file_descriptor = file_descriptor = fopen(filename.c_str(), "r");
+ FILE* file_descriptor = fopen(filename.c_str(), "r");
if (!file_descriptor) {
LOG(FATAL) << "Couldn't read file: " << filename;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py b/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py
new file mode 100644
index 00000000000..af9873f94c0
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/generate_eliminator_specialization.py
@@ -0,0 +1,186 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+#
+# Copyright 2011 Google Inc. All Rights Reserved.
+# Author: sameeragarwal@google.com (Sameer Agarwal)
+#
+# Script for explicitly generating template specializations of the
+# SchurEliminator class. It is a rather large class
+# and the number of explicit instantiations is also large. Explicitly
+# generating these instantiations in separate .cc files breaks the
+# compilation into separate compilation units rather than one large .cc
+# file, which takes 2+GB of RAM to compile.
+#
+# This script creates two sets of files.
+#
+# 1. schur_eliminator_x_x_x.cc
+# where each x indicates a template parameter, and
+#
+# 2. schur_eliminator.cc
+#
+# that contains a factory function for instantiating these classes
+# based on runtime parameters.
+#
+# The list of tuples, SPECIALIZATIONS, indicates the set of
+# specializations that are generated.
+
+# Set of template specializations to generate
+SPECIALIZATIONS = [(2, 2, 2),
+ (2, 2, 3),
+ (2, 2, 4),
+ (2, 2, "Dynamic"),
+ (2, 3, 3),
+ (2, 3, 4),
+ (2, 3, 9),
+ (2, 3, "Dynamic"),
+ (2, 4, 3),
+ (2, 4, 4),
+ (2, 4, "Dynamic"),
+ (4, 4, 2),
+ (4, 4, 3),
+ (4, 4, 4),
+ (4, 4, "Dynamic"),
+ ("Dynamic", "Dynamic", "Dynamic")]
+
+SPECIALIZATION_FILE = """// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// Template specialization of SchurEliminator.
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+#include "ceres/schur_eliminator_impl.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+template class SchurEliminator<%s, %s, %s>;
+
+} // namespace internal
+} // namespace ceres
+
+"""
+
+FACTORY_FILE_HEADER = """// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+//
+// ========================================
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+// THIS FILE IS AUTOGENERATED. DO NOT EDIT.
+//=========================================
+//
+// This file is generated using generate_eliminator_specialization.py.
+// Editing it manually is not recommended.
+
+#include "ceres/linear_solver.h"
+#include "ceres/schur_eliminator.h"
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+SchurEliminatorBase*
+SchurEliminatorBase::Create(const LinearSolver::Options& options) {
+#ifndef CERES_RESTRICT_SCHUR_SPECIALIZATION
+"""
+
+FACTORY_CONDITIONAL = """ if ((options.row_block_size == %s) &&
+ (options.e_block_size == %s) &&
+ (options.f_block_size == %s)) {
+ return new SchurEliminator<%s, %s, %s>(options);
+ }
+"""
+
+FACTORY_FOOTER = """
+#endif
+ VLOG(1) << "Template specializations not found for <"
+ << options.row_block_size << ","
+ << options.e_block_size << ","
+ << options.f_block_size << ">";
+ return new SchurEliminator<Dynamic, Dynamic, Dynamic>(options);
+}
+
+} // namespace internal
+} // namespace ceres
+"""
+
+
+def SuffixForSize(size):
+ if size == "Dynamic":
+ return "d"
+ return str(size)
+
+
+def SpecializationFilename(prefix, row_block_size, e_block_size, f_block_size):
+ return "_".join([prefix] + map(SuffixForSize, (row_block_size,
+ e_block_size,
+ f_block_size)))
+
+
+def Specialize():
+ """
+ Generate specialization code and the conditionals to instantiate it.
+ """
+ f = open("schur_eliminator.cc", "w")
+ f.write(FACTORY_FILE_HEADER)
+
+ for row_block_size, e_block_size, f_block_size in SPECIALIZATIONS:
+ output = SpecializationFilename("generated/schur_eliminator",
+ row_block_size,
+ e_block_size,
+ f_block_size) + ".cc"
+ fptr = open(output, "w")
+ fptr.write(SPECIALIZATION_FILE % (row_block_size,
+ e_block_size,
+ f_block_size))
+ fptr.close()
+
+ f.write(FACTORY_CONDITIONAL % (row_block_size,
+ e_block_size,
+ f_block_size,
+ row_block_size,
+ e_block_size,
+ f_block_size))
+ f.write(FACTORY_FOOTER)
+ f.close()
+
+
+if __name__ == "__main__":
+ Specialize()
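
For the tuple (2, 3, 9), for example, the SPECIALIZATION_FILE template above expands to generated/schur_eliminator_2_3_9.cc, whose substantive content (copyright header omitted) is a single explicit instantiation:

    #include "ceres/schur_eliminator_impl.h"
    #include "ceres/internal/eigen.h"

    namespace ceres {
    namespace internal {

    template class SchurEliminator<2, 3, 9>;

    }  // namespace internal
    }  // namespace ceres

and FACTORY_CONDITIONAL adds the matching branch returning new SchurEliminator<2, 3, 9>(options) to the factory in schur_eliminator.cc.
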
diff --git a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc
index abba40824ef..7fb3ed7b3a8 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/gradient_checking_cost_function.cc
@@ -36,18 +36,18 @@
#include <string>
#include <vector>
-#include <glog/logging.h>
+#include "ceres/cost_function.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/runtime_numeric_diff_cost_function.h"
#include "ceres/stringprintf.h"
-#include "ceres/cost_function.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/problem.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/graph.h b/extern/libmv/third_party/ceres/internal/ceres/graph.h
index fd7a224f0aa..2c0f6d28e54 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/graph.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/graph.h
@@ -129,7 +129,7 @@ class Graph {
HashMap<Vertex, HashSet<Vertex> > edges_;
HashMap<pair<Vertex, Vertex>, double> edge_weights_;
- DISALLOW_COPY_AND_ASSIGN(Graph);
+ CERES_DISALLOW_COPY_AND_ASSIGN(Graph);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc
index bd908846362..4af030a8535 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.cc
@@ -30,22 +30,20 @@
#include "ceres/implicit_schur_complement.h"
-#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
ImplicitSchurComplement::ImplicitSchurComplement(int num_eliminate_blocks,
- bool constant_sparsity,
bool preconditioner)
: num_eliminate_blocks_(num_eliminate_blocks),
- constant_sparsity_(constant_sparsity),
preconditioner_(preconditioner),
A_(NULL),
D_(NULL),
@@ -62,7 +60,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
const double* b) {
// Since initialization is reasonably heavy, perhaps we can save on
// constructing a new object everytime.
- if ((A_ == NULL) || !constant_sparsity_) {
+ if (A_ == NULL) {
A_.reset(new PartitionedMatrixView(A, num_eliminate_blocks_));
}
@@ -71,7 +69,7 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
// Initialize temporary storage and compute the block diagonals of
// E'E and F'E.
- if ((!constant_sparsity_) || (block_diagonal_EtE_inverse_ == NULL)) {
+ if (block_diagonal_EtE_inverse_ == NULL) {
block_diagonal_EtE_inverse_.reset(A_->CreateBlockDiagonalEtE());
if (preconditioner_) {
block_diagonal_FtF_inverse_.reset(A_->CreateBlockDiagonalFtF());
@@ -92,17 +90,10 @@ void ImplicitSchurComplement::Init(const BlockSparseMatrixBase& A,
// The block diagonals of the augmented linear system contain
// contributions from the diagonal D if it is non-null. Add that to
// the block diagonals and invert them.
- if (D_ != NULL) {
- AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
- if (preconditioner_) {
- AddDiagonalAndInvert(D_ + A_->num_cols_e(),
- block_diagonal_FtF_inverse_.get());
- }
- } else {
- AddDiagonalAndInvert(NULL, block_diagonal_EtE_inverse_.get());
- if (preconditioner_) {
- AddDiagonalAndInvert(NULL, block_diagonal_FtF_inverse_.get());
- }
+ AddDiagonalAndInvert(D_, block_diagonal_EtE_inverse_.get());
+ if (preconditioner_) {
+ AddDiagonalAndInvert((D_ == NULL) ? NULL : D_ + A_->num_cols_e(),
+ block_diagonal_FtF_inverse_.get());
}
// Compute the RHS of the Schur complement system.
diff --git a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h
index 37a319f9c57..b9ebaa4628e 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/implicit_schur_complement.h
@@ -91,20 +91,13 @@ class ImplicitSchurComplement : public LinearOperator {
// num_eliminate_blocks is the number of E blocks in the matrix
// A.
//
- // constant_sparsity indicates if across calls to Init, the sparsity
- // structure of the matrix A remains constant or not. This makes for
- // significant savings across multiple matrices A, e.g. when used in
- // conjunction with an optimization algorithm.
- //
// preconditioner indicates whether the inverse of the matrix F'F
// should be computed or not as a preconditioner for the Schur
// Complement.
//
// TODO(sameeragarwal): Get rid of the two bools below and replace
// them with enums.
- ImplicitSchurComplement(int num_eliminate_blocks,
- bool constant_sparsity,
- bool preconditioner);
+ ImplicitSchurComplement(int num_eliminate_blocks, bool preconditioner);
virtual ~ImplicitSchurComplement();
// Initialize the Schur complement for a linear least squares
@@ -151,7 +144,6 @@ class ImplicitSchurComplement : public LinearOperator {
void UpdateRhs();
int num_eliminate_blocks_;
- bool constant_sparsity_;
bool preconditioner_;
scoped_ptr<PartitionedMatrixView> A_;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc
index 51303195317..679c41f2431 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/iterative_schur_complement_solver.cc
@@ -33,22 +33,18 @@
#include <algorithm>
#include <cstring>
#include <vector>
-
-#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/conjugate_gradients_solver.h"
#include "ceres/implicit_schur_complement.h"
-#include "ceres/linear_solver.h"
-#include "ceres/triplet_sparse_matrix.h"
-#include "ceres/visibility_based_preconditioner.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
#include "ceres/visibility_based_preconditioner.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -69,10 +65,9 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
CHECK_NOTNULL(A->block_structure());
// Initialize a ImplicitSchurComplement object.
- if ((schur_complement_ == NULL) || (!options_.constant_sparsity)) {
+ if (schur_complement_ == NULL) {
schur_complement_.reset(
new ImplicitSchurComplement(options_.num_eliminate_blocks,
- options_.constant_sparsity,
options_.preconditioner_type == JACOBI));
}
schur_complement_->Init(*A, per_solve_options.D, b);
@@ -119,7 +114,7 @@ LinearSolver::Summary IterativeSchurComplementSolver::SolveImpl(
new VisibilityBasedPreconditioner(*A->block_structure(), options_));
}
is_preconditioner_good =
- visibility_based_preconditioner_->Compute(*A, per_solve_options.D);
+ visibility_based_preconditioner_->Update(*A, per_solve_options.D);
cg_per_solve_options.preconditioner =
visibility_based_preconditioner_.get();
break;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.cc b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.cc
deleted file mode 100644
index b40a5162adc..00000000000
--- a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt.cc
+++ /dev/null
@@ -1,574 +0,0 @@
-// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
-// http://code.google.com/p/ceres-solver/
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-//
-// * Redistributions of source code must retain the above copyright notice,
-// this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright notice,
-// this list of conditions and the following disclaimer in the documentation
-// and/or other materials provided with the distribution.
-// * Neither the name of Google Inc. nor the names of its contributors may be
-// used to endorse or promote products derived from this software without
-// specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
-// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-// POSSIBILITY OF SUCH DAMAGE.
-//
-// Author: sameeragarwal@google.com (Sameer Agarwal)
-//
-// Implementation of a simple LM algorithm which uses the step sizing
-// rule of "Methods for Nonlinear Least Squares" by K. Madsen,
-// H.B. Nielsen and O. Tingleff. Available to download from
-//
-// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
-//
-// The basic algorithm described in this note is an exact step
-// algorithm that depends on the Newton(LM) step being solved exactly
-// in each iteration. When a suitable iterative solver is available to
-// solve the Newton(LM) step, the algorithm will automatically switch
-// to an inexact step solution method. This trades some slowdown in
-// convergence for significant savings in solve time and memory
-// usage. Our implementation of the Truncated Newton algorithm follows
-// the discussion and recommendataions in "Stephen G. Nash, A Survey
-// of Truncated Newton Methods, Journal of Computational and Applied
-// Mathematics, 124(1-2), 45-59, 2000.
-
-#include "ceres/levenberg_marquardt.h"
-
-#include <algorithm>
-#include <cstdlib>
-#include <cmath>
-#include <cstring>
-#include <string>
-#include <vector>
-
-#include <glog/logging.h>
-#include "Eigen/Core"
-#include "ceres/evaluator.h"
-#include "ceres/file.h"
-#include "ceres/linear_least_squares_problems.h"
-#include "ceres/linear_solver.h"
-#include "ceres/matrix_proto.h"
-#include "ceres/sparse_matrix.h"
-#include "ceres/stringprintf.h"
-#include "ceres/internal/eigen.h"
-#include "ceres/internal/scoped_ptr.h"
-#include "ceres/types.h"
-
-namespace ceres {
-namespace internal {
-namespace {
-
-// Numbers for clamping the size of the LM diagonal. The size of these
-// numbers is heuristic. We will probably be adjusting them in the
-// future based on more numerical experience. With jacobi scaling
-// enabled, these numbers should be all but redundant.
-const double kMinLevenbergMarquardtDiagonal = 1e-6;
-const double kMaxLevenbergMarquardtDiagonal = 1e32;
-
-// Small constant for various floating point issues.
-const double kEpsilon = 1e-12;
-
-// Number of times the linear solver should be retried in case of
-// numerical failure. The retries are done by exponentially scaling up
-// mu at each retry. This leads to stronger and stronger
-// regularization making the linear least squares problem better
-// conditioned at each retry.
-const int kMaxLinearSolverRetries = 5;
-
-// D = 1/sqrt(diag(J^T * J))
-void EstimateScale(const SparseMatrix& jacobian, double* D) {
- CHECK_NOTNULL(D);
- jacobian.SquaredColumnNorm(D);
- for (int i = 0; i < jacobian.num_cols(); ++i) {
- D[i] = 1.0 / (kEpsilon + sqrt(D[i]));
- }
-}
-
-// D = diag(J^T * J)
-void LevenbergMarquardtDiagonal(const SparseMatrix& jacobian,
- double* D) {
- CHECK_NOTNULL(D);
- jacobian.SquaredColumnNorm(D);
- for (int i = 0; i < jacobian.num_cols(); ++i) {
- D[i] = min(max(D[i], kMinLevenbergMarquardtDiagonal),
- kMaxLevenbergMarquardtDiagonal);
- }
-}
-
-bool RunCallback(IterationCallback* callback,
- const IterationSummary& iteration_summary,
- Solver::Summary* summary) {
- const CallbackReturnType status = (*callback)(iteration_summary);
- switch (status) {
- case SOLVER_TERMINATE_SUCCESSFULLY:
- summary->termination_type = USER_SUCCESS;
- VLOG(1) << "Terminating on USER_SUCCESS.";
- return false;
- case SOLVER_ABORT:
- summary->termination_type = USER_ABORT;
- VLOG(1) << "Terminating on USER_ABORT.";
- return false;
- case SOLVER_CONTINUE:
- return true;
- default:
- LOG(FATAL) << "Unknown status returned by callback: "
- << status;
- return NULL;
- }
-}
-
-} // namespace
-
-LevenbergMarquardt::~LevenbergMarquardt() {}
-
-void LevenbergMarquardt::Minimize(const Minimizer::Options& options,
- Evaluator* evaluator,
- LinearSolver* linear_solver,
- const double* initial_parameters,
- double* final_parameters,
- Solver::Summary* summary) {
- time_t start_time = time(NULL);
- const int num_parameters = evaluator->NumParameters();
- const int num_effective_parameters = evaluator->NumEffectiveParameters();
- const int num_residuals = evaluator->NumResiduals();
-
- summary->termination_type = NO_CONVERGENCE;
- summary->num_successful_steps = 0;
- summary->num_unsuccessful_steps = 0;
-
- // Allocate the various vectors needed by the algorithm.
- memcpy(final_parameters, initial_parameters,
- num_parameters * sizeof(*initial_parameters));
-
- VectorRef x(final_parameters, num_parameters);
- Vector x_new(num_parameters);
-
- Vector lm_step(num_effective_parameters);
- Vector gradient(num_effective_parameters);
- Vector scaled_gradient(num_effective_parameters);
- // Jacobi scaling vector
- Vector scale(num_effective_parameters);
-
- Vector f_model(num_residuals);
- Vector f(num_residuals);
- Vector f_new(num_residuals);
- Vector D(num_parameters);
- Vector muD(num_parameters);
-
- // Ask the Evaluator to create the jacobian matrix. The sparsity
- // pattern of this matrix is going to remain constant, so we only do
- // this once and then re-use this matrix for all subsequent Jacobian
- // computations.
- scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
-
- double x_norm = x.norm();
-
- double cost = 0.0;
- D.setOnes();
- f.setZero();
-
- // Do initial cost and Jacobian evaluation.
- if (!evaluator->Evaluate(x.data(), &cost, f.data(), jacobian.get())) {
- LOG(WARNING) << "Failed to compute residuals and Jacobian. "
- << "Terminating.";
- summary->termination_type = NUMERICAL_FAILURE;
- return;
- }
-
- if (options.jacobi_scaling) {
- EstimateScale(*jacobian, scale.data());
- jacobian->ScaleColumns(scale.data());
- } else {
- scale.setOnes();
- }
-
- // This is a poor way to do this computation. Even if fixed_cost is
- // zero, because we are subtracting two possibly large numbers, we
- // are depending on exact cancellation to give us a zero here. But
- // initial_cost and cost have been computed by two different
- // evaluators. One which runs on the whole problem (in
- // solver_impl.cc) in single threaded mode and another which runs
- // here on the reduced problem, so fixed_cost can (and does) contain
- // some numerical garbage with a relative magnitude of 1e-14.
- //
- // The right way to do this, would be to compute the fixed cost on
- // just the set of residual blocks which are held constant and were
- // removed from the original problem when the reduced problem was
- // constructed.
- summary->fixed_cost = summary->initial_cost - cost;
-
- double model_cost = f.squaredNorm() / 2.0;
- double total_cost = summary->fixed_cost + cost;
-
- scaled_gradient.setZero();
- jacobian->LeftMultiply(f.data(), scaled_gradient.data());
- gradient = scaled_gradient.array() / scale.array();
-
- double gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
- // We need the max here to guard againt the gradient being zero.
- const double gradient_max_norm_0 = max(gradient_max_norm, kEpsilon);
- double gradient_tolerance = options.gradient_tolerance * gradient_max_norm_0;
-
- double mu = options.tau;
- double nu = 2.0;
- int iteration = 0;
- double actual_cost_change = 0.0;
- double step_norm = 0.0;
- double relative_decrease = 0.0;
-
- // Insane steps are steps which are not sane, i.e. there is some
- // numerical kookiness going on with them. There are various reasons
- // for this kookiness, some easier to diagnose then others. From the
- // point of view of the non-linear solver, they are steps which
- // cannot be used. We return with NUMERICAL_FAILURE after
- // kMaxLinearSolverRetries consecutive insane steps.
- bool step_is_sane = false;
- int num_consecutive_insane_steps = 0;
-
- // Whether the step resulted in a sufficient decrease in the
- // objective function when compared to the decrease in the value of
- // the lineariztion.
- bool step_is_successful = false;
-
- // Parse the iterations for which to dump the linear problem.
- vector<int> iterations_to_dump = options.lsqp_iterations_to_dump;
- sort(iterations_to_dump.begin(), iterations_to_dump.end());
-
- IterationSummary iteration_summary;
- iteration_summary.iteration = iteration;
- iteration_summary.step_is_successful = false;
- iteration_summary.cost = total_cost;
- iteration_summary.cost_change = actual_cost_change;
- iteration_summary.gradient_max_norm = gradient_max_norm;
- iteration_summary.step_norm = step_norm;
- iteration_summary.relative_decrease = relative_decrease;
- iteration_summary.mu = mu;
- iteration_summary.eta = options.eta;
- iteration_summary.linear_solver_iterations = 0;
- iteration_summary.linear_solver_time_sec = 0.0;
- iteration_summary.iteration_time_sec = (time(NULL) - start_time);
- if (options.logging_type >= PER_MINIMIZER_ITERATION) {
- summary->iterations.push_back(iteration_summary);
- }
-
- // Check if the starting point is an optimum.
- VLOG(2) << "Gradient max norm: " << gradient_max_norm
- << " tolerance: " << gradient_tolerance
- << " ratio: " << gradient_max_norm / gradient_max_norm_0
- << " tolerance: " << options.gradient_tolerance;
- if (gradient_max_norm <= gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating on GRADIENT_TOLERANCE. "
- << "Relative gradient max norm: "
- << gradient_max_norm / gradient_max_norm_0
- << " <= " << options.gradient_tolerance;
- return;
- }
-
- // Call the various callbacks.
- for (int i = 0; i < options.callbacks.size(); ++i) {
- if (!RunCallback(options.callbacks[i], iteration_summary, summary)) {
- return;
- }
- }
-
- // We only need the LM diagonal if we are actually going to do at
- // least one iteration of the optimization. So we wait to do it
- // until now.
- LevenbergMarquardtDiagonal(*jacobian, D.data());
-
- while ((iteration < options.max_num_iterations) &&
- (time(NULL) - start_time) <= options.max_solver_time_sec) {
- time_t iteration_start_time = time(NULL);
- step_is_sane = false;
- step_is_successful = false;
-
- IterationSummary iteration_summary;
- // The while loop here is just to provide an easily breakable
- // control structure. We are guaranteed to always exit this loop
- // at the end of one iteration or before.
- while (1) {
- muD = (mu * D).array().sqrt();
- LinearSolver::PerSolveOptions solve_options;
- solve_options.D = muD.data();
- solve_options.q_tolerance = options.eta;
- // Disable r_tolerance checking. Since we only care about
- // termination via the q_tolerance. As Nash and Sofer show,
- // r_tolerance based termination is essentially useless in
- // Truncated Newton methods.
- solve_options.r_tolerance = -1.0;
-
- const time_t linear_solver_start_time = time(NULL);
- LinearSolver::Summary linear_solver_summary =
- linear_solver->Solve(jacobian.get(),
- f.data(),
- solve_options,
- lm_step.data());
- iteration_summary.linear_solver_time_sec =
- (time(NULL) - linear_solver_start_time);
- iteration_summary.linear_solver_iterations =
- linear_solver_summary.num_iterations;
-
- if (binary_search(iterations_to_dump.begin(),
- iterations_to_dump.end(),
- iteration)) {
- CHECK(DumpLinearLeastSquaresProblem(options.lsqp_dump_directory,
- iteration,
- options.lsqp_dump_format_type,
- jacobian.get(),
- muD.data(),
- f.data(),
- lm_step.data(),
- options.num_eliminate_blocks))
- << "Tried writing linear least squares problem: "
- << options.lsqp_dump_directory
- << " but failed.";
- }
-
- // We ignore the case where the linear solver did not converge,
- // since the partial solution computed by it still maybe of use,
- // and there is no reason to ignore it, especially since we
- // spent so much time computing it.
- if ((linear_solver_summary.termination_type != TOLERANCE) &&
- (linear_solver_summary.termination_type != MAX_ITERATIONS)) {
- VLOG(1) << "Linear solver failure: retrying with a higher mu";
- break;
- }
-
- step_norm = (lm_step.array() * scale.array()).matrix().norm();
-
- // Check step length based convergence. If the step length is
- // too small, then we are done.
- const double step_size_tolerance = options.parameter_tolerance *
- (x_norm + options.parameter_tolerance);
-
- VLOG(2) << "Step size: " << step_norm
- << " tolerance: " << step_size_tolerance
- << " ratio: " << step_norm / step_size_tolerance
- << " tolerance: " << options.parameter_tolerance;
- if (step_norm <= options.parameter_tolerance *
- (x_norm + options.parameter_tolerance)) {
- summary->termination_type = PARAMETER_TOLERANCE;
- VLOG(1) << "Terminating on PARAMETER_TOLERANCE."
- << "Relative step size: " << step_norm / step_size_tolerance
- << " <= " << options.parameter_tolerance;
- return;
- }
-
- Vector delta = -(lm_step.array() * scale.array()).matrix();
- if (!evaluator->Plus(x.data(), delta.data(), x_new.data())) {
- LOG(WARNING) << "Failed to compute Plus(x, delta, x_plus_delta). "
- << "Terminating.";
- summary->termination_type = NUMERICAL_FAILURE;
- return;
- }
-
- double cost_new = 0.0;
- if (!evaluator->Evaluate(x_new.data(), &cost_new, NULL, NULL)) {
- LOG(WARNING) << "Failed to compute the value of the objective "
- << "function. Terminating.";
- summary->termination_type = NUMERICAL_FAILURE;
- return;
- }
-
- f_model.setZero();
- jacobian->RightMultiply(lm_step.data(), f_model.data());
- const double model_cost_new =
- (f.segment(0, num_residuals) - f_model).squaredNorm() / 2;
-
- actual_cost_change = cost - cost_new;
- double model_cost_change = model_cost - model_cost_new;
-
- VLOG(2) << "[Model cost] current: " << model_cost
- << " new : " << model_cost_new
- << " change: " << model_cost_change;
-
- VLOG(2) << "[Nonlinear cost] current: " << cost
- << " new : " << cost_new
- << " change: " << actual_cost_change
- << " relative change: " << fabs(actual_cost_change) / cost
- << " tolerance: " << options.function_tolerance;
-
- // In exact arithmetic model_cost_change should never be
- // negative. But due to numerical precision issues, we may end up
- // with a small negative number. model_cost_change which are
- // negative and large in absolute value are indicative of a
- // numerical failure in the solver.
- if (model_cost_change < -kEpsilon) {
- VLOG(1) << "Model cost change is negative.\n"
- << "Current : " << model_cost
- << " new : " << model_cost_new
- << " change: " << model_cost_change << "\n";
- break;
- }
-
- // If we have reached this far, then we are willing to trust the
- // numerical quality of the step.
- step_is_sane = true;
- num_consecutive_insane_steps = 0;
-
- // Check function value based convergence.
- if (fabs(actual_cost_change) < options.function_tolerance * cost) {
- VLOG(1) << "Termination on FUNCTION_TOLERANCE."
- << " Relative cost change: " << fabs(actual_cost_change) / cost
- << " tolerance: " << options.function_tolerance;
- summary->termination_type = FUNCTION_TOLERANCE;
- return;
- }
-
- // Clamp model_cost_change at kEpsilon from below.
- if (model_cost_change < kEpsilon) {
- VLOG(1) << "Clamping model cost change " << model_cost_change
- << " to " << kEpsilon;
- model_cost_change = kEpsilon;
- }
-
- relative_decrease = actual_cost_change / model_cost_change;
- VLOG(2) << "actual_cost_change / model_cost_change = "
- << relative_decrease;
-
- if (relative_decrease < options.min_relative_decrease) {
- VLOG(2) << "Unsuccessful step.";
- break;
- }
-
- VLOG(2) << "Successful step.";
-
- ++summary->num_successful_steps;
- x = x_new;
- x_norm = x.norm();
-
- if (!evaluator->Evaluate(x.data(), &cost, f.data(), jacobian.get())) {
- LOG(WARNING) << "Failed to compute residuals and jacobian. "
- << "Terminating.";
- summary->termination_type = NUMERICAL_FAILURE;
- return;
- }
-
- if (options.jacobi_scaling) {
- jacobian->ScaleColumns(scale.data());
- }
-
- model_cost = f.squaredNorm() / 2.0;
- LevenbergMarquardtDiagonal(*jacobian, D.data());
- scaled_gradient.setZero();
- jacobian->LeftMultiply(f.data(), scaled_gradient.data());
- gradient = scaled_gradient.array() / scale.array();
- gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
-
- // Check gradient based convergence.
- VLOG(2) << "Gradient max norm: " << gradient_max_norm
- << " tolerance: " << gradient_tolerance
- << " ratio: " << gradient_max_norm / gradient_max_norm_0
- << " tolerance: " << options.gradient_tolerance;
- if (gradient_max_norm <= gradient_tolerance) {
- summary->termination_type = GRADIENT_TOLERANCE;
- VLOG(1) << "Terminating on GRADIENT_TOLERANCE. "
- << "Relative gradient max norm: "
- << gradient_max_norm / gradient_max_norm_0
- << " <= " << options.gradient_tolerance
- << " (tolerance).";
- return;
- }
-
- mu = mu * max(1.0 / 3.0, 1 - pow(2 * relative_decrease - 1, 3));
- nu = 2.0;
- step_is_successful = true;
- break;
- }
-
- if (!step_is_sane) {
- ++num_consecutive_insane_steps;
- }
-
- if (num_consecutive_insane_steps == kMaxLinearSolverRetries) {
- summary->termination_type = NUMERICAL_FAILURE;
- VLOG(1) << "Too many consecutive retries; ending with numerical fail.";
-
- if (!options.crash_and_dump_lsqp_on_failure) {
- return;
- }
-
- // Dump debugging information to disk.
- CHECK(options.lsqp_dump_format_type == TEXTFILE ||
- options.lsqp_dump_format_type == PROTOBUF)
- << "Dumping the linear least squares problem on crash "
- << "requires Solver::Options::lsqp_dump_format_type to be "
- << "PROTOBUF or TEXTFILE.";
-
- if (DumpLinearLeastSquaresProblem(options.lsqp_dump_directory,
- iteration,
- options.lsqp_dump_format_type,
- jacobian.get(),
- muD.data(),
- f.data(),
- lm_step.data(),
- options.num_eliminate_blocks)) {
- LOG(FATAL) << "Linear least squares problem saved to: "
- << options.lsqp_dump_directory
- << ". Please provide this to the Ceres developers for "
- << " debugging along with the v=2 log.";
- } else {
- LOG(FATAL) << "Tried writing linear least squares problem: "
- << options.lsqp_dump_directory
- << " but failed.";
- }
- }
-
- if (!step_is_successful) {
- // Either the step did not lead to a decrease in cost or there
- // was numerical failure. In either case we will scale mu up and
- // retry. If it was a numerical failure, we hope that the
- // stronger regularization will make the linear system better
- // conditioned. If it was numerically sane, but there was no
- // decrease in cost, then increasing mu reduces the size of the
- // trust region and we look for a decrease closer to the
- // linearization point.
- ++summary->num_unsuccessful_steps;
- mu = mu * nu;
- nu = 2 * nu;
- }
-
- ++iteration;
-
- total_cost = summary->fixed_cost + cost;
-
- iteration_summary.iteration = iteration;
- iteration_summary.step_is_successful = step_is_successful;
- iteration_summary.cost = total_cost;
- iteration_summary.cost_change = actual_cost_change;
- iteration_summary.gradient_max_norm = gradient_max_norm;
- iteration_summary.step_norm = step_norm;
- iteration_summary.relative_decrease = relative_decrease;
- iteration_summary.mu = mu;
- iteration_summary.eta = options.eta;
- iteration_summary.iteration_time_sec = (time(NULL) - iteration_start_time);
-
- if (options.logging_type >= PER_MINIMIZER_ITERATION) {
- summary->iterations.push_back(iteration_summary);
- }
-
- // Call the various callbacks.
- for (int i = 0; i < options.callbacks.size(); ++i) {
- if (!RunCallback(options.callbacks[i], iteration_summary, summary)) {
- return;
- }
- }
- }
-}
-
-} // namespace internal
-} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc
new file mode 100644
index 00000000000..9e6a59e3813
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.cc
@@ -0,0 +1,144 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/levenberg_marquardt_strategy.h"
+
+#include <cmath>
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/linear_solver.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+
+LevenbergMarquardtStrategy::LevenbergMarquardtStrategy(
+ const TrustRegionStrategy::Options& options)
+ : linear_solver_(options.linear_solver),
+ radius_(options.initial_radius),
+ max_radius_(options.max_radius),
+ min_diagonal_(options.lm_min_diagonal),
+ max_diagonal_(options.lm_max_diagonal),
+ decrease_factor_(2.0),
+ reuse_diagonal_(false) {
+ CHECK_NOTNULL(linear_solver_);
+ CHECK_GT(min_diagonal_, 0.0);
+ CHECK_LE(min_diagonal_, max_diagonal_);
+ CHECK_GT(max_radius_, 0.0);
+}
+
+LevenbergMarquardtStrategy::~LevenbergMarquardtStrategy() {
+}
+
+TrustRegionStrategy::Summary LevenbergMarquardtStrategy::ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) {
+ CHECK_NOTNULL(jacobian);
+ CHECK_NOTNULL(residuals);
+ CHECK_NOTNULL(step);
+
+ const int num_parameters = jacobian->num_cols();
+ if (!reuse_diagonal_) {
+ if (diagonal_.rows() != num_parameters) {
+ diagonal_.resize(num_parameters, 1);
+ }
+
+ jacobian->SquaredColumnNorm(diagonal_.data());
+ for (int i = 0; i < num_parameters; ++i) {
+ diagonal_[i] = min(max(diagonal_[i], min_diagonal_), max_diagonal_);
+ }
+ }
+
+ lm_diagonal_ = (diagonal_ / radius_).array().sqrt();
+
+ LinearSolver::PerSolveOptions solve_options;
+ solve_options.D = lm_diagonal_.data();
+ solve_options.q_tolerance = per_solve_options.eta;
+ // Disable r_tolerance checking, since we only care about
+ // termination via the q_tolerance. As Nash and Sofer show,
+ // r_tolerance based termination is essentially useless in
+ // Truncated Newton methods.
+ solve_options.r_tolerance = -1.0;
+
+ // Invalidate the output array step, so that we can detect if
+ // the linear solver generated numerical garbage. This is known
+ // to happen for the DENSE_QR and DENSE_SCHUR solvers when
+ // the Jacobian is severely rank deficient and mu is too small.
+ InvalidateArray(num_parameters, step);
+
+ // Instead of solving Jx = -r, solve Jy = r.
+ // Then x can be found as x = -y, but the inputs jacobian and residuals
+ // do not need to be modified.
+ LinearSolver::Summary linear_solver_summary =
+ linear_solver_->Solve(jacobian, residuals, solve_options, step);
+ if (linear_solver_summary.termination_type == FAILURE ||
+ !IsArrayValid(num_parameters, step)) {
+ LOG(WARNING) << "Linear solver failure. Failed to compute a finite step.";
+ linear_solver_summary.termination_type = FAILURE;
+ } else {
+ VectorRef(step, num_parameters) *= -1.0;
+ }
+
+ reuse_diagonal_ = true;
+
+ TrustRegionStrategy::Summary summary;
+ summary.residual_norm = linear_solver_summary.residual_norm;
+ summary.num_iterations = linear_solver_summary.num_iterations;
+ summary.termination_type = linear_solver_summary.termination_type;
+ return summary;
+}
+
+void LevenbergMarquardtStrategy::StepAccepted(double step_quality) {
+ CHECK_GT(step_quality, 0.0);
+ radius_ = radius_ / std::max(1.0 / 3.0,
+ 1.0 - pow(2.0 * step_quality - 1.0, 3));
+ radius_ = std::min(max_radius_, radius_);
+ decrease_factor_ = 2.0;
+ reuse_diagonal_ = false;
+}
+
+void LevenbergMarquardtStrategy::StepRejected(double step_quality) {
+ radius_ = radius_ / decrease_factor_;
+ decrease_factor_ *= 2.0;
+ reuse_diagonal_ = true;
+}
+
+double LevenbergMarquardtStrategy::Radius() const {
+ return radius_;
+}
+
+} // namespace internal
+} // namespace ceres
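
Restating the updates that StepAccepted and StepRejected implement above, with rho denoting step_quality and d the decrease factor (initially 2):

    accepted:  radius <- min(max_radius, radius / max(1/3, 1 - (2*rho - 1)^3)),  d <- 2
    rejected:  radius <- radius / d,                                             d <- 2*d

so a high-quality step grows the trust region radius by at most a factor of 3, while repeated rejections shrink it geometrically.
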
diff --git a/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h
new file mode 100644
index 00000000000..90c21789797
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/levenberg_marquardt_strategy.h
@@ -0,0 +1,86 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+#define CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
+
+#include "ceres/internal/eigen.h"
+#include "ceres/trust_region_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+// Levenberg-Marquardt step computation and trust region sizing
+// strategy based on on "Methods for Nonlinear Least Squares" by
+// K. Madsen, H.B. Nielsen and O. Tingleff. Available to download from
+//
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+class LevenbergMarquardtStrategy : public TrustRegionStrategy {
+ public:
+ LevenbergMarquardtStrategy(const TrustRegionStrategy::Options& options);
+ virtual ~LevenbergMarquardtStrategy();
+
+ // TrustRegionStrategy interface
+ virtual TrustRegionStrategy::Summary ComputeStep(
+ const TrustRegionStrategy::PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step);
+ virtual void StepAccepted(double step_quality);
+ virtual void StepRejected(double step_quality);
+ virtual void StepIsInvalid() {
+ // Treat the current step as a rejected step with no increase in
+ // solution quality. Since rejected steps lead to decrease in the
+ // size of the trust region, the next time ComputeStep is called,
+ // this will lead to a better conditioned system.
+ StepRejected(0.0);
+ }
+
+ virtual double Radius() const;
+
+ private:
+ LinearSolver* linear_solver_;
+ double radius_;
+ double max_radius_;
+ const double min_diagonal_;
+ const double max_diagonal_;
+ double decrease_factor_;
+ bool reuse_diagonal_;
+ Vector diagonal_; // diagonal_ = diag(J'J)
+ // Scaled copy of diagonal_. Stored here as optimization to prevent
+ // allocations in every iteration and reuse when a step fails and
+ // ComputeStep is called again.
+ Vector lm_diagonal_; // lm_diagonal_ = diagonal_ / radius_;
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_LEVENBERG_MARQUARDT_STRATEGY_H_
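
A minimal sketch of how a caller might drive this interface, assuming the linear solver, jacobian, residuals and the relative decrease of the objective are provided elsewhere:

    TrustRegionStrategy::Options options;
    options.linear_solver = linear_solver;     // assumed: created by the caller
    LevenbergMarquardtStrategy strategy(options);

    TrustRegionStrategy::PerSolveOptions per_solve_options;
    per_solve_options.eta = 1e-1;              // forcing tolerance for inexact steps

    Vector step(jacobian->num_cols());
    TrustRegionStrategy::Summary summary =
        strategy.ComputeStep(per_solve_options, jacobian, residuals, step.data());

    // relative_decrease = actual cost change / model cost change, computed by the caller.
    if (summary.termination_type != FAILURE && relative_decrease > 0.0) {
      strategy.StepAccepted(relative_decrease);   // grows the radius, refreshes diag(J'J) next time
    } else {
      strategy.StepRejected(relative_decrease);   // shrinks the radius, reuses diag(J'J)
    }
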
diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc
index cca9f442fe7..a91e254a663 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/linear_least_squares_problems.cc
@@ -33,17 +33,17 @@
#include <cstdio>
#include <string>
#include <vector>
-#include <glog/logging.h>
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/casts.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/file.h"
+#include "ceres/internal/scoped_ptr.h"
#include "ceres/matrix_proto.h"
-#include "ceres/triplet_sparse_matrix.h"
#include "ceres/stringprintf.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "ceres/triplet_sparse_matrix.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -64,7 +64,7 @@ LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromId(int id) {
return NULL;
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromFile(
const string& filename) {
LinearLeastSquaresProblemProto problem_proto;
@@ -130,7 +130,7 @@ LinearLeastSquaresProblem* CreateLinearLeastSquaresProblemFromFile(
<< "Ceres to be built with Protocol Buffers support.";
return NULL;
}
-#endif // CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#endif // CERES_NO_PROTOCOL_BUFFERS
/*
A = [1 2]
@@ -573,13 +573,13 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
return problem;
}
-bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
- int iteration,
- const SparseMatrix* A,
- const double* D,
- const double* b,
- const double* x,
- int num_eliminate_blocks) {
+static bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
CHECK_NOTNULL(A);
Matrix AA;
A->ToDenseMatrix(&AA);
@@ -600,14 +600,14 @@ bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
return true;
};
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
-bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
- int iteration,
- const SparseMatrix* A,
- const double* D,
- const double* b,
- const double* x,
- int num_eliminate_blocks) {
+#ifndef CERES_NO_PROTOCOL_BUFFERS
+static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
CHECK_NOTNULL(A);
LinearLeastSquaresProblemProto lsqp;
A->ToProto(lsqp.mutable_a());
@@ -641,13 +641,13 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
return true;
}
#else
-bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
- int iteration,
- const SparseMatrix* A,
- const double* D,
- const double* b,
- const double* x,
- int num_eliminate_blocks) {
+static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
LOG(ERROR) << "Dumping least squares problems is only "
<< "supported when Ceres is compiled with "
<< "protocol buffer support.";
@@ -655,9 +655,9 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
}
#endif
-void WriteArrayToFileOrDie(const string& filename,
- const double* x,
- const int size) {
+static void WriteArrayToFileOrDie(const string& filename,
+ const double* x,
+ const int size) {
CHECK_NOTNULL(x);
VLOG(2) << "Writing array to: " << filename;
FILE* fptr = fopen(filename.c_str(), "w");
@@ -668,43 +668,68 @@ void WriteArrayToFileOrDie(const string& filename,
fclose(fptr);
}
-bool DumpLinearLeastSquaresProblemToTextFile(const string& directory,
- int iteration,
- const SparseMatrix* A,
- const double* D,
- const double* b,
- const double* x,
- int num_eliminate_blocks) {
+static bool DumpLinearLeastSquaresProblemToTextFile(const string& directory,
+ int iteration,
+ const SparseMatrix* A,
+ const double* D,
+ const double* b,
+ const double* x,
+ int num_eliminate_blocks) {
CHECK_NOTNULL(A);
string format_string = JoinPath(directory,
"lm_iteration_%03d");
string filename_prefix =
StringPrintf(format_string.c_str(), iteration);
+ LOG(INFO) << "writing to: " << filename_prefix << "*";
+
+ string matlab_script;
+ StringAppendF(&matlab_script,
+ "function lsqp = lm_iteration_%03d()\n", iteration);
+ StringAppendF(&matlab_script,
+ "lsqp.num_rows = %d;\n", A->num_rows());
+ StringAppendF(&matlab_script,
+ "lsqp.num_cols = %d;\n", A->num_cols());
+
{
string filename = filename_prefix + "_A.txt";
- LOG(INFO) << "writing to: " << filename;
FILE* fptr = fopen(filename.c_str(), "w");
CHECK_NOTNULL(fptr);
A->ToTextFile(fptr);
fclose(fptr);
+ StringAppendF(&matlab_script,
+ "tmp = load('%s', '-ascii');\n", filename.c_str());
+ StringAppendF(
+ &matlab_script,
+ "lsqp.A = sparse(tmp(:, 1) + 1, tmp(:, 2) + 1, tmp(:, 3), %d, %d);\n",
+ A->num_rows(),
+ A->num_cols());
}
+
if (D != NULL) {
string filename = filename_prefix + "_D.txt";
WriteArrayToFileOrDie(filename, D, A->num_cols());
+ StringAppendF(&matlab_script,
+ "lsqp.D = load('%s', '-ascii');\n", filename.c_str());
}
if (b != NULL) {
string filename = filename_prefix + "_b.txt";
WriteArrayToFileOrDie(filename, b, A->num_rows());
+ StringAppendF(&matlab_script,
+ "lsqp.b = load('%s', '-ascii');\n", filename.c_str());
}
if (x != NULL) {
string filename = filename_prefix + "_x.txt";
WriteArrayToFileOrDie(filename, x, A->num_cols());
+ StringAppendF(&matlab_script,
+ "lsqp.x = load('%s', '-ascii');\n", filename.c_str());
}
+ string matlab_filename = filename_prefix + ".m";
+ WriteStringToFileOrDie(matlab_script, matlab_filename);
return true;
}
diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.cc
index b2e3941eea1..08c3ba110d0 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.cc
@@ -30,13 +30,14 @@
#include "ceres/linear_solver.h"
-#include <glog/logging.h>
#include "ceres/cgnr_solver.h"
+#include "ceres/dense_normal_cholesky_solver.h"
#include "ceres/dense_qr_solver.h"
#include "ceres/iterative_schur_complement_solver.h"
#include "ceres/schur_complement_solver.h"
#include "ceres/sparse_normal_cholesky_solver.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -50,22 +51,24 @@ LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
return new CgnrSolver(options);
case SPARSE_NORMAL_CHOLESKY:
-#ifndef CERES_NO_SUITESPARSE
- return new SparseNormalCholeskySolver(options);
-#else
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
LOG(WARNING) << "SPARSE_NORMAL_CHOLESKY is not available. Please "
- << "build Ceres with SuiteSparse. Returning NULL.";
+ << "build Ceres with SuiteSparse or CXSparse. "
+ << "Returning NULL.";
return NULL;
-#endif // CERES_NO_SUITESPARSE
+#else
+ return new SparseNormalCholeskySolver(options);
+#endif
case SPARSE_SCHUR:
-#ifndef CERES_NO_SUITESPARSE
- return new SparseSchurComplementSolver(options);
-#else
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
LOG(WARNING) << "SPARSE_SCHUR is not available. Please "
- << "build Ceres with SuiteSparse. Returning NULL.";
+ << "build Ceres with SuiteSparse or CXSparse. "
+ << "Returning NULL.";
return NULL;
-#endif // CERES_NO_SUITESPARSE
+#else
+ return new SparseSchurComplementSolver(options);
+#endif
case DENSE_SCHUR:
return new DenseSchurComplementSolver(options);
@@ -76,10 +79,13 @@ LinearSolver* LinearSolver::Create(const LinearSolver::Options& options) {
case DENSE_QR:
return new DenseQRSolver(options);
+ case DENSE_NORMAL_CHOLESKY:
+ return new DenseNormalCholeskySolver(options);
+
default:
LOG(FATAL) << "Unknown linear solver type :"
<< options.type;
- return NULL; // MSVC doesn't understand that LOG(FATAL) never returns.
+ return NULL; // MSVC doesn't understand that LOG(FATAL) never returns.
}
}
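
A minimal sketch of the factory above in use; the chosen solver type is only an example, and the NULL check corresponds to the warnings logged when the requested backend was compiled out:

    LinearSolver::Options options;
    options.type = DENSE_NORMAL_CHOLESKY;    // now dispatched by the factory
    scoped_ptr<LinearSolver> solver(LinearSolver::Create(options));
    if (solver.get() == NULL) {
      // e.g. SPARSE_SCHUR requested but Ceres was built without
      // SuiteSparse and CXSparse; Create() logged a warning and returned NULL.
    }
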
diff --git a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h
index 5860ecc8a77..31f88740b9f 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/linear_solver.h
@@ -55,10 +55,11 @@ class LinearOperator;
// Ax = b
//
// It is expected that a single instance of a LinearSolver object
-// maybe used multiple times for solving different linear
-// systems. This allows them to cache and reuse information across
-// solves if for example the sparsity of the linear system remains
-// constant.
+// may be used multiple times for solving multiple linear systems with
+// the same sparsity structure. This allows them to cache and reuse
+// information across solves. This means that calling Solve on the
+// same LinearSolver instance with two different linear systems will
+// result in undefined behaviour.
//
// Subclasses of LinearSolver use two structs to configure themselves.
// The Options struct configures the LinearSolver object for its
@@ -70,10 +71,11 @@ class LinearSolver {
Options()
: type(SPARSE_NORMAL_CHOLESKY),
preconditioner_type(JACOBI),
+ sparse_linear_algebra_library(SUITE_SPARSE),
+ use_block_amd(true),
min_num_iterations(1),
max_num_iterations(1),
num_threads(1),
- constant_sparsity(false),
num_eliminate_blocks(0),
residual_reset_period(10),
row_block_size(Dynamic),
@@ -85,6 +87,11 @@ class LinearSolver {
PreconditionerType preconditioner_type;
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
+
+ // See solver.h for explanation of this option.
+ bool use_block_amd;
+
// Number of internal iterations that the solver uses. This
// parameter only makes sense for iterative solvers like CG.
int min_num_iterations;
@@ -93,10 +100,6 @@ class LinearSolver {
// If possible, how many threads can the solver use.
int num_threads;
- // If possible cache and reuse the symbolic factorization across
- // multiple calls.
- bool constant_sparsity;
-
// Eliminate 0 to num_eliminate_blocks - 1 from the Normal
// equations to form a schur complement. Only used by the Schur
// complement based solver. The most common use for this parameter
@@ -121,8 +124,8 @@ class LinearSolver {
// It is expected that these parameters are set programmatically
// rather than manually.
//
- // Please see explicit_schur_complement_solver_impl.h for more
- // details.
+ // Please see schur_complement_solver.h and schur_eliminator.h for
+ // more details.
int row_block_size;
int e_block_size;
int f_block_size;
@@ -244,6 +247,7 @@ class LinearSolver {
const PerSolveOptions& per_solve_options,
double* x) = 0;
+ // Factory
static LinearSolver* Create(const Options& options);
};
diff --git a/extern/libmv/third_party/ceres/internal/ceres/local_parameterization.cc b/extern/libmv/third_party/ceres/internal/ceres/local_parameterization.cc
index eeae74e3f95..26e7f4908a4 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/local_parameterization.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/local_parameterization.cc
@@ -28,10 +28,11 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#include <glog/logging.h>
-#include "ceres/internal/eigen.h"
#include "ceres/local_parameterization.h"
+
+#include "ceres/internal/eigen.h"
#include "ceres/rotation.h"
+#include "glog/logging.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/loss_function.cc b/extern/libmv/third_party/ceres/internal/ceres/loss_function.cc
index 00b2b184729..b948f289f21 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/loss_function.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/loss_function.cc
@@ -77,6 +77,70 @@ void CauchyLoss::Evaluate(double s, double rho[3]) const {
rho[2] = - c_ * (inv * inv);
}
+void ArctanLoss::Evaluate(double s, double rho[3]) const {
+ const double sum = 1 + s * s * b_;
+ const double inv = 1 / sum;
+ // 'sum' and 'inv' are always positive.
+ rho[0] = a_ * atan2(s, a_);
+ rho[1] = inv;
+ rho[2] = -2 * s * b_ * (inv * inv);
+}
+
+TolerantLoss::TolerantLoss(double a, double b)
+ : a_(a),
+ b_(b),
+ c_(b * log(1.0 + exp(-a / b))) {
+ CHECK_GE(a, 0.0);
+ CHECK_GT(b, 0.0);
+}
+
+void TolerantLoss::Evaluate(double s, double rho[3]) const {
+ const double x = (s - a_) / b_;
+ // The basic equation is rho[0] = b ln(1 + e^x). However, if e^x is too
+ // large, it will overflow. Since numerically 1 + e^x == e^x when
+ // x is greater than about ln(2^53) for doubles, beyond this threshold
+ // we substitute x for ln(1 + e^x) as a numerically equivalent approximation.
+ static const double kLog2Pow53 = 36.7; // ln(2^53), the threshold above.
+ if (x > kLog2Pow53) {
+ rho[0] = s - a_ - c_;
+ rho[1] = 1.0;
+ rho[2] = 0.0;
+ } else {
+ const double e_x = exp(x);
+ rho[0] = b_ * log(1.0 + e_x) - c_;
+ rho[1] = e_x / (1.0 + e_x);
+ rho[2] = 0.5 / (b_ * (1.0 + cosh(x)));
+ }
+}
+
+ComposedLoss::ComposedLoss(const LossFunction* f, Ownership ownership_f,
+ const LossFunction* g, Ownership ownership_g)
+ : f_(CHECK_NOTNULL(f)),
+ g_(CHECK_NOTNULL(g)),
+ ownership_f_(ownership_f),
+ ownership_g_(ownership_g) {
+}
+
+ComposedLoss::~ComposedLoss() {
+ if (ownership_f_ == DO_NOT_TAKE_OWNERSHIP) {
+ f_.release();
+ }
+ if (ownership_g_ == DO_NOT_TAKE_OWNERSHIP) {
+ g_.release();
+ }
+}
+
+void ComposedLoss::Evaluate(double s, double rho[3]) const {
+ double rho_f[3], rho_g[3];
+ g_->Evaluate(s, rho_g);
+ f_->Evaluate(rho_g[0], rho_f);
+ rho[0] = rho_f[0];
+ // f'(g(s)) * g'(s).
+ rho[1] = rho_f[1] * rho_g[1];
+ // f''(g(s)) * g'(s) * g'(s) + f'(g(s)) * g''(s).
+ rho[2] = rho_f[2] * rho_g[1] * rho_g[1] + rho_f[1] * rho_g[2];
+}
+
void ScaledLoss::Evaluate(double s, double rho[3]) const {
if (rho_.get() == NULL) {
rho[0] = a_ * s;
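Note: ComposedLoss::Evaluate() above is a direct chain-rule composition, rho(s) = f(g(s)), rho'(s) = f'(g(s)) g'(s), and rho''(s) = f''(g(s)) g'(s)^2 + f'(g(s)) g''(s). The standalone sketch below (not part of the patch) reproduces the same arithmetic; CauchyLike and Scaled are hypothetical stand-ins, not the Ceres loss classes.

// Standalone illustration of the chain rule used by ComposedLoss::Evaluate().
// CauchyLike and Scaled are hypothetical stand-ins, not Ceres loss functions.
#include <cmath>
#include <cstdio>

// rho(s) = log(1 + s), with first and second derivatives.
static void CauchyLike(double s, double rho[3]) {
  const double sum = 1.0 + s;
  const double inv = 1.0 / sum;
  rho[0] = std::log(sum);
  rho[1] = inv;
  rho[2] = -inv * inv;
}

// rho(s) = a * s (a simple scaling), with derivatives.
static void Scaled(double s, double a, double rho[3]) {
  rho[0] = a * s;
  rho[1] = a;
  rho[2] = 0.0;
}

int main() {
  const double s = 2.0;
  double rho_g[3], rho_f[3], rho[3];
  CauchyLike(s, rho_g);          // g(s): inner loss
  Scaled(rho_g[0], 0.5, rho_f);  // f evaluated at g(s): outer loss
  rho[0] = rho_f[0];                                  // f(g(s))
  rho[1] = rho_f[1] * rho_g[1];                       // f'(g(s)) * g'(s)
  rho[2] = rho_f[2] * rho_g[1] * rho_g[1] +
           rho_f[1] * rho_g[2];                       // chain rule, 2nd derivative
  std::printf("rho = %g, rho' = %g, rho'' = %g\n", rho[0], rho[1], rho[2]);
  return 0;
}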
diff --git a/extern/libmv/third_party/ceres/internal/ceres/matrix_proto.h b/extern/libmv/third_party/ceres/internal/ceres/matrix_proto.h
index b8a3a1a6de6..94b3076e3d7 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/matrix_proto.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/matrix_proto.h
@@ -33,7 +33,7 @@
#ifndef CERES_INTERNAL_MATRIX_PROTO_H_
#define CERES_INTERNAL_MATRIX_PROTO_H_
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
#include "ceres/matrix.pb.h"
#endif
diff --git a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h
index 77cb00cb6b4..cfc98a3ebd0 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/minimizer.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/minimizer.h
@@ -40,6 +40,8 @@ namespace internal {
class Evaluator;
class LinearSolver;
+class SparseMatrix;
+class TrustRegionStrategy;
// Interface for non-linear least squares solvers.
class Minimizer {
@@ -48,53 +50,93 @@ class Minimizer {
// see solver.h for detailed information about the meaning and
// default values of each of these parameters.
struct Options {
+ Options() {
+ Init(Solver::Options());
+ }
+
explicit Options(const Solver::Options& options) {
+ Init(options);
+ }
+
+ void Init(const Solver::Options& options) {
max_num_iterations = options.max_num_iterations;
- max_solver_time_sec = options.max_solver_time_sec;
+ max_solver_time_in_seconds = options.max_solver_time_in_seconds;
+ max_step_solver_retries = 5;
gradient_tolerance = options.gradient_tolerance;
parameter_tolerance = options.parameter_tolerance;
function_tolerance = options.function_tolerance;
min_relative_decrease = options.min_relative_decrease;
eta = options.eta;
- tau = options.tau;
jacobi_scaling = options.jacobi_scaling;
- crash_and_dump_lsqp_on_failure = options.crash_and_dump_lsqp_on_failure;
+ use_nonmonotonic_steps = options.use_nonmonotonic_steps;
+ max_consecutive_nonmonotonic_steps =
+ options.max_consecutive_nonmonotonic_steps;
lsqp_dump_directory = options.lsqp_dump_directory;
lsqp_iterations_to_dump = options.lsqp_iterations_to_dump;
lsqp_dump_format_type = options.lsqp_dump_format_type;
num_eliminate_blocks = options.num_eliminate_blocks;
- logging_type = options.logging_type;
+ max_num_consecutive_invalid_steps =
+ options.max_num_consecutive_invalid_steps;
+ min_trust_region_radius = options.min_trust_region_radius;
+ evaluator = NULL;
+ trust_region_strategy = NULL;
+ jacobian = NULL;
+ callbacks = options.callbacks;
}
int max_num_iterations;
- int max_solver_time_sec;
+ double max_solver_time_in_seconds;
+
+ // Number of times the linear solver should be retried in case of
+ // numerical failure. The retries are done by exponentially scaling up
+ // mu at each retry. This leads to stronger and stronger
+ // regularization making the linear least squares problem better
+ // conditioned at each retry.
+ int max_step_solver_retries;
double gradient_tolerance;
double parameter_tolerance;
double function_tolerance;
double min_relative_decrease;
double eta;
- double tau;
bool jacobi_scaling;
- bool crash_and_dump_lsqp_on_failure;
+ bool use_nonmonotonic_steps;
+ int max_consecutive_nonmonotonic_steps;
vector<int> lsqp_iterations_to_dump;
DumpFormatType lsqp_dump_format_type;
string lsqp_dump_directory;
int num_eliminate_blocks;
- LoggingType logging_type;
+ int max_num_consecutive_invalid_steps;
+ double min_trust_region_radius;
// List of callbacks that are executed by the Minimizer at the end
// of each iteration.
//
- // Client owns these pointers.
+ // The Options struct does not own these pointers.
vector<IterationCallback*> callbacks;
+
+ // Object responsible for evaluating the cost, residuals and
+ // Jacobian matrix. The Options struct does not own this pointer.
+ Evaluator* evaluator;
+
+ // Object responsible for actually computing the trust region
+ // step, and sizing the trust region radius. The Options struct
+ // does not own this pointer.
+ TrustRegionStrategy* trust_region_strategy;
+
+ // Object holding the Jacobian matrix. It is assumed that the
+ // sparsity structure of the matrix has already been initialized
+ // and will remain constant for the life time of the
+ // optimization. The Options struct does not own this pointer.
+ SparseMatrix* jacobian;
};
virtual ~Minimizer() {}
+
+ // Note: The minimizer is expected to update the state of the
+ // parameters array every iteration. This is required for the
+ // StateUpdatingCallback to work.
virtual void Minimize(const Options& options,
- Evaluator* evaluator,
- LinearSolver* linear_solver,
- const double* initial_parameters,
- double* final_parameters,
+ double* parameters,
Solver::Summary* summary) = 0;
};
diff --git a/extern/libmv/third_party/ceres/internal/ceres/mutex.h b/extern/libmv/third_party/ceres/internal/ceres/mutex.h
index 6514b107041..5090a71b78d 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/mutex.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/mutex.h
@@ -95,11 +95,11 @@
#ifndef CERES_INTERNAL_MUTEX_H_
#define CERES_INTERNAL_MUTEX_H_
-#if defined(NO_THREADS)
+#if defined(CERES_NO_THREADS)
typedef int MutexType; // to keep a lock-count
#elif defined(_WIN32) || defined(__CYGWIN32__) || defined(__CYGWIN64__)
-# define WIN32_LEAN_AND_MEAN // We only need minimal includes
-# ifdef GMUTEX_TRYLOCK
+# define CERES_WIN32_LEAN_AND_MEAN // We only need minimal includes
+# ifdef CERES_GMUTEX_TRYLOCK
// We need Windows NT or later for TryEnterCriticalSection(). If you
// don't need that functionality, you can remove these _WIN32_WINNT
// lines, and change TryLock() to assert(0) or something.
@@ -108,9 +108,9 @@
# endif
# endif
// To avoid macro definition of ERROR.
-# define NOGDI
+# define CERES_NOGDI
// To avoid macro definition of min/max.
-# define NOMINMAX
+# define CERES_NOMINMAX
# include <windows.h>
typedef CRITICAL_SECTION MutexType;
#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
@@ -151,7 +151,7 @@ class Mutex {
inline void Lock(); // Block if needed until free then acquire exclusively
inline void Unlock(); // Release a lock acquired via Lock()
-#ifdef GMUTEX_TRYLOCK
+#ifdef CERES_GMUTEX_TRYLOCK
inline bool TryLock(); // If free, Lock() and return true, else return false
#endif
// Note that on systems that don't support read-write locks, these may
@@ -183,7 +183,7 @@ class Mutex {
};
// Now the implementation of Mutex for various systems
-#if defined(NO_THREADS)
+#if defined(CERES_NO_THREADS)
// When we don't have threads, we can be either reading or writing,
// but not both. We can have lots of readers at once (in no-threads
@@ -199,7 +199,7 @@ Mutex::Mutex() : mutex_(0) { }
Mutex::~Mutex() { assert(mutex_ == 0); }
void Mutex::Lock() { assert(--mutex_ == -1); }
void Mutex::Unlock() { assert(mutex_++ == -1); }
-#ifdef GMUTEX_TRYLOCK
+#ifdef CERES_GMUTEX_TRYLOCK
bool Mutex::TryLock() { if (mutex_) return false; Lock(); return true; }
#endif
void Mutex::ReaderLock() { assert(++mutex_ > 0); }
@@ -220,91 +220,101 @@ void Mutex::ReaderUnlock() { Unlock(); }
#elif defined(CERES_HAVE_PTHREAD) && defined(CERES_HAVE_RWLOCK)
-#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
- if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() {
SetIsSafe();
if (is_safe_ && pthread_rwlock_init(&mutex_, NULL) != 0) abort();
}
-Mutex::~Mutex() { SAFE_PTHREAD(pthread_rwlock_destroy); }
-void Mutex::Lock() { SAFE_PTHREAD(pthread_rwlock_wrlock); }
-void Mutex::Unlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
-#ifdef GMUTEX_TRYLOCK
+Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_rwlock_destroy); }
+void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_rwlock_wrlock); }
+void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
+#ifdef CERES_GMUTEX_TRYLOCK
bool Mutex::TryLock() { return is_safe_ ?
pthread_rwlock_trywrlock(&mutex_) == 0 :
true; }
#endif
-void Mutex::ReaderLock() { SAFE_PTHREAD(pthread_rwlock_rdlock); }
-void Mutex::ReaderUnlock() { SAFE_PTHREAD(pthread_rwlock_unlock); }
-#undef SAFE_PTHREAD
+void Mutex::ReaderLock() { CERES_SAFE_PTHREAD(pthread_rwlock_rdlock); }
+void Mutex::ReaderUnlock() { CERES_SAFE_PTHREAD(pthread_rwlock_unlock); }
+#undef CERES_SAFE_PTHREAD
#elif defined(CERES_HAVE_PTHREAD)
-#define SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
- if (is_safe_ && fncall(&mutex_) != 0) abort(); \
+#define CERES_SAFE_PTHREAD(fncall) do { /* run fncall if is_safe_ is true */ \
+ if (is_safe_ && fncall(&mutex_) != 0) abort(); \
} while (0)
Mutex::Mutex() {
SetIsSafe();
if (is_safe_ && pthread_mutex_init(&mutex_, NULL) != 0) abort();
}
-Mutex::~Mutex() { SAFE_PTHREAD(pthread_mutex_destroy); }
-void Mutex::Lock() { SAFE_PTHREAD(pthread_mutex_lock); }
-void Mutex::Unlock() { SAFE_PTHREAD(pthread_mutex_unlock); }
-#ifdef GMUTEX_TRYLOCK
+Mutex::~Mutex() { CERES_SAFE_PTHREAD(pthread_mutex_destroy); }
+void Mutex::Lock() { CERES_SAFE_PTHREAD(pthread_mutex_lock); }
+void Mutex::Unlock() { CERES_SAFE_PTHREAD(pthread_mutex_unlock); }
+#ifdef CERES_GMUTEX_TRYLOCK
bool Mutex::TryLock() { return is_safe_ ?
pthread_mutex_trylock(&mutex_) == 0 : true; }
#endif
void Mutex::ReaderLock() { Lock(); }
void Mutex::ReaderUnlock() { Unlock(); }
-#undef SAFE_PTHREAD
+#undef CERES_SAFE_PTHREAD
#endif
// --------------------------------------------------------------------------
// Some helper classes
-// MutexLock(mu) acquires mu when constructed and releases it when destroyed.
-class MutexLock {
+// Note: The weird "Ceres" prefix for the class is a workaround for having two
+// similar mutex.h files included in the same translation unit. This is a
+// problem because macros do not respect C++ namespaces, and as a result, this
+// does not work well (e.g. inside Chrome). The offending macros are
+// "MutexLock(x) COMPILE_ASSERT(false)". To work around this, "Ceres" is
+// prefixed to the class names; this permits defining the classes.
+
+// CeresMutexLock(mu) acquires mu when constructed and releases it when destroyed.
+class CeresMutexLock {
public:
- explicit MutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
- ~MutexLock() { mu_->Unlock(); }
+ explicit CeresMutexLock(Mutex *mu) : mu_(mu) { mu_->Lock(); }
+ ~CeresMutexLock() { mu_->Unlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
- MutexLock(const MutexLock&);
- void operator=(const MutexLock&);
+ CeresMutexLock(const CeresMutexLock&);
+ void operator=(const CeresMutexLock&);
};
-// ReaderMutexLock and WriterMutexLock do the same, for rwlocks
-class ReaderMutexLock {
+// CeresReaderMutexLock and CeresWriterMutexLock do the same, for rwlocks
+class CeresReaderMutexLock {
public:
- explicit ReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
- ~ReaderMutexLock() { mu_->ReaderUnlock(); }
+ explicit CeresReaderMutexLock(Mutex *mu) : mu_(mu) { mu_->ReaderLock(); }
+ ~CeresReaderMutexLock() { mu_->ReaderUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
- ReaderMutexLock(const ReaderMutexLock&);
- void operator=(const ReaderMutexLock&);
+ CeresReaderMutexLock(const CeresReaderMutexLock&);
+ void operator=(const CeresReaderMutexLock&);
};
-class WriterMutexLock {
+class CeresWriterMutexLock {
public:
- explicit WriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
- ~WriterMutexLock() { mu_->WriterUnlock(); }
+ explicit CeresWriterMutexLock(Mutex *mu) : mu_(mu) { mu_->WriterLock(); }
+ ~CeresWriterMutexLock() { mu_->WriterUnlock(); }
private:
Mutex * const mu_;
// Disallow "evil" constructors
- WriterMutexLock(const WriterMutexLock&);
- void operator=(const WriterMutexLock&);
+ CeresWriterMutexLock(const CeresWriterMutexLock&);
+ void operator=(const CeresWriterMutexLock&);
};
// Catch bug where variable name is omitted, e.g. MutexLock (&mu);
-#define MutexLock(x) COMPILE_ASSERT(0, mutex_lock_decl_missing_var_name)
-#define ReaderMutexLock(x) COMPILE_ASSERT(0, rmutex_lock_decl_missing_var_name)
-#define WriterMutexLock(x) COMPILE_ASSERT(0, wmutex_lock_decl_missing_var_name)
+#define CeresMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_mutex_lock_decl_missing_var_name)
+#define CeresReaderMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_rmutex_lock_decl_missing_var_name)
+#define CeresWriterMutexLock(x) \
+ COMPILE_ASSERT(0, ceres_wmutex_lock_decl_missing_var_name)
} // namespace internal
} // namespace ceres
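Note: the renamed CeresMutexLock / CeresReaderMutexLock / CeresWriterMutexLock classes are plain RAII scoped locks over the Mutex defined in this header. A minimal usage sketch (not part of the patch; the shared counter is hypothetical, and it assumes a build configuration for which this header defines a working Mutex):

// Sketch of the RAII locking pattern provided by CeresMutexLock.
#include "ceres/mutex.h"

static ceres::internal::Mutex counter_mutex;  // protects shared_counter
static int shared_counter = 0;                // hypothetical shared state

void IncrementCounter() {
  // Lock() runs in the constructor, Unlock() in the destructor, so the
  // mutex is released on every return path.
  ceres::internal::CeresMutexLock lock(&counter_mutex);
  ++shared_counter;
}

int main() {
  IncrementCounter();
  return shared_counter == 1 ? 0 : 1;
}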
diff --git a/extern/libmv/third_party/ceres/internal/ceres/normal_prior.cc b/extern/libmv/third_party/ceres/internal/ceres/normal_prior.cc
index f30bbc8b46b..392d728fb32 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/normal_prior.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/normal_prior.cc
@@ -32,11 +32,10 @@
#include <cstddef>
#include <vector>
-
-#include <glog/logging.h>
#include "ceres/internal/eigen.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h b/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h
index 4bac1a85828..f20805ca873 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/parameter_block.h
@@ -32,13 +32,15 @@
#define CERES_INTERNAL_PARAMETER_BLOCK_H_
#include <cstdlib>
+#include <string>
+#include "ceres/array_utils.h"
#include "ceres/integral_types.h"
-#include <glog/logging.h>
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
#include "ceres/local_parameterization.h"
-#include "ceres/residual_block_utils.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -172,6 +174,19 @@ class ParameterBlock {
return local_parameterization_->Plus(x, delta, x_plus_delta);
}
+ string ToString() const {
+ return StringPrintf("{ user_state=%p, state=%p, size=%d, "
+ "constant=%d, index=%d, state_offset=%d, "
+ "delta_offset=%d }",
+ user_state_,
+ state_,
+ size_,
+ is_constant_,
+ index_,
+ state_offset_,
+ delta_offset_);
+ }
+
private:
void Init(double* user_state,
int size,
diff --git a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc
index fcf8fd53aed..0722fc82c02 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/partitioned_matrix_view.cc
@@ -35,10 +35,10 @@
#include <algorithm>
#include <cstring>
#include <vector>
-#include <glog/logging.h>
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
#include "ceres/internal/eigen.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc
new file mode 100644
index 00000000000..20c01566a89
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.cc
@@ -0,0 +1,184 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#include "ceres/polynomial_solver.h"
+
+#include <cmath>
+#include <cstddef>
+#include "Eigen/Dense"
+#include "ceres/internal/port.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+
+// Balancing function as described by B. N. Parlett and C. Reinsch,
+// "Balancing a Matrix for Calculation of Eigenvalues and Eigenvectors".
+// In: Numerische Mathematik, Volume 13, Number 4 (1969), 293-304,
+// Springer Berlin / Heidelberg. DOI: 10.1007/BF02165404
+void BalanceCompanionMatrix(Matrix* companion_matrix_ptr) {
+ CHECK_NOTNULL(companion_matrix_ptr);
+ Matrix& companion_matrix = *companion_matrix_ptr;
+ Matrix companion_matrix_offdiagonal = companion_matrix;
+ companion_matrix_offdiagonal.diagonal().setZero();
+
+ const int degree = companion_matrix.rows();
+
+ // gamma <= 1 controls how much a change in the scaling has to
+ // lower the 1-norm of the companion matrix to be accepted.
+ //
+ // gamma = 1 seems to lead to cycles (numerical issues?), so
+ // we set it slightly lower.
+ const double gamma = 0.9;
+
+ // Greedily scale row/column pairs until there is no change.
+ bool scaling_has_changed;
+ do {
+ scaling_has_changed = false;
+
+ for (int i = 0; i < degree; ++i) {
+ const double row_norm = companion_matrix_offdiagonal.row(i).lpNorm<1>();
+ const double col_norm = companion_matrix_offdiagonal.col(i).lpNorm<1>();
+
+ // Decompose row_norm/col_norm into mantissa * 2^exponent,
+ // where 0.5 <= mantissa < 1. Discard mantissa (return value
+ // of frexp), as only the exponent is needed.
+ int exponent = 0;
+ std::frexp(row_norm / col_norm, &exponent);
+ exponent /= 2;
+
+ if (exponent != 0) {
+ const double scaled_col_norm = std::ldexp(col_norm, exponent);
+ const double scaled_row_norm = std::ldexp(row_norm, -exponent);
+ if (scaled_col_norm + scaled_row_norm < gamma * (col_norm + row_norm)) {
+ // Accept the new scaling. (Multiplication by powers of 2 should not
+ // introduce rounding errors (ignoring non-normalized numbers and
+ // over- or underflow))
+ scaling_has_changed = true;
+ companion_matrix_offdiagonal.row(i) *= std::ldexp(1.0, -exponent);
+ companion_matrix_offdiagonal.col(i) *= std::ldexp(1.0, exponent);
+ }
+ }
+ }
+ } while (scaling_has_changed);
+
+ companion_matrix_offdiagonal.diagonal() = companion_matrix.diagonal();
+ companion_matrix = companion_matrix_offdiagonal;
+ VLOG(3) << "Balanced companion matrix is\n" << companion_matrix;
+}
+
+void BuildCompanionMatrix(const Vector& polynomial,
+ Matrix* companion_matrix_ptr) {
+ CHECK_NOTNULL(companion_matrix_ptr);
+ Matrix& companion_matrix = *companion_matrix_ptr;
+
+ const int degree = polynomial.size() - 1;
+
+ companion_matrix.resize(degree, degree);
+ companion_matrix.setZero();
+ companion_matrix.diagonal(-1).setOnes();
+ companion_matrix.col(degree - 1) = -polynomial.reverse().head(degree);
+}
+
+// Remove leading terms with zero coefficients.
+Vector RemoveLeadingZeros(const Vector& polynomial_in) {
+ int i = 0;
+ while (i < (polynomial_in.size() - 1) && polynomial_in(i) == 0.0) {
+ ++i;
+ }
+ return polynomial_in.tail(polynomial_in.size() - i);
+}
+} // namespace
+
+bool FindPolynomialRoots(const Vector& polynomial_in,
+ Vector* real,
+ Vector* imaginary) {
+ if (polynomial_in.size() == 0) {
+ LOG(ERROR) << "Invalid polynomial of size 0 passed to FindPolynomialRoots";
+ return false;
+ }
+
+ Vector polynomial = RemoveLeadingZeros(polynomial_in);
+ const int degree = polynomial.size() - 1;
+
+ // Is the polynomial constant?
+ if (degree == 0) {
+ LOG(WARNING) << "Trying to extract roots from a constant "
+ << "polynomial in FindPolynomialRoots";
+ return true;
+ }
+
+ // Divide by leading term
+ const double leading_term = polynomial(0);
+ polynomial /= leading_term;
+
+ // Separately handle linear polynomials.
+ if (degree == 1) {
+ if (real != NULL) {
+ real->resize(1);
+ (*real)(0) = -polynomial(1);
+ }
+ if (imaginary != NULL) {
+ imaginary->resize(1);
+ imaginary->setZero();
+ }
+ }
+ return true;
+ }
+
+ // The degree is now known to be at least 2.
+ // Build and balance the companion matrix to the polynomial.
+ Matrix companion_matrix(degree, degree);
+ BuildCompanionMatrix(polynomial, &companion_matrix);
+ BalanceCompanionMatrix(&companion_matrix);
+
+ // Find its (complex) eigenvalues.
+ Eigen::EigenSolver<Matrix> solver(companion_matrix,
+ Eigen::EigenvaluesOnly);
+ if (solver.info() != Eigen::Success) {
+ LOG(ERROR) << "Failed to extract eigenvalues from companion matrix.";
+ return false;
+ }
+
+ // Output roots
+ if (real != NULL) {
+ *real = solver.eigenvalues().real();
+ } else {
+ LOG(WARNING) << "NULL pointer passed as real argument to "
+ << "FindPolynomialRoots. Real parts of the roots will not "
+ << "be returned.";
+ }
+ if (imaginary != NULL) {
+ *imaginary = solver.eigenvalues().imag();
+ }
+ return true;
+}
+
+} // namespace internal
+} // namespace ceres
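Note: FindPolynomialRoots() above reduces root finding to an eigenvalue problem: for a monic polynomial, the companion matrix (ones on the subdiagonal, negated low-order coefficients in the last column) has the polynomial's roots as its eigenvalues, and balancing only improves conditioning. A standalone check of that construction (not part of the patch, using plain Eigen types rather than the ceres-internal Matrix/Vector aliases):

// Verify that the companion matrix of p(x) = x^2 - 3x + 2 has eigenvalues 1 and 2.
#include <cstdio>
#include "Eigen/Dense"

int main() {
  // Coefficients stored as in polynomial_solver.h: polynomial(i) multiplies
  // x^(N - i), so index 0 holds the (monic) leading term.
  Eigen::Vector3d polynomial(1.0, -3.0, 2.0);
  const int degree = polynomial.size() - 1;

  // Same layout as BuildCompanionMatrix(): subdiagonal of ones, and the
  // negated, reversed low-order coefficients in the last column.
  Eigen::MatrixXd companion = Eigen::MatrixXd::Zero(degree, degree);
  companion.diagonal(-1).setOnes();
  companion.col(degree - 1) = -polynomial.reverse().head(degree);

  Eigen::EigenSolver<Eigen::MatrixXd> solver(companion, false /* eigenvalues only */);
  for (int i = 0; i < degree; ++i) {
    // Expected output: 1 and 2 (in some order), with zero imaginary parts.
    std::printf("root %d: %g + %gi\n", i,
                solver.eigenvalues()[i].real(),
                solver.eigenvalues()[i].imag());
  }
  return 0;
}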
diff --git a/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h b/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h
new file mode 100644
index 00000000000..1cf07ddb549
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/polynomial_solver.h
@@ -0,0 +1,65 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: moll.markus@arcor.de (Markus Moll)
+
+#ifndef CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+#define CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
+
+#include "ceres/internal/eigen.h"
+
+namespace ceres {
+namespace internal {
+
+// Use the companion matrix eigenvalues to determine the roots of the polynomial
+//
+// sum_{i=0}^N polynomial(i) x^{N-i}.
+//
+// This function returns true on success, false otherwise.
+// Failure indicates that the polynomial is invalid (of size 0) or
+// that the eigenvalues of the companion matrix could not be computed.
+// On failure, a more detailed message will be written to LOG(ERROR).
+// If real is not NULL, the real parts of the roots will be returned in it.
+// Likewise, if imaginary is not NULL, imaginary parts will be returned in it.
+bool FindPolynomialRoots(const Vector& polynomial,
+ Vector* real,
+ Vector* imaginary);
+
+// Evaluate the polynomial at x using the Horner scheme.
+inline double EvaluatePolynomial(const Vector& polynomial, double x) {
+ double v = 0.0;
+ for (int i = 0; i < polynomial.size(); ++i) {
+ v = v * x + polynomial(i);
+ }
+ return v;
+}
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_POLYNOMIAL_SOLVER_H_
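Note: EvaluatePolynomial() above is Horner's scheme for the same coefficient convention (polynomial(i) multiplies x^(N - i)). A tiny standalone equivalent (not part of the patch), using std::vector instead of the ceres-internal Vector:

// Horner evaluation matching EvaluatePolynomial(), leading coefficient first.
#include <cstdio>
#include <vector>

static double EvaluatePoly(const std::vector<double>& p, double x) {
  double v = 0.0;
  for (double coeff : p) {
    v = v * x + coeff;  // accumulates ((p0 * x + p1) * x + p2) ...
  }
  return v;
}

int main() {
  // p(x) = x^2 - 3x + 2, so p(2) == 0 and p(0) == 2.
  const std::vector<double> p = {1.0, -3.0, 2.0};
  std::printf("p(2) = %g, p(0) = %g\n", EvaluatePoly(p, 2.0), EvaluatePoly(p, 0.0));
  return 0;
}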
diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc
index 68242477d6f..c186f527be8 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.cc
@@ -37,16 +37,15 @@
#include <string>
#include <utility>
#include <vector>
-
-#include <glog/logging.h>
+#include "ceres/cost_function.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/stl_util.h"
-#include "ceres/map_util.h"
#include "ceres/stringprintf.h"
-#include "ceres/cost_function.h"
-#include "ceres/loss_function.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h
index 523860e652a..2ca055448c3 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/problem_impl.h
@@ -118,7 +118,7 @@ class ProblemImpl {
map<double*, ParameterBlock*> parameter_block_map_;
internal::scoped_ptr<internal::Program> program_;
- DISALLOW_COPY_AND_ASSIGN(ProblemImpl);
+ CERES_DISALLOW_COPY_AND_ASSIGN(ProblemImpl);
};
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/program.cc b/extern/libmv/third_party/ceres/internal/ceres/program.cc
index 444b1020253..82d76d39233 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/program.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/program.cc
@@ -32,14 +32,18 @@
#include <map>
#include <vector>
+#include "ceres/casts.h"
+#include "ceres/compressed_row_sparse_matrix.h"
+#include "ceres/cost_function.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/port.h"
+#include "ceres/local_parameterization.h"
+#include "ceres/loss_function.h"
+#include "ceres/map_util.h"
#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
#include "ceres/residual_block.h"
#include "ceres/stl_util.h"
-#include "ceres/map_util.h"
-#include "ceres/problem.h"
-#include "ceres/cost_function.h"
-#include "ceres/loss_function.h"
-#include "ceres/local_parameterization.h"
namespace ceres {
namespace internal {
@@ -69,7 +73,8 @@ vector<ResidualBlock*>* Program::mutable_residual_blocks() {
bool Program::StateVectorToParameterBlocks(const double *state) {
for (int i = 0; i < parameter_blocks_.size(); ++i) {
- if (!parameter_blocks_[i]->SetState(state)) {
+ if (!parameter_blocks_[i]->IsConstant() &&
+ !parameter_blocks_[i]->SetState(state)) {
return false;
}
state += parameter_blocks_[i]->Size();
@@ -86,9 +91,18 @@ void Program::ParameterBlocksToStateVector(double *state) const {
void Program::CopyParameterBlockStateToUserState() {
for (int i = 0; i < parameter_blocks_.size(); ++i) {
- parameter_blocks_[i]->GetState(
- parameter_blocks_[i]->mutable_user_state());
+ parameter_blocks_[i]->GetState(parameter_blocks_[i]->mutable_user_state());
+ }
+}
+
+bool Program::SetParameterBlockStatePtrsToUserStatePtrs() {
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ if (!parameter_blocks_[i]->IsConstant() &&
+ !parameter_blocks_[i]->SetState(parameter_blocks_[i]->user_state())) {
+ return false;
+ }
}
+ return true;
}
bool Program::Plus(const double* state,
@@ -193,40 +207,25 @@ int Program::MaxParametersPerResidualBlock() const {
return max_parameters;
}
-bool Program::Evaluate(double* cost, double* residuals) {
- *cost = 0.0;
-
- // Scratch space is only needed if residuals is NULL.
- scoped_array<double> scratch;
- if (residuals == NULL) {
- scratch.reset(new double[MaxScratchDoublesNeededForEvaluate()]);
- } else {
- // TODO(keir): Is this needed? Check by removing the equivalent statement in
- // dense_evaluator.cc and running the tests.
- VectorRef(residuals, NumResiduals()).setZero();
- }
-
+int Program::MaxResidualsPerResidualBlock() const {
+ int max_residuals = 0;
for (int i = 0; i < residual_blocks_.size(); ++i) {
- ResidualBlock* residual_block = residual_blocks_[i];
-
- // Evaluate the cost function for this residual.
- double residual_cost;
- if (!residual_block->Evaluate(&residual_cost,
- residuals,
- NULL, // No jacobian.
- scratch.get())) {
- return false;
- }
-
- // Accumulate residual cost into the total cost.
- *cost += residual_cost;
+ max_residuals = max(max_residuals,
+ residual_blocks_[i]->NumResiduals());
+ }
+ return max_residuals;
+}
- // Update the residuals cursor.
- if (residuals != NULL) {
- residuals += residual_block->NumResiduals();
- }
+string Program::ToString() const {
+ string ret = "Program dump\n";
+ ret += StringPrintf("Number of parameter blocks: %d\n", NumParameterBlocks());
+ ret += StringPrintf("Number of parameters: %d\n", NumParameters());
+ ret += "Parameters:\n";
+ for (int i = 0; i < parameter_blocks_.size(); ++i) {
+ ret += StringPrintf("%d: %s\n",
+ i, parameter_blocks_[i]->ToString().c_str());
}
- return true;
+ return ret;
}
} // namespace internal
diff --git a/extern/libmv/third_party/ceres/internal/ceres/program.h b/extern/libmv/third_party/ceres/internal/ceres/program.h
index 113d352d562..5002b7e752e 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/program.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/program.h
@@ -31,6 +31,7 @@
#ifndef CERES_INTERNAL_PROGRAM_H_
#define CERES_INTERNAL_PROGRAM_H_
+#include <string>
#include <vector>
#include "ceres/internal/port.h"
@@ -71,9 +72,14 @@ class Program {
bool StateVectorToParameterBlocks(const double *state);
void ParameterBlocksToStateVector(double *state) const;
- // Copy internal state out to the user's parameters.
+ // Copy internal state to the user's parameters.
void CopyParameterBlockStateToUserState();
+ // Set the parameter block pointers to the user pointers. Since this
+ // runs parameter block set state internally, which may call local
+ // parameterizations, this can fail. False is returned on failure.
+ bool SetParameterBlockStatePtrsToUserStatePtrs();
+
// Update a state vector for the program given a delta.
bool Plus(const double* state,
const double* delta,
@@ -103,16 +109,11 @@ class Program {
int MaxScratchDoublesNeededForEvaluate() const;
int MaxDerivativesPerResidualBlock() const;
int MaxParametersPerResidualBlock() const;
+ int MaxResidualsPerResidualBlock() const;
- // Evaluate the cost and maybe the residuals for the program. If residuals is
- // NULL, then residuals are not calculated. If the jacobian is needed, instead
- // use the various evaluators (e.g. dense_evaluator.h).
- //
- // This is a trivial implementation of evaluate not intended for use in the
- // core solving loop. The other evaluators, which support constructing the
- // jacobian in addition to the cost and residuals, are considerably
- // complicated by the need to construct the jacobian.
- bool Evaluate(double* cost, double* residuals);
+ // A human-readable dump of the parameter blocks for debugging.
+ // TODO(keir): If necessary, also dump the residual blocks.
+ string ToString() const;
private:
// The Program does not own the ParameterBlock or ResidualBlock objects.
diff --git a/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h b/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h
index 7ec74b1b269..6c48e7d7643 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/program_evaluator.h
@@ -120,13 +120,18 @@ class ProgramEvaluator : public Evaluator {
bool Evaluate(const double* state,
double* cost,
double* residuals,
+ double* gradient,
SparseMatrix* jacobian) {
// The parameters are stateful, so set the state before evaluating.
if (!program_->StateVectorToParameterBlocks(state)) {
return false;
}
- if (jacobian) {
+ if (residuals != NULL) {
+ VectorRef(residuals, program_->NumResiduals()).setZero();
+ }
+
+ if (jacobian != NULL) {
jacobian->SetZero();
}
@@ -158,13 +163,16 @@ class ProgramEvaluator : public Evaluator {
// Prepare block residuals if requested.
const ResidualBlock* residual_block = program_->residual_blocks()[i];
- double* block_residuals = (residuals != NULL)
- ? (residuals + residual_layout_[i])
- : NULL;
+ double* block_residuals = NULL;
+ if (residuals != NULL) {
+ block_residuals = residuals + residual_layout_[i];
+ } else if (gradient != NULL) {
+ block_residuals = scratch->residual_block_residuals.get();
+ }
// Prepare block jacobians if requested.
double** block_jacobians = NULL;
- if (jacobian != NULL) {
+ if (jacobian != NULL || gradient != NULL) {
preparer->Prepare(residual_block,
i,
jacobian,
@@ -174,10 +182,11 @@ class ProgramEvaluator : public Evaluator {
// Evaluate the cost, residuals, and jacobians.
double block_cost;
- if (!residual_block->Evaluate(&block_cost,
- block_residuals,
- block_jacobians,
- scratch->scratch.get())) {
+ if (!residual_block->Evaluate(
+ &block_cost,
+ block_residuals,
+ block_jacobians,
+ scratch->residual_block_evaluate_scratch.get())) {
abort = true;
// This ensures that the OpenMP threads have a consistent view of 'abort'. Do
// the flush inside the failure case so that there is usually only one
@@ -188,19 +197,49 @@ class ProgramEvaluator : public Evaluator {
scratch->cost += block_cost;
+ // Store the jacobians, if they were requested.
if (jacobian != NULL) {
jacobian_writer_.Write(i,
residual_layout_[i],
block_jacobians,
jacobian);
}
+
+ // Compute and store the gradient, if it was requested.
+ if (gradient != NULL) {
+ int num_residuals = residual_block->NumResiduals();
+ int num_parameter_blocks = residual_block->NumParameterBlocks();
+ for (int j = 0; j < num_parameter_blocks; ++j) {
+ const ParameterBlock* parameter_block =
+ residual_block->parameter_blocks()[j];
+ if (parameter_block->IsConstant()) {
+ continue;
+ }
+ MatrixRef block_jacobian(block_jacobians[j],
+ num_residuals,
+ parameter_block->LocalSize());
+ VectorRef block_gradient(scratch->gradient.get() +
+ parameter_block->delta_offset(),
+ parameter_block->LocalSize());
+ VectorRef block_residual(block_residuals, num_residuals);
+ block_gradient += block_residual.transpose() * block_jacobian;
+ }
+ }
}
if (!abort) {
- // Sum the cost from each thread.
+ // Sum the cost and gradient (if requested) from each thread.
(*cost) = 0.0;
+ int num_parameters = program_->NumEffectiveParameters();
+ if (gradient != NULL) {
+ VectorRef(gradient, num_parameters).setZero();
+ }
for (int i = 0; i < options_.num_threads; ++i) {
(*cost) += evaluate_scratch_[i].cost;
+ if (gradient != NULL) {
+ VectorRef(gradient, num_parameters) +=
+ VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
+ }
}
}
return !abort;
@@ -224,16 +263,28 @@ class ProgramEvaluator : public Evaluator {
}
private:
+ // Per-thread scratch space needed to evaluate and store each residual block.
struct EvaluateScratch {
void Init(int max_parameters_per_residual_block,
- int max_scratch_doubles_needed_for_evaluate) {
+ int max_scratch_doubles_needed_for_evaluate,
+ int max_residuals_per_residual_block,
+ int num_parameters) {
+ residual_block_evaluate_scratch.reset(
+ new double[max_scratch_doubles_needed_for_evaluate]);
+ gradient.reset(new double[num_parameters]);
+ VectorRef(gradient.get(), num_parameters).setZero();
+ residual_block_residuals.reset(
+ new double[max_residuals_per_residual_block]);
jacobian_block_ptrs.reset(
new double*[max_parameters_per_residual_block]);
- scratch.reset(new double[max_scratch_doubles_needed_for_evaluate]);
}
double cost;
- scoped_array<double> scratch;
+ scoped_array<double> residual_block_evaluate_scratch;
+ // The gradient in the local parameterization.
+ scoped_array<double> gradient;
+ // Enough space to store the residual for the largest residual block.
+ scoped_array<double> residual_block_residuals;
scoped_array<double*> jacobian_block_ptrs;
};
@@ -256,11 +307,16 @@ class ProgramEvaluator : public Evaluator {
program.MaxParametersPerResidualBlock();
int max_scratch_doubles_needed_for_evaluate =
program.MaxScratchDoublesNeededForEvaluate();
+ int max_residuals_per_residual_block =
+ program.MaxResidualsPerResidualBlock();
+ int num_parameters = program.NumEffectiveParameters();
EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
for (int i = 0; i < num_threads; i++) {
evaluate_scratch[i].Init(max_parameters_per_residual_block,
- max_scratch_doubles_needed_for_evaluate);
+ max_scratch_doubles_needed_for_evaluate,
+ max_residuals_per_residual_block,
+ num_parameters);
}
return evaluate_scratch;
}
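Note: the gradient support added to ProgramEvaluator::Evaluate() accumulates, for every residual block and every non-constant parameter block, the contraction g_block += J_block^T r_block into per-thread gradient scratch, and then sums the per-thread gradients. A standalone sketch of that per-block contraction (not part of the patch; plain Eigen types instead of MatrixRef/VectorRef, and the block sizes are hypothetical):

// One residual block's contribution to the gradient: g += J^T r.
#include <iostream>
#include "Eigen/Dense"

int main() {
  // Hypothetical block: 3 residuals depending on a 2-parameter block.
  Eigen::MatrixXd block_jacobian(3, 2);
  block_jacobian << 1.0, 0.0,
                    0.0, 2.0,
                    1.0, 1.0;
  Eigen::VectorXd block_residual(3);
  block_residual << 0.5, -1.0, 2.0;

  // The same contraction ProgramEvaluator performs per residual block,
  // written unambiguously as J^T * r.
  Eigen::VectorXd block_gradient = Eigen::VectorXd::Zero(2);
  block_gradient += block_jacobian.transpose() * block_residual;

  std::cout << "block gradient = " << block_gradient.transpose() << std::endl;
  return 0;
}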
diff --git a/extern/libmv/third_party/ceres/internal/ceres/random.h b/extern/libmv/third_party/ceres/internal/ceres/random.h
index 769e0b4dd27..352c0032b5a 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/random.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/random.h
@@ -27,21 +27,44 @@
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
+// sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_INTERNAL_RANDOM_H_
#define CERES_INTERNAL_RANDOM_H_
+#include <cmath>
+#include <cstdlib>
+#include "ceres/internal/port.h"
+
namespace ceres {
-inline double RandDouble() {
- double r = rand();
- return r / RAND_MAX;
+inline void SetRandomState(int state) {
+ srand(state);
}
inline int Uniform(int n) {
return rand() % n;
}
+inline double RandDouble() {
+ double r = static_cast<double>(rand());
+ return r / RAND_MAX;
+}
+
+// Box-Muller algorithm for normal random number generation.
+// http://en.wikipedia.org/wiki/Box-Muller_transform
+inline double RandNormal() {
+ double x1, x2, w;
+ do {
+ x1 = 2.0 * RandDouble() - 1.0;
+ x2 = 2.0 * RandDouble() - 1.0;
+ w = x1 * x1 + x2 * x2;
+ } while ( w >= 1.0 || w == 0.0 );
+
+ w = sqrt((-2.0 * log(w)) / w);
+ return x1 * w;
+}
+
} // namespace ceres
#endif // CERES_INTERNAL_RANDOM_H_
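Note: RandNormal() above is the polar (Marsaglia) form of the Box-Muller transform: draw (x1, x2) uniformly on [-1, 1]^2, reject pairs outside the open unit disc, then scale by sqrt(-2 ln(w) / w) where w = x1^2 + x2^2. A standalone sketch (not part of the patch) that checks the resulting sample mean and variance:

// Polar Box-Muller sampling, mirroring RandNormal() in random.h.
#include <cmath>
#include <cstdio>
#include <cstdlib>

static double UniformMinusOneToOne() {
  return 2.0 * (static_cast<double>(rand()) / RAND_MAX) - 1.0;
}

static double SampleStandardNormal() {
  double x1, x2, w;
  do {
    // Rejection step: keep only points strictly inside the unit circle.
    x1 = UniformMinusOneToOne();
    x2 = UniformMinusOneToOne();
    w = x1 * x1 + x2 * x2;
  } while (w >= 1.0 || w == 0.0);
  w = std::sqrt((-2.0 * std::log(w)) / w);
  return x1 * w;  // x2 * w would be a second, independent sample.
}

int main() {
  srand(42);  // same seeding mechanism as SetRandomState()
  const int n = 100000;
  double sum = 0.0, sum_sq = 0.0;
  for (int i = 0; i < n; ++i) {
    const double s = SampleStandardNormal();
    sum += s;
    sum_sq += s * s;
  }
  const double mean = sum / n;
  // Expect mean near 0 and variance near 1.
  std::printf("mean = %g, variance = %g\n", mean, sum_sq / n - mean * mean);
  return 0;
}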
diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc b/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc
index 03867891dba..bdb88b1dd97 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block.cc
@@ -102,8 +102,11 @@ bool ResidualBlock::Evaluate(double* cost,
InvalidateEvaluation(*this, cost, residuals, eval_jacobians);
- if (!cost_function_->Evaluate(parameters.get(), residuals, eval_jacobians) ||
- !IsEvaluationValid(*this,
+ if (!cost_function_->Evaluate(parameters.get(), residuals, eval_jacobians)) {
+ return false;
+ }
+
+ if (!IsEvaluationValid(*this,
parameters.get(),
cost,
residuals,
diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc
index 28e03130844..9442bb2a1c1 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.cc
@@ -33,46 +33,17 @@
#include <cmath>
#include <cstddef>
#include <limits>
-#include <glog/logging.h>
-#include "ceres/residual_block.h"
-#include "ceres/parameter_block.h"
-#include "ceres/stringprintf.h"
+#include "ceres/array_utils.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
-
-#ifdef _MSC_VER
-# define isfinite _finite
-#endif
+#include "ceres/parameter_block.h"
+#include "ceres/residual_block.h"
+#include "ceres/stringprintf.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
-// It is a near impossibility that user code generates this exact
-// value in normal operation, thus we will use it to fill arrays
-// before passing them to user code. If on return an element of the
-// array still contains this value, we will assume that the user code
-// did not write to that memory location.
-static const double kImpossibleValue = 1e302;
-
-bool IsArrayValid(const int size, const double* x) {
- if (x != NULL) {
- for (int i = 0; i < size; ++i) {
- if (!isfinite(x[i]) || (x[i] == kImpossibleValue)) {
- return false;
- }
- }
- }
- return true;
-}
-
-void InvalidateArray(const int size, double* x) {
- if (x != NULL) {
- for (int i = 0; i < size; ++i) {
- x[i] = kImpossibleValue;
- }
- }
-}
-
void InvalidateEvaluation(const ResidualBlock& block,
double* cost,
double* residuals,
@@ -92,7 +63,7 @@ void InvalidateEvaluation(const ResidualBlock& block,
// Utility routine to print an array of doubles to a string. If the
// array pointer is NULL, it is treated as an array of zeros.
-void AppendArrayToString(const int size, const double* x, string* result) {
+static void AppendArrayToString(const int size, const double* x, string* result) {
for (int i = 0; i < size; ++i) {
if (x == NULL) {
StringAppendF(result, "Not Computed ");
diff --git a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.h b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.h
index 228867cc60c..7051c2112fd 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/residual_block_utils.h
@@ -51,15 +51,6 @@ namespace internal {
class ResidualBlock;
-// Fill the array x with an impossible value that the user code is
-// never expected to compute.
-void InvalidateArray(int size, double* x);
-
-// Check if all the entries of the array x are valid, i.e. all the
-// values in the array should be finite and none of them should be
-// equal to the "impossible" value used by InvalidateArray.
-bool IsArrayValid(int size, const double* x);
-
// Invalidate cost, residual and jacobian arrays (if not NULL).
void InvalidateEvaluation(const ResidualBlock& block,
double* cost,
diff --git a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc b/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc
index ac6d8aa279a..7af275c1dd8 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/runtime_numeric_diff_cost_function.cc
@@ -35,11 +35,10 @@
#include <algorithm>
#include <numeric>
#include <vector>
-
-#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc
index 2bc8cdd6bec..2cbe78d133a 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.cc
@@ -32,7 +32,13 @@
#include <ctime>
#include <set>
#include <vector>
+
+#ifndef CERES_NO_CXSPARSE
+#include "cs.h"
+#endif // CERES_NO_CXSPARSE
+
#include "Eigen/Dense"
+#include "glog/logging.h"
#include "ceres/block_random_access_dense_matrix.h"
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_random_access_sparse_matrix.h"
@@ -48,6 +54,7 @@
#include "ceres/internal/scoped_ptr.h"
#include "ceres/types.h"
+
namespace ceres {
namespace internal {
@@ -57,7 +64,7 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
const LinearSolver::PerSolveOptions& per_solve_options,
double* x) {
const time_t start_time = time(NULL);
- if (!options_.constant_sparsity || (eliminator_.get() == NULL)) {
+ if (eliminator_.get() == NULL) {
InitStorage(A->block_structure());
DetectStructure(*A->block_structure(),
options_.num_eliminate_blocks,
@@ -88,11 +95,11 @@ LinearSolver::Summary SchurComplementSolver::SolveImpl(
const time_t backsubstitute_time = time(NULL);
summary.termination_type = TOLERANCE;
- VLOG(2) << "time (sec) total: " << backsubstitute_time - start_time
- << " init: " << init_time - start_time
- << " eliminate: " << eliminate_time - init_time
- << " solve: " << solve_time - eliminate_time
- << " backsubstitute: " << backsubstitute_time - solve_time;
+ VLOG(2) << "time (sec) total: " << (backsubstitute_time - start_time)
+ << " init: " << (init_time - start_time)
+ << " eliminate: " << (eliminate_time - init_time)
+ << " solve: " << (solve_time - eliminate_time)
+ << " backsubstitute: " << (backsubstitute_time - solve_time);
return summary;
}
@@ -139,18 +146,33 @@ bool DenseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
return true;
}
-#ifndef CERES_NO_SUITESPARSE
+
SparseSchurComplementSolver::SparseSchurComplementSolver(
const LinearSolver::Options& options)
- : SchurComplementSolver(options),
- symbolic_factor_(NULL) {
+ : SchurComplementSolver(options) {
+#ifndef CERES_NO_SUITESPARSE
+ factor_ = NULL;
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ cxsparse_factor_ = NULL;
+#endif // CERES_NO_CXSPARSE
}
SparseSchurComplementSolver::~SparseSchurComplementSolver() {
- if (symbolic_factor_ != NULL) {
- ss_.Free(symbolic_factor_);
- symbolic_factor_ = NULL;
+#ifndef CERES_NO_SUITESPARSE
+ if (factor_ != NULL) {
+ ss_.Free(factor_);
+ factor_ = NULL;
}
+#endif // CERES_NO_SUITESPARSE
+
+#ifndef CERES_NO_CXSPARSE
+ if (cxsparse_factor_ != NULL) {
+ cxsparse_.Free(cxsparse_factor_);
+ cxsparse_factor_ = NULL;
+ }
+#endif // CERES_NO_CXSPARSE
}
// Determine the non-zero blocks in the Schur Complement matrix, and
@@ -161,13 +183,13 @@ void SparseSchurComplementSolver::InitStorage(
const int num_col_blocks = bs->cols.size();
const int num_row_blocks = bs->rows.size();
- vector<int> blocks(num_col_blocks - num_eliminate_blocks, 0);
+ blocks_.resize(num_col_blocks - num_eliminate_blocks, 0);
for (int i = num_eliminate_blocks; i < num_col_blocks; ++i) {
- blocks[i - num_eliminate_blocks] = bs->cols[i].size;
+ blocks_[i - num_eliminate_blocks] = bs->cols[i].size;
}
set<pair<int, int> > block_pairs;
- for (int i = 0; i < blocks.size(); ++i) {
+ for (int i = 0; i < blocks_.size(); ++i) {
block_pairs.insert(make_pair(i, i));
}
@@ -220,15 +242,34 @@ void SparseSchurComplementSolver::InitStorage(
}
}
- set_lhs(new BlockRandomAccessSparseMatrix(blocks, block_pairs));
+ set_lhs(new BlockRandomAccessSparseMatrix(blocks_, block_pairs));
set_rhs(new double[lhs()->num_rows()]);
}
+bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
+ switch (options().sparse_linear_algebra_library) {
+ case SUITE_SPARSE:
+ return SolveReducedLinearSystemUsingSuiteSparse(solution);
+ case CX_SPARSE:
+ return SolveReducedLinearSystemUsingCXSparse(solution);
+ default:
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options().sparse_linear_algebra_library;
+ }
+
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options().sparse_linear_algebra_library;
+ return false;
+}
+
+#ifndef CERES_NO_SUITESPARSE
// Solve the system Sx = r, assuming that the matrix S is stored in a
// BlockRandomAccessSparseMatrix. The linear system is solved using
// CHOLMOD's sparse cholesky factorization routines.
-bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
- // Extract the TripletSparseMatrix that is used for actually storing S.
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+ double* solution) {
+ const time_t start_time = time(NULL);
+
TripletSparseMatrix* tsm =
const_cast<TripletSparseMatrix*>(
down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
@@ -245,30 +286,38 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
// The matrix is symmetric, and the upper triangular part of the
// matrix contains the values.
cholmod_lhs->stype = 1;
+ const time_t lhs_time = time(NULL);
cholmod_dense* cholmod_rhs =
ss_.CreateDenseVector(const_cast<double*>(rhs()), num_rows, num_rows);
+ const time_t rhs_time = time(NULL);
// Symbolic factorization is computed if we don't already have one handy.
- if (symbolic_factor_ == NULL) {
- symbolic_factor_ = ss_.AnalyzeCholesky(cholmod_lhs);
+ if (factor_ == NULL) {
+ if (options().use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(cholmod_lhs, blocks_, blocks_);
+ } else {
+ factor_ = ss_.AnalyzeCholesky(cholmod_lhs);
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
}
+ CHECK_NOTNULL(factor_);
+
+ const time_t symbolic_time = time(NULL);
cholmod_dense* cholmod_solution =
- ss_.SolveCholesky(cholmod_lhs, symbolic_factor_, cholmod_rhs);
+ ss_.SolveCholesky(cholmod_lhs, factor_, cholmod_rhs);
+
+ const time_t solve_time = time(NULL);
ss_.Free(cholmod_lhs);
cholmod_lhs = NULL;
ss_.Free(cholmod_rhs);
cholmod_rhs = NULL;
- // If sparsity is not constant across calls, then reset the symbolic
- // factorization.
- if (!options().constant_sparsity) {
- ss_.Free(symbolic_factor_);
- symbolic_factor_ = NULL;
- }
-
if (cholmod_solution == NULL) {
LOG(ERROR) << "CHOLMOD solve failed.";
return false;
@@ -277,9 +326,63 @@ bool SparseSchurComplementSolver::SolveReducedLinearSystem(double* solution) {
VectorRef(solution, num_rows)
= VectorRef(static_cast<double*>(cholmod_solution->x), num_rows);
ss_.Free(cholmod_solution);
+ const time_t final_time = time(NULL);
+ VLOG(2) << "time: " << (final_time - start_time)
+ << " lhs : " << (lhs_time - start_time)
+ << " rhs: " << (rhs_time - lhs_time)
+ << " analyze: " << (symbolic_time - rhs_time)
+ << " factor_and_solve: " << (solve_time - symbolic_time)
+ << " cleanup: " << (final_time - solve_time);
return true;
}
+#else
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingSuiteSparse(
+ double* solution) {
+ LOG(FATAL) << "No SuiteSparse support in Ceres.";
+ return false;
+}
#endif // CERES_NO_SUITESPARSE
+#ifndef CERES_NO_CXSPARSE
+// Solve the system Sx = r, assuming that the matrix S is stored in a
+// BlockRandomAccessSparseMatrix. The linear system is solved using
+// CXSparse's sparse cholesky factorization routines.
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+ double* solution) {
+ // Extract the TripletSparseMatrix that is used for actually storing S.
+ TripletSparseMatrix* tsm =
+ const_cast<TripletSparseMatrix*>(
+ down_cast<const BlockRandomAccessSparseMatrix*>(lhs())->matrix());
+
+ const int num_rows = tsm->num_rows();
+
+ // The case where there are no f blocks, and the system is block
+ // diagonal.
+ if (num_rows == 0) {
+ return true;
+ }
+
+ cs_di* lhs = CHECK_NOTNULL(cxsparse_.CreateSparseMatrix(tsm));
+ VectorRef(solution, num_rows) = ConstVectorRef(rhs(), num_rows);
+
+ // Compute symbolic factorization if not available.
+ if (cxsparse_factor_ == NULL) {
+ cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(lhs));
+ }
+
+ // Solve the linear system.
+ bool ok = cxsparse_.SolveCholesky(lhs, cxsparse_factor_, solution);
+
+ cxsparse_.Free(lhs);
+ return ok;
+}
+#else
+bool SparseSchurComplementSolver::SolveReducedLinearSystemUsingCXSparse(
+ double* solution) {
+ LOG(FATAL) << "No CXSparse support in Ceres.";
+ return false;
+}
+#endif // CERES_NO_CXSPARSE
+
} // namespace internal
} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h
index 039bc09e3ce..ea1b3184c33 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/schur_complement_solver.h
@@ -31,9 +31,13 @@
#ifndef CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
#define CERES_INTERNAL_SCHUR_COMPLEMENT_SOLVER_H_
+#include <set>
+#include <utility>
+
#include "ceres/block_random_access_matrix.h"
#include "ceres/block_sparse_matrix.h"
#include "ceres/block_structure.h"
+#include "ceres/cxsparse.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
#include "ceres/suitesparse.h"
@@ -128,7 +132,7 @@ class SchurComplementSolver : public BlockSparseMatrixBaseSolver {
scoped_ptr<BlockRandomAccessMatrix> lhs_;
scoped_array<double> rhs_;
- DISALLOW_COPY_AND_ASSIGN(SchurComplementSolver);
+ CERES_DISALLOW_COPY_AND_ASSIGN(SchurComplementSolver);
};
// Dense Cholesky factorization based solver.
@@ -142,10 +146,10 @@ class DenseSchurComplementSolver : public SchurComplementSolver {
virtual void InitStorage(const CompressedRowBlockStructure* bs);
virtual bool SolveReducedLinearSystem(double* solution);
- DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
+ CERES_DISALLOW_COPY_AND_ASSIGN(DenseSchurComplementSolver);
};
-#ifndef CERES_NO_SUITESPARSE
+
// Sparse Cholesky factorization based solver.
class SparseSchurComplementSolver : public SchurComplementSolver {
public:
@@ -155,26 +159,26 @@ class SparseSchurComplementSolver : public SchurComplementSolver {
private:
virtual void InitStorage(const CompressedRowBlockStructure* bs);
virtual bool SolveReducedLinearSystem(double* solution);
+ bool SolveReducedLinearSystemUsingSuiteSparse(double* solution);
+ bool SolveReducedLinearSystemUsingCXSparse(double* solution);
+ // Size of the blocks in the Schur complement.
+ vector<int> blocks_;
+#ifndef CERES_NO_SUITESPARSE
SuiteSparse ss_;
// Symbolic factorization of the reduced linear system. Precomputed
- // once and reused if constant_sparsity_ is true.
- cholmod_factor* symbolic_factor_;
- DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
-};
-#else // CERES_NO_SUITESPARSE
-class SparseSchurComplementSolver : public SchurComplementSolver {
- public:
- explicit SparseSchurComplementSolver(const LinearSolver::Options& options)
- : SchurComplementSolver(options) {
- LOG(FATAL) << "SPARSE_SCHUR is not available. Please "
- "build Ceres with SuiteSparse.";
- }
+ // once and reused in subsequent calls.
+ cholmod_factor* factor_;
+#endif // CERES_NO_SUITESPARSE
- virtual ~SparseSchurComplementSolver() {}
+#ifndef CERES_NO_CXSPARSE
+ CXSparse cxsparse_;
+ // Cached factorization
+ cs_dis* cxsparse_factor_;
+#endif // CERES_NO_CXSPARSE
+ CERES_DISALLOW_COPY_AND_ASSIGN(SparseSchurComplementSolver);
};
-#endif // CERES_NO_SUITESPARSE
} // namespace internal
} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h
index a388d005424..6120db9b009 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/schur_eliminator_impl.h
@@ -188,7 +188,7 @@ Eliminate(const BlockSparseMatrixBase* A,
typename EigenTypes<kFBlockSize>::ConstVectorRef
diag(D + bs->cols[i].position, block_size);
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block_size, block_size).diagonal()
+= diag.array().square().matrix();
@@ -387,7 +387,7 @@ UpdateRhs(const Chunk& chunk,
row.block.size, block_size);
const int block = block_id - num_eliminate_blocks_;
- MutexLock l(rhs_locks_[block]);
+ CeresMutexLock l(rhs_locks_[block]);
typename EigenTypes<kFBlockSize>::VectorRef
(rhs + lhs_row_layout_[block], block_size).noalias()
+= b.transpose() * sj;
@@ -523,7 +523,7 @@ ChunkOuterProduct(const CompressedRowBlockStructure* bs,
const typename EigenTypes<kEBlockSize, kFBlockSize>::ConstMatrixRef
b2(buffer + it2->second, e_block_size, block2_size);
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
// We explicitly construct a block object here instead of using
@@ -532,7 +532,29 @@ ChunkOuterProduct(const CompressedRowBlockStructure* bs,
// like the Matrix class does.
Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
block(m, r, c, block1_size, block2_size);
- block.noalias() -= b1_transpose_inverse_ete * b2;
+#ifdef CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
+ // Removing the ".noalias()" annotation on the following statement is
+ // necessary to produce a correct build with the Android NDK, including
+ // versions 6, 7, 8, and 8b, when built with STLPort and the
+ // non-standalone toolchain (i.e. ndk-build). This appears to be a
+ // compiler bug; if the workaround is not in place, the line
+ //
+ // block.noalias() -= b1_transpose_inverse_ete * b2;
+ //
+ // gets compiled to
+ //
+ // block.noalias() += b1_transpose_inverse_ete * b2;
+ //
+ // which breaks schur elimination. Introducing a temporary by removing the
+ // .noalias() annotation causes the issue to disappear. Tracking this
+ // issue down was tricky, since the test suite doesn't run when built with
+ // the non-standalone toolchain.
+ //
+ // TODO(keir): Make a reproduction case for this and send it upstream.
+ block -= b1_transpose_inverse_ete * b2;
+#else
+ block.noalias() -= b1_transpose_inverse_ete * b2;
+#endif // CERES_WORK_AROUND_ANDROID_NDK_COMPILER_BUG
}
}
}
@@ -601,7 +623,7 @@ NoEBlockRowOuterProduct(const BlockSparseMatrixBase* A,
&r, &c,
&row_stride, &col_stride);
if (cell_info != NULL) {
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block1_size, block1_size)
.selfadjointView<Eigen::Upper>()
@@ -621,7 +643,7 @@ NoEBlockRowOuterProduct(const BlockSparseMatrixBase* A,
}
const int block2_size = bs->cols[row.cells[j].block_id].size;
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
m.block(r, c, block1_size, block2_size).noalias() +=
b1.transpose() * ConstMatrixRef(row_values + row.cells[j].position,
@@ -660,7 +682,7 @@ EBlockRowOuterProduct(const BlockSparseMatrixBase* A,
continue;
}
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
@@ -687,7 +709,7 @@ EBlockRowOuterProduct(const BlockSparseMatrixBase* A,
row.block.size,
block2_size);
- MutexLock l(&cell_info->m);
+ CeresMutexLock l(&cell_info->m);
MatrixRef m(cell_info->values, row_stride, col_stride);
Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize>
block(m, r, c, block1_size, block2_size);
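
The workaround above trades Eigen's .noalias() fast path for an explicit temporary. A minimal standalone Eigen sketch of the two forms, outside of Ceres and with hypothetical matrix names, shown only for comparison:

    #include <Eigen/Core>

    void update_block(Eigen::MatrixXd& block,
                      const Eigen::MatrixXd& lhs,
                      const Eigen::MatrixXd& rhs) {
      // Fast path: .noalias() promises the destination does not alias the
      // product, so Eigen writes the result directly into 'block'.
      block.noalias() -= lhs * rhs;

      // Workaround path: without .noalias(), Eigen evaluates 'lhs * rhs'
      // into a temporary first; slightly slower, but it sidesteps the
      // miscompilation seen with the affected Android NDK toolchains.
      block -= lhs * rhs;
    }
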
diff --git a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc b/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc
index c4fc1da3c2f..1cdff4e6dec 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/schur_ordering.cc
@@ -30,25 +30,14 @@
#include "ceres/schur_ordering.h"
-#include <glog/logging.h>
#include "ceres/graph.h"
#include "ceres/graph_algorithms.h"
+#include "ceres/internal/scoped_ptr.h"
#include "ceres/map_util.h"
#include "ceres/parameter_block.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
-#include "ceres/internal/scoped_ptr.h"
-
-CERES_HASH_NAMESPACE_START
-
-// Allow us to hash pointers as if they were int's
-template<> struct hash< ::ceres::internal::ParameterBlock*> {
- size_t operator()(::ceres::internal::ParameterBlock* x) const {
- return reinterpret_cast<size_t>(x);
- }
-};
-
-CERES_HASH_NAMESPACE_END
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -59,8 +48,7 @@ int ComputeSchurOrdering(const Program& program,
scoped_ptr<Graph< ParameterBlock*> > graph(
CHECK_NOTNULL(CreateHessianGraph(program)));
- int independent_set_size =
- IndependentSetOrdering<ParameterBlock*>(*graph, ordering);
+ int independent_set_size = IndependentSetOrdering(*graph, ordering);
const vector<ParameterBlock*>& parameter_blocks = program.parameter_blocks();
// Add the excluded blocks to back of the ordering vector.
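
The specialization removed above only hashed ParameterBlock pointers by their address so they could be used as hash-container keys. For reference, a hedged sketch of the equivalent standalone functor; PointerHash is a hypothetical name and not part of Ceres:

    #include <cstddef>

    struct PointerHash {
      std::size_t operator()(const void* p) const {
        // Hash a pointer by reinterpreting its address, as the removed
        // CERES_HASH_NAMESPACE specialization did.
        return reinterpret_cast<std::size_t>(p);
      }
    };
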
diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver.cc b/extern/libmv/third_party/ceres/internal/ceres/solver.cc
index 77f04d1d918..66ca93283a1 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/solver.cc
@@ -32,36 +32,32 @@
#include "ceres/solver.h"
#include <vector>
-#include "ceres/levenberg_marquardt.h"
+#include "ceres/problem.h"
+#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/solver_impl.h"
#include "ceres/stringprintf.h"
-#include "ceres/problem.h"
namespace ceres {
Solver::~Solver() {}
-// TODO(sameeragarwal): The timing code here should use a sub-second
-// timer.
+// TODO(sameeragarwal): Use subsecond timers.
void Solver::Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary) {
time_t start_time_seconds = time(NULL);
- internal::SolverImpl::Solve(options, problem, summary);
+ internal::ProblemImpl* problem_impl =
+ CHECK_NOTNULL(problem)->problem_impl_.get();
+ internal::SolverImpl::Solve(options, problem_impl, summary);
summary->total_time_in_seconds = time(NULL) - start_time_seconds;
- summary->preprocessor_time_in_seconds =
- summary->total_time_in_seconds - summary->minimizer_time_in_seconds;
}
void Solve(const Solver::Options& options,
Problem* problem,
Solver::Summary* summary) {
- time_t start_time_seconds = time(NULL);
- internal::SolverImpl::Solve(options, problem, summary);
- summary->total_time_in_seconds = time(NULL) - start_time_seconds;
- summary->preprocessor_time_in_seconds =
- summary->total_time_in_seconds - summary->minimizer_time_in_seconds;
+ Solver solver;
+ solver.Solve(options, problem, summary);
}
Solver::Summary::Summary()
@@ -75,6 +71,7 @@ Solver::Summary::Summary()
num_unsuccessful_steps(-1),
preprocessor_time_in_seconds(-1.0),
minimizer_time_in_seconds(-1.0),
+ postprocessor_time_in_seconds(-1.0),
total_time_in_seconds(-1.0),
num_parameter_blocks(-1),
num_parameters(-1),
@@ -93,7 +90,9 @@ Solver::Summary::Summary()
linear_solver_type_given(SPARSE_NORMAL_CHOLESKY),
linear_solver_type_used(SPARSE_NORMAL_CHOLESKY),
preconditioner_type(IDENTITY),
- ordering_type(NATURAL) {
+ ordering_type(NATURAL),
+ trust_region_strategy_type(LEVENBERG_MARQUARDT),
+ sparse_linear_algebra_library(SUITE_SPARSE) {
}
string Solver::Summary::BriefReport() const {
@@ -136,7 +135,7 @@ string Solver::Summary::FullReport() const {
num_parameters);
internal::StringAppendF(&report, "Residual blocks % 10d\n",
num_residual_blocks);
- internal::StringAppendF(&report, "Residual % 10d\n\n",
+ internal::StringAppendF(&report, "Residuals % 10d\n\n",
num_residuals);
} else {
internal::StringAppendF(&report, "%45s %21s\n", "Original", "Reduced");
@@ -183,10 +182,33 @@ string Solver::Summary::FullReport() const {
internal::StringAppendF(&report, "Threads: % 25d% 25d\n",
num_threads_given, num_threads_used);
- internal::StringAppendF(&report, "Linear Solver Threads:% 23d% 25d\n",
+ internal::StringAppendF(&report, "Linear solver threads % 23d% 25d\n",
num_linear_solver_threads_given,
num_linear_solver_threads_used);
+ if (linear_solver_type_used == SPARSE_NORMAL_CHOLESKY ||
+ linear_solver_type_used == SPARSE_SCHUR ||
+ (linear_solver_type_used == ITERATIVE_SCHUR &&
+ (preconditioner_type == SCHUR_JACOBI ||
+ preconditioner_type == CLUSTER_JACOBI ||
+ preconditioner_type == CLUSTER_TRIDIAGONAL))) {
+ internal::StringAppendF(&report, "\nSparse Linear Algebra Library %15s\n",
+ SparseLinearAlgebraLibraryTypeToString(
+ sparse_linear_algebra_library));
+ }
+
+ internal::StringAppendF(&report, "Trust Region Strategy %19s",
+ TrustRegionStrategyTypeToString(
+ trust_region_strategy_type));
+ if (trust_region_strategy_type == DOGLEG) {
+ if (dogleg_type == TRADITIONAL_DOGLEG) {
+ internal::StringAppendF(&report, " (TRADITIONAL)");
+ } else {
+ internal::StringAppendF(&report, " (SUBSPACE)");
+ }
+ }
+ internal::StringAppendF(&report, "\n");
+
if (termination_type == DID_NOT_RUN) {
CHECK(!error.empty())
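
The free Solve() function above now simply forwards to Solver::Solve, and FullReport() picks up the new trust region and sparse library fields. A hedged sketch of typical use of the public API; the cost functor is hypothetical and only illustrates the call sequence:

    #include <iostream>
    #include "ceres/ceres.h"

    // Hypothetical one-dimensional residual: r = 10 - x.
    struct QuadraticCost {
      template <typename T>
      bool operator()(const T* const x, T* residual) const {
        residual[0] = T(10.0) - x[0];
        return true;
      }
    };

    int main() {
      double x = 0.5;
      ceres::Problem problem;
      problem.AddResidualBlock(
          new ceres::AutoDiffCostFunction<QuadraticCost, 1, 1>(new QuadraticCost),
          NULL, &x);

      ceres::Solver::Options options;
      ceres::Solver::Summary summary;
      ceres::Solve(options, &problem, &summary);  // forwards to Solver::Solve
      std::cout << summary.FullReport() << "\n";
      return 0;
    }
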
diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc
index ed07d9dc6d7..8ef5b98e35f 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.cc
@@ -30,40 +30,29 @@
#include "ceres/solver_impl.h"
+#include <cstdio>
#include <iostream> // NOLINT
#include <numeric>
#include "ceres/evaluator.h"
#include "ceres/gradient_checking_cost_function.h"
-#include "ceres/levenberg_marquardt.h"
+#include "ceres/iteration_callback.h"
+#include "ceres/levenberg_marquardt_strategy.h"
#include "ceres/linear_solver.h"
#include "ceres/map_util.h"
#include "ceres/minimizer.h"
#include "ceres/parameter_block.h"
+#include "ceres/problem.h"
#include "ceres/problem_impl.h"
#include "ceres/program.h"
#include "ceres/residual_block.h"
#include "ceres/schur_ordering.h"
#include "ceres/stringprintf.h"
-#include "ceres/iteration_callback.h"
-#include "ceres/problem.h"
+#include "ceres/trust_region_minimizer.h"
namespace ceres {
namespace internal {
namespace {
-void EvaluateCostAndResiduals(ProblemImpl* problem_impl,
- double* cost,
- vector<double>* residuals) {
- CHECK_NOTNULL(cost);
- Program* program = CHECK_NOTNULL(problem_impl)->mutable_program();
- if (residuals != NULL) {
- residuals->resize(program->NumResiduals());
- program->Evaluate(cost, &(*residuals)[0]);
- } else {
- program->Evaluate(cost, NULL);
- }
-}
-
// Callback for updating the user's parameter blocks. Updates are only
// done if the step is successful.
class StateUpdatingCallback : public IterationCallback {
@@ -96,7 +85,7 @@ class LoggingCallback : public IterationCallback {
CallbackReturnType operator()(const IterationSummary& summary) {
const char* kReportRowFormat =
"% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e "
- "rho:% 3.2e mu:% 3.2e li:% 3d";
+ "rho:% 3.2e mu:% 3.2e li:% 3d it:% 3.2e tt:% 3.2e";
string output = StringPrintf(kReportRowFormat,
summary.iteration,
summary.cost,
@@ -104,8 +93,10 @@ class LoggingCallback : public IterationCallback {
summary.gradient_max_norm,
summary.step_norm,
summary.relative_decrease,
- summary.mu,
- summary.linear_solver_iterations);
+ summary.trust_region_radius,
+ summary.linear_solver_iterations,
+ summary.iteration_time_in_seconds,
+ summary.cumulative_time_in_seconds);
if (log_to_stdout_) {
cout << output << endl;
} else {
@@ -118,44 +109,101 @@ class LoggingCallback : public IterationCallback {
const bool log_to_stdout_;
};
+// Basic callback to record the execution of the solver to a file for
+// offline analysis.
+class FileLoggingCallback : public IterationCallback {
+ public:
+ explicit FileLoggingCallback(const string& filename)
+ : fptr_(NULL) {
+ fptr_ = fopen(filename.c_str(), "w");
+ CHECK_NOTNULL(fptr_);
+ }
+
+ virtual ~FileLoggingCallback() {
+ if (fptr_ != NULL) {
+ fclose(fptr_);
+ }
+ }
+
+ virtual CallbackReturnType operator()(const IterationSummary& summary) {
+ fprintf(fptr_,
+ "%4d %e %e\n",
+ summary.iteration,
+ summary.cost,
+ summary.cumulative_time_in_seconds);
+ return SOLVER_CONTINUE;
+ }
+ private:
+ FILE* fptr_;
+};
+
} // namespace
void SolverImpl::Minimize(const Solver::Options& options,
Program* program,
Evaluator* evaluator,
LinearSolver* linear_solver,
- double* initial_parameters,
- double* final_parameters,
+ double* parameters,
Solver::Summary* summary) {
Minimizer::Options minimizer_options(options);
+ // TODO(sameeragarwal): Add support for logging the configuration
+ // and more detailed stats.
+ scoped_ptr<IterationCallback> file_logging_callback;
+ if (!options.solver_log.empty()) {
+ file_logging_callback.reset(new FileLoggingCallback(options.solver_log));
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ file_logging_callback.get());
+ }
+
LoggingCallback logging_callback(options.minimizer_progress_to_stdout);
if (options.logging_type != SILENT) {
- minimizer_options.callbacks.push_back(&logging_callback);
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ &logging_callback);
}
- StateUpdatingCallback updating_callback(program, initial_parameters);
+ StateUpdatingCallback updating_callback(program, parameters);
if (options.update_state_every_iteration) {
- minimizer_options.callbacks.push_back(&updating_callback);
- }
-
- LevenbergMarquardt levenberg_marquardt;
-
- time_t start_minimizer_time_seconds = time(NULL);
- levenberg_marquardt.Minimize(minimizer_options,
- evaluator,
- linear_solver,
- initial_parameters,
- final_parameters,
- summary);
- summary->minimizer_time_in_seconds =
- time(NULL) - start_minimizer_time_seconds;
+ // This must get pushed to the front of the callbacks so that it is run
+ // before any of the user callbacks.
+ minimizer_options.callbacks.insert(minimizer_options.callbacks.begin(),
+ &updating_callback);
+ }
+
+ minimizer_options.evaluator = evaluator;
+ scoped_ptr<SparseMatrix> jacobian(evaluator->CreateJacobian());
+ minimizer_options.jacobian = jacobian.get();
+
+ TrustRegionStrategy::Options trust_region_strategy_options;
+ trust_region_strategy_options.linear_solver = linear_solver;
+ trust_region_strategy_options.initial_radius =
+ options.initial_trust_region_radius;
+ trust_region_strategy_options.max_radius = options.max_trust_region_radius;
+ trust_region_strategy_options.lm_min_diagonal = options.lm_min_diagonal;
+ trust_region_strategy_options.lm_max_diagonal = options.lm_max_diagonal;
+ trust_region_strategy_options.trust_region_strategy_type =
+ options.trust_region_strategy_type;
+ trust_region_strategy_options.dogleg_type = options.dogleg_type;
+ scoped_ptr<TrustRegionStrategy> strategy(
+ TrustRegionStrategy::Create(trust_region_strategy_options));
+ minimizer_options.trust_region_strategy = strategy.get();
+
+ TrustRegionMinimizer minimizer;
+ time_t minimizer_start_time = time(NULL);
+ minimizer.Minimize(minimizer_options, parameters, summary);
+ summary->minimizer_time_in_seconds = time(NULL) - minimizer_start_time;
}
void SolverImpl::Solve(const Solver::Options& original_options,
- Problem* problem,
+ ProblemImpl* original_problem_impl,
Solver::Summary* summary) {
+ time_t solver_start_time = time(NULL);
Solver::Options options(original_options);
+ Program* original_program = original_problem_impl->mutable_program();
+ ProblemImpl* problem_impl = original_problem_impl;
+ // Reset the summary object to its default values.
+ *CHECK_NOTNULL(summary) = Solver::Summary();
+
#ifndef CERES_USE_OPENMP
if (options.num_threads > 1) {
@@ -174,8 +222,6 @@ void SolverImpl::Solve(const Solver::Options& original_options,
}
#endif
- // Reset the summary object to its default values;
- *CHECK_NOTNULL(summary) = Solver::Summary();
summary->linear_solver_type_given = options.linear_solver_type;
summary->num_eliminate_blocks_given = original_options.num_eliminate_blocks;
summary->num_threads_given = original_options.num_threads;
@@ -183,32 +229,38 @@ void SolverImpl::Solve(const Solver::Options& original_options,
original_options.num_linear_solver_threads;
summary->ordering_type = original_options.ordering_type;
- ProblemImpl* problem_impl = CHECK_NOTNULL(problem)->problem_impl_.get();
-
summary->num_parameter_blocks = problem_impl->NumParameterBlocks();
summary->num_parameters = problem_impl->NumParameters();
summary->num_residual_blocks = problem_impl->NumResidualBlocks();
summary->num_residuals = problem_impl->NumResiduals();
summary->num_threads_used = options.num_threads;
-
- // Evaluate the initial cost and residual vector (if needed). The
- // initial cost needs to be computed on the original unpreprocessed
- // problem, as it is used to determine the value of the "fixed" part
- // of the objective function after the problem has undergone
- // reduction. Also the initial residuals are in the order in which
- // the user added the ResidualBlocks to the optimization problem.
- EvaluateCostAndResiduals(problem_impl,
- &summary->initial_cost,
- options.return_initial_residuals
- ? &summary->initial_residuals
- : NULL);
+ summary->sparse_linear_algebra_library =
+ options.sparse_linear_algebra_library;
+ summary->trust_region_strategy_type = options.trust_region_strategy_type;
+ summary->dogleg_type = options.dogleg_type;
+
+ // Evaluate the initial cost, residual vector and the jacobian
+ // matrix if requested by the user. The initial cost needs to be
+ // computed on the original unpreprocessed problem, as it is used to
+ // determine the value of the "fixed" part of the objective function
+ // after the problem has undergone reduction.
+ Evaluator::Evaluate(
+ original_program,
+ options.num_threads,
+ &(summary->initial_cost),
+ options.return_initial_residuals ? &summary->initial_residuals : NULL,
+ options.return_initial_gradient ? &summary->initial_gradient : NULL,
+ options.return_initial_jacobian ? &summary->initial_jacobian : NULL);
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs();
// If the user requests gradient checking, construct a new
// ProblemImpl by wrapping the CostFunctions of problem_impl inside
// GradientCheckingCostFunction and replacing problem_impl with
// gradient_checking_problem_impl.
scoped_ptr<ProblemImpl> gradient_checking_problem_impl;
+ // Save the original problem impl so we don't use the gradient
+ // checking one when computing the residuals.
if (options.check_gradients) {
VLOG(1) << "Checking Gradients";
gradient_checking_problem_impl.reset(
@@ -224,8 +276,10 @@ void SolverImpl::Solve(const Solver::Options& original_options,
// Create the three objects needed to minimize: the transformed program, the
// evaluator, and the linear solver.
- scoped_ptr<Program> reduced_program(
- CreateReducedProgram(&options, problem_impl, &summary->error));
+ scoped_ptr<Program> reduced_program(CreateReducedProgram(&options,
+ problem_impl,
+ &summary->fixed_cost,
+ &summary->error));
if (reduced_program == NULL) {
return;
}
@@ -259,19 +313,21 @@ void SolverImpl::Solve(const Solver::Options& original_options,
}
// The optimizer works on contiguous parameter vectors; allocate some.
- Vector initial_parameters(reduced_program->NumParameters());
- Vector optimized_parameters(reduced_program->NumParameters());
+ Vector parameters(reduced_program->NumParameters());
// Collect the discontiguous parameters into a contiguous state vector.
- reduced_program->ParameterBlocksToStateVector(&initial_parameters[0]);
+ reduced_program->ParameterBlocksToStateVector(parameters.data());
+
+ time_t minimizer_start_time = time(NULL);
+ summary->preprocessor_time_in_seconds =
+ minimizer_start_time - solver_start_time;
// Run the optimization.
Minimize(options,
reduced_program.get(),
evaluator.get(),
linear_solver.get(),
- initial_parameters.data(),
- optimized_parameters.data(),
+ parameters.data(),
summary);
// If the user aborted mid-optimization or the optimization
@@ -282,30 +338,45 @@ void SolverImpl::Solve(const Solver::Options& original_options,
return;
}
+ time_t post_process_start_time = time(NULL);
+
// Push the contiguous optimized parameters back to the user's parameters.
- reduced_program->StateVectorToParameterBlocks(&optimized_parameters[0]);
+ reduced_program->StateVectorToParameterBlocks(parameters.data());
reduced_program->CopyParameterBlockStateToUserState();
- // Return the final cost and residuals for the original problem.
- EvaluateCostAndResiduals(problem->problem_impl_.get(),
- &summary->final_cost,
- options.return_final_residuals
- ? &summary->final_residuals
- : NULL);
-
+ // Evaluate the final cost, residual vector and the jacobian
+ // matrix if requested by the user.
+ Evaluator::Evaluate(
+ original_program,
+ options.num_threads,
+ &summary->final_cost,
+ options.return_final_residuals ? &summary->final_residuals : NULL,
+ options.return_final_gradient ? &summary->final_gradient : NULL,
+ options.return_final_jacobian ? &summary->final_jacobian : NULL);
+
+ // Ensure the program state is set to the user parameters on the way out.
+ original_program->SetParameterBlockStatePtrsToUserStatePtrs();
// Stick a fork in it, we're done.
- return;
+ summary->postprocessor_time_in_seconds = time(NULL) - post_process_start_time;
}
// Strips varying parameters and residuals, maintaining order, and updating
// num_eliminate_blocks.
bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
int* num_eliminate_blocks,
+ double* fixed_cost,
string* error) {
int original_num_eliminate_blocks = *num_eliminate_blocks;
vector<ParameterBlock*>* parameter_blocks =
program->mutable_parameter_blocks();
+ scoped_array<double> residual_block_evaluate_scratch;
+ if (fixed_cost != NULL) {
+ residual_block_evaluate_scratch.reset(
+ new double[program->MaxScratchDoublesNeededForEvaluate()]);
+ *fixed_cost = 0.0;
+ }
+
// Mark all the parameters as unused. Abuse the index member of the parameter
// blocks for the marking.
for (int i = 0; i < parameter_blocks->size(); ++i) {
@@ -335,6 +406,17 @@ bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
if (!all_constant) {
(*residual_blocks)[j++] = (*residual_blocks)[i];
+ } else if (fixed_cost != NULL) {
+ // The residual is constant and will be removed, so its cost is
+ // added to the variable fixed_cost.
+ double cost = 0.0;
+ if (!residual_block->Evaluate(
+ &cost, NULL, NULL, residual_block_evaluate_scratch.get())) {
+ *error = StringPrintf("Evaluation of the residual %d failed during "
+ "removal of fixed residual blocks.", i);
+ return false;
+ }
+ *fixed_cost += cost;
}
}
residual_blocks->resize(j);
@@ -367,6 +449,7 @@ bool SolverImpl::RemoveFixedBlocksFromProgram(Program* program,
Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
ProblemImpl* problem_impl,
+ double* fixed_cost,
string* error) {
Program* original_program = problem_impl->mutable_program();
scoped_ptr<Program> transformed_program(new Program(*original_program));
@@ -397,6 +480,7 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
if (!RemoveFixedBlocksFromProgram(transformed_program.get(),
&num_eliminate_blocks,
+ fixed_cost,
error)) {
return NULL;
}
@@ -431,13 +515,34 @@ Program* SolverImpl::CreateReducedProgram(Solver::Options* options,
LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
string* error) {
+ if (options->trust_region_strategy_type == DOGLEG) {
+ if (options->linear_solver_type == ITERATIVE_SCHUR ||
+ options->linear_solver_type == CGNR) {
+ *error = "DOGLEG only supports exact factorization based linear "
+ "solvers. If you want to use an iterative solver please "
+ "use LEVENBERG_MARQUARDT as the trust_region_strategy_type";
+ return NULL;
+ }
+ }
+
#ifdef CERES_NO_SUITESPARSE
- if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY) {
- *error = "Can't use SPARSE_NORMAL_CHOLESKY because SuiteSparse was not "
- "enabled when Ceres was built.";
+ if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+ options->sparse_linear_algebra_library == SUITE_SPARSE) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with SUITESPARSE because "
+ "SuiteSparse was not enabled when Ceres was built.";
+ return NULL;
+ }
+#endif
+
+#ifdef CERES_NO_CXSPARSE
+ if (options->linear_solver_type == SPARSE_NORMAL_CHOLESKY &&
+ options->sparse_linear_algebra_library == CX_SPARSE) {
+ *error = "Can't use SPARSE_NORMAL_CHOLESKY with CXSPARSE because "
+ "CXSparse was not enabled when Ceres was built.";
return NULL;
}
-#endif // CERES_NO_SUITESPARSE
+#endif
+
if (options->linear_solver_max_num_iterations <= 0) {
*error = "Solver::Options::linear_solver_max_num_iterations is 0.";
@@ -455,30 +560,32 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
}
LinearSolver::Options linear_solver_options;
- linear_solver_options.constant_sparsity = true;
linear_solver_options.min_num_iterations =
options->linear_solver_min_num_iterations;
linear_solver_options.max_num_iterations =
options->linear_solver_max_num_iterations;
linear_solver_options.type = options->linear_solver_type;
linear_solver_options.preconditioner_type = options->preconditioner_type;
+ linear_solver_options.sparse_linear_algebra_library =
+ options->sparse_linear_algebra_library;
+ linear_solver_options.use_block_amd = options->use_block_amd;
#ifdef CERES_NO_SUITESPARSE
if (linear_solver_options.preconditioner_type == SCHUR_JACOBI) {
*error = "SCHUR_JACOBI preconditioner not suppored. Please build Ceres "
- "with SuiteSparse support";
+ "with SuiteSparse support.";
return NULL;
}
if (linear_solver_options.preconditioner_type == CLUSTER_JACOBI) {
*error = "CLUSTER_JACOBI preconditioner not suppored. Please build Ceres "
- "with SuiteSparse support";
+ "with SuiteSparse support.";
return NULL;
}
if (linear_solver_options.preconditioner_type == CLUSTER_TRIDIAGONAL) {
*error = "CLUSTER_TRIDIAGONAL preconditioner not suppored. Please build "
- "Ceres with SuiteSparse support";
+ "Ceres with SuiteSparse support.";
return NULL;
}
#endif
@@ -489,23 +596,23 @@ LinearSolver* SolverImpl::CreateLinearSolver(Solver::Options* options,
if ((linear_solver_options.num_eliminate_blocks == 0) &&
IsSchurType(linear_solver_options.type)) {
-#ifndef CERES_NO_SUITESPARSE
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
+ LOG(INFO) << "No elimination block remaining switching to DENSE_QR.";
+ linear_solver_options.type = DENSE_QR;
+#else
LOG(INFO) << "No elimination block remaining "
<< "switching to SPARSE_NORMAL_CHOLESKY.";
linear_solver_options.type = SPARSE_NORMAL_CHOLESKY;
-#else
- LOG(INFO) << "No elimination block remaining switching to DENSE_QR.";
- linear_solver_options.type = DENSE_QR;
-#endif // CERES_NO_SUITESPARSE
+#endif
}
-#ifdef CERES_NO_SUITESPARSE
+#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
if (linear_solver_options.type == SPARSE_SCHUR) {
- *error = "Can't use SPARSE_SCHUR because SuiteSparse was not "
- "enabled when Ceres was built.";
+ *error = "Can't use SPARSE_SCHUR because neither SuiteSparse nor"
+ "CXSparse was enabled when Ceres was compiled.";
return NULL;
}
-#endif // CERES_NO_SUITESPARSE
+#endif
// The matrix used for storing the dense Schur complement has a
// single lock guarding the whole matrix. Running the
@@ -578,15 +685,18 @@ bool SolverImpl::ApplyUserOrdering(const ProblemImpl& problem_impl,
// Find the minimum index of any parameter block to the given residual.
// Parameter blocks that have indices greater than num_eliminate_blocks are
// considered to have an index equal to num_eliminate_blocks.
-int MinParameterBlock(const ResidualBlock* residual_block,
- int num_eliminate_blocks) {
+static int MinParameterBlock(const ResidualBlock* residual_block,
+ int num_eliminate_blocks) {
int min_parameter_block_position = num_eliminate_blocks;
for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
- DCHECK_NE(parameter_block->index(), -1)
- << "Did you forget to call Program::SetParameterOffsetsAndIndex()?";
- min_parameter_block_position = std::min(parameter_block->index(),
- min_parameter_block_position);
+ if (!parameter_block->IsConstant()) {
+ CHECK_NE(parameter_block->index(), -1)
+ << "Did you forget to call Program::SetParameterOffsetsAndIndex()? "
+ << "This is a Ceres bug; please contact the developers!";
+ min_parameter_block_position = std::min(parameter_block->index(),
+ min_parameter_block_position);
+ }
}
return min_parameter_block_position;
}
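
CreateLinearSolver above rejects DOGLEG with iterative solvers, so a caller selecting dogleg has to pair it with a factorization based backend. A hedged configuration sketch using the option names that appear in this patch; enum values not shown in the diff should be treated as assumptions:

    ceres::Solver::Options options;
    // DOGLEG must be paired with a factorization based linear solver;
    // ITERATIVE_SCHUR and CGNR are rejected by CreateLinearSolver above.
    options.trust_region_strategy_type = ceres::DOGLEG;
    options.dogleg_type = ceres::TRADITIONAL_DOGLEG;
    options.linear_solver_type = ceres::SPARSE_NORMAL_CHOLESKY;
    // The new option below selects which sparse backend actually runs;
    // SUITE_SPARSE and CX_SPARSE are the values checked in this patch.
    options.sparse_linear_algebra_library = ceres::SUITE_SPARSE;
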
diff --git a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h
index 957ebcc65df..11b44de6f42 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/solver_impl.h
@@ -31,6 +31,9 @@
#ifndef CERES_INTERNAL_SOLVER_IMPL_H_
#define CERES_INTERNAL_SOLVER_IMPL_H_
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
#include "ceres/solver.h"
namespace ceres {
@@ -46,15 +49,18 @@ class SolverImpl {
// Mirrors the interface in solver.h, but exposes implementation
// details for testing internally.
static void Solve(const Solver::Options& options,
- Problem* problem,
+ ProblemImpl* problem_impl,
Solver::Summary* summary);
// Create the transformed Program, which has all the fixed blocks
// and residuals eliminated, and in the case of automatic schur
// ordering, has the E blocks first in the resulting program, with
// options.num_eliminate_blocks set appropriately.
+ // If fixed_cost is not NULL, the residual blocks that are removed
+ // are evaluated and the sum of their cost is returned in fixed_cost.
static Program* CreateReducedProgram(Solver::Options* options,
ProblemImpl* problem_impl,
+ double* fixed_cost,
string* error);
// Create the appropriate linear solver, taking into account any
@@ -92,16 +98,18 @@ class SolverImpl {
Program* program,
Evaluator* evaluator,
LinearSolver* linear_solver,
- double* initial_parameters,
- double* final_parameters,
+ double* parameters,
Solver::Summary* summary);
// Remove the fixed or unused parameter blocks and residuals
// depending only on fixed parameters from the problem. Also updates
// num_eliminate_blocks, since removed parameters changes the point
// at which the eliminated blocks is valid.
+ // If fixed_cost is not NULL, the residual blocks that are removed
+ // are evaluated and the sum of their cost is returned in fixed_cost.
static bool RemoveFixedBlocksFromProgram(Program* program,
int* num_eliminate_blocks,
+ double* fixed_cost,
string* error);
};
diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/sparse_matrix.h
index 562210dfec8..1b19f887946 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_matrix.h
@@ -86,7 +86,7 @@ class SparseMatrix : public LinearOperator {
// sparse matrix.
virtual void ToDenseMatrix(Matrix* dense_matrix) const = 0;
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
// Dump the sparse matrix to a proto. Destroys the contents of proto.
virtual void ToProto(SparseMatrixProto* proto) const = 0;
#endif
diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
index 59222dc374d..9e00b4402dc 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.cc
@@ -28,13 +28,16 @@
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
-#ifndef CERES_NO_SUITESPARSE
-
#include "ceres/sparse_normal_cholesky_solver.h"
#include <algorithm>
#include <cstring>
#include <ctime>
+
+#ifndef CERES_NO_CXSPARSE
+#include "cs.h"
+#endif
+
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/linear_solver.h"
#include "ceres/suitesparse.h"
@@ -48,13 +51,30 @@ namespace internal {
SparseNormalCholeskySolver::SparseNormalCholeskySolver(
const LinearSolver::Options& options)
- : options_(options), symbolic_factor_(NULL) {}
+ : options_(options) {
+#ifndef CERES_NO_SUITESPARSE
+ factor_ = NULL;
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+ cxsparse_factor_ = NULL;
+#endif // CERES_NO_CXSPARSE
+}
SparseNormalCholeskySolver::~SparseNormalCholeskySolver() {
- if (symbolic_factor_ != NULL) {
- ss_.Free(symbolic_factor_);
- symbolic_factor_ = NULL;
+#ifndef CERES_NO_SUITESPARSE
+ if (factor_ != NULL) {
+ ss_.Free(factor_);
+ factor_ = NULL;
}
+#endif
+
+#ifndef CERES_NO_CXSPARSE
+ if (cxsparse_factor_ != NULL) {
+ cxsparse_.Free(cxsparse_factor_);
+ cxsparse_factor_ = NULL;
+ }
+#endif // CERES_NO_CXSPARSE
}
LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
@@ -62,6 +82,93 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
const double* b,
const LinearSolver::PerSolveOptions& per_solve_options,
double * x) {
+ switch (options_.sparse_linear_algebra_library) {
+ case SUITE_SPARSE:
+ return SolveImplUsingSuiteSparse(A, b, per_solve_options, x);
+ case CX_SPARSE:
+ return SolveImplUsingCXSparse(A, b, per_solve_options, x);
+ default:
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options_.sparse_linear_algebra_library;
+ }
+
+ LOG(FATAL) << "Unknown sparse linear algebra library : "
+ << options_.sparse_linear_algebra_library;
+ return LinearSolver::Summary();
+}
+
+#ifndef CERES_NO_CXSPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LinearSolver::Summary summary;
+ summary.num_iterations = 1;
+ const int num_cols = A->num_cols();
+ Vector Atb = Vector::Zero(num_cols);
+ A->LeftMultiply(b, Atb.data());
+
+ if (per_solve_options.D != NULL) {
+ // Temporarily append a diagonal block to the A matrix, but undo
+ // it before returning the matrix to the user.
+ CompressedRowSparseMatrix D(per_solve_options.D, num_cols);
+ A->AppendRows(D);
+ }
+
+ VectorRef(x, num_cols).setZero();
+
+ // Wrap the augmented Jacobian in a compressed sparse column matrix.
+ cs_di At = cxsparse_.CreateSparseMatrixTransposeView(A);
+
+ // Compute the normal equations. J'J delta = J'f and solve them
+ // using a sparse Cholesky factorization. Notice that when compared
+ // to SuiteSparse we have to explicitly compute the transpose of Jt,
+ // and then the normal equations before they can be
+ // factorized. CHOLMOD/SuiteSparse on the other hand can just work
+ // off of Jt to compute the Cholesky factorization of the normal
+ // equations.
+ cs_di* A2 = cs_transpose(&At, 1);
+ cs_di* AtA = cs_multiply(&At, A2);
+
+ cxsparse_.Free(A2);
+ if (per_solve_options.D != NULL) {
+ A->DeleteRows(num_cols);
+ }
+
+ // Compute symbolic factorization if not available.
+ if (cxsparse_factor_ == NULL) {
+ cxsparse_factor_ = CHECK_NOTNULL(cxsparse_.AnalyzeCholesky(AtA));
+ }
+
+ // Solve the linear system.
+ if (cxsparse_.SolveCholesky(AtA, cxsparse_factor_, Atb.data())) {
+ VectorRef(x, Atb.rows()) = Atb;
+ summary.termination_type = TOLERANCE;
+ }
+
+ cxsparse_.Free(AtA);
+ return summary;
+}
+#else
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LOG(FATAL) << "No CXSparse support in Ceres.";
+
+ // Unreachable but MSVC does not know this.
+ return LinearSolver::Summary();
+}
+#endif
+
+#ifndef CERES_NO_SUITESPARSE
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
const time_t start_time = time(NULL);
const int num_cols = A->num_cols();
@@ -84,13 +191,25 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
cholmod_dense* rhs = ss_.CreateDenseVector(Atb.data(), num_cols, num_cols);
const time_t init_time = time(NULL);
- if (symbolic_factor_ == NULL) {
- symbolic_factor_ = CHECK_NOTNULL(ss_.AnalyzeCholesky(lhs.get()));
+ if (factor_ == NULL) {
+ if (options_.use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(lhs.get(),
+ A->col_blocks(),
+ A->row_blocks());
+ } else {
+ factor_ = ss_.AnalyzeCholesky(lhs.get());
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
}
+ CHECK_NOTNULL(factor_);
+
const time_t symbolic_time = time(NULL);
- cholmod_dense* sol = ss_.SolveCholesky(lhs.get(), symbolic_factor_, rhs);
+ cholmod_dense* sol = ss_.SolveCholesky(lhs.get(), factor_, rhs);
const time_t solve_time = time(NULL);
ss_.Free(rhs);
@@ -100,11 +219,6 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
A->DeleteRows(num_cols);
}
- if (!options_.constant_sparsity) {
- ss_.Free(symbolic_factor_);
- symbolic_factor_ = NULL;
- }
-
summary.num_iterations = 1;
if (sol != NULL) {
memcpy(x, sol->x, num_cols * sizeof(*x));
@@ -115,15 +229,25 @@ LinearSolver::Summary SparseNormalCholeskySolver::SolveImpl(
}
const time_t cleanup_time = time(NULL);
- VLOG(2) << "time (sec) total: " << cleanup_time - start_time
- << " init: " << init_time - start_time
- << " symbolic: " << symbolic_time - init_time
- << " solve: " << solve_time - symbolic_time
- << " cleanup: " << cleanup_time - solve_time;
+ VLOG(2) << "time (sec) total: " << (cleanup_time - start_time)
+ << " init: " << (init_time - start_time)
+ << " symbolic: " << (symbolic_time - init_time)
+ << " solve: " << (solve_time - symbolic_time)
+ << " cleanup: " << (cleanup_time - solve_time);
return summary;
}
+#else
+LinearSolver::Summary SparseNormalCholeskySolver::SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& per_solve_options,
+ double * x) {
+ LOG(FATAL) << "No SuiteSparse support in Ceres.";
+
+ // Unreachable but MSVC does not know this.
+ return LinearSolver::Summary();
+}
+#endif
} // namespace internal
} // namespace ceres
-
-#endif // CERES_NO_SUITESPARSE
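
Both backends above ultimately factorize the normal equations J'J x = J'b, with the optional per-solve diagonal D appended to J beforehand. For orientation only, a dense Eigen sketch of that algebra; it is not the Ceres code path:

    #include <Eigen/Dense>

    // Solve min ||J x - b||^2 through the normal equations (J^T J) x = J^T b,
    // the system the sparse Cholesky backends above factorize.
    Eigen::VectorXd SolveNormalEquations(const Eigen::MatrixXd& J,
                                         const Eigen::VectorXd& b) {
      const Eigen::MatrixXd JtJ = J.transpose() * J;
      const Eigen::VectorXd Jtb = J.transpose() * b;
      return JtJ.ldlt().solve(Jtb);  // dense LDLT stand-in for CHOLMOD/CXSparse
    }
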
diff --git a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h
index ce1d6d285be..40d9e0a0327 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/sparse_normal_cholesky_solver.h
@@ -34,13 +34,10 @@
#ifndef CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
#define CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
-#ifndef CERES_NO_SUITESPARSE
-
-#include "cholmod.h"
-#include "cholmod_core.h"
+#include "ceres/cxsparse.h"
#include "ceres/linear_solver.h"
-#include "ceres/suitesparse.h"
#include "ceres/internal/macros.h"
+#include "ceres/suitesparse.h"
namespace ceres {
namespace internal {
@@ -61,17 +58,36 @@ class SparseNormalCholeskySolver : public CompressedRowSparseMatrixSolver {
const LinearSolver::PerSolveOptions& options,
double* x);
- const LinearSolver::Options options_;
+ LinearSolver::Summary SolveImplUsingSuiteSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+ // Crashes if CXSparse is not installed.
+ LinearSolver::Summary SolveImplUsingCXSparse(
+ CompressedRowSparseMatrix* A,
+ const double* b,
+ const LinearSolver::PerSolveOptions& options,
+ double* x);
+
+#ifndef CERES_NO_SUITESPARSE
SuiteSparse ss_;
+ // Cached factorization
+ cholmod_factor* factor_;
+#endif // CERES_NO_SUITESPARSE
+#ifndef CERES_NO_CXSPARSE
+ CXSparse cxsparse_;
// Cached factorization
- cholmod_factor* symbolic_factor_;
- DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
+ cs_dis* cxsparse_factor_;
+#endif // CERES_NO_CXSPARSE
+
+ const LinearSolver::Options options_;
+ CERES_DISALLOW_COPY_AND_ASSIGN(SparseNormalCholeskySolver);
};
} // namespace internal
} // namespace ceres
-#endif // CERES_NO_SUITESPARSE
-
#endif // CERES_INTERNAL_SPARSE_NORMAL_CHOLESKY_SOLVER_H_
diff --git a/extern/libmv/third_party/ceres/internal/ceres/split.cc b/extern/libmv/third_party/ceres/internal/ceres/split.cc
index 4fa1bd468b9..c65c8a5bb5d 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/split.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/split.cc
@@ -31,6 +31,7 @@
#include <string>
#include <vector>
#include <iterator>
+#include "ceres/split.h"
#include "ceres/internal/port.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/split.h b/extern/libmv/third_party/ceres/internal/ceres/split.h
new file mode 100644
index 00000000000..ec579e974da
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/split.h
@@ -0,0 +1,21 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Author: keir@google.com (Keir Mierle)
+
+#ifndef CERES_INTERNAL_SPLIT_H_
+#define CERES_INTERNAL_SPLIT_H_
+
+#include <string>
+#include <vector>
+#include "ceres/internal/port.h"
+
+namespace ceres {
+
+// Split a string using one or more character delimiters, presented as a
+// nul-terminated c string. Append the components to 'result'. If there are
+// consecutive delimiters, this function skips over all of them.
+void SplitStringUsing(const string& full, const char* delim,
+ vector<string>* res);
+
+} // namespace ceres
+
+#endif // CERES_INTERNAL_SPLIT_H_
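
A short usage sketch of the helper declared above; the input string and delimiters are illustrative:

    #include <string>
    #include <vector>
    #include "ceres/split.h"

    void Demo() {
      std::vector<std::string> parts;
      ceres::SplitStringUsing("a,,b;c", ",;", &parts);
      // Consecutive delimiters are skipped, so parts == {"a", "b", "c"}.
    }
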
diff --git a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc
index c0f35225bc3..396a48b7d97 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.cc
@@ -34,6 +34,7 @@
#include <string>
#include <vector>
+#include "ceres/stringprintf.h"
#include "ceres/internal/port.h"
namespace ceres {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h
index 30b974e7ae5..f2f907ab32d 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/stringprintf.h
@@ -54,34 +54,34 @@ namespace internal {
// N.B.: As the GCC manual states, "[s]ince non-static C++ methods
// have an implicit 'this' argument, the arguments of such methods
// should be counted from two, not one."
-#define PRINTF_ATTRIBUTE(string_index, first_to_check) \
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__ (__printf__, string_index, first_to_check)))
-#define SCANF_ATTRIBUTE(string_index, first_to_check) \
+#define CERES_SCANF_ATTRIBUTE(string_index, first_to_check) \
__attribute__((__format__ (__scanf__, string_index, first_to_check)))
#else
-#define PRINTF_ATTRIBUTE(string_index, first_to_check)
+#define CERES_PRINTF_ATTRIBUTE(string_index, first_to_check)
#endif
// Return a C++ string.
extern string StringPrintf(const char* format, ...)
// Tell the compiler to do printf format string checking.
- PRINTF_ATTRIBUTE(1,2);
+ CERES_PRINTF_ATTRIBUTE(1,2);
// Store result into a supplied string and return it.
extern const string& SStringPrintf(string* dst, const char* format, ...)
// Tell the compiler to do printf format string checking.
- PRINTF_ATTRIBUTE(2,3);
+ CERES_PRINTF_ATTRIBUTE(2,3);
// Append result to a supplied string.
extern void StringAppendF(string* dst, const char* format, ...)
// Tell the compiler to do printf format string checking.
- PRINTF_ATTRIBUTE(2,3);
+ CERES_PRINTF_ATTRIBUTE(2,3);
// Lower-level routine that takes a va_list and appends to a specified string.
// All other routines are just convenience wrappers around it.
extern void StringAppendV(string* dst, const char* format, va_list ap);
-#undef PRINTF_ATTRIBUTE
+#undef CERES_PRINTF_ATTRIBUTE
} // namespace internal
} // namespace ceres
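
For reference, a small sketch of how these helpers with the renamed format attributes are used, following the same pattern as FullReport() earlier in this patch; the values are illustrative:

    #include <string>
    #include "ceres/stringprintf.h"

    void BuildReport() {
      std::string report;
      ceres::internal::StringAppendF(&report, "Residuals % 10d\n", 42);
      const std::string row =
          ceres::internal::StringPrintf("% 4d: f:% 8e", 3, 1.5e-2);
    }
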
diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc
index 1cf6a7496a7..cf3c48f84e6 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.cc
@@ -29,15 +29,14 @@
// Author: sameeragarwal@google.com (Sameer Agarwal)
#ifndef CERES_NO_SUITESPARSE
-
#include "ceres/suitesparse.h"
+#include <vector>
#include "cholmod.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
namespace ceres {
namespace internal {
-
cholmod_sparse* SuiteSparse::CreateSparseMatrix(TripletSparseMatrix* A) {
cholmod_triplet triplet;
@@ -111,6 +110,13 @@ cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
}
cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
+ // Cholmod can try multiple re-ordering strategies to find a fill
+ // reducing ordering. Here we just tell it to use AMD with an automatic,
+ // matrix-dependent choice of supernodal versus simplicial
+ // factorization.
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_AMD;
+ cc_.supernodal = CHOLMOD_AUTO;
cholmod_factor* factor = cholmod_analyze(A, &cc_);
CHECK_EQ(cc_.status, CHOLMOD_OK)
<< "Cholmod symbolic analysis failed " << cc_.status;
@@ -118,6 +124,153 @@ cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
return factor;
}
+cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(
+ cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks) {
+ vector<int> ordering;
+ if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
+ return NULL;
+ }
+ return AnalyzeCholeskyWithUserOrdering(A, ordering);
+}
+
+cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
+ const vector<int>& ordering) {
+ CHECK_EQ(ordering.size(), A->nrow);
+ cc_.nmethods = 1;
+ cc_.method[0].ordering = CHOLMOD_GIVEN;
+ cholmod_factor* factor =
+ cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
+ CHECK_EQ(cc_.status, CHOLMOD_OK)
+ << "Cholmod symbolic analysis failed " << cc_.status;
+ CHECK_NOTNULL(factor);
+ return factor;
+}
+
+bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* ordering) {
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ // Arrays storing the compressed column structure of the matrix
+ // encoding the block sparsity of A.
+ vector<int> block_cols;
+ vector<int> block_rows;
+
+ ScalarMatrixToBlockMatrix(A,
+ row_blocks,
+ col_blocks,
+ &block_rows,
+ &block_cols);
+
+ cholmod_sparse_struct block_matrix;
+ block_matrix.nrow = num_row_blocks;
+ block_matrix.ncol = num_col_blocks;
+ block_matrix.nzmax = block_rows.size();
+ block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
+ block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
+ block_matrix.x = NULL;
+ block_matrix.stype = A->stype;
+ block_matrix.itype = CHOLMOD_INT;
+ block_matrix.xtype = CHOLMOD_PATTERN;
+ block_matrix.dtype = CHOLMOD_DOUBLE;
+ block_matrix.sorted = 1;
+ block_matrix.packed = 1;
+
+ vector<int> block_ordering(num_row_blocks);
+ if (!cholmod_amd(&block_matrix, NULL, 0, &block_ordering[0], &cc_)) {
+ return false;
+ }
+
+ BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
+ return true;
+}
+
+void SuiteSparse::ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* block_rows,
+ vector<int>* block_cols) {
+ CHECK_NOTNULL(block_rows)->clear();
+ CHECK_NOTNULL(block_cols)->clear();
+ const int num_row_blocks = row_blocks.size();
+ const int num_col_blocks = col_blocks.size();
+
+ vector<int> row_block_starts(num_row_blocks);
+ for (int i = 0, cursor = 0; i < num_row_blocks; ++i) {
+ row_block_starts[i] = cursor;
+ cursor += row_blocks[i];
+ }
+
+ // The reinterpret_cast is needed here because CHOLMOD stores arrays
+ // as void*.
+ const int* scalar_cols = reinterpret_cast<const int*>(A->p);
+ const int* scalar_rows = reinterpret_cast<const int*>(A->i);
+
+ // This loop extracts the block sparsity of the scalar sparse matrix
+ // A. It does so by iterating over the columns, but only considering
+ // the columns corresponding to the first element of each column
+ // block. Within each column, the inner loop iterates over the rows,
+ // and detects the presence of a row block by checking for the
+ // presence of a non-zero entry corresponding to its first element.
+ block_cols->push_back(0);
+ int c = 0;
+ for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
+ int column_size = 0;
+ for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
+ vector<int>::const_iterator it = lower_bound(row_block_starts.begin(),
+ row_block_starts.end(),
+ scalar_rows[idx]);
+ // Since we are using lower_bound, it will return the row id
+ // where the row block starts. For everything but the first row
+ // of the block, where these values will be the same, we can
+ // skip, as we only need the first row to detect the presence of
+ // the block.
+ //
+ // For all rows but the first in the last row block,
+ // lower_bound will return row_block_starts.end(), but those can
+ // be skipped like the rows in other row blocks too.
+ if (it == row_block_starts.end() || *it != scalar_rows[idx]) {
+ continue;
+ }
+
+ block_rows->push_back(it - row_block_starts.begin());
+ ++column_size;
+ }
+ block_cols->push_back(block_cols->back() + column_size);
+ c += col_blocks[col_block];
+ }
+}
+
+void SuiteSparse::BlockOrderingToScalarOrdering(
+ const vector<int>& blocks,
+ const vector<int>& block_ordering,
+ vector<int>* scalar_ordering) {
+ CHECK_EQ(blocks.size(), block_ordering.size());
+ const int num_blocks = blocks.size();
+
+ // block_starts = [0, block1, block1 + block2 ..]
+ vector<int> block_starts(num_blocks);
+ for (int i = 0, cursor = 0; i < num_blocks ; ++i) {
+ block_starts[i] = cursor;
+ cursor += blocks[i];
+ }
+
+ scalar_ordering->resize(block_starts.back() + blocks.back());
+ int cursor = 0;
+ for (int i = 0; i < num_blocks; ++i) {
+ const int block_id = block_ordering[i];
+ const int block_size = blocks[block_id];
+ int block_position = block_starts[block_id];
+ for (int j = 0; j < block_size; ++j) {
+ (*scalar_ordering)[cursor++] = block_position++;
+ }
+ }
+}
+
bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
CHECK_NOTNULL(A);
CHECK_NOTNULL(L);
diff --git a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h
index 091e67a69a9..eb691c0c0ed 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/suitesparse.h
@@ -37,6 +37,7 @@
#include <cstring>
#include <string>
+#include <vector>
#include <glog/logging.h>
#include "cholmod.h"
@@ -105,12 +106,35 @@ class SuiteSparse {
cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
}
- // Analyze the sparsity structure of the matrix A compute the
- // symbolic factorization of A. A is not modified, only the pattern
- // of non-zeros of A is used, the actual numerical values in A are
- // of no consequence. Caller owns the result.
+ // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
+ // the fill-in in the Cholesky factorization of the corresponding
+ // matrix. This is done by using the AMD algorithm.
+ //
+ // Using this ordering, the symbolic Cholesky factorization of A (or
+ // AA') is computed and returned.
+ //
+ // A is not modified, only the pattern of non-zeros of A is used,
+ // the actual numerical values in A are of no consequence.
+ //
+ // Caller owns the result.
cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);
+ cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks);
+
+ // If A is symmetric, then compute the symbolic Cholesky
+ // factorization of A(ordering, ordering). If A is unsymmetric, then
+ // compute the symbolic factorization of
+ // A(ordering,:) A(ordering,:)'.
+ //
+ // A is not modified, only the pattern of non-zeros of A is used,
+ // the actual numerical values in A are of no consequence.
+ //
+ // Caller owns the result.
+ cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
+ const vector<int>& ordering);
+
// Use the symbolic factorization in L, to find the numerical
// factorization for the matrix A or AA^T. Return true if
// successful, false otherwise. L contains the numeric factorization
@@ -129,6 +153,56 @@ class SuiteSparse {
cholmod_factor* L,
cholmod_dense* b);
+ // By virtue of the modeling layer in Ceres being block oriented,
+ // all the matrices used by Ceres are also block oriented. When
+ // doing sparse direct factorization of these matrices the
+ // fill-reducing ordering algorithms (in particular AMD) can either
+ // be run on the block or the scalar form of these matrices. The two
+ // SuiteSparse::AnalyzeCholesky methods allow the client to
+ // compute the symbolic factorization of a matrix by either using
+ // AMD on the matrix or a user provided ordering of the rows.
+ //
+ // But since the underlying matrices are block oriented, it is worth
+ // running AMD on just the block structure of these matrices and then
+ // lifting these block orderings to a full scalar ordering. This
+ // preserves the block structure of the permuted matrix, and exposes
+ // more of the super-nodal structure of the matrix to the numerical
+ // factorization routines.
+ //
+ // Find the block oriented AMD ordering of a matrix A, whose row and
+ // column blocks are given by row_blocks, and col_blocks
+ // respectively. The matrix may or may not be symmetric. The entries
+ // of col_blocks do not need to sum to the number of columns in
+ // A. If this is the case, only the first sum(col_blocks) columns are used
+ // to compute the ordering.
+ bool BlockAMDOrdering(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* ordering);
+
+ // Given a set of blocks and a permutation of these blocks, compute
+ // the corresponding "scalar" ordering, where the scalar ordering of
+ // size sum(blocks).
+ static void BlockOrderingToScalarOrdering(const vector<int>& blocks,
+ const vector<int>& block_ordering,
+ vector<int>* scalar_ordering);
+
+ // Extract the block sparsity pattern of the scalar sparse matrix
+ // A and return it in compressed column form. The compressed column
+ // form is stored in two vectors block_rows, and block_cols, which
+ // correspond to the row and column arrays in a compressed column sparse
+ // matrix.
+ //
+ // If c_ij is the block in the matrix A corresponding to row block i
+ // and column block j, then it is expected that A contains at least
+ // one non-zero entry corresponding to the top left entry of c_ij,
+ // as that entry is used to detect the presence of a non-zero c_ij.
+ static void ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
+ const vector<int>& row_blocks,
+ const vector<int>& col_blocks,
+ vector<int>* block_rows,
+ vector<int>* block_cols);
+
void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
void Free(cholmod_dense* m) { cholmod_free_dense(&m, &cc_); }
void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }
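
A worked trace of BlockOrderingToScalarOrdering, matching the implementation added in suitesparse.cc above; the numeric values are illustrative and the call assumes the static method is accessible to the caller:

    #include <vector>
    #include "ceres/suitesparse.h"

    void Demo() {
      // blocks         = {2, 3}  -> block 0 covers scalar rows {0, 1},
      //                             block 1 covers scalar rows {2, 3, 4}.
      // block_ordering = {1, 0}  -> visit block 1 first, then block 0.
      std::vector<int> blocks;
      blocks.push_back(2);
      blocks.push_back(3);
      std::vector<int> block_ordering;
      block_ordering.push_back(1);
      block_ordering.push_back(0);

      std::vector<int> scalar_ordering;
      ceres::internal::SuiteSparse::BlockOrderingToScalarOrdering(
          blocks, block_ordering, &scalar_ordering);
      // scalar_ordering == {2, 3, 4, 0, 1}
    }
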
diff --git a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc
index 247ab2e697b..ed8677ea18a 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.cc
@@ -32,12 +32,12 @@
#include <algorithm>
#include <cstddef>
-#include <glog/logging.h>
-#include "ceres/matrix_proto.h"
#include "ceres/internal/eigen.h"
#include "ceres/internal/port.h"
#include "ceres/internal/scoped_ptr.h"
+#include "ceres/matrix_proto.h"
#include "ceres/types.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -82,7 +82,7 @@ TripletSparseMatrix::TripletSparseMatrix(const TripletSparseMatrix& orig)
CopyData(orig);
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
TripletSparseMatrix::TripletSparseMatrix(const SparseMatrixProto& outer_proto) {
CHECK(outer_proto.has_triplet_matrix());
@@ -130,7 +130,7 @@ bool TripletSparseMatrix::AllTripletsWithinBounds() const {
void TripletSparseMatrix::Reserve(int new_max_num_nonzeros) {
CHECK_LE(num_nonzeros_, new_max_num_nonzeros)
- << "Reallocation will cause data loss";
+ << "Reallocation will cause data loss";
// Nothing to do if we have enough space already.
if (new_max_num_nonzeros <= max_num_nonzeros_)
@@ -214,7 +214,7 @@ void TripletSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
}
}
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
void TripletSparseMatrix::ToProto(SparseMatrixProto *proto) const {
proto->Clear();
diff --git a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.h b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.h
index 300e74d0bbc..89a645bd879 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/triplet_sparse_matrix.h
@@ -50,7 +50,7 @@ class TripletSparseMatrix : public SparseMatrix {
TripletSparseMatrix();
TripletSparseMatrix(int num_rows, int num_cols, int max_num_nonzeros);
explicit TripletSparseMatrix(const TripletSparseMatrix& orig);
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
explicit TripletSparseMatrix(const SparseMatrixProto& proto);
#endif
@@ -65,7 +65,7 @@ class TripletSparseMatrix : public SparseMatrix {
virtual void SquaredColumnNorm(double* x) const;
virtual void ScaleColumns(const double* scale);
virtual void ToDenseMatrix(Matrix* dense_matrix) const;
-#ifndef CERES_DONT_HAVE_PROTOCOL_BUFFERS
+#ifndef CERES_NO_PROTOCOL_BUFFERS
virtual void ToProto(SparseMatrixProto *proto) const;
#endif
virtual void ToTextFile(FILE* file) const;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc
new file mode 100644
index 00000000000..76c4f8a7580
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.cc
@@ -0,0 +1,550 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#include "ceres/trust_region_minimizer.h"
+
+#include <algorithm>
+#include <cstdlib>
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <string>
+#include <vector>
+
+#include "Eigen/Core"
+#include "ceres/array_utils.h"
+#include "ceres/evaluator.h"
+#include "ceres/internal/eigen.h"
+#include "ceres/internal/scoped_ptr.h"
+#include "ceres/linear_least_squares_problems.h"
+#include "ceres/sparse_matrix.h"
+#include "ceres/trust_region_strategy.h"
+#include "ceres/types.h"
+#include "glog/logging.h"
+
+namespace ceres {
+namespace internal {
+namespace {
+// Small constant for various floating point issues.
+const double kEpsilon = 1e-12;
+} // namespace
+
+// Execute the list of IterationCallbacks sequentially. If any one of
+// the callbacks does not return SOLVER_CONTINUE, then stop and return
+// its status.
+CallbackReturnType TrustRegionMinimizer::RunCallbacks(
+ const IterationSummary& iteration_summary) {
+ for (int i = 0; i < options_.callbacks.size(); ++i) {
+ const CallbackReturnType status =
+ (*options_.callbacks[i])(iteration_summary);
+ if (status != SOLVER_CONTINUE) {
+ return status;
+ }
+ }
+ return SOLVER_CONTINUE;
+}
+
+// Compute a scaling vector that is used to improve the conditioning
+// of the Jacobian.
+void TrustRegionMinimizer::EstimateScale(const SparseMatrix& jacobian,
+ double* scale) const {
+ jacobian.SquaredColumnNorm(scale);
+ for (int i = 0; i < jacobian.num_cols(); ++i) {
+ scale[i] = 1.0 / (kEpsilon + sqrt(scale[i]));
+ }
+}
+
+void TrustRegionMinimizer::Init(const Minimizer::Options& options) {
+ options_ = options;
+ sort(options_.lsqp_iterations_to_dump.begin(),
+ options_.lsqp_iterations_to_dump.end());
+}
+
+bool TrustRegionMinimizer::MaybeDumpLinearLeastSquaresProblem(
+ const int iteration,
+ const SparseMatrix* jacobian,
+ const double* residuals,
+ const double* step) const {
+ // TODO(sameeragarwal): Since the use of trust_region_radius has
+ // moved inside TrustRegionStrategy, it's not clear how we dump the
+ // regularization vector/matrix anymore.
+ //
+ // Doing this right requires either an API change to the
+ // TrustRegionStrategy and/or how LinearLeastSquares problems are
+ // stored on disk.
+ //
+ // For now, we will just not dump the regularizer.
+ return (!binary_search(options_.lsqp_iterations_to_dump.begin(),
+ options_.lsqp_iterations_to_dump.end(),
+ iteration) ||
+ DumpLinearLeastSquaresProblem(options_.lsqp_dump_directory,
+ iteration,
+ options_.lsqp_dump_format_type,
+ jacobian,
+ NULL,
+ residuals,
+ step,
+ options_.num_eliminate_blocks));
+}
+
+void TrustRegionMinimizer::Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary) {
+ time_t start_time = time(NULL);
+ time_t iteration_start_time = start_time;
+ Init(options);
+
+ summary->termination_type = NO_CONVERGENCE;
+ summary->num_successful_steps = 0;
+ summary->num_unsuccessful_steps = 0;
+
+ Evaluator* evaluator = CHECK_NOTNULL(options_.evaluator);
+ SparseMatrix* jacobian = CHECK_NOTNULL(options_.jacobian);
+ TrustRegionStrategy* strategy = CHECK_NOTNULL(options_.trust_region_strategy);
+
+ const int num_parameters = evaluator->NumParameters();
+ const int num_effective_parameters = evaluator->NumEffectiveParameters();
+ const int num_residuals = evaluator->NumResiduals();
+
+ VectorRef x_min(parameters, num_parameters);
+ Vector x = x_min;
+ double x_norm = x.norm();
+
+ Vector residuals(num_residuals);
+ Vector trust_region_step(num_effective_parameters);
+ Vector delta(num_effective_parameters);
+ Vector x_plus_delta(num_parameters);
+ Vector gradient(num_effective_parameters);
+ Vector model_residuals(num_residuals);
+ Vector scale(num_effective_parameters);
+
+ IterationSummary iteration_summary;
+ iteration_summary.iteration = 0;
+ iteration_summary.step_is_valid = false;
+ iteration_summary.step_is_successful = false;
+ iteration_summary.cost = summary->initial_cost;
+ iteration_summary.cost_change = 0.0;
+ iteration_summary.gradient_max_norm = 0.0;
+ iteration_summary.step_norm = 0.0;
+ iteration_summary.relative_decrease = 0.0;
+ iteration_summary.trust_region_radius = strategy->Radius();
+ // TODO(sameeragarwal): Rename eta to linear_solver_accuracy or
+ // something similar across the board.
+ iteration_summary.eta = options_.eta;
+ iteration_summary.linear_solver_iterations = 0;
+ iteration_summary.step_solver_time_in_seconds = 0;
+
+ // Do initial cost and Jacobian evaluation.
+ double cost = 0.0;
+ if (!evaluator->Evaluate(x.data(), &cost, residuals.data(), NULL, jacobian)) {
+ LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
+ summary->termination_type = NUMERICAL_FAILURE;
+ return;
+ }
+
+ int num_consecutive_nonmonotonic_steps = 0;
+ double minimum_cost = cost;
+ double reference_cost = cost;
+ double accumulated_reference_model_cost_change = 0.0;
+ double candidate_cost = cost;
+ double accumulated_candidate_model_cost_change = 0.0;
+
+ gradient.setZero();
+ jacobian->LeftMultiply(residuals.data(), gradient.data());
+ iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+
+ if (options_.jacobi_scaling) {
+ EstimateScale(*jacobian, scale.data());
+ jacobian->ScaleColumns(scale.data());
+ } else {
+ scale.setOnes();
+ }
+
+ // The initial gradient max_norm is bounded from below so that we do
+ // not divide by zero.
+ const double gradient_max_norm_0 =
+ max(iteration_summary.gradient_max_norm, kEpsilon);
+ const double absolute_gradient_tolerance =
+ options_.gradient_tolerance * gradient_max_norm_0;
+
+ if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ summary->termination_type = GRADIENT_TOLERANCE;
+ VLOG(1) << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << iteration_summary.gradient_max_norm / gradient_max_norm_0
+ << " <= " << options_.gradient_tolerance;
+ return;
+ }
+
+ iteration_summary.iteration_time_in_seconds =
+ time(NULL) - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
+ summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
+
+ // Call the various callbacks.
+ switch (RunCallbacks(iteration_summary)) {
+ case SOLVER_TERMINATE_SUCCESSFULLY:
+ summary->termination_type = USER_SUCCESS;
+ VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+ return;
+ case SOLVER_ABORT:
+ summary->termination_type = USER_ABORT;
+ VLOG(1) << "Terminating: User callback returned USER_ABORT.";
+ return;
+ case SOLVER_CONTINUE:
+ break;
+ default:
+ LOG(FATAL) << "Unknown type of user callback status";
+ }
+
+ int num_consecutive_invalid_steps = 0;
+ while (true) {
+ iteration_start_time = time(NULL);
+ if (iteration_summary.iteration >= options_.max_num_iterations) {
+ summary->termination_type = NO_CONVERGENCE;
+ VLOG(1) << "Terminating: Maximum number of iterations reached.";
+ break;
+ }
+
+ const double total_solver_time = iteration_start_time - start_time +
+ summary->preprocessor_time_in_seconds;
+ if (total_solver_time >= options_.max_solver_time_in_seconds) {
+ summary->termination_type = NO_CONVERGENCE;
+ VLOG(1) << "Terminating: Maximum solver time reached.";
+ break;
+ }
+
+ iteration_summary = IterationSummary();
+ iteration_summary = summary->iterations.back();
+ iteration_summary.iteration = summary->iterations.back().iteration + 1;
+ iteration_summary.step_is_valid = false;
+ iteration_summary.step_is_successful = false;
+
+ const time_t strategy_start_time = time(NULL);
+ TrustRegionStrategy::PerSolveOptions per_solve_options;
+ per_solve_options.eta = options_.eta;
+ TrustRegionStrategy::Summary strategy_summary =
+ strategy->ComputeStep(per_solve_options,
+ jacobian,
+ residuals.data(),
+ trust_region_step.data());
+
+ iteration_summary.step_solver_time_in_seconds =
+ time(NULL) - strategy_start_time;
+ iteration_summary.linear_solver_iterations =
+ strategy_summary.num_iterations;
+
+ if (!MaybeDumpLinearLeastSquaresProblem(iteration_summary.iteration,
+ jacobian,
+ residuals.data(),
+ trust_region_step.data())) {
+ LOG(FATAL) << "Tried writing linear least squares problem: "
+ << options.lsqp_dump_directory << " but failed.";
+ }
+
+ double new_model_cost = 0.0;
+ if (strategy_summary.termination_type != FAILURE) {
+ // new_model_cost = 1/2 |f + J * step|^2
+ model_residuals = residuals;
+ jacobian->RightMultiply(trust_region_step.data(), model_residuals.data());
+ new_model_cost = model_residuals.squaredNorm() / 2.0;
+
+ // In exact arithmetic, this would never be the case. But poorly
+ // conditioned matrices can give rise to situations where the
+ // new_model_cost can actually be larger than half the squared
+ // norm of the residual vector. We allow for a small tolerance
+ // around cost, and beyond that declare the step to be invalid.
+ if ((1.0 - new_model_cost / cost) < -kEpsilon) {
+ VLOG(1) << "Invalid step: current_cost: " << cost
+ << " new_model_cost " << new_model_cost
+ << " absolute difference " << (cost - new_model_cost)
+ << " relative difference " << (1.0 - new_model_cost/cost);
+ } else {
+ iteration_summary.step_is_valid = true;
+ }
+ }
+
+ if (!iteration_summary.step_is_valid) {
+ // Invalid steps can happen for a number of reasons. We allow a
+ // limited number of consecutive failures, and return with
+ // NUMERICAL_FAILURE if this limit is exceeded.
+ if (++num_consecutive_invalid_steps >=
+ options_.max_num_consecutive_invalid_steps) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ LOG(WARNING) << "Terminating. Number of successive invalid steps more "
+ << "than "
+ << "Solver::Options::max_num_consecutive_invalid_steps: "
+ << options_.max_num_consecutive_invalid_steps;
+ return;
+ }
+
+ // We are going to try to reduce the trust region radius and
+ // solve again. To do this, we treat this iteration as an
+ // unsuccessful iteration. Since the various callbacks are
+ // still executed, we fill the iteration summary with data
+ // that assumes a step of length zero and no progress.
+ iteration_summary.cost = cost;
+ iteration_summary.cost_change = 0.0;
+ iteration_summary.gradient_max_norm =
+ summary->iterations.back().gradient_max_norm;
+ iteration_summary.step_norm = 0.0;
+ iteration_summary.relative_decrease = 0.0;
+ iteration_summary.eta = options_.eta;
+ } else {
+ // The step is numerically valid, so now we can judge its quality.
+ num_consecutive_invalid_steps = 0;
+
+ // We allow some slop around 0, and clamp the model_cost_change
+ // at kEpsilon * min(1.0, cost) from below.
+ //
+ // In exact arithmetic this should never be needed, as we are
+ // guaranteed that new_model_cost <= cost. However, due to various
+ // numerical issues, it is possible that new_model_cost is
+ // nearly equal to cost, and the difference is a small negative
+ // number. To make sure that the relative_decrease computation
+ // remains sane, we clamp the difference (cost - new_model_cost)
+ // from below at a small positive number.
+ //
+ // This lower bound is kEpsilon * min(1.0, cost), which
+ // ensures that it will never get too large in absolute value,
+ // while scaling down proportionally with the magnitude of the
+ // cost. This is important for problems where the minimum of the
+ // objective function is near zero.
+ const double model_cost_change =
+ max(kEpsilon * min(1.0, cost), cost - new_model_cost);
+
+ // Undo the Jacobian column scaling.
+ delta = (trust_region_step.array() * scale.array()).matrix();
+ iteration_summary.step_norm = delta.norm();
+
+ // Convergence based on parameter_tolerance.
+ const double step_size_tolerance = options_.parameter_tolerance *
+ (x_norm + options_.parameter_tolerance);
+ if (iteration_summary.step_norm <= step_size_tolerance) {
+ VLOG(1) << "Terminating. Parameter tolerance reached. "
+ << "relative step_norm: "
+ << iteration_summary.step_norm /
+ (x_norm + options_.parameter_tolerance)
+ << " <= " << options_.parameter_tolerance;
+ summary->termination_type = PARAMETER_TOLERANCE;
+ return;
+ }
+
+ if (!evaluator->Plus(x.data(), delta.data(), x_plus_delta.data())) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ LOG(WARNING) << "Terminating. Failed to compute "
+ << "Plus(x, delta, x_plus_delta).";
+ return;
+ }
+
+ // Try this step.
+ double new_cost;
+ if (!evaluator->Evaluate(x_plus_delta.data(),
+ &new_cost,
+ NULL, NULL, NULL)) {
+ // If the evaluation of the new cost fails, treat it as a step
+ // with high cost.
+ LOG(WARNING) << "Step failed to evaluate. "
+ << "Treating it as step with infinite cost";
+ new_cost = numeric_limits<double>::max();
+ }
+
+ VLOG(2) << "old cost: " << cost << " new cost: " << new_cost;
+ iteration_summary.cost_change = cost - new_cost;
+ const double absolute_function_tolerance =
+ options_.function_tolerance * cost;
+ if (fabs(iteration_summary.cost_change) < absolute_function_tolerance) {
+ VLOG(1) << "Terminating. Function tolerance reached. "
+ << "|cost_change|/cost: "
+ << fabs(iteration_summary.cost_change) / cost
+ << " <= " << options_.function_tolerance;
+ summary->termination_type = FUNCTION_TOLERANCE;
+ return;
+ }
+
+ const double relative_decrease =
+ iteration_summary.cost_change / model_cost_change;
+
+ const double historical_relative_decrease =
+ (reference_cost - new_cost) /
+ (accumulated_reference_model_cost_change + model_cost_change);
+
+ // If monotonic steps are being used, then the relative_decrease
+ // is the usual ratio of the change in objective function value
+ // divided by the change in model cost.
+ //
+ // If non-monotonic steps are allowed, then we take the maximum
+ // of the relative_decrease and the
+ // historical_relative_decrease, which measures the decrease
+ // relative to a reference iteration. The model cost change is
+ // estimated by accumulating the model cost changes since the
+ // reference iteration. The historical relative_decrease offers
+ // a boost to a step which is not too bad compared to the
+ // reference iteration, allowing for non-monotonic steps.
+ iteration_summary.relative_decrease =
+ options.use_nonmonotonic_steps
+ ? max(relative_decrease, historical_relative_decrease)
+ : relative_decrease;
+
+ iteration_summary.step_is_successful =
+ iteration_summary.relative_decrease > options_.min_relative_decrease;
+
+ if (iteration_summary.step_is_successful) {
+ accumulated_candidate_model_cost_change += model_cost_change;
+ accumulated_reference_model_cost_change += model_cost_change;
+ if (relative_decrease <= options_.min_relative_decrease) {
+ VLOG(2) << "Non-monotonic step! "
+ << " relative_decrease: " << relative_decrease
+ << " historical_relative_decrease: "
+ << historical_relative_decrease;
+ }
+ }
+ }
+
+ if (iteration_summary.step_is_successful) {
+ ++summary->num_successful_steps;
+ strategy->StepAccepted(iteration_summary.relative_decrease);
+ x = x_plus_delta;
+ x_norm = x.norm();
+
+ // Step looks good, evaluate the residuals and Jacobian at this
+ // point.
+ if (!evaluator->Evaluate(x.data(),
+ &cost,
+ residuals.data(),
+ NULL,
+ jacobian)) {
+ summary->termination_type = NUMERICAL_FAILURE;
+ LOG(WARNING) << "Terminating: Residual and Jacobian evaluation failed.";
+ return;
+ }
+
+ gradient.setZero();
+ jacobian->LeftMultiply(residuals.data(), gradient.data());
+ iteration_summary.gradient_max_norm = gradient.lpNorm<Eigen::Infinity>();
+
+ if (iteration_summary.gradient_max_norm <= absolute_gradient_tolerance) {
+ summary->termination_type = GRADIENT_TOLERANCE;
+ VLOG(1) << "Terminating: Gradient tolerance reached."
+ << "Relative gradient max norm: "
+ << iteration_summary.gradient_max_norm / gradient_max_norm_0
+ << " <= " << options_.gradient_tolerance;
+ return;
+ }
+
+ if (options_.jacobi_scaling) {
+ jacobian->ScaleColumns(scale.data());
+ }
+
+ // Update the best, reference and candidate iterates.
+ //
+ // Based on algorithm 10.1.2 (page 357) of "Trust Region
+ // Methods" by Conn Gould & Toint, or equations 33-40 of
+ // "Non-monotone trust-region algorithms for nonlinear
+ // optimization subject to convex constraints" by Phil Toint,
+ // Mathematical Programming, 77, 1997.
+ if (cost < minimum_cost) {
+ // A step that improves solution quality was found.
+ x_min = x;
+ minimum_cost = cost;
+ // Set the candidate iterate to the current point.
+ candidate_cost = cost;
+ num_consecutive_nonmonotonic_steps = 0;
+ accumulated_candidate_model_cost_change = 0.0;
+ } else {
+ ++num_consecutive_nonmonotonic_steps;
+ if (cost > candidate_cost) {
+ // The current iterate has a higher cost than the
+ // candidate iterate. Set the candidate to this point.
+ VLOG(2) << "Updating the candidate iterate to the current point.";
+ candidate_cost = cost;
+ accumulated_candidate_model_cost_change = 0.0;
+ }
+
+ // At this point we have made too many non-monotonic steps and
+ // we are going to reset the value of the reference iterate so
+ // as to force the algorithm to descend.
+ //
+ // This is the case because the candidate iterate has a cost
+ // greater than minimum_cost but smaller than the cost of the
+ // reference iterate.
+ if (num_consecutive_nonmonotonic_steps ==
+ options.max_consecutive_nonmonotonic_steps) {
+ VLOG(2) << "Resetting the reference point to the candidate point";
+ reference_cost = candidate_cost;
+ accumulated_reference_model_cost_change =
+ accumulated_candidate_model_cost_change;
+ }
+ }
+ } else {
+ ++summary->num_unsuccessful_steps;
+ if (iteration_summary.step_is_valid) {
+ strategy->StepRejected(iteration_summary.relative_decrease);
+ } else {
+ strategy->StepIsInvalid();
+ }
+ }
+
+ iteration_summary.cost = cost + summary->fixed_cost;
+ iteration_summary.trust_region_radius = strategy->Radius();
+ if (iteration_summary.trust_region_radius <
+ options_.min_trust_region_radius) {
+ summary->termination_type = PARAMETER_TOLERANCE;
+ VLOG(1) << "Termination. Minimum trust region radius reached.";
+ return;
+ }
+
+ iteration_summary.iteration_time_in_seconds =
+ time(NULL) - iteration_start_time;
+ iteration_summary.cumulative_time_in_seconds = time(NULL) - start_time +
+ summary->preprocessor_time_in_seconds;
+ summary->iterations.push_back(iteration_summary);
+
+ switch (RunCallbacks(iteration_summary)) {
+ case SOLVER_TERMINATE_SUCCESSFULLY:
+ summary->termination_type = USER_SUCCESS;
+ VLOG(1) << "Terminating: User callback returned USER_SUCCESS.";
+ return;
+ case SOLVER_ABORT:
+ summary->termination_type = USER_ABORT;
+ VLOG(1) << "Terminating: User callback returned USER_ABORT.";
+ return;
+ case SOLVER_CONTINUE:
+ break;
+ default:
+ LOG(FATAL) << "Unknown type of user callback status";
+ }
+ }
+}
+
+
+} // namespace internal
+} // namespace ceres
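The heart of the loop above is the acceptance test that compares the actual cost decrease against the decrease predicted by the linearized model, optionally relaxed by the non-monotonic rule described in the comments. Here is a standalone sketch of just that computation, with plain doubles standing in for the Evaluator and TrustRegionStrategy machinery (the function name and parameter list are illustrative, not part of this commit):

#include <algorithm>

// Sketch of the step acceptance test in TrustRegionMinimizer::Minimize.
//   cost / new_cost: objective value at the current and trial points.
//   model_cost_change: decrease predicted by the linear model, already
//     clamped from below at kEpsilon * min(1.0, cost).
//   reference_cost and accumulated_reference_model_cost_change drive the
//     non-monotonic acceptance rule described in the comments above.
static bool StepIsSuccessfulSketch(
    double cost,
    double new_cost,
    double model_cost_change,
    double reference_cost,
    double accumulated_reference_model_cost_change,
    double min_relative_decrease,
    bool use_nonmonotonic_steps) {
  const double relative_decrease = (cost - new_cost) / model_cost_change;
  const double historical_relative_decrease =
      (reference_cost - new_cost) /
      (accumulated_reference_model_cost_change + model_cost_change);
  const double decrease =
      use_nonmonotonic_steps
          ? std::max(relative_decrease, historical_relative_decrease)
          : relative_decrease;
  return decrease > min_relative_decrease;
}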
diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h
new file mode 100644
index 00000000000..a4f5ba3674d
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_minimizer.h
@@ -0,0 +1,67 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+#define CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
+
+#include "ceres/minimizer.h"
+#include "ceres/solver.h"
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+// Generic trust region minimization algorithm. The heavy lifting is
+// done by a TrustRegionStrategy object passed in as one of the
+// arguments to the Minimize method.
+//
+// For example usage, see SolverImpl::Minimize.
+class TrustRegionMinimizer : public Minimizer {
+ public:
+ ~TrustRegionMinimizer() {}
+ virtual void Minimize(const Minimizer::Options& options,
+ double* parameters,
+ Solver::Summary* summary);
+
+ private:
+ void Init(const Minimizer::Options& options);
+ void EstimateScale(const SparseMatrix& jacobian, double* scale) const;
+ CallbackReturnType RunCallbacks(const IterationSummary& iteration_summary);
+ bool MaybeDumpLinearLeastSquaresProblem(const int iteration,
+ const SparseMatrix* jacobian,
+ const double* residuals,
+ const double* step) const;
+
+ Minimizer::Options options_;
+};
+
+} // namespace internal
+} // namespace ceres
+#endif // CERES_INTERNAL_TRUST_REGION_MINIMIZER_H_
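The EstimateScale member declared above implements a simple Jacobi column scaling: each Jacobian column is scaled by the reciprocal of its norm, guarded by a small epsilon, which improves conditioning without changing the problem. A minimal, self-contained sketch of the same computation, assuming the squared column norms have already been extracted from the Jacobian:

#include <cmath>
#include <vector>

// Sketch of the Jacobi scaling in TrustRegionMinimizer::EstimateScale:
// scale[i] = 1 / (kEpsilon + ||column i||). The trust region step is
// later mapped back through the same scale vector to undo the scaling.
static std::vector<double> JacobiScaleSketch(
    const std::vector<double>& squared_column_norms) {
  const double kEpsilon = 1e-12;  // same guard value as in the .cc file
  std::vector<double> scale(squared_column_norms.size());
  for (size_t i = 0; i < squared_column_norms.size(); ++i) {
    scale[i] = 1.0 / (kEpsilon + std::sqrt(squared_column_norms[i]));
  }
  return scale;
}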
diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc
new file mode 100644
index 00000000000..89bc19d084b
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.cc
@@ -0,0 +1,27 @@
+#include "ceres/trust_region_strategy.h"
+#include "ceres/dogleg_strategy.h"
+#include "ceres/levenberg_marquardt_strategy.h"
+
+namespace ceres {
+namespace internal {
+
+TrustRegionStrategy::~TrustRegionStrategy() {}
+
+TrustRegionStrategy* TrustRegionStrategy::Create(const Options& options) {
+ switch (options.trust_region_strategy_type) {
+ case LEVENBERG_MARQUARDT:
+ return new LevenbergMarquardtStrategy(options);
+ case DOGLEG:
+ return new DoglegStrategy(options);
+ default:
+ LOG(FATAL) << "Unknown trust region strategy: "
+ << options.trust_region_strategy_type;
+ }
+
+ LOG(FATAL) << "Unknown trust region strategy: "
+ << options.trust_region_strategy_type;
+ return NULL;
+}
+
+} // namespace internal
+} // namespace ceres
diff --git a/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h
new file mode 100644
index 00000000000..391da97d5eb
--- /dev/null
+++ b/extern/libmv/third_party/ceres/internal/ceres/trust_region_strategy.h
@@ -0,0 +1,148 @@
+// Ceres Solver - A fast non-linear least squares minimizer
+// Copyright 2012 Google Inc. All rights reserved.
+// http://code.google.com/p/ceres-solver/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of Google Inc. nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+// POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: sameeragarwal@google.com (Sameer Agarwal)
+
+#ifndef CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+#define CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
+
+#include "ceres/types.h"
+
+namespace ceres {
+namespace internal {
+
+class LinearSolver;
+class SparseMatrix;
+
+// Interface for classes implementing various trust region strategies
+// for nonlinear least squares problems.
+//
+// The object is expected to maintain and update a trust region
+// radius, which it then uses to solve for the trust region step using
+// the jacobian matrix and residual vector.
+//
+// Here the term trust region radius is used loosely, as the strategy
+ // is free to treat it as guidance and violate it as need be. For example,
+// the LevenbergMarquardtStrategy uses the inverse of the trust region
+// radius to scale the damping term, which controls the step size, but
+// does not set a hard limit on its size.
+class TrustRegionStrategy {
+ public:
+ struct Options {
+ Options()
+ : trust_region_strategy_type(LEVENBERG_MARQUARDT),
+ initial_radius(1e4),
+ max_radius(1e32),
+ lm_min_diagonal(1e-6),
+ lm_max_diagonal(1e32),
+ dogleg_type(TRADITIONAL_DOGLEG) {
+ }
+
+ TrustRegionStrategyType trust_region_strategy_type;
+ // Linear solver used for actually solving the trust region step.
+ LinearSolver* linear_solver;
+ double initial_radius;
+ double max_radius;
+
+ // Minimum and maximum values of the diagonal damping matrix used
+ // by LevenbergMarquardtStrategy. The DoglegStrategy also uses
+ // these bounds to construct a regularizing diagonal to ensure
+ // that the Gauss-Newton step computation is of full rank.
+ double lm_min_diagonal;
+ double lm_max_diagonal;
+
+ // Further specify which dogleg method to use
+ DoglegType dogleg_type;
+ };
+
+ // Per solve options.
+ struct PerSolveOptions {
+ // Forcing sequence for inexact solves.
+ double eta;
+ };
+
+ struct Summary {
+ Summary()
+ : residual_norm(0.0),
+ num_iterations(-1),
+ termination_type(FAILURE) {
+ }
+
+ // If the trust region problem is,
+ //
+ // 1/2 x'Ax + b'x + c,
+ //
+ // then
+ //
+ // residual_norm = |Ax - b|
+ double residual_norm;
+
+ // Number of iterations used by the linear solver. If a linear
+ // solver was not called (e.g., DoglegStrategy after an
+ // unsuccessful step), then this would be zero.
+ int num_iterations;
+
+ // Status of the linear solver used to solve the Newton system.
+ LinearSolverTerminationType termination_type;
+ };
+
+ virtual ~TrustRegionStrategy();
+
+ // Use the current radius to solve for the trust region step.
+ virtual Summary ComputeStep(const PerSolveOptions& per_solve_options,
+ SparseMatrix* jacobian,
+ const double* residuals,
+ double* step) = 0;
+
+ // Inform the strategy that the current step has been accepted, and
+ // that the ratio of the decrease in the non-linear objective to the
+ // decrease in the trust region model is step_quality.
+ virtual void StepAccepted(double step_quality) = 0;
+
+ // Inform the strategy that the current step has been rejected, and
+ // that the ratio of the decrease in the non-linear objective to the
+ // decrease in the trust region model is step_quality.
+ virtual void StepRejected(double step_quality) = 0;
+
+ // Inform the strategy that the current step has been rejected
+ // because it was found to be numerically invalid.
+ // StepRejected/StepAccepted will not be called for this step, and
+ // the strategy is free to do what it wants with this information.
+ virtual void StepIsInvalid() = 0;
+
+ // Current trust region radius.
+ virtual double Radius() const = 0;
+
+ // Factory.
+ static TrustRegionStrategy* Create(const Options& options);
+};
+
+} // namespace internal
+} // namespace ceres
+
+#endif // CERES_INTERNAL_TRUST_REGION_STRATEGY_H_
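The interface above is purely abstract; a concrete strategy is obtained through the Create factory and then driven by the minimizer. The following is an illustrative driver sketch: the jacobian, residuals, step and step_looks_good arguments are placeholders for the evaluator machinery, only the TrustRegionStrategy calls come from the interface in this commit, and in real use options.linear_solver must also be set.

#include "ceres/trust_region_strategy.h"

namespace ceres {
namespace internal {

// Sketch of how a minimizer drives a TrustRegionStrategy.
void DriveStrategySketch(SparseMatrix* jacobian,
                         const double* residuals,
                         double* step,
                         bool step_looks_good) {
  TrustRegionStrategy::Options options;
  options.trust_region_strategy_type = LEVENBERG_MARQUARDT;
  TrustRegionStrategy* strategy = TrustRegionStrategy::Create(options);

  TrustRegionStrategy::PerSolveOptions per_solve_options;
  per_solve_options.eta = 1e-1;  // forcing sequence for inexact solves

  TrustRegionStrategy::Summary summary =
      strategy->ComputeStep(per_solve_options, jacobian, residuals, step);

  if (summary.termination_type == FAILURE) {
    // The linear solve failed; the step cannot be judged at all.
    strategy->StepIsInvalid();
  } else if (step_looks_good) {
    strategy->StepAccepted(0.9);  // ratio of actual to model decrease
  } else {
    strategy->StepRejected(0.0);
  }

  delete strategy;
}

}  // namespace internal
}  // namespace ceres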
diff --git a/extern/libmv/third_party/ceres/internal/ceres/types.cc b/extern/libmv/third_party/ceres/internal/ceres/types.cc
index 860f8a43f37..05e573ff6d5 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/types.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/types.cc
@@ -37,8 +37,9 @@ namespace ceres {
const char* LinearSolverTypeToString(LinearSolverType solver_type) {
switch (solver_type) {
- CASESTR(SPARSE_NORMAL_CHOLESKY);
+ CASESTR(DENSE_NORMAL_CHOLESKY);
CASESTR(DENSE_QR);
+ CASESTR(SPARSE_NORMAL_CHOLESKY);
CASESTR(DENSE_SCHUR);
CASESTR(SPARSE_SCHUR);
CASESTR(ITERATIVE_SCHUR);
@@ -61,6 +62,16 @@ const char* PreconditionerTypeToString(
}
}
+const char* SparseLinearAlgebraLibraryTypeToString(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+ switch (sparse_linear_algebra_library_type) {
+ CASESTR(SUITE_SPARSE);
+ CASESTR(CX_SPARSE);
+ default:
+ return "UNKNOWN";
+ }
+}
+
const char* OrderingTypeToString(OrderingType ordering_type) {
switch (ordering_type) {
CASESTR(NATURAL);
@@ -87,6 +98,28 @@ const char* SolverTerminationTypeToString(
}
}
+#if 0 /* UNUSED */
+static const char* SparseLinearAlgebraTypeToString(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+ switch (sparse_linear_algebra_library_type) {
+ CASESTR(CX_SPARSE);
+ CASESTR(SUITE_SPARSE);
+ default:
+ return "UNKNOWN";
+ }
+}
+#endif
+
+const char* TrustRegionStrategyTypeToString(
+ TrustRegionStrategyType trust_region_strategy_type) {
+ switch (trust_region_strategy_type) {
+ CASESTR(LEVENBERG_MARQUARDT);
+ CASESTR(DOGLEG);
+ default:
+ return "UNKNOWN";
+ }
+}
+
#undef CASESTR
bool IsSchurType(LinearSolverType type) {
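All of the *ToString helpers in this file lean on the CASESTR macro, whose definition sits outside the hunks shown here. For readers unfamiliar with the idiom, here is a self-contained sketch of the same stringizing-switch pattern, using a hypothetical enum rather than any Ceres type:

#include <cstdio>

// CASESTR(x) expands to `case x: return "x";`, turning an enumerator
// into its own name.
#define CASESTR(x) case x: return #x

enum Fruit { APPLE, ORANGE };

static const char* FruitToString(Fruit fruit) {
  switch (fruit) {
    CASESTR(APPLE);
    CASESTR(ORANGE);
    default:
      return "UNKNOWN";
  }
}

#undef CASESTR

int main() {
  std::printf("%s\n", FruitToString(ORANGE));  // prints ORANGE
  return 0;
}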
diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility.cc b/extern/libmv/third_party/ceres/internal/ceres/visibility.cc
index fd41648a7af..564cc54493e 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/visibility.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/visibility.cc
@@ -34,11 +34,11 @@
#include <set>
#include <vector>
#include <utility>
-
-#include <glog/logging.h>
#include "ceres/block_structure.h"
#include "ceres/collections_port.h"
+#include "ceres/visibility.h"
#include "ceres/graph.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc
index aca77528215..4caad03d7a1 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc
+++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.cc
@@ -33,11 +33,9 @@
#include <algorithm>
#include <functional>
#include <iterator>
-#include <numeric>
#include <set>
#include <utility>
#include <vector>
-#include <glog/logging.h>
#include "Eigen/Dense"
#include "ceres/block_random_access_sparse_matrix.h"
#include "ceres/block_sparse_matrix.h"
@@ -46,10 +44,11 @@
#include "ceres/detect_structure.h"
#include "ceres/graph.h"
#include "ceres/graph_algorithms.h"
+#include "ceres/internal/scoped_ptr.h"
#include "ceres/linear_solver.h"
#include "ceres/schur_eliminator.h"
#include "ceres/visibility.h"
-#include "ceres/internal/scoped_ptr.h"
+#include "glog/logging.h"
namespace ceres {
namespace internal {
@@ -253,7 +252,6 @@ void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
}
int r = 0;
- set<pair<int, int> > skipped_pairs;
const int num_row_blocks = bs.rows.size();
const int num_eliminate_blocks = options_.num_eliminate_blocks;
@@ -304,8 +302,6 @@ void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
for (; block2 != f_blocks.end(); ++block2) {
if (IsBlockPairInPreconditioner(*block1, *block2)) {
block_pairs_.insert(make_pair(*block1, *block2));
- } else {
- skipped_pairs.insert(make_pair(*block1, *block2));
}
}
}
@@ -322,17 +318,13 @@ void VisibilityBasedPreconditioner::ComputeBlockPairsInPreconditioner(
if (block1 <= block2) {
if (IsBlockPairInPreconditioner(block1, block2)) {
block_pairs_.insert(make_pair(block1, block2));
- } else {
- skipped_pairs.insert(make_pair(block1, block2));
}
}
}
}
}
- VLOG(1) << "Block pair stats: "
- << block_pairs_.size() << " included "
- << skipped_pairs.size() << " excluded";
+ VLOG(1) << "Block pair stats: " << block_pairs_.size();
}
// Initialize the SchurEliminator.
@@ -341,7 +333,6 @@ void VisibilityBasedPreconditioner::InitEliminator(
LinearSolver::Options eliminator_options;
eliminator_options.num_eliminate_blocks = options_.num_eliminate_blocks;
eliminator_options.num_threads = options_.num_threads;
- eliminator_options.constant_sparsity = true;
DetectStructure(bs, options_.num_eliminate_blocks,
&eliminator_options.row_block_size,
@@ -352,9 +343,9 @@ void VisibilityBasedPreconditioner::InitEliminator(
eliminator_->Init(options_.num_eliminate_blocks, &bs);
}
-// Compute the values of the preconditioner matrix and factorize it.
-bool VisibilityBasedPreconditioner::Compute(const BlockSparseMatrixBase& A,
- const double* D) {
+// Update the values of the preconditioner matrix and factorize it.
+bool VisibilityBasedPreconditioner::Update(const BlockSparseMatrixBase& A,
+ const double* D) {
const time_t start_time = time(NULL);
const int num_rows = m_->num_rows();
CHECK_GT(num_rows, 0);
@@ -448,12 +439,21 @@ bool VisibilityBasedPreconditioner::Factorize() {
// matrix contains the values.
lhs->stype = 1;
- // Symbolic factorization is computed if we don't already have one
- // handy.
+ // Symbolic factorization is computed if we don't already have one handy.
if (factor_ == NULL) {
- factor_ = ss_.AnalyzeCholesky(lhs);
+ if (options_.use_block_amd) {
+ factor_ = ss_.BlockAnalyzeCholesky(lhs, block_size_, block_size_);
+ } else {
+ factor_ = ss_.AnalyzeCholesky(lhs);
+ }
+
+ if (VLOG_IS_ON(2)) {
+ cholmod_print_common("Symbolic Analysis", ss_.mutable_cc());
+ }
}
+ CHECK_NOTNULL(factor_);
+
bool status = ss_.Cholesky(lhs, factor_);
ss_.Free(lhs);
return status;
diff --git a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h
index fa095ca1dd8..888c65eba3a 100644
--- a/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h
+++ b/extern/libmv/third_party/ceres/internal/ceres/visibility_based_preconditioner.h
@@ -133,7 +133,7 @@ class SchurEliminatorBase;
// options.num_eliminate_blocks = num_points;
// VisibilityBasedPreconditioner preconditioner(
// *A.block_structure(), options);
-// preconditioner.Compute(A, NULL);
+// preconditioner.Update(A, NULL);
// preconditioner.RightMultiply(x, y);
//
@@ -160,7 +160,7 @@ class VisibilityBasedPreconditioner : public LinearOperator {
const LinearSolver::Options& options);
virtual ~VisibilityBasedPreconditioner();
- // Compute the numerical value of the preconditioner for the linear
+ // Update the numerical value of the preconditioner for the linear
// system:
//
// | A | x = |b|
@@ -171,12 +171,12 @@ class VisibilityBasedPreconditioner : public LinearOperator {
//
// D can be NULL, in which case it is interpreted as a diagonal matrix
// of size zero.
- bool Compute(const BlockSparseMatrixBase& A,
- const double* D);
+ bool Update(const BlockSparseMatrixBase& A, const double* D);
+
// LinearOperator interface. Since the operator is symmetric,
// LeftMultiply and num_cols are just calls to RightMultiply and
- // num_rows respectively. Compute() must be called before
+ // num_rows respectively. Update() must be called before
// RightMultiply can be called.
virtual void RightMultiply(const double* x, double* y) const;
virtual void LeftMultiply(const double* x, double* y) const {
@@ -244,7 +244,7 @@ class VisibilityBasedPreconditioner : public LinearOperator {
// Temporary vector used by RightMultiply.
cholmod_dense* tmp_rhs_;
- DISALLOW_COPY_AND_ASSIGN(VisibilityBasedPreconditioner);
+ CERES_DISALLOW_COPY_AND_ASSIGN(VisibilityBasedPreconditioner);
};
#else // SuiteSparse
// If SuiteSparse is not compiled in, the preconditioner is not
@@ -261,7 +261,7 @@ class VisibilityBasedPreconditioner : public LinearOperator {
virtual void LeftMultiply(const double* x, double* y) const {}
virtual int num_rows() const { return -1; }
virtual int num_cols() const { return -1; }
- bool Compute(const BlockSparseMatrixBase& A, const double* D) {
+ bool Update(const BlockSparseMatrixBase& A, const double* D) {
return false;
}
};
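The LinearOperator contract noted in the comments above is simple: because the preconditioner is symmetric, LeftMultiply and num_cols just forward to RightMultiply and num_rows. A small illustrative sketch of that delegation, with a dense symmetric matrix standing in for the preconditioner (not Ceres code):

#include <cstddef>

// For a symmetric operator M, the products M^T x and M x coincide, so
// the transpose product can simply forward to the forward product. The
// dense matrix here is only a stand-in; the delegation is the point.
class SymmetricOperatorSketch {
 public:
  SymmetricOperatorSketch(const double* values, int n)
      : values_(values), n_(n) {}

  void RightMultiply(const double* x, double* y) const {
    for (int i = 0; i < n_; ++i) {
      y[i] = 0.0;
      for (int j = 0; j < n_; ++j) {
        y[i] += values_[i * n_ + j] * x[j];
      }
    }
  }

  // Symmetry makes the transpose product identical.
  void LeftMultiply(const double* x, double* y) const { RightMultiply(x, y); }

  int num_rows() const { return n_; }
  int num_cols() const { return num_rows(); }

 private:
  const double* values_;  // row-major n x n symmetric matrix
  int n_;
};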
diff --git a/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch b/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch
index bbb366e22bc..c01a17c7992 100644
--- a/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch
+++ b/extern/libmv/third_party/ceres/patches/collections_port.h.mingw.patch
@@ -1,10 +1,10 @@
-Index: internal/ceres/collections_port.h
-===================================================================
---- internal/ceres/collections_port.h (revision 47730)
-+++ internal/ceres/collections_port.h (working copy)
-@@ -53,7 +53,7 @@
+diff --git a/internal/ceres/collections_port.h b/internal/ceres/collections_port.h
+index a356cc0..c2fce90 100644
+--- a/internal/ceres/collections_port.h
++++ b/internal/ceres/collections_port.h
+@@ -77,7 +77,7 @@ struct HashMap : std::tr1::unordered_map<K, V> {};
template<typename K>
- struct HashSet : tr1::unordered_set<K> {};
+ struct HashSet : std::tr1::unordered_set<K> {};
-#ifdef _WIN32
+#if defined(_WIN32) && !defined(__MINGW64__) && !defined(__MINGW32__)
diff --git a/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch b/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch
new file mode 100644
index 00000000000..f3200fb8e0a
--- /dev/null
+++ b/extern/libmv/third_party/ceres/patches/msvc_glog_fix.patch
@@ -0,0 +1,50 @@
+diff --git a/internal/ceres/block_random_access_dense_matrix.cc b/internal/ceres/block_random_access_dense_matrix.cc
+index aedfc74..0f95e89 100644
+--- a/internal/ceres/block_random_access_dense_matrix.cc
++++ b/internal/ceres/block_random_access_dense_matrix.cc
+@@ -28,12 +28,12 @@
+ //
+ // Author: sameeragarwal@google.com (Sameer Agarwal)
+
++#include "glog/logging.h"
+ #include "ceres/block_random_access_dense_matrix.h"
+
+ #include <vector>
+ #include "ceres/internal/eigen.h"
+ #include "ceres/internal/scoped_ptr.h"
+-#include "glog/logging.h"
+
+ namespace ceres {
+ namespace internal {
+diff --git a/internal/ceres/block_random_access_sparse_matrix.cc b/internal/ceres/block_random_access_sparse_matrix.cc
+index f789436..9ed62ce 100644
+--- a/internal/ceres/block_random_access_sparse_matrix.cc
++++ b/internal/ceres/block_random_access_sparse_matrix.cc
+@@ -28,6 +28,7 @@
+ //
+ // Author: sameeragarwal@google.com (Sameer Agarwal)
+
++#include "glog/logging.h"
+ #include "ceres/block_random_access_sparse_matrix.h"
+
+ #include <algorithm>
+@@ -39,7 +40,6 @@
+ #include "ceres/mutex.h"
+ #include "ceres/triplet_sparse_matrix.h"
+ #include "ceres/types.h"
+-#include "glog/logging.h"
+
+ namespace ceres {
+ namespace internal {
+diff --git a/internal/ceres/schur_complement_solver.cc b/internal/ceres/schur_complement_solver.cc
+index b9224d8..2cbe78d 100644
+--- a/internal/ceres/schur_complement_solver.cc
++++ b/internal/ceres/schur_complement_solver.cc
+@@ -38,6 +38,7 @@
+ #endif // CERES_NO_CXSPARSE
+
+ #include "Eigen/Dense"
++#include "glog/logging.h"
+ #include "ceres/block_random_access_dense_matrix.h"
+ #include "ceres/block_random_access_matrix.h"
+ #include "ceres/block_random_access_sparse_matrix.h"
diff --git a/extern/libmv/third_party/ceres/patches/msvc_isfinite.patch b/extern/libmv/third_party/ceres/patches/msvc_isfinite.patch
deleted file mode 100644
index c3129d8e02b..00000000000
--- a/extern/libmv/third_party/ceres/patches/msvc_isfinite.patch
+++ /dev/null
@@ -1,15 +0,0 @@
-diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
-index ed3499b..28e0313 100644
---- a/internal/ceres/residual_block_utils.cc
-+++ b/internal/ceres/residual_block_utils.cc
-@@ -40,6 +40,10 @@
- #include "ceres/internal/eigen.h"
- #include "ceres/internal/port.h"
-
-+#ifdef _MSC_VER
-+# define isfinite _finite
-+#endif
-+
- namespace ceres {
- namespace internal {
-
diff --git a/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch b/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch
new file mode 100644
index 00000000000..03f1c500d9a
--- /dev/null
+++ b/extern/libmv/third_party/ceres/patches/no_previous_declaration_fix.patch
@@ -0,0 +1,199 @@
+diff --git a/internal/ceres/file.cc b/internal/ceres/file.cc
+index 387f359..6fe7557 100644
+--- a/internal/ceres/file.cc
++++ b/internal/ceres/file.cc
+@@ -31,6 +31,7 @@
+ // Really simple file IO.
+
+ #include <cstdio>
++#include "file.h"
+ #include "glog/logging.h"
+
+ namespace ceres {
+diff --git a/internal/ceres/linear_least_squares_problems.cc b/internal/ceres/linear_least_squares_problems.cc
+index 3e3bcd0..a91e254 100644
+--- a/internal/ceres/linear_least_squares_problems.cc
++++ b/internal/ceres/linear_least_squares_problems.cc
+@@ -573,13 +573,13 @@ LinearLeastSquaresProblem* LinearLeastSquaresProblem3() {
+ return problem;
+ }
+
+-bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
+- int iteration,
+- const SparseMatrix* A,
+- const double* D,
+- const double* b,
+- const double* x,
+- int num_eliminate_blocks) {
++static bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
++ int iteration,
++ const SparseMatrix* A,
++ const double* D,
++ const double* b,
++ const double* x,
++ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ Matrix AA;
+ A->ToDenseMatrix(&AA);
+@@ -601,13 +601,13 @@ bool DumpLinearLeastSquaresProblemToConsole(const string& directory,
+ };
+
+ #ifndef CERES_NO_PROTOCOL_BUFFERS
+-bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+- int iteration,
+- const SparseMatrix* A,
+- const double* D,
+- const double* b,
+- const double* x,
+- int num_eliminate_blocks) {
++static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
++ int iteration,
++ const SparseMatrix* A,
++ const double* D,
++ const double* b,
++ const double* x,
++ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ LinearLeastSquaresProblemProto lsqp;
+ A->ToProto(lsqp.mutable_a());
+@@ -641,13 +641,13 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ return true;
+ }
+ #else
+-bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+- int iteration,
+- const SparseMatrix* A,
+- const double* D,
+- const double* b,
+- const double* x,
+- int num_eliminate_blocks) {
++static bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
++ int iteration,
++ const SparseMatrix* A,
++ const double* D,
++ const double* b,
++ const double* x,
++ int num_eliminate_blocks) {
+ LOG(ERROR) << "Dumping least squares problems is only "
+ << "supported when Ceres is compiled with "
+ << "protocol buffer support.";
+@@ -655,9 +655,9 @@ bool DumpLinearLeastSquaresProblemToProtocolBuffer(const string& directory,
+ }
+ #endif
+
+-void WriteArrayToFileOrDie(const string& filename,
+- const double* x,
+- const int size) {
++static void WriteArrayToFileOrDie(const string& filename,
++ const double* x,
++ const int size) {
+ CHECK_NOTNULL(x);
+ VLOG(2) << "Writing array to: " << filename;
+ FILE* fptr = fopen(filename.c_str(), "w");
+@@ -668,13 +668,13 @@ void WriteArrayToFileOrDie(const string& filename,
+ fclose(fptr);
+ }
+
+-bool DumpLinearLeastSquaresProblemToTextFile(const string& directory,
+- int iteration,
+- const SparseMatrix* A,
+- const double* D,
+- const double* b,
+- const double* x,
+- int num_eliminate_blocks) {
++static bool DumpLinearLeastSquaresProblemToTextFile(const string& directory,
++ int iteration,
++ const SparseMatrix* A,
++ const double* D,
++ const double* b,
++ const double* x,
++ int num_eliminate_blocks) {
+ CHECK_NOTNULL(A);
+ string format_string = JoinPath(directory,
+ "lm_iteration_%03d");
+diff --git a/internal/ceres/residual_block_utils.cc b/internal/ceres/residual_block_utils.cc
+index ff18e21..9442bb2 100644
+--- a/internal/ceres/residual_block_utils.cc
++++ b/internal/ceres/residual_block_utils.cc
+@@ -63,7 +63,7 @@ void InvalidateEvaluation(const ResidualBlock& block,
+
+ // Utility routine to print an array of doubles to a string. If the
+ // array pointer is NULL, it is treated as an array of zeros.
+-void AppendArrayToString(const int size, const double* x, string* result) {
++static void AppendArrayToString(const int size, const double* x, string* result) {
+ for (int i = 0; i < size; ++i) {
+ if (x == NULL) {
+ StringAppendF(result, "Not Computed ");
+diff --git a/internal/ceres/solver_impl.cc b/internal/ceres/solver_impl.cc
+index 2802a75..8ef5b98 100644
+--- a/internal/ceres/solver_impl.cc
++++ b/internal/ceres/solver_impl.cc
+@@ -685,8 +685,8 @@ bool SolverImpl::ApplyUserOrdering(const ProblemImpl& problem_impl,
+ // Find the minimum index of any parameter block to the given residual.
+ // Parameter blocks that have indices greater than num_eliminate_blocks are
+ // considered to have an index equal to num_eliminate_blocks.
+-int MinParameterBlock(const ResidualBlock* residual_block,
+- int num_eliminate_blocks) {
++static int MinParameterBlock(const ResidualBlock* residual_block,
++ int num_eliminate_blocks) {
+ int min_parameter_block_position = num_eliminate_blocks;
+ for (int i = 0; i < residual_block->NumParameterBlocks(); ++i) {
+ ParameterBlock* parameter_block = residual_block->parameter_blocks()[i];
+diff --git a/internal/ceres/split.cc b/internal/ceres/split.cc
+index 4fa1bd4..c65c8a5 100644
+--- a/internal/ceres/split.cc
++++ b/internal/ceres/split.cc
+@@ -31,6 +31,7 @@
+ #include <string>
+ #include <vector>
+ #include <iterator>
++#include "ceres/split.h"
+ #include "ceres/internal/port.h"
+
+ namespace ceres {
+diff --git a/internal/ceres/stringprintf.cc b/internal/ceres/stringprintf.cc
+index c0f3522..396a48b 100644
+--- a/internal/ceres/stringprintf.cc
++++ b/internal/ceres/stringprintf.cc
+@@ -34,6 +34,7 @@
+ #include <string>
+ #include <vector>
+
++#include "ceres/stringprintf.h"
+ #include "ceres/internal/port.h"
+
+ namespace ceres {
+diff --git a/internal/ceres/types.cc b/internal/ceres/types.cc
+index 2e950c5..05e573f 100644
+--- a/internal/ceres/types.cc
++++ b/internal/ceres/types.cc
+@@ -98,7 +98,8 @@ const char* SolverTerminationTypeToString(
+ }
+ }
+
+-const char* SparseLinearAlgebraTypeToString(
++#if 0 /* UNUSED */
++static const char* SparseLinearAlgebraTypeToString(
+ SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type) {
+ switch (sparse_linear_algebra_library_type) {
+ CASESTR(CX_SPARSE);
+@@ -107,6 +108,7 @@ const char* SparseLinearAlgebraTypeToString(
+ return "UNKNOWN";
+ }
+ }
++#endif
+
+ const char* TrustRegionStrategyTypeToString(
+ TrustRegionStrategyType trust_region_strategy_type) {
+diff --git a/internal/ceres/visibility.cc b/internal/ceres/visibility.cc
+index 9d80654..564cc54 100644
+--- a/internal/ceres/visibility.cc
++++ b/internal/ceres/visibility.cc
+@@ -36,6 +36,7 @@
+ #include <utility>
+ #include "ceres/block_structure.h"
+ #include "ceres/collections_port.h"
++#include "ceres/visibility.h"
+ #include "ceres/graph.h"
+ #include "glog/logging.h"
+
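The patch above silences warnings such as GCC's -Wmissing-declarations by marking file-local free functions static, which also gives them internal linkage so they cannot collide with same-named symbols in other translation units. A tiny self-contained illustration with a hypothetical helper, unrelated to any Ceres function:

#include <cstdio>
#include <vector>

// Without `static` this definition has external linkage and, since no
// prior declaration exists in a header, -Wmissing-declarations warns.
// With `static` the symbol is confined to this translation unit.
static int SumSquares(const std::vector<int>& values) {
  int sum = 0;
  for (size_t i = 0; i < values.size(); ++i) {
    sum += values[i] * values[i];
  }
  return sum;
}

int main() {
  std::vector<int> v;
  v.push_back(3);
  v.push_back(4);
  std::printf("%d\n", SumSquares(v));  // prints 25
  return 0;
}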
diff --git a/extern/libmv/third_party/ceres/patches/series b/extern/libmv/third_party/ceres/patches/series
index dbe955ae61e..a6874318923 100644
--- a/extern/libmv/third_party/ceres/patches/series
+++ b/extern/libmv/third_party/ceres/patches/series
@@ -1 +1,3 @@
-msvc_isfinite.patch
+collections_port.h.mingw.patch
+msvc_glog_fix.patch
+no_previous_declaration_fix.patch \ No newline at end of file