
git.blender.org/blender.git
Diffstat (limited to 'extern/libmv/patches/levenberg_marquardt.patch')
-rw-r--r--  extern/libmv/patches/levenberg_marquardt.patch  71
1 file changed, 71 insertions, 0 deletions
diff --git a/extern/libmv/patches/levenberg_marquardt.patch b/extern/libmv/patches/levenberg_marquardt.patch
new file mode 100644
index 00000000000..49ef82d73d2
--- /dev/null
+++ b/extern/libmv/patches/levenberg_marquardt.patch
@@ -0,0 +1,71 @@
+diff --git a/src/libmv/numeric/levenberg_marquardt.h b/src/libmv/numeric/levenberg_marquardt.h
+index 6a54f66..4473b72 100644
+--- a/src/libmv/numeric/levenberg_marquardt.h
++++ b/src/libmv/numeric/levenberg_marquardt.h
+@@ -33,6 +33,7 @@
+
+ #include "libmv/numeric/numeric.h"
+ #include "libmv/numeric/function_derivative.h"
++#include "libmv/logging/logging.h"
+
+ namespace libmv {
+
+@@ -123,26 +124,40 @@ class LevenbergMarquardt {
+ Parameters dx, x_new;
+ int i;
+ for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
+- if (dx.norm() <= params.relative_step_threshold * x.norm()) {
++ VLOG(1) << "iteration: " << i;
++ VLOG(1) << "||f(x)||: " << f_(x).norm();
++ VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
++ VLOG(1) << "u: " << u;
++ VLOG(1) << "v: " << v;
++
++ AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
++ Solver solver(A_augmented);
++ dx = solver.solve(g);
++ bool solved = (A_augmented * dx).isApprox(g);
++ if (!solved) {
++ LOG(ERROR) << "Failed to solve";
++ }
++ if (solved && dx.norm() <= params.relative_step_threshold * x.norm()) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+- }
+- x_new = x + dx;
+- // Rho is the ratio of the actual reduction in error to the reduction
+- // in error that would be obtained if the problem was linear.
+- // See [1] for details.
+- Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
+- / dx.dot(u*dx + g));
+- if (rho > 0) {
+- // Accept the Gauss-Newton step because the linear model fits well.
+- x = x_new;
+- results.status = Update(x, params, &J, &A, &error, &g);
+- Scalar tmp = Scalar(2*rho-1);
+- u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+- v = 2;
+- continue;
+- }
+-
++ }
++ if (solved) {
++ x_new = x + dx;
++ // Rho is the ratio of the actual reduction in error to the reduction
++ // in error that would be obtained if the problem was linear.
++ // See [1] for details.
++ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
++ / dx.dot(u*dx + g));
++ if (rho > 0) {
++ // Accept the Gauss-Newton step because the linear model fits well.
++ x = x_new;
++ results.status = Update(x, params, &J, &A, &error, &g);
++ Scalar tmp = Scalar(2*rho-1);
++ u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
++ v = 2;
++ continue;
++ }
++ }
+ // Reject the update because either the normal equations failed to solve
+ // or the local linear model was not good (rho < 0). Instead, increase u
+ // to move closer to gradient descent.
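
For context, the hunk above makes the damped normal-equations solve explicit: (A + u*I) dx = g is solved each iteration, the solution is verified, and the relative-step test and the rho-based accept/reject logic now run only when the solve succeeded; a failed solve is logged and falls through to the same damping increase used when the local linear model does not fit. The sketch below shows that solve-and-verify pattern in isolation, assuming Eigen and a plain LDLT factorization (libmv's actual Solver type is a template parameter); the function name SolveDampedStep and the toy system are illustrative only, not part of the patch.

// Minimal, standalone sketch (not the libmv code itself) of the solve-and-verify
// step the patch introduces. Assumes Eigen; SolveDampedStep and the toy system
// below are illustrative names/data, not part of the patch.
#include <Eigen/Dense>
#include <iostream>

// Attempt one damped step: solve (A + u*I) dx = rhs and report whether the
// solution actually satisfies the system, mirroring the patch's
// "(A_augmented * dx).isApprox(g)" check.
static bool SolveDampedStep(const Eigen::MatrixXd &A,    // J^T J (normal matrix)
                            const Eigen::VectorXd &rhs,  // right-hand side g
                            double u,                    // LM damping parameter
                            Eigen::VectorXd *dx) {
  const Eigen::MatrixXd A_augmented =
      A + u * Eigen::MatrixXd::Identity(A.rows(), A.cols());
  // libmv's Solver type is a template parameter; plain LDLT is assumed here.
  *dx = A_augmented.ldlt().solve(rhs);
  return (A_augmented * (*dx)).isApprox(rhs);
}

int main() {
  // Toy 2x2 system standing in for the normal equations of a small problem.
  Eigen::MatrixXd A(2, 2);
  A << 4.0, 1.0,
       1.0, 3.0;
  Eigen::VectorXd g(2);
  g << 1.0, 2.0;

  Eigen::VectorXd dx;
  const double u = 1e-3;  // small damping, as at the start of a typical LM run
  if (SolveDampedStep(A, g, u, &dx)) {
    std::cout << "dx = " << dx.transpose() << "\n";
  } else {
    std::cerr << "Failed to solve\n";  // corresponds to the patch's LOG(ERROR) path
  }
  return 0;
}

The isApprox check is the design point: an inaccurate or failed factorization no longer yields a step that is silently accepted as a Gauss-Newton update; instead the iteration rejects the step and increases u, moving closer to gradient descent.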