diff --git a/src/libmv/numeric/levenberg_marquardt.h b/src/libmv/numeric/levenberg_marquardt.h
index 6a54f66..4473b72 100644
--- a/src/libmv/numeric/levenberg_marquardt.h
+++ b/src/libmv/numeric/levenberg_marquardt.h
@@ -33,6 +33,7 @@
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
+#include "libmv/logging/logging.h"
namespace libmv {
@@ -123,26 +124,40 @@ class LevenbergMarquardt {
Parameters dx, x_new;
int i;
for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
- if (dx.norm() <= params.relative_step_threshold * x.norm()) {
+ VLOG(1) << "iteration: " << i;
+ VLOG(1) << "||f(x)||: " << f_(x).norm();
+ VLOG(1) << "max(g): " << g.array().abs().maxCoeff();
+ VLOG(1) << "u: " << u;
+ VLOG(1) << "v: " << v;
+
+ AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
+ Solver solver(A_augmented);
+ dx = solver.solve(g);
+ bool solved = (A_augmented * dx).isApprox(g);
+ if (!solved) {
+ LOG(ERROR) << "Failed to solve";
+ }
+ if (solved && dx.norm() <= params.relative_step_threshold * x.norm()) {
results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
break;
- }
- x_new = x + dx;
- // Rho is the ratio of the actual reduction in error to the reduction
- // in error that would be obtained if the problem was linear.
- // See [1] for details.
- Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
- / dx.dot(u*dx + g));
- if (rho > 0) {
- // Accept the Gauss-Newton step because the linear model fits well.
- x = x_new;
- results.status = Update(x, params, &J, &A, &error, &g);
- Scalar tmp = Scalar(2*rho-1);
- u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
- v = 2;
- continue;
- }
-
+ }
+ if (solved) {
+ x_new = x + dx;
+ // Rho is the ratio of the actual reduction in error to the reduction
+ // in error that would be obtained if the problem was linear.
+ // See [1] for details.
+ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
+ / dx.dot(u*dx + g));
+ if (rho > 0) {
+ // Accept the Gauss-Newton step because the linear model fits well.
+ x = x_new;
+ results.status = Update(x, params, &J, &A, &error, &g);
+ Scalar tmp = Scalar(2*rho-1);
+ u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+ v = 2;
+ continue;
+ }
+ }
// Reject the update because either the normal equations failed to solve
// or the local linear model was not good (rho < 0). Instead, increase u
// to move closer to gradient descent.
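
For readers without the surrounding file, here is a minimal, self-contained sketch of the iteration this patch restructures. It is a hypothetical two-parameter toy problem written directly against Eigen, not libmv's API: the augmented normal equations (J^T J + u*I) dx = J^T (-f(x)) are solved inside the loop, the solve is sanity-checked, and the step is accepted or rejected via the gain ratio rho, with the damping u and growth factor v updated as in the patched code. The residual, thresholds, and iteration cap are made-up values for illustration only.

// Sketch of one Levenberg-Marquardt damping loop (toy problem, not libmv).
#include <Eigen/Dense>
#include <algorithm>
#include <iostream>

int main() {
  using Vec2 = Eigen::Vector2d;
  using Mat2 = Eigen::Matrix2d;

  // Toy residual: f(x) = [x0 - 3, 10*(x1 - x0*x0)]  (Rosenbrock-like).
  auto f = [](const Vec2 &x) {
    return Vec2(x(0) - 3.0, 10.0 * (x(1) - x(0) * x(0)));
  };
  auto jacobian = [](const Vec2 &x) {
    Mat2 J;
    J << 1.0,            0.0,
         -20.0 * x(0),  10.0;
    return J;
  };

  Vec2 x(-1.0, 1.0);
  double u = 1e-3;  // damping parameter
  double v = 2.0;   // damping growth factor

  for (int i = 0; i < 50; ++i) {
    Vec2 error = -f(x);                 // error = -f(x), as in libmv's Update()
    Mat2 J = jacobian(x);
    Mat2 A = J.transpose() * J;
    Vec2 g = J.transpose() * error;

    // Augmented normal equations, solved inside the loop as in the patch.
    Mat2 A_augmented = A + u * Mat2::Identity();
    Vec2 dx = A_augmented.ldlt().solve(g);
    bool solved = (A_augmented * dx).isApprox(g);

    // Stop when the accepted step would be negligibly small (made-up tolerance).
    if (solved && dx.norm() <= 1e-12 * x.norm()) break;

    if (solved) {
      Vec2 x_new = x + dx;
      // Gain ratio: actual error reduction over the reduction predicted by
      // the local linear model.
      double rho = (error.squaredNorm() - f(x_new).squaredNorm())
                   / dx.dot(u * dx + g);
      if (rho > 0) {
        x = x_new;                                    // accept the step
        double tmp = 2 * rho - 1;
        u *= std::max(1. / 3., 1 - tmp * tmp * tmp);  // relax damping
        v = 2;
        continue;
      }
    }
    // Reject: either the solve failed or rho <= 0; increase damping to move
    // the step toward gradient descent.
    u *= v;
    v *= 2;
  }
  std::cout << "x = " << x.transpose() << "\n";
  return 0;
}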