git.blender.org/blender.git
-rw-r--r--  intern/libmv/.clang-format | 36
-rw-r--r--  intern/libmv/intern/autotrack.cc | 35
-rw-r--r--  intern/libmv/intern/autotrack.h | 10
-rw-r--r--  intern/libmv/intern/camera_intrinsics.cc | 436
-rw-r--r--  intern/libmv/intern/camera_intrinsics.h | 12
-rw-r--r--  intern/libmv/intern/detector.cc | 16
-rw-r--r--  intern/libmv/intern/detector.h | 2
-rw-r--r--  intern/libmv/intern/frame_accessor.cc | 101
-rw-r--r--  intern/libmv/intern/frame_accessor.h | 19
-rw-r--r--  intern/libmv/intern/homography.cc | 6
-rw-r--r--  intern/libmv/intern/image.cc | 64
-rw-r--r--  intern/libmv/intern/image.h | 30
-rw-r--r--  intern/libmv/intern/reconstruction.cc | 229
-rw-r--r--  intern/libmv/intern/reconstruction.h | 46
-rw-r--r--  intern/libmv/intern/stub.cc | 214
-rw-r--r--  intern/libmv/intern/track_region.cc | 43
-rw-r--r--  intern/libmv/intern/track_region.h | 8
-rw-r--r--  intern/libmv/intern/tracks.cc | 6
-rw-r--r--  intern/libmv/intern/tracksN.cc | 39
-rw-r--r--  intern/libmv/intern/tracksN.h | 10
-rw-r--r--  intern/libmv/intern/utildefines.h | 36
-rw-r--r--  intern/libmv/libmv/autotrack/autotrack.cc | 68
-rw-r--r--  intern/libmv/libmv/autotrack/autotrack.h | 58
-rw-r--r--  intern/libmv/libmv/autotrack/frame_accessor.h | 9
-rw-r--r--  intern/libmv/libmv/autotrack/marker.h | 39
-rw-r--r--  intern/libmv/libmv/autotrack/model.h | 9
-rw-r--r--  intern/libmv/libmv/autotrack/predict_tracks.cc | 60
-rw-r--r--  intern/libmv/libmv/autotrack/predict_tracks_test.cc | 44
-rw-r--r--  intern/libmv/libmv/autotrack/quad.h | 6
-rw-r--r--  intern/libmv/libmv/autotrack/reconstruction.h | 12
-rw-r--r--  intern/libmv/libmv/autotrack/region.h | 2
-rw-r--r--  intern/libmv/libmv/autotrack/tracks.cc | 43
-rw-r--r--  intern/libmv/libmv/autotrack/tracks.h | 12
-rw-r--r--  intern/libmv/libmv/autotrack/tracks_test.cc | 2
-rw-r--r--  intern/libmv/libmv/base/aligned_malloc.cc | 6
-rw-r--r--  intern/libmv/libmv/base/aligned_malloc.h | 4
-rw-r--r--  intern/libmv/libmv/base/id_generator.h | 1
-rw-r--r--  intern/libmv/libmv/base/map.h | 2
-rw-r--r--  intern/libmv/libmv/base/scoped_ptr.h | 38
-rw-r--r--  intern/libmv/libmv/base/scoped_ptr_test.cc | 8
-rw-r--r--  intern/libmv/libmv/base/vector_test.cc | 8
-rw-r--r--  intern/libmv/libmv/base/vector_utils.h | 5
-rw-r--r--  intern/libmv/libmv/image/array_nd.cc | 27
-rw-r--r--  intern/libmv/libmv/image/array_nd.h | 188
-rw-r--r--  intern/libmv/libmv/image/array_nd_test.cc | 10
-rw-r--r--  intern/libmv/libmv/image/convolve.cc | 170
-rw-r--r--  intern/libmv/libmv/image/convolve.h | 62
-rw-r--r--  intern/libmv/libmv/image/convolve_test.cc | 12
-rw-r--r--  intern/libmv/libmv/image/correlation.h | 11
-rw-r--r--  intern/libmv/libmv/image/image.h | 93
-rw-r--r--  intern/libmv/libmv/image/image_converter.h | 21
-rw-r--r--  intern/libmv/libmv/image/image_drawing.h | 112
-rw-r--r--  intern/libmv/libmv/image/image_test.cc | 6
-rw-r--r--  intern/libmv/libmv/image/sample.h | 40
-rw-r--r--  intern/libmv/libmv/image/sample_test.cc | 14
-rw-r--r--  intern/libmv/libmv/image/tuple.h | 32
-rw-r--r--  intern/libmv/libmv/multiview/conditioning.cc | 28
-rw-r--r--  intern/libmv/libmv/multiview/conditioning.h | 24
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection.cc | 198
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection.h | 47
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection_test.cc | 74
-rw-r--r--  intern/libmv/libmv/multiview/fundamental.cc | 227
-rw-r--r--  intern/libmv/libmv/multiview/fundamental.h | 115
-rw-r--r--  intern/libmv/libmv/multiview/fundamental_test.cc | 8
-rw-r--r--  intern/libmv/libmv/multiview/homography.cc | 249
-rw-r--r--  intern/libmv/libmv/multiview/homography.h | 34
-rw-r--r--  intern/libmv/libmv/multiview/homography_error.h | 120
-rw-r--r--  intern/libmv/libmv/multiview/homography_parameterization.h | 36
-rw-r--r--  intern/libmv/libmv/multiview/homography_test.cc | 40
-rw-r--r--  intern/libmv/libmv/multiview/nviewtriangulation.h | 32
-rw-r--r--  intern/libmv/libmv/multiview/nviewtriangulation_test.cc | 4
-rw-r--r--  intern/libmv/libmv/multiview/panography.cc | 47
-rw-r--r--  intern/libmv/libmv/multiview/panography.h | 12
-rw-r--r--  intern/libmv/libmv/multiview/panography_kernel.cc | 4
-rw-r--r--  intern/libmv/libmv/multiview/panography_kernel.h | 18
-rw-r--r--  intern/libmv/libmv/multiview/panography_test.cc | 26
-rw-r--r--  intern/libmv/libmv/multiview/projection.cc | 58
-rw-r--r--  intern/libmv/libmv/multiview/projection.h | 124
-rw-r--r--  intern/libmv/libmv/multiview/projection_test.cc | 18
-rw-r--r--  intern/libmv/libmv/multiview/resection.h | 18
-rw-r--r--  intern/libmv/libmv/multiview/resection_test.cc | 8
-rw-r--r--  intern/libmv/libmv/multiview/test_data_sets.cc | 33
-rw-r--r--  intern/libmv/libmv/multiview/test_data_sets.h | 42
-rw-r--r--  intern/libmv/libmv/multiview/triangulation.cc | 18
-rw-r--r--  intern/libmv/libmv/multiview/triangulation.h | 16
-rw-r--r--  intern/libmv/libmv/multiview/triangulation_test.cc | 2
-rw-r--r--  intern/libmv/libmv/multiview/two_view_kernel.h | 31
-rw-r--r--  intern/libmv/libmv/numeric/dogleg.h | 119
-rw-r--r--  intern/libmv/libmv/numeric/dogleg_test.cc | 31
-rw-r--r--  intern/libmv/libmv/numeric/function_derivative.h | 17
-rw-r--r--  intern/libmv/libmv/numeric/function_derivative_test.cc | 18
-rw-r--r--  intern/libmv/libmv/numeric/levenberg_marquardt.h | 70
-rw-r--r--  intern/libmv/libmv/numeric/levenberg_marquardt_test.cc | 10
-rw-r--r--  intern/libmv/libmv/numeric/numeric.cc | 36
-rw-r--r--  intern/libmv/libmv/numeric/numeric.h | 393
-rw-r--r--  intern/libmv/libmv/numeric/numeric_test.cc | 92
-rw-r--r--  intern/libmv/libmv/numeric/poly.h | 29
-rw-r--r--  intern/libmv/libmv/numeric/poly_test.cc | 26
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.cc | 458
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.h | 35
-rw-r--r--  intern/libmv/libmv/simple_pipeline/callbacks.h | 2
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc | 279
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.h | 104
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h | 143
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc | 129
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect.cc | 172
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect.h | 17
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect_test.cc | 19
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.cc | 193
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.h | 129
-rw-r--r--  intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc | 42
-rw-r--r--  intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h | 29
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect.cc | 82
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect.h | 16
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect_test.cc | 18
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection.cc | 107
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection.h | 9
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc | 478
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver.cc | 59
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver.h | 10
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver_test.cc | 31
-rw-r--r--  intern/libmv/libmv/simple_pipeline/packed_intrinsics.h | 18
-rw-r--r--  intern/libmv/libmv/simple_pipeline/pipeline.cc | 192
-rw-r--r--  intern/libmv/libmv/simple_pipeline/pipeline.h | 31
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.cc | 67
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.h | 41
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc | 6
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction_scale.h | 2
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect.cc | 80
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect.h | 11
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect_test.cc | 4
-rw-r--r--  intern/libmv/libmv/simple_pipeline/tracks.cc | 30
-rw-r--r--  intern/libmv/libmv/simple_pipeline/tracks.h | 16
-rw-r--r--  intern/libmv/libmv/threading/threading.h | 2
-rw-r--r--  intern/libmv/libmv/tracking/brute_region_tracker.cc | 195
-rw-r--r--  intern/libmv/libmv/tracking/brute_region_tracker.h | 14
-rw-r--r--  intern/libmv/libmv/tracking/hybrid_region_tracker.cc | 12
-rw-r--r--  intern/libmv/libmv/tracking/hybrid_region_tracker.h | 19
-rw-r--r--  intern/libmv/libmv/tracking/kalman_filter.h | 68
-rw-r--r--  intern/libmv/libmv/tracking/klt_region_tracker.cc | 63
-rw-r--r--  intern/libmv/libmv/tracking/klt_region_tracker.h | 10
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker.cc | 19
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker.h | 15
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc | 10
-rw-r--r--  intern/libmv/libmv/tracking/region_tracker.h | 10
-rw-r--r--  intern/libmv/libmv/tracking/retrack_region_tracker.cc | 10
-rw-r--r--  intern/libmv/libmv/tracking/retrack_region_tracker.h | 15
-rw-r--r--  intern/libmv/libmv/tracking/track_region.cc | 620
-rw-r--r--  intern/libmv/libmv/tracking/track_region.h | 34
-rw-r--r--  intern/libmv/libmv/tracking/trklt_region_tracker.cc | 63
-rw-r--r--  intern/libmv/libmv/tracking/trklt_region_tracker.h | 10
151 files changed, 4928 insertions(+), 4504 deletions(-)
diff --git a/intern/libmv/.clang-format b/intern/libmv/.clang-format
index 9d159247d51..fae0530c572 100644
--- a/intern/libmv/.clang-format
+++ b/intern/libmv/.clang-format
@@ -1,2 +1,34 @@
-DisableFormat: true
-SortIncludes: false
+BasedOnStyle: Google
+
+ColumnLimit: 80
+
+Standard: Cpp11
+
+# Indent nested preprocessor.
+# #ifdef Foo
+# # include <nested>
+# #endif
+IndentPPDirectives: AfterHash
+
+# For the cases when namespace is closing with a wrong comment
+FixNamespaceComments: true
+
+AllowShortFunctionsOnASingleLine: InlineOnly
+AllowShortBlocksOnASingleLine: false
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: true
+
+# No bin packing, every argument is on its own line.
+BinPackArguments: false
+BinPackParameters: false
+
+# Ensure pointer alignment.
+# ObjectType* object;
+PointerAlignment: Left
+DerivePointerAlignment: false
+
+AlignEscapedNewlines: Right
+
+IncludeBlocks: Preserve
+SortIncludes: true
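
For reference, here is a minimal hypothetical C++ snippet (not part of this commit; the type and function names are invented) showing how the new .clang-format settings above lay out typical code: PointerAlignment: Left, one parameter per line once a signature exceeds the 80-column limit (BinPackParameters: false), and short case labels kept on a single line.

// Hypothetical illustration only; these names do not exist in libmv.
#include <cassert>
#include <cstdio>

struct DemoOptions {
  int distortion_model;  // 0 = polynomial, 1 = division
};

// PointerAlignment: Left keeps the '*' with the type. With ColumnLimit: 80
// and BinPackParameters: false, each parameter goes on its own line when the
// signature no longer fits on one line.
void PrintDistortionModel(const DemoOptions* options,
                          const char* prefix,
                          bool verbose) {
  // AllowShortCaseLabelsOnASingleLine: true keeps these cases compact.
  switch (options->distortion_model) {
    case 0: std::printf("%s polynomial\n", prefix); break;
    case 1: std::printf("%s division\n", prefix); break;
    default: assert(!"Unknown distortion model");
  }
  (void)verbose;
}
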
diff --git a/intern/libmv/intern/autotrack.cc b/intern/libmv/intern/autotrack.cc
index 7000a422de8..b7110957b15 100644
--- a/intern/libmv/intern/autotrack.cc
+++ b/intern/libmv/intern/autotrack.cc
@@ -22,15 +22,15 @@
#include "intern/utildefines.h"
#include "libmv/autotrack/autotrack.h"
+using libmv::TrackRegionOptions;
+using libmv::TrackRegionResult;
using mv::AutoTrack;
using mv::FrameAccessor;
using mv::Marker;
-using libmv::TrackRegionOptions;
-using libmv::TrackRegionResult;
-libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor) {
- return (libmv_AutoTrack*) LIBMV_OBJECT_NEW(AutoTrack,
- (FrameAccessor*) frame_accessor);
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor) {
+ return (libmv_AutoTrack*)LIBMV_OBJECT_NEW(AutoTrack,
+ (FrameAccessor*)frame_accessor);
}
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
@@ -39,7 +39,7 @@ void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
const libmv_AutoTrackOptions* options) {
- AutoTrack *autotrack = ((AutoTrack*) libmv_autotrack);
+ AutoTrack* autotrack = ((AutoTrack*)libmv_autotrack);
libmv_configureTrackRegionOptions(options->track_region,
&autotrack->options.track_region);
@@ -51,18 +51,15 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
- libmv_Marker *libmv_tracked_marker,
+ libmv_Marker* libmv_tracked_marker,
libmv_TrackRegionResult* libmv_result) {
-
Marker tracked_marker;
TrackRegionOptions options;
TrackRegionResult result;
libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
- libmv_configureTrackRegionOptions(*libmv_options,
- &options);
- bool ok = (((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
- &result,
- &options));
+ libmv_configureTrackRegionOptions(*libmv_options, &options);
+ bool ok = (((AutoTrack*)libmv_autotrack)
+ ->TrackMarker(&tracked_marker, &result, &options));
libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
libmv_regionTrackergetResult(result, libmv_result);
return ok && result.is_usable();
@@ -72,7 +69,7 @@ void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
- ((AutoTrack*) libmv_autotrack)->AddMarker(marker);
+ ((AutoTrack*)libmv_autotrack)->AddMarker(marker);
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
@@ -87,19 +84,17 @@ void libmv_autoTrackSetMarkers(libmv_AutoTrack* libmv_autotrack,
for (size_t i = 0; i < num_markers; ++i) {
libmv_apiMarkerToMarker(libmv_marker[i], &markers[i]);
}
- ((AutoTrack*) libmv_autotrack)->SetMarkers(&markers);
+ ((AutoTrack*)libmv_autotrack)->SetMarkers(&markers);
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
- libmv_Marker *libmv_marker) {
+ libmv_Marker* libmv_marker) {
Marker marker;
- int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
- frame,
- track,
- &marker);
+ int ok =
+ ((AutoTrack*)libmv_autotrack)->GetMarker(clip, frame, track, &marker);
if (ok) {
libmv_markerToApiMarker(marker, libmv_marker);
}
diff --git a/intern/libmv/intern/autotrack.h b/intern/libmv/intern/autotrack.h
index 6b49a6908e1..3887983814b 100644
--- a/intern/libmv/intern/autotrack.h
+++ b/intern/libmv/intern/autotrack.h
@@ -21,9 +21,9 @@
#define LIBMV_C_API_AUTOTRACK_H_
#include "intern/frame_accessor.h"
-#include "intern/tracksN.h"
-#include "intern/track_region.h"
#include "intern/region.h"
+#include "intern/track_region.h"
+#include "intern/tracksN.h"
#ifdef __cplusplus
extern "C" {
@@ -36,7 +36,7 @@ typedef struct libmv_AutoTrackOptions {
libmv_Region search_region;
} libmv_AutoTrackOptions;
-libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor);
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* frame_accessor);
void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack);
@@ -45,7 +45,7 @@ void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
const libmv_TrackRegionOptions* libmv_options,
- libmv_Marker *libmv_tracker_marker,
+ libmv_Marker* libmv_tracker_marker,
libmv_TrackRegionResult* libmv_result);
void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
@@ -59,7 +59,7 @@ int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
int clip,
int frame,
int track,
- libmv_Marker *libmv_marker);
+ libmv_Marker* libmv_marker);
#ifdef __cplusplus
}
diff --git a/intern/libmv/intern/camera_intrinsics.cc b/intern/libmv/intern/camera_intrinsics.cc
index 628637e12cc..243b26d9fb3 100644
--- a/intern/libmv/intern/camera_intrinsics.cc
+++ b/intern/libmv/intern/camera_intrinsics.cc
@@ -21,62 +21,56 @@
#include "intern/utildefines.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
+using libmv::BrownCameraIntrinsics;
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
-using libmv::PolynomialCameraIntrinsics;
using libmv::NukeCameraIntrinsics;
-using libmv::BrownCameraIntrinsics;
+using libmv::PolynomialCameraIntrinsics;
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
- CameraIntrinsics *camera_intrinsics =
- libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
- return (libmv_CameraIntrinsics *) camera_intrinsics;
+ CameraIntrinsics* camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+ return (libmv_CameraIntrinsics*)camera_intrinsics;
}
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics) {
- const CameraIntrinsics *orig_intrinsics =
- (const CameraIntrinsics *) libmv_intrinsics;
+ const CameraIntrinsics* orig_intrinsics =
+ (const CameraIntrinsics*)libmv_intrinsics;
- CameraIntrinsics *new_intrinsics = NULL;
+ CameraIntrinsics* new_intrinsics = NULL;
switch (orig_intrinsics->GetDistortionModelType()) {
- case libmv::DISTORTION_MODEL_POLYNOMIAL:
- {
- const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ case libmv::DISTORTION_MODEL_POLYNOMIAL: {
+ const PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
- new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
- *polynomial_intrinsics);
- break;
- }
- case libmv::DISTORTION_MODEL_DIVISION:
- {
- const DivisionCameraIntrinsics *division_intrinsics =
+ new_intrinsics =
+ LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics, *polynomial_intrinsics);
+ break;
+ }
+ case libmv::DISTORTION_MODEL_DIVISION: {
+ const DivisionCameraIntrinsics* division_intrinsics =
static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
- new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
- *division_intrinsics);
- break;
- }
- case libmv::DISTORTION_MODEL_NUKE:
- {
- const NukeCameraIntrinsics *nuke_intrinsics =
+ new_intrinsics =
+ LIBMV_OBJECT_NEW(DivisionCameraIntrinsics, *division_intrinsics);
+ break;
+ }
+ case libmv::DISTORTION_MODEL_NUKE: {
+ const NukeCameraIntrinsics* nuke_intrinsics =
static_cast<const NukeCameraIntrinsics*>(orig_intrinsics);
- new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics,
- *nuke_intrinsics);
- break;
- }
- case libmv::DISTORTION_MODEL_BROWN:
- {
- const BrownCameraIntrinsics *brown_intrinsics =
+ new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics, *nuke_intrinsics);
+ break;
+ }
+ case libmv::DISTORTION_MODEL_BROWN: {
+ const BrownCameraIntrinsics* brown_intrinsics =
static_cast<const BrownCameraIntrinsics*>(orig_intrinsics);
- new_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics,
- *brown_intrinsics);
- break;
- }
- default:
- assert(!"Unknown distortion model");
+ new_intrinsics =
+ LIBMV_OBJECT_NEW(BrownCameraIntrinsics, *brown_intrinsics);
+ break;
+ }
+ default: assert(!"Unknown distortion model");
}
- return (libmv_CameraIntrinsics *) new_intrinsics;
+ return (libmv_CameraIntrinsics*)new_intrinsics;
}
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
@@ -86,7 +80,7 @@ void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
void libmv_cameraIntrinsicsUpdate(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_CameraIntrinsics* libmv_intrinsics) {
- CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
@@ -115,191 +109,173 @@ void libmv_cameraIntrinsicsUpdate(
}
switch (libmv_camera_intrinsics_options->distortion_model) {
- case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
- {
- assert(camera_intrinsics->GetDistortionModelType() ==
- libmv::DISTORTION_MODEL_POLYNOMIAL);
-
- PolynomialCameraIntrinsics *polynomial_intrinsics =
- (PolynomialCameraIntrinsics *) camera_intrinsics;
-
- double k1 = libmv_camera_intrinsics_options->polynomial_k1;
- double k2 = libmv_camera_intrinsics_options->polynomial_k2;
- double k3 = libmv_camera_intrinsics_options->polynomial_k3;
-
- if (polynomial_intrinsics->k1() != k1 ||
- polynomial_intrinsics->k2() != k2 ||
- polynomial_intrinsics->k3() != k3) {
- polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
- }
- break;
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_POLYNOMIAL);
+
+ PolynomialCameraIntrinsics* polynomial_intrinsics =
+ (PolynomialCameraIntrinsics*)camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->polynomial_k1;
+ double k2 = libmv_camera_intrinsics_options->polynomial_k2;
+ double k3 = libmv_camera_intrinsics_options->polynomial_k3;
+
+ if (polynomial_intrinsics->k1() != k1 ||
+ polynomial_intrinsics->k2() != k2 ||
+ polynomial_intrinsics->k3() != k3) {
+ polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
}
+ break;
+ }
- case LIBMV_DISTORTION_MODEL_DIVISION:
- {
- assert(camera_intrinsics->GetDistortionModelType() ==
- libmv::DISTORTION_MODEL_DIVISION);
+ case LIBMV_DISTORTION_MODEL_DIVISION: {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_DIVISION);
- DivisionCameraIntrinsics *division_intrinsics =
- (DivisionCameraIntrinsics *) camera_intrinsics;
+ DivisionCameraIntrinsics* division_intrinsics =
+ (DivisionCameraIntrinsics*)camera_intrinsics;
- double k1 = libmv_camera_intrinsics_options->division_k1;
- double k2 = libmv_camera_intrinsics_options->division_k2;
+ double k1 = libmv_camera_intrinsics_options->division_k1;
+ double k2 = libmv_camera_intrinsics_options->division_k2;
- if (division_intrinsics->k1() != k1 ||
- division_intrinsics->k2() != k2) {
- division_intrinsics->SetDistortion(k1, k2);
- }
+ if (division_intrinsics->k1() != k1 || division_intrinsics->k2() != k2) {
+ division_intrinsics->SetDistortion(k1, k2);
+ }
- break;
+ break;
+ }
+
+ case LIBMV_DISTORTION_MODEL_NUKE: {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_NUKE);
+
+ NukeCameraIntrinsics* nuke_intrinsics =
+ (NukeCameraIntrinsics*)camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->nuke_k1;
+ double k2 = libmv_camera_intrinsics_options->nuke_k2;
+
+ if (nuke_intrinsics->k1() != k1 || nuke_intrinsics->k2() != k2) {
+ nuke_intrinsics->SetDistortion(k1, k2);
}
- case LIBMV_DISTORTION_MODEL_NUKE:
- {
- assert(camera_intrinsics->GetDistortionModelType() ==
- libmv::DISTORTION_MODEL_NUKE);
+ break;
+ }
- NukeCameraIntrinsics *nuke_intrinsics =
- (NukeCameraIntrinsics *) camera_intrinsics;
+ case LIBMV_DISTORTION_MODEL_BROWN: {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_BROWN);
- double k1 = libmv_camera_intrinsics_options->nuke_k1;
- double k2 = libmv_camera_intrinsics_options->nuke_k2;
+ BrownCameraIntrinsics* brown_intrinsics =
+ (BrownCameraIntrinsics*)camera_intrinsics;
- if (nuke_intrinsics->k1() != k1 ||
- nuke_intrinsics->k2() != k2) {
- nuke_intrinsics->SetDistortion(k1, k2);
- }
+ double k1 = libmv_camera_intrinsics_options->brown_k1;
+ double k2 = libmv_camera_intrinsics_options->brown_k2;
+ double k3 = libmv_camera_intrinsics_options->brown_k3;
+ double k4 = libmv_camera_intrinsics_options->brown_k4;
- break;
+ if (brown_intrinsics->k1() != k1 || brown_intrinsics->k2() != k2 ||
+ brown_intrinsics->k3() != k3 || brown_intrinsics->k4() != k4) {
+ brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
}
- case LIBMV_DISTORTION_MODEL_BROWN:
- {
- assert(camera_intrinsics->GetDistortionModelType() ==
- libmv::DISTORTION_MODEL_BROWN);
-
- BrownCameraIntrinsics *brown_intrinsics =
- (BrownCameraIntrinsics *) camera_intrinsics;
-
- double k1 = libmv_camera_intrinsics_options->brown_k1;
- double k2 = libmv_camera_intrinsics_options->brown_k2;
- double k3 = libmv_camera_intrinsics_options->brown_k3;
- double k4 = libmv_camera_intrinsics_options->brown_k4;
-
- if (brown_intrinsics->k1() != k1 ||
- brown_intrinsics->k2() != k2 ||
- brown_intrinsics->k3() != k3 ||
- brown_intrinsics->k4() != k4) {
- brown_intrinsics->SetRadialDistortion(k1, k2, k3, k4);
- }
-
- double p1 = libmv_camera_intrinsics_options->brown_p1;
- double p2 = libmv_camera_intrinsics_options->brown_p2;
-
- if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
- brown_intrinsics->SetTangentialDistortion(p1, p2);
- }
- break;
+ double p1 = libmv_camera_intrinsics_options->brown_p1;
+ double p2 = libmv_camera_intrinsics_options->brown_p2;
+
+ if (brown_intrinsics->p1() != p1 || brown_intrinsics->p2() != p2) {
+ brown_intrinsics->SetTangentialDistortion(p1, p2);
}
+ break;
+ }
- default:
- assert(!"Unknown distortion model");
+ default: assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
int threads) {
- CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
void libmv_cameraIntrinsicsExtractOptions(
const libmv_CameraIntrinsics* libmv_intrinsics,
libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
- const CameraIntrinsics *camera_intrinsics =
- (const CameraIntrinsics *) libmv_intrinsics;
+ const CameraIntrinsics* camera_intrinsics =
+ (const CameraIntrinsics*)libmv_intrinsics;
// Fill in options which are common for all distortion models.
camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
camera_intrinsics_options->principal_point_x =
- camera_intrinsics->principal_point_x();
+ camera_intrinsics->principal_point_x();
camera_intrinsics_options->principal_point_y =
- camera_intrinsics->principal_point_y();
+ camera_intrinsics->principal_point_y();
camera_intrinsics_options->image_width = camera_intrinsics->image_width();
camera_intrinsics_options->image_height = camera_intrinsics->image_height();
switch (camera_intrinsics->GetDistortionModelType()) {
- case libmv::DISTORTION_MODEL_POLYNOMIAL:
- {
- const PolynomialCameraIntrinsics *polynomial_intrinsics =
- static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
- camera_intrinsics_options->distortion_model =
+ case libmv::DISTORTION_MODEL_POLYNOMIAL: {
+ const PolynomialCameraIntrinsics* polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics*>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_POLYNOMIAL;
- camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
- camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
- camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
- camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
- camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
- break;
- }
+ camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
+ camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
+ camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
+ camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
+ camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
+ break;
+ }
- case libmv::DISTORTION_MODEL_DIVISION:
- {
- const DivisionCameraIntrinsics *division_intrinsics =
- static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
- camera_intrinsics_options->distortion_model =
+ case libmv::DISTORTION_MODEL_DIVISION: {
+ const DivisionCameraIntrinsics* division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics*>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_DIVISION;
- camera_intrinsics_options->division_k1 = division_intrinsics->k1();
- camera_intrinsics_options->division_k2 = division_intrinsics->k2();
- break;
- }
-
- case libmv::DISTORTION_MODEL_NUKE:
- {
- const NukeCameraIntrinsics *nuke_intrinsics =
- static_cast<const NukeCameraIntrinsics *>(camera_intrinsics);
- camera_intrinsics_options->distortion_model =
- LIBMV_DISTORTION_MODEL_NUKE;
- camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
- camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
- break;
- }
+ camera_intrinsics_options->division_k1 = division_intrinsics->k1();
+ camera_intrinsics_options->division_k2 = division_intrinsics->k2();
+ break;
+ }
+
+ case libmv::DISTORTION_MODEL_NUKE: {
+ const NukeCameraIntrinsics* nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics*>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
+ camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
+ camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
+ break;
+ }
- case libmv::DISTORTION_MODEL_BROWN:
- {
- const BrownCameraIntrinsics *brown_intrinsics =
- static_cast<const BrownCameraIntrinsics *>(camera_intrinsics);
- camera_intrinsics_options->distortion_model =
+ case libmv::DISTORTION_MODEL_BROWN: {
+ const BrownCameraIntrinsics* brown_intrinsics =
+ static_cast<const BrownCameraIntrinsics*>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
LIBMV_DISTORTION_MODEL_BROWN;
- camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
- camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
- camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
- camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
- camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
- camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
- break;
- }
+ camera_intrinsics_options->brown_k1 = brown_intrinsics->k1();
+ camera_intrinsics_options->brown_k2 = brown_intrinsics->k2();
+ camera_intrinsics_options->brown_k3 = brown_intrinsics->k3();
+ camera_intrinsics_options->brown_k4 = brown_intrinsics->k4();
+ camera_intrinsics_options->brown_p1 = brown_intrinsics->p1();
+ camera_intrinsics_options->brown_p2 = brown_intrinsics->p2();
+ break;
+ }
- default:
- assert(!"Unknown distortion model");
+ default: assert(!"Unknown distortion model");
}
}
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
- const unsigned char *source_image,
+ const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
unsigned char* destination_image) {
- CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
- camera_intrinsics->UndistortBuffer(source_image,
- width, height,
- overscan,
- channels,
- destination_image);
+ CameraIntrinsics* camera_intrinsics = (CameraIntrinsics*)libmv_intrinsics;
+ camera_intrinsics->UndistortBuffer(
+ source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsUndistortFloat(
@@ -310,28 +286,22 @@ void libmv_cameraIntrinsicsUndistortFloat(
float overscan,
int channels,
float* destination_image) {
- CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
- intrinsics->UndistortBuffer(source_image,
- width, height,
- overscan,
- channels,
- destination_image);
+ CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
+ intrinsics->UndistortBuffer(
+ source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
- const unsigned char *source_image,
+ const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
- unsigned char *destination_image) {
- CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
- intrinsics->DistortBuffer(source_image,
- width, height,
- overscan,
- channels,
- destination_image);
+ unsigned char* destination_image) {
+ CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
+ intrinsics->DistortBuffer(
+ source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsDistortFloat(
@@ -342,12 +312,9 @@ void libmv_cameraIntrinsicsDistortFloat(
float overscan,
int channels,
float* destination_image) {
- CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
- intrinsics->DistortBuffer(source_image,
- width, height,
- overscan,
- channels,
- destination_image);
+ CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
+ intrinsics->DistortBuffer(
+ source_image, width, height, overscan, channels, destination_image);
}
void libmv_cameraIntrinsicsApply(
@@ -356,7 +323,7 @@ void libmv_cameraIntrinsicsApply(
double y,
double* x1,
double* y1) {
- CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->ApplyIntrinsics(x, y, x1, y1);
}
@@ -366,7 +333,7 @@ void libmv_cameraIntrinsicsInvert(
double y,
double* x1,
double* y1) {
- CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics* intrinsics = (CameraIntrinsics*)libmv_intrinsics;
intrinsics->InvertIntrinsics(x, y, x1, y1);
}
@@ -381,69 +348,63 @@ static void libmv_cameraIntrinsicsFillFromOptions(
camera_intrinsics_options->principal_point_y);
camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
- camera_intrinsics_options->image_height);
+ camera_intrinsics_options->image_height);
switch (camera_intrinsics_options->distortion_model) {
- case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
- {
- PolynomialCameraIntrinsics *polynomial_intrinsics =
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL: {
+ PolynomialCameraIntrinsics* polynomial_intrinsics =
static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
- polynomial_intrinsics->SetRadialDistortion(
- camera_intrinsics_options->polynomial_k1,
- camera_intrinsics_options->polynomial_k2,
- camera_intrinsics_options->polynomial_k3);
+ polynomial_intrinsics->SetRadialDistortion(
+ camera_intrinsics_options->polynomial_k1,
+ camera_intrinsics_options->polynomial_k2,
+ camera_intrinsics_options->polynomial_k3);
- break;
- }
+ break;
+ }
- case LIBMV_DISTORTION_MODEL_DIVISION:
- {
- DivisionCameraIntrinsics *division_intrinsics =
+ case LIBMV_DISTORTION_MODEL_DIVISION: {
+ DivisionCameraIntrinsics* division_intrinsics =
static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
- division_intrinsics->SetDistortion(
- camera_intrinsics_options->division_k1,
- camera_intrinsics_options->division_k2);
- break;
- }
+ division_intrinsics->SetDistortion(
+ camera_intrinsics_options->division_k1,
+ camera_intrinsics_options->division_k2);
+ break;
+ }
- case LIBMV_DISTORTION_MODEL_NUKE:
- {
- NukeCameraIntrinsics *nuke_intrinsics =
+ case LIBMV_DISTORTION_MODEL_NUKE: {
+ NukeCameraIntrinsics* nuke_intrinsics =
static_cast<NukeCameraIntrinsics*>(camera_intrinsics);
- nuke_intrinsics->SetDistortion(
- camera_intrinsics_options->nuke_k1,
- camera_intrinsics_options->nuke_k2);
- break;
- }
+ nuke_intrinsics->SetDistortion(camera_intrinsics_options->nuke_k1,
+ camera_intrinsics_options->nuke_k2);
+ break;
+ }
- case LIBMV_DISTORTION_MODEL_BROWN:
- {
- BrownCameraIntrinsics *brown_intrinsics =
+ case LIBMV_DISTORTION_MODEL_BROWN: {
+ BrownCameraIntrinsics* brown_intrinsics =
static_cast<BrownCameraIntrinsics*>(camera_intrinsics);
- brown_intrinsics->SetRadialDistortion(
- camera_intrinsics_options->brown_k1,
- camera_intrinsics_options->brown_k2,
- camera_intrinsics_options->brown_k3,
- camera_intrinsics_options->brown_k4);
- brown_intrinsics->SetTangentialDistortion(
+ brown_intrinsics->SetRadialDistortion(
+ camera_intrinsics_options->brown_k1,
+ camera_intrinsics_options->brown_k2,
+ camera_intrinsics_options->brown_k3,
+ camera_intrinsics_options->brown_k4);
+ brown_intrinsics->SetTangentialDistortion(
camera_intrinsics_options->brown_p1,
camera_intrinsics_options->brown_p2);
- break;
- }
+ break;
+ }
- default:
- assert(!"Unknown distortion model");
+ default: assert(!"Unknown distortion model");
}
}
CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
const libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
- CameraIntrinsics *camera_intrinsics = NULL;
+ CameraIntrinsics* camera_intrinsics = NULL;
switch (camera_intrinsics_options->distortion_model) {
case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
@@ -457,8 +418,7 @@ CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
case LIBMV_DISTORTION_MODEL_BROWN:
camera_intrinsics = LIBMV_OBJECT_NEW(BrownCameraIntrinsics);
break;
- default:
- assert(!"Unknown distortion model");
+ default: assert(!"Unknown distortion model");
}
libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
camera_intrinsics);
diff --git a/intern/libmv/intern/camera_intrinsics.h b/intern/libmv/intern/camera_intrinsics.h
index eb6176770ec..8a65c93e6a4 100644
--- a/intern/libmv/intern/camera_intrinsics.h
+++ b/intern/libmv/intern/camera_intrinsics.h
@@ -56,10 +56,10 @@ typedef struct libmv_CameraIntrinsicsOptions {
double brown_p1, brown_p2;
} libmv_CameraIntrinsicsOptions;
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options);
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
const libmv_CameraIntrinsics* libmv_intrinsics);
void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics);
@@ -76,7 +76,7 @@ void libmv_cameraIntrinsicsExtractOptions(
void libmv_cameraIntrinsicsUndistortByte(
const libmv_CameraIntrinsics* libmv_intrinsics,
- const unsigned char *source_image,
+ const unsigned char* source_image,
int width,
int height,
float overscan,
@@ -94,12 +94,12 @@ void libmv_cameraIntrinsicsUndistortFloat(
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* libmv_intrinsics,
- const unsigned char *source_image,
+ const unsigned char* source_image,
int width,
int height,
float overscan,
int channels,
- unsigned char *destination_image);
+ unsigned char* destination_image);
void libmv_cameraIntrinsicsDistortFloat(
const libmv_CameraIntrinsics* libmv_intrinsics,
@@ -131,7 +131,7 @@ void libmv_cameraIntrinsicsInvert(
#ifdef __cplusplus
namespace libmv {
- class CameraIntrinsics;
+class CameraIntrinsics;
}
libmv::CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
diff --git a/intern/libmv/intern/detector.cc b/intern/libmv/intern/detector.cc
index 455e431f6f4..21f5b46595c 100644
--- a/intern/libmv/intern/detector.cc
+++ b/intern/libmv/intern/detector.cc
@@ -34,7 +34,7 @@ struct libmv_Features {
namespace {
-libmv_Features *libmv_featuresFromVector(
+libmv_Features* libmv_featuresFromVector(
const libmv::vector<Feature>& features) {
libmv_Features* libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
@@ -50,12 +50,12 @@ libmv_Features *libmv_featuresFromVector(
return libmv_features;
}
-void libmv_convertDetectorOptions(libmv_DetectOptions *options,
- DetectOptions *detector_options) {
+void libmv_convertDetectorOptions(libmv_DetectOptions* options,
+ DetectOptions* detector_options) {
switch (options->detector) {
-#define LIBMV_CONVERT(the_detector) \
- case LIBMV_DETECTOR_ ## the_detector: \
- detector_options->type = DetectOptions::the_detector; \
+#define LIBMV_CONVERT(the_detector) \
+ case LIBMV_DETECTOR_##the_detector: \
+ detector_options->type = DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
@@ -72,7 +72,7 @@ void libmv_convertDetectorOptions(libmv_DetectOptions *options,
} // namespace
-libmv_Features *libmv_detectFeaturesByte(const unsigned char* image_buffer,
+libmv_Features* libmv_detectFeaturesByte(const unsigned char* image_buffer,
int width,
int height,
int channels,
@@ -133,7 +133,7 @@ void libmv_getFeature(const libmv_Features* libmv_features,
double* y,
double* score,
double* size) {
- Feature &feature = libmv_features->features[number];
+ Feature& feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
*score = feature.score;
diff --git a/intern/libmv/intern/detector.h b/intern/libmv/intern/detector.h
index b36935fc67f..f21e78d7f2b 100644
--- a/intern/libmv/intern/detector.h
+++ b/intern/libmv/intern/detector.h
@@ -38,7 +38,7 @@ typedef struct libmv_DetectOptions {
int min_distance;
int fast_min_trackness;
int moravec_max_count;
- unsigned char *moravec_pattern;
+ unsigned char* moravec_pattern;
double harris_threshold;
} libmv_DetectOptions;
diff --git a/intern/libmv/intern/frame_accessor.cc b/intern/libmv/intern/frame_accessor.cc
index 9fde73f0d71..3a6c753cc8d 100644
--- a/intern/libmv/intern/frame_accessor.cc
+++ b/intern/libmv/intern/frame_accessor.cc
@@ -36,20 +36,18 @@ struct LibmvFrameAccessor : public FrameAccessor {
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback)
- : user_data_(user_data),
- get_image_callback_(get_image_callback),
- release_image_callback_(release_image_callback),
- get_mask_for_track_callback_(get_mask_for_track_callback),
- release_mask_callback_(release_mask_callback) { }
+ : user_data_(user_data),
+ get_image_callback_(get_image_callback),
+ release_image_callback_(release_image_callback),
+ get_mask_for_track_callback_(get_mask_for_track_callback),
+ release_mask_callback_(release_mask_callback) {}
- virtual ~LibmvFrameAccessor() {
- }
+ virtual ~LibmvFrameAccessor() {}
libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
switch (input_mode) {
-#define CHECK_INPUT_MODE(mode) \
- case mode: \
- return LIBMV_IMAGE_MODE_ ## mode;
+#define CHECK_INPUT_MODE(mode) \
+ case mode: return LIBMV_IMAGE_MODE_##mode;
CHECK_INPUT_MODE(MONO)
CHECK_INPUT_MODE(RGBA)
#undef CHECK_INPUT_MODE
@@ -59,8 +57,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
return LIBMV_IMAGE_MODE_MONO;
}
- void get_libmv_region(const Region& region,
- libmv_Region* libmv_region) {
+ void get_libmv_region(const Region& region, libmv_Region* libmv_region) {
libmv_region->min[0] = region.min(0);
libmv_region->min[1] = region.min(1);
libmv_region->max[0] = region.max(0);
@@ -74,7 +71,7 @@ struct LibmvFrameAccessor : public FrameAccessor {
const Region* region,
const Transform* transform,
FloatImage* destination) {
- float *float_buffer;
+ float* float_buffer;
int width, height, channels;
libmv_Region libmv_region;
if (region) {
@@ -86,46 +83,41 @@ struct LibmvFrameAccessor : public FrameAccessor {
get_libmv_input_mode(input_mode),
downscale,
region != NULL ? &libmv_region : NULL,
- (libmv_FrameTransform*) transform,
+ (libmv_FrameTransform*)transform,
&float_buffer,
&width,
&height,
&channels);
// TODO(sergey): Dumb code for until we can set data directly.
- FloatImage temp_image(float_buffer,
- height,
- width,
- channels);
+ FloatImage temp_image(float_buffer, height, width, channels);
destination->CopyFrom(temp_image);
return cache_key;
}
- void ReleaseImage(Key cache_key) {
- release_image_callback_(cache_key);
- }
+ void ReleaseImage(Key cache_key) { release_image_callback_(cache_key); }
Key GetMaskForTrack(int clip,
int frame,
int track,
const Region* region,
FloatImage* destination) {
- float *float_buffer;
+ float* float_buffer;
int width, height;
libmv_Region libmv_region;
if (region) {
get_libmv_region(*region, &libmv_region);
}
- Key cache_key = get_mask_for_track_callback_(
- user_data_,
- clip,
- frame,
- track,
- region != NULL ? &libmv_region : NULL,
- &float_buffer,
- &width,
- &height);
+ Key cache_key =
+ get_mask_for_track_callback_(user_data_,
+ clip,
+ frame,
+ track,
+ region != NULL ? &libmv_region : NULL,
+ &float_buffer,
+ &width,
+ &height);
if (cache_key == NULL) {
// No mask for the given track.
@@ -133,30 +125,21 @@ struct LibmvFrameAccessor : public FrameAccessor {
}
// TODO(sergey): Dumb code for until we can set data directly.
- FloatImage temp_image(float_buffer,
- height,
- width,
- 1);
+ FloatImage temp_image(float_buffer, height, width, 1);
destination->CopyFrom(temp_image);
return cache_key;
}
- void ReleaseMask(Key key) {
- release_mask_callback_(key);
- }
+ void ReleaseMask(Key key) { release_mask_callback_(key); }
- bool GetClipDimensions(int /*clip*/, int * /*width*/, int * /*height*/) {
+ bool GetClipDimensions(int /*clip*/, int* /*width*/, int* /*height*/) {
return false;
}
- int NumClips() {
- return 1;
- }
+ int NumClips() { return 1; }
- int NumFrames(int /*clip*/) {
- return 0;
- }
+ int NumFrames(int /*clip*/) { return 0; }
libmv_FrameAccessorUserData* user_data_;
libmv_GetImageCallback get_image_callback_;
@@ -173,35 +156,35 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseImageCallback release_image_callback,
libmv_GetMaskForTrackCallback get_mask_for_track_callback,
libmv_ReleaseMaskCallback release_mask_callback) {
- return (libmv_FrameAccessor*) LIBMV_OBJECT_NEW(LibmvFrameAccessor,
- user_data,
- get_image_callback,
- release_image_callback,
- get_mask_for_track_callback,
- release_mask_callback);
+ return (libmv_FrameAccessor*)LIBMV_OBJECT_NEW(LibmvFrameAccessor,
+ user_data,
+ get_image_callback,
+ release_image_callback,
+ get_mask_for_track_callback,
+ release_mask_callback);
}
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
}
-int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
- return ((FrameAccessor::Transform*) transform)->key();
+int64_t libmv_frameAccessorgetTransformKey(
+ const libmv_FrameTransform* transform) {
+ return ((FrameAccessor::Transform*)transform)->key();
}
-void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
- const libmv_FloatImage *input_image,
- libmv_FloatImage *output_image) {
+void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
+ const libmv_FloatImage* input_image,
+ libmv_FloatImage* output_image) {
const FloatImage input(input_image->buffer,
input_image->height,
input_image->width,
input_image->channels);
FloatImage output;
- ((FrameAccessor::Transform*) transform)->run(input,
- &output);
+ ((FrameAccessor::Transform*)transform)->run(input, &output);
- int num_pixels = output.Width() *output.Height() * output.Depth();
+ int num_pixels = output.Width() * output.Height() * output.Depth();
output_image->buffer = new float[num_pixels];
memcpy(output_image->buffer, output.Data(), num_pixels * sizeof(float));
output_image->width = output.Width();
diff --git a/intern/libmv/intern/frame_accessor.h b/intern/libmv/intern/frame_accessor.h
index 6bccb305282..39a3bc5eb1d 100644
--- a/intern/libmv/intern/frame_accessor.h
+++ b/intern/libmv/intern/frame_accessor.h
@@ -32,14 +32,14 @@ extern "C" {
typedef struct libmv_FrameAccessor libmv_FrameAccessor;
typedef struct libmv_FrameTransform libmv_FrameTransform;
typedef struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData;
-typedef void *libmv_CacheKey;
+typedef void* libmv_CacheKey;
typedef enum {
LIBMV_IMAGE_MODE_MONO,
LIBMV_IMAGE_MODE_RGBA,
} libmv_InputMode;
-typedef libmv_CacheKey (*libmv_GetImageCallback) (
+typedef libmv_CacheKey (*libmv_GetImageCallback)(
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@@ -52,9 +52,9 @@ typedef libmv_CacheKey (*libmv_GetImageCallback) (
int* height,
int* channels);
-typedef void (*libmv_ReleaseImageCallback) (libmv_CacheKey cache_key);
+typedef void (*libmv_ReleaseImageCallback)(libmv_CacheKey cache_key);
-typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback) (
+typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback)(
libmv_FrameAccessorUserData* user_data,
int clip,
int frame,
@@ -63,7 +63,7 @@ typedef libmv_CacheKey (*libmv_GetMaskForTrackCallback) (
float** destination,
int* width,
int* height);
-typedef void (*libmv_ReleaseMaskCallback) (libmv_CacheKey cache_key);
+typedef void (*libmv_ReleaseMaskCallback)(libmv_CacheKey cache_key);
libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_FrameAccessorUserData* user_data,
@@ -73,11 +73,12 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_ReleaseMaskCallback release_mask_callback);
void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
-int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
+int64_t libmv_frameAccessorgetTransformKey(
+ const libmv_FrameTransform* transform);
-void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
- const libmv_FloatImage *input_image,
- libmv_FloatImage *output_image);
+void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* transform,
+ const libmv_FloatImage* input_image,
+ libmv_FloatImage* output_image);
#ifdef __cplusplus
}
#endif
diff --git a/intern/libmv/intern/homography.cc b/intern/libmv/intern/homography.cc
index 179aeaa08aa..dc1009b5636 100644
--- a/intern/libmv/intern/homography.cc
+++ b/intern/libmv/intern/homography.cc
@@ -41,10 +41,8 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
- libmv::EstimateHomography2DFromCorrespondences(x1_mat,
- x2_mat,
- options,
- &H_mat);
+ libmv::EstimateHomography2DFromCorrespondences(
+ x1_mat, x2_mat, options, &H_mat);
LG << "H: " << H_mat;
diff --git a/intern/libmv/intern/image.cc b/intern/libmv/intern/image.cc
index a6e09277680..564e4762448 100644
--- a/intern/libmv/intern/image.cc
+++ b/intern/libmv/intern/image.cc
@@ -21,14 +21,14 @@
#include "intern/utildefines.h"
#include "libmv/tracking/track_region.h"
-#include <cassert>
#include <png.h>
+#include <cassert>
using libmv::FloatImage;
using libmv::SamplePlanarPatch;
-void libmv_floatImageDestroy(libmv_FloatImage *image) {
- delete [] image->buffer;
+void libmv_floatImageDestroy(libmv_FloatImage* image) {
+ delete[] image->buffer;
}
/* Image <-> buffers conversion */
@@ -63,8 +63,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
}
}
-void libmv_floatImageToFloatBuffer(const FloatImage &image,
- float* buffer) {
+void libmv_floatImageToFloatBuffer(const FloatImage& image, float* buffer) {
for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
@@ -74,9 +73,9 @@ void libmv_floatImageToFloatBuffer(const FloatImage &image,
}
}
-void libmv_floatImageToByteBuffer(const libmv::FloatImage &image,
+void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer) {
- for (int y = 0, a= 0; y < image.Height(); y++) {
+ for (int y = 0, a = 0; y < image.Height(); y++) {
for (int x = 0; x < image.Width(); x++) {
for (int k = 0; k < image.Depth(); k++) {
buffer[a++] = image(y, x, k) * 255.0f;
@@ -93,7 +92,7 @@ static bool savePNGImage(png_bytep* row_pointers,
const char* file_name) {
png_infop info_ptr;
png_structp png_ptr;
- FILE *fp = fopen(file_name, "wb");
+ FILE* fp = fopen(file_name, "wb");
if (fp == NULL) {
return false;
@@ -153,7 +152,7 @@ bool libmv_saveImage(const FloatImage& image,
int x0,
int y0) {
int x, y;
- png_bytep *row_pointers;
+ png_bytep* row_pointers;
assert(image.Depth() == 1);
@@ -180,9 +179,8 @@ bool libmv_saveImage(const FloatImage& image,
static int image_counter = 0;
char file_name[128];
- snprintf(file_name, sizeof(file_name),
- "%s_%02d.png",
- prefix, ++image_counter);
+ snprintf(
+ file_name, sizeof(file_name), "%s_%02d.png", prefix, ++image_counter);
bool result = savePNGImage(row_pointers,
image.Width(),
image.Height(),
@@ -191,9 +189,9 @@ bool libmv_saveImage(const FloatImage& image,
file_name);
for (y = 0; y < image.Height(); y++) {
- delete [] row_pointers[y];
+ delete[] row_pointers[y];
}
- delete [] row_pointers;
+ delete[] row_pointers;
return result;
}
@@ -211,7 +209,7 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y) {
FloatImage libmv_image, libmv_patch, libmv_mask;
- FloatImage *libmv_mask_for_sample = NULL;
+ FloatImage* libmv_mask_for_sample = NULL;
libmv_floatBufferToFloatImage(image, width, height, channels, &libmv_image);
@@ -221,8 +219,10 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
SamplePlanarPatch(libmv_image,
- xs, ys,
- num_samples_x, num_samples_y,
+ xs,
+ ys,
+ num_samples_x,
+ num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
@@ -232,19 +232,19 @@ void libmv_samplePlanarPatchFloat(const float* image,
}
void libmv_samplePlanarPatchByte(const unsigned char* image,
- int width,
- int height,
- int channels,
- const double* xs,
- const double* ys,
- int num_samples_x,
- int num_samples_y,
- const float* mask,
- unsigned char* patch,
- double* warped_position_x,
- double* warped_position_y) {
+ int width,
+ int height,
+ int channels,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ const float* mask,
+ unsigned char* patch,
+ double* warped_position_x,
+ double* warped_position_y) {
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
- libmv::FloatImage *libmv_mask_for_sample = NULL;
+ libmv::FloatImage* libmv_mask_for_sample = NULL;
libmv_byteBufferToFloatImage(image, width, height, channels, &libmv_image);
@@ -254,8 +254,10 @@ void libmv_samplePlanarPatchByte(const unsigned char* image,
}
libmv::SamplePlanarPatch(libmv_image,
- xs, ys,
- num_samples_x, num_samples_y,
+ xs,
+ ys,
+ num_samples_x,
+ num_samples_y,
libmv_mask_for_sample,
&libmv_patch,
warped_position_x,
diff --git a/intern/libmv/intern/image.h b/intern/libmv/intern/image.h
index a1381a4c216..02cef86a127 100644
--- a/intern/libmv/intern/image.h
+++ b/intern/libmv/intern/image.h
@@ -35,7 +35,7 @@ void libmv_floatBufferToFloatImage(const float* buffer,
libmv::FloatImage* image);
void libmv_floatImageToFloatBuffer(const libmv::FloatImage& image,
- float *buffer);
+ float* buffer);
void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
unsigned char* buffer);
@@ -51,13 +51,13 @@ extern "C" {
#endif
typedef struct libmv_FloatImage {
- float *buffer;
+ float* buffer;
int width;
int height;
int channels;
} libmv_FloatImage;
-void libmv_floatImageDestroy(libmv_FloatImage *image);
+void libmv_floatImageDestroy(libmv_FloatImage* image);
void libmv_samplePlanarPatchFloat(const float* image,
int width,
@@ -72,18 +72,18 @@ void libmv_samplePlanarPatchFloat(const float* image,
double* warped_position_x,
double* warped_position_y);
- void libmv_samplePlanarPatchByte(const unsigned char* image,
- int width,
- int height,
- int channels,
- const double* xs,
- const double* ys,
- int num_samples_x,
- int num_samples_y,
- const float* mask,
- unsigned char* patch,
- double* warped_position_x,
- double* warped_position_y);
+void libmv_samplePlanarPatchByte(const unsigned char* image,
+ int width,
+ int height,
+ int channels,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ const float* mask,
+ unsigned char* patch,
+ double* warped_position_x,
+ double* warped_position_y);
#ifdef __cplusplus
}
diff --git a/intern/libmv/intern/reconstruction.cc b/intern/libmv/intern/reconstruction.cc
index 0f4e890d4ca..430607461da 100644
--- a/intern/libmv/intern/reconstruction.cc
+++ b/intern/libmv/intern/reconstruction.cc
@@ -24,8 +24,8 @@
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
-#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/initialize_reconstruction.h"
+#include "libmv/simple_pipeline/keyframe_selection.h"
#include "libmv/simple_pipeline/modal_solver.h"
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"
@@ -39,19 +39,19 @@ using libmv::EuclideanScaleToUnity;
using libmv::Marker;
using libmv::ProgressUpdateCallback;
-using libmv::PolynomialCameraIntrinsics;
-using libmv::Tracks;
using libmv::EuclideanBundle;
using libmv::EuclideanCompleteReconstruction;
using libmv::EuclideanReconstructTwoFrames;
using libmv::EuclideanReprojectionError;
+using libmv::PolynomialCameraIntrinsics;
+using libmv::Tracks;
struct libmv_Reconstruction {
EuclideanReconstruction reconstruction;
/* Used for per-track average error calculation after reconstruction */
Tracks tracks;
- CameraIntrinsics *intrinsics;
+ CameraIntrinsics* intrinsics;
double error;
bool is_valid;
@@ -63,7 +63,7 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
public:
ReconstructUpdateCallback(
reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata) {
+ void* callback_customdata) {
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
}
@@ -73,13 +73,14 @@ class ReconstructUpdateCallback : public ProgressUpdateCallback {
progress_update_callback_(callback_customdata_, progress, message);
}
}
+
protected:
reconstruct_progress_update_cb progress_update_callback_;
void* callback_customdata_;
};
void libmv_solveRefineIntrinsics(
- const Tracks &tracks,
+ const Tracks& tracks,
const int refine_intrinsics,
const int bundle_constraints,
reconstruct_progress_update_cb progress_update_callback,
@@ -96,11 +97,11 @@ void libmv_solveRefineIntrinsics(
bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
}
-#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
- do { \
- if (refine_intrinsics & LIBMV_REFINE_ ## type ##_DISTORTION_ ## coefficient) { \
- bundle_intrinsics |= libmv::BUNDLE_ ## type ## _ ## coefficient; \
- } \
+#define SET_DISTORTION_FLAG_CHECKED(type, coefficient) \
+ do { \
+ if (refine_intrinsics & LIBMV_REFINE_##type##_DISTORTION_##coefficient) { \
+ bundle_intrinsics |= libmv::BUNDLE_##type##_##coefficient; \
+ } \
} while (0)
SET_DISTORTION_FLAG_CHECKED(RADIAL, K1);
@@ -123,20 +124,19 @@ void libmv_solveRefineIntrinsics(
}
void finishReconstruction(
- const Tracks &tracks,
- const CameraIntrinsics &camera_intrinsics,
- libmv_Reconstruction *libmv_reconstruction,
+ const Tracks& tracks,
+ const CameraIntrinsics& camera_intrinsics,
+ libmv_Reconstruction* libmv_reconstruction,
reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata) {
- EuclideanReconstruction &reconstruction =
- libmv_reconstruction->reconstruction;
+ void* callback_customdata) {
+ EuclideanReconstruction& reconstruction =
+ libmv_reconstruction->reconstruction;
/* Reprojection error calculation. */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
- libmv_reconstruction->error = EuclideanReprojectionError(tracks,
- reconstruction,
- camera_intrinsics);
+ libmv_reconstruction->error =
+ EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
}
bool selectTwoKeyframesBasedOnGRICAndVariance(
@@ -148,9 +148,8 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
libmv::vector<int> keyframes;
/* Get list of all keyframe candidates first. */
- SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
- camera_intrinsics,
- keyframes);
+ SelectKeyframesBasedOnGRICAndVariance(
+ normalized_tracks, camera_intrinsics, keyframes);
if (keyframes.size() < 2) {
LG << "Not enough keyframes detected by GRIC";
@@ -175,24 +174,20 @@ bool selectTwoKeyframesBasedOnGRICAndVariance(
EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
libmv::vector<Marker> keyframe_markers =
- normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
- current_keyframe);
+ normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
+ current_keyframe);
Tracks keyframe_tracks(keyframe_markers);
/* get a solution from two keyframes only */
EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
EuclideanBundle(keyframe_tracks, &reconstruction);
- EuclideanCompleteReconstruction(keyframe_tracks,
- &reconstruction,
- NULL);
+ EuclideanCompleteReconstruction(keyframe_tracks, &reconstruction, NULL);
- double current_error = EuclideanReprojectionError(tracks,
- reconstruction,
- camera_intrinsics);
+ double current_error =
+ EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
- LG << "Error between " << previous_keyframe
- << " and " << current_keyframe
+ LG << "Error between " << previous_keyframe << " and " << current_keyframe
<< ": " << current_error;
if (current_error < best_error) {
@@ -214,53 +209,49 @@ Marker libmv_projectMarker(const EuclideanPoint& point,
projected /= projected(2);
libmv::Marker reprojected_marker;
- intrinsics.ApplyIntrinsics(projected(0), projected(1),
- &reprojected_marker.x,
- &reprojected_marker.y);
+ intrinsics.ApplyIntrinsics(
+ projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
reprojected_marker.image = camera.image;
reprojected_marker.track = point.track;
return reprojected_marker;
}
-void libmv_getNormalizedTracks(const Tracks &tracks,
- const CameraIntrinsics &camera_intrinsics,
- Tracks *normalized_tracks) {
+void libmv_getNormalizedTracks(const Tracks& tracks,
+ const CameraIntrinsics& camera_intrinsics,
+ Tracks* normalized_tracks) {
libmv::vector<Marker> markers = tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
- Marker &marker = markers[i];
- camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
- &marker.x, &marker.y);
- normalized_tracks->Insert(marker.image,
- marker.track,
- marker.x, marker.y,
- marker.weight);
+ Marker& marker = markers[i];
+ camera_intrinsics.InvertIntrinsics(
+ marker.x, marker.y, &marker.x, &marker.y);
+ normalized_tracks->Insert(
+ marker.image, marker.track, marker.x, marker.y, marker.weight);
}
}
} // namespace
-libmv_Reconstruction *libmv_solveReconstruction(
+libmv_Reconstruction* libmv_solveReconstruction(
const libmv_Tracks* libmv_tracks,
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata) {
- libmv_Reconstruction *libmv_reconstruction =
- LIBMV_OBJECT_NEW(libmv_Reconstruction);
+ libmv_Reconstruction* libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
- Tracks &tracks = *((Tracks *) libmv_tracks);
- EuclideanReconstruction &reconstruction =
- libmv_reconstruction->reconstruction;
+ Tracks& tracks = *((Tracks*)libmv_tracks);
+ EuclideanReconstruction& reconstruction =
+ libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
- ReconstructUpdateCallback(progress_update_callback,
- callback_customdata);
+ ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
- CameraIntrinsics *camera_intrinsics;
+ CameraIntrinsics* camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
- libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
  /* Invert the camera intrinsics. */
Tracks normalized_tracks;
@@ -276,10 +267,10 @@ libmv_Reconstruction *libmv_solveReconstruction(
update_callback.invoke(0, "Selecting keyframes");
if (selectTwoKeyframesBasedOnGRICAndVariance(tracks,
- normalized_tracks,
- *camera_intrinsics,
- keyframe1,
- keyframe2)) {
+ normalized_tracks,
+ *camera_intrinsics,
+ keyframe1,
+ keyframe2)) {
/* so keyframes in the interface would be updated */
libmv_reconstruction_options->keyframe1 = keyframe1;
libmv_reconstruction_options->keyframe2 = keyframe2;
@@ -290,7 +281,7 @@ libmv_Reconstruction *libmv_solveReconstruction(
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
libmv::vector<Marker> keyframe_markers =
- normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
+ normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
@@ -309,14 +300,12 @@ libmv_Reconstruction *libmv_solveReconstruction(
}
EuclideanBundle(normalized_tracks, &reconstruction);
- EuclideanCompleteReconstruction(normalized_tracks,
- &reconstruction,
- &update_callback);
+ EuclideanCompleteReconstruction(
+ normalized_tracks, &reconstruction, &update_callback);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
- libmv_solveRefineIntrinsics(
- tracks,
+ libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_CONSTRAINTS,
progress_update_callback,
@@ -336,31 +325,29 @@ libmv_Reconstruction *libmv_solveReconstruction(
callback_customdata);
libmv_reconstruction->is_valid = true;
- return (libmv_Reconstruction *) libmv_reconstruction;
+ return (libmv_Reconstruction*)libmv_reconstruction;
}
-libmv_Reconstruction *libmv_solveModal(
- const libmv_Tracks *libmv_tracks,
- const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- const libmv_ReconstructionOptions *libmv_reconstruction_options,
+libmv_Reconstruction* libmv_solveModal(
+ const libmv_Tracks* libmv_tracks,
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ const libmv_ReconstructionOptions* libmv_reconstruction_options,
reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata) {
- libmv_Reconstruction *libmv_reconstruction =
- LIBMV_OBJECT_NEW(libmv_Reconstruction);
+ void* callback_customdata) {
+ libmv_Reconstruction* libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
- Tracks &tracks = *((Tracks *) libmv_tracks);
- EuclideanReconstruction &reconstruction =
- libmv_reconstruction->reconstruction;
+ Tracks& tracks = *((Tracks*)libmv_tracks);
+ EuclideanReconstruction& reconstruction =
+ libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
- ReconstructUpdateCallback(progress_update_callback,
- callback_customdata);
+ ReconstructUpdateCallback(progress_update_callback, callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API. */
- CameraIntrinsics *camera_intrinsics;
+ CameraIntrinsics* camera_intrinsics;
camera_intrinsics = libmv_reconstruction->intrinsics =
- libmv_cameraIntrinsicsCreateFromOptions(
- libmv_camera_intrinsics_options);
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
Tracks normalized_tracks;
@@ -378,11 +365,11 @@ libmv_Reconstruction *libmv_solveModal(
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
- libmv_solveRefineIntrinsics(
- tracks,
+ libmv_solveRefineIntrinsics(tracks,
libmv_reconstruction_options->refine_intrinsics,
libmv::BUNDLE_NO_TRANSLATION,
- progress_update_callback, callback_customdata,
+ progress_update_callback,
+ callback_customdata,
&reconstruction,
camera_intrinsics);
}
@@ -395,26 +382,25 @@ libmv_Reconstruction *libmv_solveModal(
callback_customdata);
libmv_reconstruction->is_valid = true;
- return (libmv_Reconstruction *) libmv_reconstruction;
+ return (libmv_Reconstruction*)libmv_reconstruction;
}
-int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction) {
+int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction) {
return libmv_reconstruction->is_valid;
}
-void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction) {
+void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction) {
LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
int libmv_reprojectionPointForTrack(
- const libmv_Reconstruction *libmv_reconstruction,
+ const libmv_Reconstruction* libmv_reconstruction,
int track,
double pos[3]) {
- const EuclideanReconstruction *reconstruction =
- &libmv_reconstruction->reconstruction;
- const EuclideanPoint *point =
- reconstruction->PointForTrack(track);
+ const EuclideanReconstruction* reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanPoint* point = reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
pos[1] = point->X[2];
@@ -425,23 +411,22 @@ int libmv_reprojectionPointForTrack(
}
double libmv_reprojectionErrorForTrack(
- const libmv_Reconstruction *libmv_reconstruction,
- int track) {
- const EuclideanReconstruction *reconstruction =
- &libmv_reconstruction->reconstruction;
- const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ const libmv_Reconstruction* libmv_reconstruction, int track) {
+ const EuclideanReconstruction* reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
- libmv_reconstruction->tracks.MarkersForTrack(track);
+ libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
- const EuclideanCamera *camera =
- reconstruction->CameraForImage(markers[i].image);
- const EuclideanPoint *point =
- reconstruction->PointForTrack(markers[i].track);
+ const EuclideanCamera* camera =
+ reconstruction->CameraForImage(markers[i].image);
+ const EuclideanPoint* point =
+ reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
@@ -450,7 +435,7 @@ double libmv_reprojectionErrorForTrack(
num_reprojected++;
Marker reprojected_marker =
- libmv_projectMarker(*point, *camera, *intrinsics);
+ libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
@@ -461,14 +446,13 @@ double libmv_reprojectionErrorForTrack(
}
double libmv_reprojectionErrorForImage(
- const libmv_Reconstruction *libmv_reconstruction,
- int image) {
- const EuclideanReconstruction *reconstruction =
- &libmv_reconstruction->reconstruction;
- const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ const libmv_Reconstruction* libmv_reconstruction, int image) {
+ const EuclideanReconstruction* reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics* intrinsics = libmv_reconstruction->intrinsics;
libmv::vector<Marker> markers =
- libmv_reconstruction->tracks.MarkersInImage(image);
- const EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ libmv_reconstruction->tracks.MarkersInImage(image);
+ const EuclideanCamera* camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
@@ -477,8 +461,8 @@ double libmv_reprojectionErrorForImage(
}
for (int i = 0; i < markers.size(); ++i) {
- const EuclideanPoint *point =
- reconstruction->PointForTrack(markers[i].track);
+ const EuclideanPoint* point =
+ reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
@@ -487,7 +471,7 @@ double libmv_reprojectionErrorForImage(
num_reprojected++;
Marker reprojected_marker =
- libmv_projectMarker(*point, *camera, *intrinsics);
+ libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
@@ -498,13 +482,12 @@ double libmv_reprojectionErrorForImage(
}
int libmv_reprojectionCameraForImage(
- const libmv_Reconstruction *libmv_reconstruction,
+ const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]) {
- const EuclideanReconstruction *reconstruction =
- &libmv_reconstruction->reconstruction;
- const EuclideanCamera *camera =
- reconstruction->CameraForImage(image);
+ const EuclideanReconstruction* reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanCamera* camera = reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
@@ -541,11 +524,11 @@ int libmv_reprojectionCameraForImage(
}
double libmv_reprojectionError(
- const libmv_Reconstruction *libmv_reconstruction) {
+ const libmv_Reconstruction* libmv_reconstruction) {
return libmv_reconstruction->error;
}
-libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
- libmv_Reconstruction *libmv_reconstruction) {
- return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
+libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction* libmv_reconstruction) {
+ return (libmv_CameraIntrinsics*)libmv_reconstruction->intrinsics;
}
diff --git a/intern/libmv/intern/reconstruction.h b/intern/libmv/intern/reconstruction.h
index 408ac884684..600bc92ccfc 100644
--- a/intern/libmv/intern/reconstruction.h
+++ b/intern/libmv/intern/reconstruction.h
@@ -31,17 +31,16 @@ struct libmv_CameraIntrinsicsOptions;
typedef struct libmv_Reconstruction libmv_Reconstruction;
enum {
- LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
- LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
-
- LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
- LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
- LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
- LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
- LIBMV_REFINE_RADIAL_DISTORTION = (LIBMV_REFINE_RADIAL_DISTORTION_K1 |
- LIBMV_REFINE_RADIAL_DISTORTION_K2 |
- LIBMV_REFINE_RADIAL_DISTORTION_K3 |
- LIBMV_REFINE_RADIAL_DISTORTION_K4),
+ LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
+ LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
+
+ LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
+ LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 3),
+ LIBMV_REFINE_RADIAL_DISTORTION_K3 = (1 << 4),
+ LIBMV_REFINE_RADIAL_DISTORTION_K4 = (1 << 5),
+ LIBMV_REFINE_RADIAL_DISTORTION =
+ (LIBMV_REFINE_RADIAL_DISTORTION_K1 | LIBMV_REFINE_RADIAL_DISTORTION_K2 |
+ LIBMV_REFINE_RADIAL_DISTORTION_K3 | LIBMV_REFINE_RADIAL_DISTORTION_K4),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P1 = (1 << 6),
LIBMV_REFINE_TANGENTIAL_DISTORTION_P2 = (1 << 7),
@@ -55,9 +54,9 @@ typedef struct libmv_ReconstructionOptions {
int refine_intrinsics;
} libmv_ReconstructionOptions;
-typedef void (*reconstruct_progress_update_cb) (void* customdata,
- double progress,
- const char* message);
+typedef void (*reconstruct_progress_update_cb)(void* customdata,
+ double progress,
+ const char* message);
libmv_Reconstruction* libmv_solveReconstruction(
const struct libmv_Tracks* libmv_tracks,
@@ -73,35 +72,32 @@ libmv_Reconstruction* libmv_solveModal(
reconstruct_progress_update_cb progress_update_callback,
void* callback_customdata);
-int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
+int libmv_reconstructionIsValid(libmv_Reconstruction* libmv_reconstruction);
void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
int libmv_reprojectionPointForTrack(
- const libmv_Reconstruction* libmv_reconstruction,
- int track,
- double pos[3]);
+ const libmv_Reconstruction* libmv_reconstruction, int track, double pos[3]);
double libmv_reprojectionErrorForTrack(
- const libmv_Reconstruction* libmv_reconstruction,
- int track);
+ const libmv_Reconstruction* libmv_reconstruction, int track);
double libmv_reprojectionErrorForImage(
- const libmv_Reconstruction* libmv_reconstruction,
- int image);
+ const libmv_Reconstruction* libmv_reconstruction, int image);
int libmv_reprojectionCameraForImage(
const libmv_Reconstruction* libmv_reconstruction,
int image,
double mat[4][4]);
-double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
+double libmv_reprojectionError(
+ const libmv_Reconstruction* libmv_reconstruction);
struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
- libmv_Reconstruction *libmv_Reconstruction);
+ libmv_Reconstruction* libmv_Reconstruction);
#ifdef __cplusplus
}
#endif
-#endif // LIBMV_C_API_RECONSTRUCTION_H_
+#endif // LIBMV_C_API_RECONSTRUCTION_H_
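Usage note for the API declared above: a caller fills a libmv_ReconstructionOptions with the LIBMV_REFINE_* flags it wants bundled and hands it, together with a track container and a progress callback, to libmv_solveReconstruction. The sketch below is illustrative only; include paths, keyframe values and the callback are placeholders and not part of this patch.

    #include <cstdio>

    // Include paths are illustrative; adjust to the actual build setup.
    #include "intern/reconstruction.h"
    #include "intern/tracks.h"

    // Hypothetical callback matching reconstruct_progress_update_cb.
    static void print_progress(void* /*customdata*/,
                               double progress,
                               const char* message) {
      std::printf("%3d%% %s\n", int(progress * 100.0), message);
    }

    void example_solve(libmv_Tracks* tracks,
                       const libmv_CameraIntrinsicsOptions* intrinsics_options) {
      libmv_ReconstructionOptions options = {};
      options.keyframe1 = 1;   // Placeholder values; the solver may overwrite
      options.keyframe2 = 30;  // them when automatic keyframe selection runs.
      options.refine_intrinsics =
          LIBMV_REFINE_FOCAL_LENGTH | LIBMV_REFINE_RADIAL_DISTORTION;

      libmv_Reconstruction* reconstruction = libmv_solveReconstruction(
          tracks, intrinsics_options, &options, print_progress, NULL);

      if (libmv_reconstructionIsValid(reconstruction)) {
        std::printf("Average reprojection error: %f\n",
                    libmv_reprojectionError(reconstruction));
      }
      libmv_reconstructionDestroy(reconstruction);
    }
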
diff --git a/intern/libmv/intern/stub.cc b/intern/libmv/intern/stub.cc
index 47e1896b4cf..1920042b4ec 100644
--- a/intern/libmv/intern/stub.cc
+++ b/intern/libmv/intern/stub.cc
@@ -24,7 +24,7 @@
/* ************ Logging ************ */
-void libmv_initLogging(const char * /*argv0*/) {
+void libmv_initLogging(const char* /*argv0*/) {
}
void libmv_startDebugLogging(void) {
@@ -36,18 +36,18 @@ void libmv_setLoggingVerbosity(int /*verbosity*/) {
/* ************ Planar tracker ************ */
/* TrackRegion (new planar tracker) */
-int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
- const float * /*image1*/,
+int libmv_trackRegion(const libmv_TrackRegionOptions* /*options*/,
+ const float* /*image1*/,
int /*image1_width*/,
int /*image1_height*/,
- const float * /*image2*/,
+ const float* /*image2*/,
int /*image2_width*/,
int /*image2_height*/,
- const double *x1,
- const double *y1,
- libmv_TrackRegionResult *result,
- double *x2,
- double *y2) {
+ const double* x1,
+ const double* y1,
+ libmv_TrackRegionResult* result,
+ double* x2,
+ double* y2) {
/* Convert to doubles for the libmv api. The four corners and the center. */
for (int i = 0; i < 5; ++i) {
x2[i] = x1[i];
@@ -61,46 +61,46 @@ int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
return false;
}
-void libmv_samplePlanarPatchFloat(const float * /*image*/,
+void libmv_samplePlanarPatchFloat(const float* /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
- const double * /*xs*/,
- const double * /*ys*/,
+ const double* /*xs*/,
+ const double* /*ys*/,
int /*num_samples_x*/,
int /*num_samples_y*/,
- const float * /*mask*/,
- float * /*patch*/,
- double * /*warped_position_x*/,
- double * /*warped_position_y*/) {
+ const float* /*mask*/,
+ float* /*patch*/,
+ double* /*warped_position_x*/,
+ double* /*warped_position_y*/) {
/* TODO(sergey): implement */
}
-void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
+void libmv_samplePlanarPatchByte(const unsigned char* /*image*/,
int /*width*/,
int /*height*/,
int /*channels*/,
- const double * /*xs*/,
- const double * /*ys*/,
- int /*num_samples_x*/, int /*num_samples_y*/,
- const float * /*mask*/,
- unsigned char * /*patch*/,
- double * /*warped_position_x*/,
- double * /*warped_position_y*/) {
+ const double* /*xs*/,
+ const double* /*ys*/,
+ int /*num_samples_x*/,
+ int /*num_samples_y*/,
+ const float* /*mask*/,
+ unsigned char* /*patch*/,
+ double* /*warped_position_x*/,
+ double* /*warped_position_y*/) {
/* TODO(sergey): implement */
}
-void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
-{
+void libmv_floatImageDestroy(libmv_FloatImage* /*image*/) {
}
/* ************ Tracks ************ */
-libmv_Tracks *libmv_tracksNew(void) {
+libmv_Tracks* libmv_tracksNew(void) {
return NULL;
}
-void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
+void libmv_tracksInsert(libmv_Tracks* /*libmv_tracks*/,
int /*image*/,
int /*track*/,
double /*x*/,
@@ -108,152 +108,152 @@ void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
double /*weight*/) {
}
-void libmv_tracksDestroy(libmv_Tracks * /*libmv_tracks*/) {
+void libmv_tracksDestroy(libmv_Tracks* /*libmv_tracks*/) {
}
/* ************ Reconstruction solver ************ */
-libmv_Reconstruction *libmv_solveReconstruction(
- const libmv_Tracks * /*libmv_tracks*/,
- const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
- libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
+libmv_Reconstruction* libmv_solveReconstruction(
+ const libmv_Tracks* /*libmv_tracks*/,
+ const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
+ libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
- void * /*callback_customdata*/) {
+ void* /*callback_customdata*/) {
return NULL;
}
-libmv_Reconstruction *libmv_solveModal(
- const libmv_Tracks * /*libmv_tracks*/,
- const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
- const libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
+libmv_Reconstruction* libmv_solveModal(
+ const libmv_Tracks* /*libmv_tracks*/,
+ const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
+ const libmv_ReconstructionOptions* /*libmv_reconstruction_options*/,
reconstruct_progress_update_cb /*progress_update_callback*/,
- void * /*callback_customdata*/) {
+ void* /*callback_customdata*/) {
return NULL;
}
-int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
+int libmv_reconstructionIsValid(
+ libmv_Reconstruction* /*libmv_reconstruction*/) {
return 0;
}
int libmv_reprojectionPointForTrack(
- const libmv_Reconstruction * /*libmv_reconstruction*/,
+ const libmv_Reconstruction* /*libmv_reconstruction*/,
int /*track*/,
double /*pos*/[3]) {
return 0;
}
double libmv_reprojectionErrorForTrack(
- const libmv_Reconstruction * /*libmv_reconstruction*/,
- int /*track*/) {
+ const libmv_Reconstruction* /*libmv_reconstruction*/, int /*track*/) {
return 0.0;
}
double libmv_reprojectionErrorForImage(
- const libmv_Reconstruction * /*libmv_reconstruction*/,
- int /*image*/) {
+ const libmv_Reconstruction* /*libmv_reconstruction*/, int /*image*/) {
return 0.0;
}
int libmv_reprojectionCameraForImage(
- const libmv_Reconstruction * /*libmv_reconstruction*/,
+ const libmv_Reconstruction* /*libmv_reconstruction*/,
int /*image*/,
double /*mat*/[4][4]) {
return 0;
}
double libmv_reprojectionError(
- const libmv_Reconstruction * /*libmv_reconstruction*/) {
+ const libmv_Reconstruction* /*libmv_reconstruction*/) {
return 0.0;
}
void libmv_reconstructionDestroy(
- struct libmv_Reconstruction * /*libmv_reconstruction*/) {
+ struct libmv_Reconstruction* /*libmv_reconstruction*/) {
}
/* ************ Feature detector ************ */
-libmv_Features *libmv_detectFeaturesByte(const unsigned char * /*image_buffer*/,
+libmv_Features* libmv_detectFeaturesByte(const unsigned char* /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
- libmv_DetectOptions * /*options*/) {
+ libmv_DetectOptions* /*options*/) {
return NULL;
}
-struct libmv_Features *libmv_detectFeaturesFloat(
- const float * /*image_buffer*/,
+struct libmv_Features* libmv_detectFeaturesFloat(
+ const float* /*image_buffer*/,
int /*width*/,
int /*height*/,
int /*channels*/,
- libmv_DetectOptions * /*options*/) {
+ libmv_DetectOptions* /*options*/) {
return NULL;
}
-int libmv_countFeatures(const libmv_Features * /*libmv_features*/) {
+int libmv_countFeatures(const libmv_Features* /*libmv_features*/) {
return 0;
}
-void libmv_getFeature(const libmv_Features * /*libmv_features*/,
+void libmv_getFeature(const libmv_Features* /*libmv_features*/,
int /*number*/,
- double *x,
- double *y,
- double *score,
- double *size) {
+ double* x,
+ double* y,
+ double* score,
+ double* size) {
*x = 0.0;
*y = 0.0;
*score = 0.0;
*size = 0.0;
}
-void libmv_featuresDestroy(struct libmv_Features * /*libmv_features*/) {
+void libmv_featuresDestroy(struct libmv_Features* /*libmv_features*/) {
}
/* ************ Camera intrinsics ************ */
-libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
- libmv_Reconstruction * /*libmv_reconstruction*/) {
+libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction* /*libmv_reconstruction*/) {
return NULL;
}
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
- const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/) {
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsNew(
+ const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/) {
return NULL;
}
-libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
- const libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
+libmv_CameraIntrinsics* libmv_cameraIntrinsicsCopy(
+ const libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
return NULL;
}
void libmv_cameraIntrinsicsDestroy(
- libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
+ libmv_CameraIntrinsics* /*libmvIntrinsics*/) {
}
void libmv_cameraIntrinsicsUpdate(
- const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
- libmv_CameraIntrinsics * /*libmv_intrinsics*/) {
+ const libmv_CameraIntrinsicsOptions* /*libmv_camera_intrinsics_options*/,
+ libmv_CameraIntrinsics* /*libmv_intrinsics*/) {
}
void libmv_cameraIntrinsicsSetThreads(
- libmv_CameraIntrinsics * /*libmv_intrinsics*/,
- int /*threads*/) {
+ libmv_CameraIntrinsics* /*libmv_intrinsics*/, int /*threads*/) {
}
void libmv_cameraIntrinsicsExtractOptions(
- const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
- libmv_CameraIntrinsicsOptions *camera_intrinsics_options) {
+ const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
+ libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
camera_intrinsics_options->focal_length = 1.0;
}
void libmv_cameraIntrinsicsUndistortByte(
- const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
- const unsigned char *source_image,
- int width, int height,
+ const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
+ const unsigned char* source_image,
+ int width,
+ int height,
float /*overscan*/,
int channels,
- unsigned char *destination_image) {
- memcpy(destination_image, source_image,
+ unsigned char* destination_image) {
+ memcpy(destination_image,
+ source_image,
channels * width * height * sizeof(unsigned char));
}
@@ -265,19 +265,21 @@ void libmv_cameraIntrinsicsUndistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
- memcpy(destination_image, source_image,
+ memcpy(destination_image,
+ source_image,
channels * width * height * sizeof(float));
}
void libmv_cameraIntrinsicsDistortByte(
const struct libmv_CameraIntrinsics* /*libmv_intrinsics*/,
- const unsigned char *source_image,
+ const unsigned char* source_image,
int width,
int height,
float /*overscan*/,
int channels,
- unsigned char *destination_image) {
- memcpy(destination_image, source_image,
+ unsigned char* destination_image) {
+ memcpy(destination_image,
+ source_image,
channels * width * height * sizeof(unsigned char));
}
@@ -289,7 +291,8 @@ void libmv_cameraIntrinsicsDistortFloat(
float /*overscan*/,
int channels,
float* destination_image) {
- memcpy(destination_image, source_image,
+ memcpy(destination_image,
+ source_image,
channels * width * height * sizeof(float));
}
@@ -315,8 +318,8 @@ void libmv_cameraIntrinsicsInvert(
*y1 = 0.0;
}
-void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
- /* const */ double (* /*x2*/)[2],
+void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*/*x1*/)[2],
+ /* const */ double (*/*x2*/)[2],
int /*num_points*/,
double H[3][3]) {
memset(H, 0, sizeof(double[3][3]));
@@ -327,45 +330,38 @@ void libmv_homography2DFromCorrespondencesEuc(/* const */ double (* /*x1*/)[2],
/* ************ autotrack ************ */
-libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
-{
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/) {
return NULL;
}
-void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
-{
+void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/) {
}
void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
- const libmv_AutoTrackOptions* /*options*/)
-{
+ const libmv_AutoTrackOptions* /*options*/) {
}
int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_TrackRegionOptions* /*libmv_options*/,
- libmv_Marker * /*libmv_tracker_marker*/,
- libmv_TrackRegionResult* /*libmv_result*/)
-{
+ libmv_Marker* /*libmv_tracker_marker*/,
+ libmv_TrackRegionResult* /*libmv_result*/) {
return 0;
}
void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
- const libmv_Marker* /*libmv_marker*/)
-{
+ const libmv_Marker* /*libmv_marker*/) {
}
void libmv_autoTrackSetMarkers(libmv_AutoTrack* /*libmv_autotrack*/,
const libmv_Marker* /*libmv_marker-*/,
- size_t /*num_markers*/)
-{
+ size_t /*num_markers*/) {
}
int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
int /*clip*/,
int /*frame*/,
int /*track*/,
- libmv_Marker* /*libmv_marker*/)
-{
+ libmv_Marker* /*libmv_marker*/) {
return 0;
}
@@ -376,24 +372,20 @@ libmv_FrameAccessor* libmv_FrameAccessorNew(
libmv_GetImageCallback /*get_image_callback*/,
libmv_ReleaseImageCallback /*release_image_callback*/,
libmv_GetMaskForTrackCallback /*get_mask_for_track_callback*/,
- libmv_ReleaseMaskCallback /*release_mask_callback*/)
-{
+ libmv_ReleaseMaskCallback /*release_mask_callback*/) {
return NULL;
}
-void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
-{
+void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/) {
}
int64_t libmv_frameAccessorgetTransformKey(
- const libmv_FrameTransform * /*transform*/)
-{
+ const libmv_FrameTransform* /*transform*/) {
return 0;
}
-void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
- const libmv_FloatImage* /*input_image*/,
- libmv_FloatImage* /*output_image*/)
-{
+void libmv_frameAccessorgetTransformRun(
+ const libmv_FrameTransform* /*transform*/,
+ const libmv_FloatImage* /*input_image*/,
+ libmv_FloatImage* /*output_image*/) {
}
-
diff --git a/intern/libmv/intern/track_region.cc b/intern/libmv/intern/track_region.cc
index 2a3909c0ced..af88afd7ac2 100644
--- a/intern/libmv/intern/track_region.cc
+++ b/intern/libmv/intern/track_region.cc
@@ -32,17 +32,17 @@
#undef DUMP_ALWAYS
using libmv::FloatImage;
+using libmv::TrackRegion;
using libmv::TrackRegionOptions;
using libmv::TrackRegionResult;
-using libmv::TrackRegion;
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
TrackRegionOptions* track_region_options) {
switch (options.motion_model) {
-#define LIBMV_CONVERT(the_model) \
- case TrackRegionOptions::the_model: \
- track_region_options->mode = TrackRegionOptions::the_model; \
+#define LIBMV_CONVERT(the_model) \
+ case TrackRegionOptions::the_model: \
+ track_region_options->mode = TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
@@ -66,7 +66,8 @@ void libmv_configureTrackRegionOptions(
   * so disabling for now until a proper prediction model is landed.
*
* The thing is, currently blender sends input coordinates as the guess to
- * region tracker and in case of fast motion such an early out ruins the track.
+ * region tracker and in case of fast motion such an early out ruins the
+ * track.
*/
track_region_options->attempt_refine_before_brute = false;
track_region_options->use_normalized_intensities = options.use_normalization;
@@ -74,7 +75,7 @@ void libmv_configureTrackRegionOptions(
void libmv_regionTrackergetResult(const TrackRegionResult& track_region_result,
libmv_TrackRegionResult* result) {
- result->termination = (int) track_region_result.termination;
+ result->termination = (int)track_region_result.termination;
result->termination_reason = "";
result->correlation = track_region_result.correlation;
}
@@ -108,33 +109,27 @@ int libmv_trackRegion(const libmv_TrackRegionOptions* options,
libmv_configureTrackRegionOptions(*options, &track_region_options);
if (options->image1_mask) {
- libmv_floatBufferToFloatImage(options->image1_mask,
- image1_width,
- image1_height,
- 1,
- &image1_mask);
+ libmv_floatBufferToFloatImage(
+ options->image1_mask, image1_width, image1_height, 1, &image1_mask);
track_region_options.image1_mask = &image1_mask;
}
// Convert from raw float buffers to libmv's FloatImage.
FloatImage old_patch, new_patch;
- libmv_floatBufferToFloatImage(image1,
- image1_width,
- image1_height,
- 1,
- &old_patch);
- libmv_floatBufferToFloatImage(image2,
- image2_width,
- image2_height,
- 1,
- &new_patch);
+ libmv_floatBufferToFloatImage(
+ image1, image1_width, image1_height, 1, &old_patch);
+ libmv_floatBufferToFloatImage(
+ image2, image2_width, image2_height, 1, &new_patch);
TrackRegionResult track_region_result;
- TrackRegion(old_patch, new_patch,
- xx1, yy1,
+ TrackRegion(old_patch,
+ new_patch,
+ xx1,
+ yy1,
track_region_options,
- xx2, yy2,
+ xx2,
+ yy2,
&track_region_result);
// Convert to floats for the blender api.
diff --git a/intern/libmv/intern/track_region.h b/intern/libmv/intern/track_region.h
index 48ae97a1c1a..4566fe9bf0f 100644
--- a/intern/libmv/intern/track_region.h
+++ b/intern/libmv/intern/track_region.h
@@ -31,7 +31,7 @@ typedef struct libmv_TrackRegionOptions {
int use_normalization;
double minimum_correlation;
double sigma;
- float *image1_mask;
+ float* image1_mask;
} libmv_TrackRegionOptions;
typedef struct libmv_TrackRegionResult {
@@ -42,9 +42,9 @@ typedef struct libmv_TrackRegionResult {
#ifdef __cplusplus
namespace libmv {
- struct TrackRegionOptions;
- struct TrackRegionResult;
-}
+struct TrackRegionOptions;
+struct TrackRegionResult;
+} // namespace libmv
void libmv_configureTrackRegionOptions(
const libmv_TrackRegionOptions& options,
libmv::TrackRegionOptions* track_region_options);
diff --git a/intern/libmv/intern/tracks.cc b/intern/libmv/intern/tracks.cc
index 0ca5a31796b..146908d6db5 100644
--- a/intern/libmv/intern/tracks.cc
+++ b/intern/libmv/intern/tracks.cc
@@ -28,18 +28,18 @@ using libmv::Tracks;
libmv_Tracks* libmv_tracksNew(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
- return (libmv_Tracks*) tracks;
+ return (libmv_Tracks*)tracks;
}
void libmv_tracksDestroy(libmv_Tracks* libmv_tracks) {
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
-void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
+void libmv_tracksInsert(libmv_Tracks* libmv_tracks,
int image,
int track,
double x,
double y,
double weight) {
- ((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
+ ((Tracks*)libmv_tracks)->Insert(image, track, x, y, weight);
}
diff --git a/intern/libmv/intern/tracksN.cc b/intern/libmv/intern/tracksN.cc
index 1441d8a2066..c7ffb13a386 100644
--- a/intern/libmv/intern/tracksN.cc
+++ b/intern/libmv/intern/tracksN.cc
@@ -25,8 +25,7 @@
using mv::Marker;
using mv::Tracks;
-void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
- Marker *marker) {
+void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker, Marker* marker) {
marker->clip = libmv_marker.clip;
marker->frame = libmv_marker.frame;
marker->track = libmv_marker.track;
@@ -41,17 +40,16 @@ void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
marker->search_region.max(0) = libmv_marker.search_region_max[0];
marker->search_region.max(1) = libmv_marker.search_region_max[1];
marker->weight = libmv_marker.weight;
- marker->source = (Marker::Source) libmv_marker.source;
- marker->status = (Marker::Status) libmv_marker.status;
+ marker->source = (Marker::Source)libmv_marker.source;
+ marker->status = (Marker::Status)libmv_marker.status;
marker->reference_clip = libmv_marker.reference_clip;
marker->reference_frame = libmv_marker.reference_frame;
- marker->model_type = (Marker::ModelType) libmv_marker.model_type;
+ marker->model_type = (Marker::ModelType)libmv_marker.model_type;
marker->model_id = libmv_marker.model_id;
marker->disabled_channels = libmv_marker.disabled_channels;
}
-void libmv_markerToApiMarker(const Marker& marker,
- libmv_Marker *libmv_marker) {
+void libmv_markerToApiMarker(const Marker& marker, libmv_Marker* libmv_marker) {
libmv_marker->clip = marker.clip;
libmv_marker->frame = marker.frame;
libmv_marker->track = marker.track;
@@ -66,11 +64,11 @@ void libmv_markerToApiMarker(const Marker& marker,
libmv_marker->search_region_max[0] = marker.search_region.max(0);
libmv_marker->search_region_max[1] = marker.search_region.max(1);
libmv_marker->weight = marker.weight;
- libmv_marker->source = (libmv_MarkerSource) marker.source;
- libmv_marker->status = (libmv_MarkerStatus) marker.status;
+ libmv_marker->source = (libmv_MarkerSource)marker.source;
+ libmv_marker->status = (libmv_MarkerStatus)marker.status;
libmv_marker->reference_clip = marker.reference_clip;
libmv_marker->reference_frame = marker.reference_frame;
- libmv_marker->model_type = (libmv_MarkerModelType) marker.model_type;
+ libmv_marker->model_type = (libmv_MarkerModelType)marker.model_type;
libmv_marker->model_id = marker.model_id;
libmv_marker->disabled_channels = marker.disabled_channels;
}
@@ -78,7 +76,7 @@ void libmv_markerToApiMarker(const Marker& marker,
libmv_TracksN* libmv_tracksNewN(void) {
Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
- return (libmv_TracksN*) tracks;
+ return (libmv_TracksN*)tracks;
}
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks) {
@@ -89,7 +87,7 @@ void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker) {
Marker marker;
libmv_apiMarkerToMarker(*libmv_marker, &marker);
- ((Tracks*) libmv_tracks)->AddMarker(marker);
+ ((Tracks*)libmv_tracks)->AddMarker(marker);
}
void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
@@ -98,7 +96,7 @@ void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
int track,
libmv_Marker* libmv_marker) {
Marker marker;
- ((Tracks*) libmv_tracks)->GetMarker(clip, frame, track, &marker);
+ ((Tracks*)libmv_tracks)->GetMarker(clip, frame, track, &marker);
libmv_markerToApiMarker(marker, libmv_marker);
}
@@ -106,26 +104,25 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int clip,
int frame,
int track) {
- ((Tracks *) libmv_tracks)->RemoveMarker(clip, frame, track);
+ ((Tracks*)libmv_tracks)->RemoveMarker(clip, frame, track);
}
-void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
- int track) {
- ((Tracks *) libmv_tracks)->RemoveMarkersForTrack(track);
+void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track) {
+ ((Tracks*)libmv_tracks)->RemoveMarkersForTrack(track);
}
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks) {
- return ((Tracks*) libmv_tracks)->MaxClip();
+ return ((Tracks*)libmv_tracks)->MaxClip();
}
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip) {
- return ((Tracks*) libmv_tracks)->MaxFrame(clip);
+ return ((Tracks*)libmv_tracks)->MaxFrame(clip);
}
int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks) {
- return ((Tracks*) libmv_tracks)->MaxTrack();
+ return ((Tracks*)libmv_tracks)->MaxTrack();
}
int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks) {
- return ((Tracks*) libmv_tracks)->NumMarkers();
+ return ((Tracks*)libmv_tracks)->NumMarkers();
}
diff --git a/intern/libmv/intern/tracksN.h b/intern/libmv/intern/tracksN.h
index 9363d34bed7..b5d1f9753e0 100644
--- a/intern/libmv/intern/tracksN.h
+++ b/intern/libmv/intern/tracksN.h
@@ -79,20 +79,19 @@ typedef struct libmv_Marker {
#ifdef __cplusplus
namespace mv {
- struct Marker;
+struct Marker;
}
void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
- mv::Marker *marker);
+ mv::Marker* marker);
void libmv_markerToApiMarker(const mv::Marker& marker,
- libmv_Marker *libmv_marker);
+ libmv_Marker* libmv_marker);
#endif
libmv_TracksN* libmv_tracksNewN(void);
void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
-
void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
const libmv_Marker* libmv_marker);
@@ -107,8 +106,7 @@ void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
int frame,
int track);
-void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
- int track);
+void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks, int track);
int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);
diff --git a/intern/libmv/intern/utildefines.h b/intern/libmv/intern/utildefines.h
index d76d32f9c4d..052052a1d76 100644
--- a/intern/libmv/intern/utildefines.h
+++ b/intern/libmv/intern/utildefines.h
@@ -30,27 +30,33 @@
# define LIBMV_OBJECT_NEW OBJECT_GUARDED_NEW
# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
#  define LIBMV_OBJECT_DELETE_ARRAY OBJECT_GUARDED_DELETE_ARRAY
-# define LIBMV_STRUCT_NEW(type, count) \
- (type*)MEM_mallocN(sizeof(type) * count, __func__)
+# define LIBMV_STRUCT_NEW(type, count) \
+ (type*)MEM_mallocN(sizeof(type) * count, __func__)
# define LIBMV_STRUCT_DELETE(what) MEM_freeN(what)
#else
// Need this to keep libmv-capi potentially standalone.
# if defined __GNUC__ || defined __sun
-# define LIBMV_OBJECT_NEW(type, args ...) \
- new(malloc(sizeof(type))) type(args)
+# define LIBMV_OBJECT_NEW(type, args...) \
+ new (malloc(sizeof(type))) type(args)
# else
-# define LIBMV_OBJECT_NEW(type, ...) \
- new(malloc(sizeof(type))) type(__VA_ARGS__)
-#endif
-# define LIBMV_OBJECT_DELETE(what, type) \
- { \
- if (what) { \
- ((type*)(what))->~type(); \
- free(what); \
- } \
- } (void)0
+# define LIBMV_OBJECT_NEW(type, ...) \
+ new (malloc(sizeof(type))) type(__VA_ARGS__)
+# endif
+# define LIBMV_OBJECT_DELETE(what, type) \
+ { \
+ if (what) { \
+ ((type*)(what))->~type(); \
+ free(what); \
+ } \
+ } \
+ (void)0
# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
-# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
+# define LIBMV_STRUCT_DELETE(what) \
+ { \
+ if (what) \
+ free(what); \
+ } \
+ (void)0
#endif
#endif // LIBMV_C_API_UTILDEFINES_H_
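Side note on the standalone branch reformatted above: LIBMV_OBJECT_NEW pairs placement new with malloc, and LIBMV_OBJECT_DELETE pairs an explicit destructor call with free, which is roughly the following pattern. Foo is a hypothetical type used only for illustration; this is not code from the patch.

    #include <cstdlib>
    #include <new>

    struct Foo {
      explicit Foo(int value) : value(value) {}
      int value;
    };

    int main() {
      // Roughly what LIBMV_OBJECT_NEW(Foo, 42) expands to in the standalone
      // branch: allocate raw storage, then construct the object in place.
      Foo* foo = new (std::malloc(sizeof(Foo))) Foo(42);

      // Roughly what LIBMV_OBJECT_DELETE(foo, Foo) expands to: run the
      // destructor explicitly, then release the raw storage.
      if (foo) {
        foo->~Foo();
        std::free(foo);
      }
      return 0;
    }
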
diff --git a/intern/libmv/libmv/autotrack/autotrack.cc b/intern/libmv/libmv/autotrack/autotrack.cc
index 3b0a762178a..c18567a5f28 100644
--- a/intern/libmv/libmv/autotrack/autotrack.cc
+++ b/intern/libmv/libmv/autotrack/autotrack.cc
@@ -21,9 +21,9 @@
// Author: mierle@gmail.com (Keir Mierle)
#include "libmv/autotrack/autotrack.h"
-#include "libmv/autotrack/quad.h"
#include "libmv/autotrack/frame_accessor.h"
#include "libmv/autotrack/predict_tracks.h"
+#include "libmv/autotrack/quad.h"
#include "libmv/base/scoped_ptr.h"
#include "libmv/logging/logging.h"
#include "libmv/numeric/numeric.h"
@@ -35,34 +35,30 @@ namespace {
class DisableChannelsTransform : public FrameAccessor::Transform {
public:
DisableChannelsTransform(int disabled_channels)
- : disabled_channels_(disabled_channels) { }
+ : disabled_channels_(disabled_channels) {}
- int64_t key() const {
- return disabled_channels_;
- }
+ int64_t key() const { return disabled_channels_; }
void run(const FloatImage& input, FloatImage* output) const {
- bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
+ bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
- disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
+ disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
- LG << "Disabling channels: "
- << (disable_red ? "R " : "")
- << (disable_green ? "G " : "")
- << (disable_blue ? "B" : "");
+ LG << "Disabling channels: " << (disable_red ? "R " : "")
+ << (disable_green ? "G " : "") << (disable_blue ? "B" : "");
    // It's important to rescale the result appropriately so that e.g. if only
// blue is selected, it's not zeroed out.
- float scale = (disable_red ? 0.0f : 0.2126f) +
+ float scale = (disable_red ? 0.0f : 0.2126f) +
(disable_green ? 0.0f : 0.7152f) +
- (disable_blue ? 0.0f : 0.0722f);
+ (disable_blue ? 0.0f : 0.0722f);
output->Resize(input.Height(), input.Width(), 1);
for (int y = 0; y < input.Height(); y++) {
for (int x = 0; x < input.Width(); x++) {
- float r = disable_red ? 0.0f : input(y, x, 0);
+ float r = disable_red ? 0.0f : input(y, x, 0);
float g = disable_green ? 0.0f : input(y, x, 1);
- float b = disable_blue ? 0.0f : input(y, x, 2);
+ float b = disable_blue ? 0.0f : input(y, x, 2);
(*output)(y, x, 0) = (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
}
}
@@ -73,7 +69,7 @@ class DisableChannelsTransform : public FrameAccessor::Transform {
int disabled_channels_;
};
-template<typename QuadT, typename ArrayT>
+template <typename QuadT, typename ArrayT>
void QuadToArrays(const QuadT& quad, ArrayT* x, ArrayT* y) {
for (int i = 0; i < 4; ++i) {
x[i] = quad.coordinates(i, 0);
@@ -115,11 +111,8 @@ FrameAccessor::Key GetMaskForMarker(const Marker& marker,
FrameAccessor* frame_accessor,
FloatImage* mask) {
Region region = marker.search_region.Rounded();
- return frame_accessor->GetMaskForTrack(marker.clip,
- marker.frame,
- marker.track,
- &region,
- mask);
+ return frame_accessor->GetMaskForTrack(
+ marker.clip, marker.frame, marker.track, &region, mask);
}
} // namespace
@@ -152,23 +145,20 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
// TODO(keir): Technically this could take a smaller slice from the source
// image instead of taking one the size of the search window.
FloatImage reference_image;
- FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
- frame_accessor_,
- &reference_image);
+ FrameAccessor::Key reference_key =
+ GetImageForMarker(reference_marker, frame_accessor_, &reference_image);
if (!reference_key) {
LG << "Couldn't get frame for reference marker: " << reference_marker;
return false;
}
FloatImage reference_mask;
- FrameAccessor::Key reference_mask_key = GetMaskForMarker(reference_marker,
- frame_accessor_,
- &reference_mask);
+ FrameAccessor::Key reference_mask_key =
+ GetMaskForMarker(reference_marker, frame_accessor_, &reference_mask);
FloatImage tracked_image;
- FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
- frame_accessor_,
- &tracked_image);
+ FrameAccessor::Key tracked_key =
+ GetImageForMarker(*tracked_marker, frame_accessor_, &tracked_image);
if (!tracked_key) {
frame_accessor_->ReleaseImage(reference_key);
LG << "Couldn't get frame for tracked marker: " << tracked_marker;
@@ -191,9 +181,11 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
local_track_region_options.attempt_refine_before_brute = predicted_position;
TrackRegion(reference_image,
tracked_image,
- x1, y1,
+ x1,
+ y1,
local_track_region_options,
- x2, y2,
+ x2,
+ y2,
result);
// Copy results over the tracked marker.
@@ -208,7 +200,7 @@ bool AutoTrack::TrackMarker(Marker* tracked_marker,
tracked_marker->search_region.Offset(delta);
tracked_marker->source = Marker::TRACKED;
tracked_marker->status = Marker::UNKNOWN;
- tracked_marker->reference_clip = reference_marker.clip;
+ tracked_marker->reference_clip = reference_marker.clip;
tracked_marker->reference_frame = reference_marker.frame;
// Release the images and masks from the accessor cache.
@@ -230,7 +222,9 @@ void AutoTrack::SetMarkers(vector<Marker>* markers) {
tracks_.SetMarkers(markers);
}
-bool AutoTrack::GetMarker(int clip, int frame, int track,
+bool AutoTrack::GetMarker(int clip,
+ int frame,
+ int track,
Marker* markers) const {
return tracks_.GetMarker(clip, frame, track, markers);
}
@@ -242,7 +236,8 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
vector<Marker> previous_frame_markers;
// Q: How to decide track #s when detecting?
// Q: How to match markers from previous frame? set of prev frame tracks?
- // Q: How to decide what markers should get tracked and which ones should not?
+ // Q: How to decide what markers should get tracked and which ones should
+ // not?
for (int frame = 0; frame < num_frames; ++frame) {
if (Cancelled()) {
LG << "Got cancel message while detecting and tracking...";
@@ -271,8 +266,7 @@ void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
for (int i = 0; i < this_frame_markers.size(); ++i) {
tracks_in_this_frame.push_back(this_frame_markers[i].track);
}
- std::sort(tracks_in_this_frame.begin(),
- tracks_in_this_frame.end());
+ std::sort(tracks_in_this_frame.begin(), tracks_in_this_frame.end());
// Find tracks in the previous frame that are not in this one.
vector<Marker*> previous_frame_markers_to_track;
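For reference, DisableChannelsTransform::run() reformatted above performs a weighted grayscale conversion whose Rec. 709 luma weights are renormalized over the channels that remain enabled, so tracking on a single channel is not scaled towards zero; its key() returns the disabled-channel bitmask, which lets the frame accessor cache one filtered image per combination. A per-pixel sketch of the same computation, in standalone form rather than code from this patch:

    // Zero out the disabled channels, then renormalize the Rec. 709 weights
    // over the channels that remain so the result keeps a sensible range.
    float GrayscaleWithDisabledChannels(float r, float g, float b,
                                        bool disable_red,
                                        bool disable_green,
                                        bool disable_blue) {
      const float scale = (disable_red ? 0.0f : 0.2126f) +
                          (disable_green ? 0.0f : 0.7152f) +
                          (disable_blue ? 0.0f : 0.0722f);
      if (disable_red) r = 0.0f;
      if (disable_green) g = 0.0f;
      if (disable_blue) b = 0.0f;
      return (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
    }
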
diff --git a/intern/libmv/libmv/autotrack/autotrack.h b/intern/libmv/libmv/autotrack/autotrack.h
index 1d7422f54e7..281766f600f 100644
--- a/intern/libmv/libmv/autotrack/autotrack.h
+++ b/intern/libmv/libmv/autotrack/autotrack.h
@@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
#define LIBMV_AUTOTRACK_AUTOTRACK_H_
-#include "libmv/autotrack/tracks.h"
#include "libmv/autotrack/region.h"
+#include "libmv/autotrack/tracks.h"
#include "libmv/tracking/track_region.h"
namespace libmv {
@@ -74,15 +74,14 @@ class AutoTrack {
Region search_region;
};
- AutoTrack(FrameAccessor* frame_accessor)
- : frame_accessor_(frame_accessor) {}
+ AutoTrack(FrameAccessor* frame_accessor) : frame_accessor_(frame_accessor) {}
// Marker manipulation.
// Clip manipulation.
// Set the number of clips. These clips will get accessed from the frame
// accessor, matches between frames found, and a reconstruction created.
- //void SetNumFrames(int clip, int num_frames);
+ // void SetNumFrames(int clip, int num_frames);
// Tracking & Matching
@@ -90,7 +89,7 @@ class AutoTrack {
// Caller maintains ownership of *result and *tracked_marker.
bool TrackMarker(Marker* tracked_marker,
TrackRegionResult* result,
- const TrackRegionOptions* track_options=NULL);
+ const TrackRegionOptions* track_options = NULL);
// Wrapper around Tracks API; however these may add additional processing.
void AddMarker(const Marker& tracked_marker);
@@ -99,36 +98,36 @@ class AutoTrack {
// TODO(keir): Implement frame matching! This could be very cool for loop
// closing and connecting across clips.
- //void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
+ // void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
// Wrapper around the Reconstruction API.
// Returns the new ID.
int AddCameraIntrinsics(CameraIntrinsics* intrinsics) {
- (void) intrinsics;
+ (void)intrinsics;
return 0;
} // XXX
int SetClipIntrinsics(int clip, int intrinsics) {
- (void) clip;
- (void) intrinsics;
+ (void)clip;
+ (void)intrinsics;
return 0;
- } // XXX
+ } // XXX
enum Motion {
GENERAL_CAMERA_MOTION,
TRIPOD_CAMERA_MOTION,
};
int SetClipMotion(int clip, Motion motion) {
- (void) clip;
- (void) motion;
+ (void)clip;
+ (void)motion;
return 0;
- } // XXX
+ } // XXX
// Decide what to refine for the given intrinsics. bundle_options is from
// bundle.h (e.g. BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1).
void SetIntrinsicsRefine(int intrinsics, int bundle_options) {
- (void) intrinsics;
- (void) bundle_options;
- } // XXX
+ (void)intrinsics;
+ (void)bundle_options;
+ } // XXX
// Keyframe read/write.
struct ClipFrame {
@@ -150,20 +149,19 @@ class AutoTrack {
};
void DetectAndTrack(const DetectAndTrackOptions& options);
- struct DetectFeaturesInFrameOptions {
- };
- void DetectFeaturesInFrame(int clip, int frame,
- const DetectFeaturesInFrameOptions* options=NULL) {
- (void) clip;
- (void) frame;
- (void) options;
- } // XXX
+ struct DetectFeaturesInFrameOptions {};
+ void DetectFeaturesInFrame(
+ int clip, int frame, const DetectFeaturesInFrameOptions* options = NULL) {
+ (void)clip;
+ (void)frame;
+ (void)options;
+ } // XXX
// Does not take ownership of the given listener, but keeps a reference to it.
- void AddListener(OperationListener* listener) {(void) listener;} // XXX
+ void AddListener(OperationListener* listener) { (void)listener; } // XXX
// Create the initial reconstruction,
- //void FindInitialReconstruction();
+ // void FindInitialReconstruction();
// State machine
//
@@ -202,17 +200,17 @@ class AutoTrack {
bool Cancelled() { return false; }
Tracks tracks_; // May be normalized camera coordinates or raw pixels.
- //Reconstruction reconstruction_;
+ // Reconstruction reconstruction_;
// TODO(keir): Add the motion models here.
- //vector<MotionModel> motion_models_;
+ // vector<MotionModel> motion_models_;
// TODO(keir): Should num_clips and num_frames get moved to FrameAccessor?
// TODO(keir): What about masking for clips and frames to prevent various
// things like reconstruction or tracking from happening on certain frames?
FrameAccessor* frame_accessor_;
- //int num_clips_;
- //vector<int> num_frames_; // Indexed by clip.
+ // int num_clips_;
+ // vector<int> num_frames_; // Indexed by clip.
// The intrinsics for each clip, assuming each clip has fixed intrinsics.
// TODO(keir): Decide what the semantics should be for varying focal length.
diff --git a/intern/libmv/libmv/autotrack/frame_accessor.h b/intern/libmv/libmv/autotrack/frame_accessor.h
index cfad9ca71ff..e9308dc400a 100644
--- a/intern/libmv/libmv/autotrack/frame_accessor.h
+++ b/intern/libmv/libmv/autotrack/frame_accessor.h
@@ -41,7 +41,7 @@ using libmv::FloatImage;
// implementations to cache filtered image pieces).
struct FrameAccessor {
struct Transform {
- virtual ~Transform() { }
+ virtual ~Transform() {}
// The key should depend on the transform arguments. Must be non-zero.
virtual int64_t key() const = 0;
@@ -50,10 +50,7 @@ struct FrameAccessor {
virtual void run(const FloatImage& input, FloatImage* output) const = 0;
};
- enum InputMode {
- MONO,
- RGBA
- };
+ enum InputMode { MONO, RGBA };
typedef void* Key;
@@ -100,6 +97,6 @@ struct FrameAccessor {
virtual int NumFrames(int clip) = 0;
};
-} // namespace libmv
+} // namespace mv
#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
diff --git a/intern/libmv/libmv/autotrack/marker.h b/intern/libmv/libmv/autotrack/marker.h
index bb803313af8..29e163c0446 100644
--- a/intern/libmv/libmv/autotrack/marker.h
+++ b/intern/libmv/libmv/autotrack/marker.h
@@ -57,23 +57,19 @@ struct Marker {
float weight;
enum Source {
- MANUAL, // The user placed this marker manually.
- DETECTED, // A keypoint detector found this point.
- TRACKED, // The tracking algorithm placed this marker.
- MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
- PREDICTED, // A motion model predicted this marker. This is needed for
- // handling occlusions in some cases where an imaginary marker
- // is placed to keep camera motion smooth.
+ MANUAL, // The user placed this marker manually.
+ DETECTED, // A keypoint detector found this point.
+ TRACKED, // The tracking algorithm placed this marker.
+ MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
+ PREDICTED, // A motion model predicted this marker. This is needed for
+ // handling occlusions in some cases where an imaginary marker
+ // is placed to keep camera motion smooth.
};
Source source;
// Markers may be inliers or outliers if the tracking fails; this allows
// visualizing the markers in the image.
- enum Status {
- UNKNOWN,
- INLIER,
- OUTLIER
- };
+ enum Status { UNKNOWN, INLIER, OUTLIER };
Status status;
// When doing correlation tracking, where to search in the current frame for
@@ -90,12 +86,7 @@ struct Marker {
  // another primitive (a rectangular prism). This captures the information
// needed to say that for example a collection of markers belongs to model #2
// (and model #2 is a plane).
- enum ModelType {
- POINT,
- PLANE,
- LINE,
- CUBE
- };
+ enum ModelType { POINT, PLANE, LINE, CUBE };
ModelType model_type;
  // The model ID this track belongs to (e.g. the second model, which is a plane).
@@ -114,7 +105,7 @@ struct Marker {
int disabled_channels;
// Offset everything (center, patch, search) by the given delta.
- template<typename T>
+ template <typename T>
void Offset(const T& offset) {
center += offset.template cast<float>();
patch.coordinates.rowwise() += offset.template cast<int>();
@@ -122,19 +113,15 @@ struct Marker {
}
// Shift the center to the given new position (and patch, search).
- template<typename T>
+ template <typename T>
void SetPosition(const T& new_center) {
Offset(new_center - center);
}
};
inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
- out << "{"
- << marker.clip << ", "
- << marker.frame << ", "
- << marker.track << ", ("
- << marker.center.x() << ", "
- << marker.center.y() << ")"
+ out << "{" << marker.clip << ", " << marker.frame << ", " << marker.track
+ << ", (" << marker.center.x() << ", " << marker.center.y() << ")"
<< "}";
return out;
}
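Offset() above leans on Eigen's cast<>() so one delta of any scalar type can move the float center and the integer patch corners together. A standalone sketch of the same idiom on a toy struct (ToyMarker is my own name, not the libmv Marker):

    #include <Eigen/Dense>
    #include <iostream>

    struct ToyMarker {
      Eigen::Vector2f center;
      Eigen::Matrix<int, 4, 2> patch;  // One corner per row, as in Quad::coordinates.

      template <typename T>
      void Offset(const T& offset) {
        center += offset.template cast<float>();
        patch.rowwise() += offset.template cast<int>().transpose();
      }
    };

    int main() {
      ToyMarker m;
      m.center << 10.5f, 20.5f;
      m.patch << 9, 19, 11, 19, 11, 21, 9, 21;
      m.Offset(Eigen::Vector2d(2.0, -3.0));  // A double delta moves both members.
      std::cout << m.center.transpose() << "\n" << m.patch << "\n";
      return 0;
    }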
diff --git a/intern/libmv/libmv/autotrack/model.h b/intern/libmv/libmv/autotrack/model.h
index 1165281cdac..e79d38b742b 100644
--- a/intern/libmv/libmv/autotrack/model.h
+++ b/intern/libmv/libmv/autotrack/model.h
@@ -23,18 +23,13 @@
#ifndef LIBMV_AUTOTRACK_MODEL_H_
#define LIBMV_AUTOTRACK_MODEL_H_
-#include "libmv/numeric/numeric.h"
#include "libmv/autotrack/quad.h"
+#include "libmv/numeric/numeric.h"
namespace mv {
struct Model {
- enum ModelType {
- POINT,
- PLANE,
- LINE,
- CUBE
- };
+ enum ModelType { POINT, PLANE, LINE, CUBE };
// ???
};
diff --git a/intern/libmv/libmv/autotrack/predict_tracks.cc b/intern/libmv/libmv/autotrack/predict_tracks.cc
index 3786c1b9a3b..d78b5b340f9 100644
--- a/intern/libmv/libmv/autotrack/predict_tracks.cc
+++ b/intern/libmv/libmv/autotrack/predict_tracks.cc
@@ -20,8 +20,8 @@
//
// Author: mierle@gmail.com (Keir Mierle)
-#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/predict_tracks.h"
+#include "libmv/autotrack/marker.h"
#include "libmv/autotrack/tracks.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@@ -31,8 +31,8 @@ namespace mv {
namespace {
-using libmv::vector;
using libmv::Vec2;
+using libmv::vector;
// Implied time delta between steps. Set empirically by tweaking and seeing
// what numbers did best at prediction.
@@ -57,6 +57,8 @@ const double dt = 3.8;
// For a typical system having constant velocity, this gives smooth-appearing
// predictions, but they are not always as accurate.
+//
+// clang-format off
const double velocity_state_transition_data[] = {
1, dt, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@@ -65,10 +67,13 @@ const double velocity_state_transition_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
+// clang-format on
#if 0
// This 3rd-order system also models acceleration. This makes for "jerky"
// predictions, but they tend to be more accurate.
+//
+// clang-format off
const double acceleration_state_transition_data[] = {
1, dt, dt*dt/2, 0, 0, 0,
0, 1, dt, 0, 0, 0,
@@ -77,9 +82,12 @@ const double acceleration_state_transition_data[] = {
0, 0, 0, 0, 1, dt,
0, 0, 0, 0, 0, 1
};
+// clang-format on
// This system attempts to add an angular velocity component. However, it's
// total junk.
+//
+// clang-format off
const double angular_state_transition_data[] = {
1, dt, -dt, 0, 0, 0, // Position x
0, 1, 0, 0, 0, 0, // Velocity x
@@ -88,17 +96,22 @@ const double angular_state_transition_data[] = {
0, 0, 0, 0, 1, 0, // Velocity y
0, 0, 0, 0, 0, 1 // Ignored
};
+// clang-format on
#endif
const double* state_transition_data = velocity_state_transition_data;
// Observation matrix.
+// clang-format off
const double observation_data[] = {
1., 0., 0., 0., 0., 0.,
0., 0., 0., 1., 0., 0.
};
+// clang-format on
// Process covariance.
+//
+// clang-format off
const double process_covariance_data[] = {
35, 0, 0, 0, 0, 0,
0, 5, 0, 0, 0, 0,
@@ -107,14 +120,19 @@ const double process_covariance_data[] = {
0, 0, 0, 0, 5, 0,
0, 0, 0, 0, 0, 5
};
+// clang-format on
// Measurement covariance.
const double measurement_covariance_data[] = {
- 0.01, 0.00,
- 0.00, 0.01,
+ 0.01,
+ 0.00,
+ 0.00,
+ 0.01,
};
// Initial covariance.
+//
+// clang-format off
const double initial_covariance_data[] = {
10, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 0,
@@ -123,6 +141,7 @@ const double initial_covariance_data[] = {
0, 0, 0, 0, 1, 0,
0, 0, 0, 0, 0, 1
};
+// clang-format on
typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
@@ -138,7 +157,7 @@ bool OrderByFrameLessThan(const Marker* a, const Marker* b) {
}
return a->clip < b->clip;
}
- return a->frame < b-> frame;
+ return a->frame < b->frame;
}
// Predicted must be after the previous markers (in the frame numbering sense).
@@ -146,9 +165,9 @@ void RunPrediction(const vector<Marker*> previous_markers,
Marker* predicted_marker) {
TrackerKalman::State state;
state.mean << previous_markers[0]->center.x(), 0, 0,
- previous_markers[0]->center.y(), 0, 0;
- state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
- initial_covariance_data);
+ previous_markers[0]->center.y(), 0, 0;
+ state.covariance =
+ Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(initial_covariance_data);
int current_frame = previous_markers[0]->frame;
int target_frame = predicted_marker->frame;
@@ -159,19 +178,18 @@ void RunPrediction(const vector<Marker*> previous_markers,
for (int i = 1; i < previous_markers.size(); ++i) {
// Step forward predicting the state until it is on the current marker.
int predictions = 0;
- for (;
- current_frame != previous_markers[i]->frame;
+ for (; current_frame != previous_markers[i]->frame;
current_frame += frame_delta) {
filter.Step(&state);
predictions++;
- LG << "Predicted point (frame " << current_frame << "): "
- << state.mean(0) << ", " << state.mean(3);
+ LG << "Predicted point (frame " << current_frame << "): " << state.mean(0)
+ << ", " << state.mean(3);
}
// Log the error -- not actually used, but interesting.
Vec2 error = previous_markers[i]->center.cast<double>() -
Vec2(state.mean(0), state.mean(3));
- LG << "Prediction error for " << predictions << " steps: ("
- << error.x() << ", " << error.y() << "); norm: " << error.norm();
+ LG << "Prediction error for " << predictions << " steps: (" << error.x()
+ << ", " << error.y() << "); norm: " << error.norm();
// Now that the state is predicted in the current frame, update the state
// based on the measurement from the current frame.
filter.Update(previous_markers[i]->center.cast<double>(),
@@ -184,8 +202,8 @@ void RunPrediction(const vector<Marker*> previous_markers,
// predict until the target frame.
for (; current_frame != target_frame; current_frame += frame_delta) {
filter.Step(&state);
- LG << "Final predicted point (frame " << current_frame << "): "
- << state.mean(0) << ", " << state.mean(3);
+ LG << "Final predicted point (frame " << current_frame
+ << "): " << state.mean(0) << ", " << state.mean(3);
}
// The x and y positions are at 0 and 3; ignore acceleration and velocity.
@@ -253,13 +271,13 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
} else if (insert_at != -1) {
// Found existing marker; scan before and after it.
forward_scan_begin = insert_at + 1;
- forward_scan_end = markers.size() - 1;;
+ forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_at - 1;
backward_scan_end = 0;
} else {
// Didn't find existing marker but found an insertion point.
forward_scan_begin = insert_before;
- forward_scan_end = markers.size() - 1;;
+ forward_scan_end = markers.size() - 1;
backward_scan_begin = insert_before - 1;
backward_scan_end = 0;
}
@@ -301,9 +319,8 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
return false;
}
LG << "Predicting backward";
- int predict_begin =
- std::min(forward_scan_begin + max_frames_to_predict_from,
- forward_scan_end);
+ int predict_begin = std::min(
+ forward_scan_begin + max_frames_to_predict_from, forward_scan_end);
int predict_end = forward_scan_begin;
vector<Marker*> previous_markers;
for (int i = predict_begin; i >= predict_end; --i) {
@@ -312,7 +329,6 @@ bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
RunPrediction(previous_markers, marker);
return false;
}
-
}
} // namespace mv
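For reference, the data tables above are the pieces of a standard linear Kalman filter with a constant-velocity motion model run independently on x and y; restated in conventional notation (my paraphrase of the arrays, not text from the source):

    % State, prediction and measurement (dt = 3.8 as above):
    s = (x, \dot x, \ddot x, y, \dot y, \ddot y)^\top, \qquad
    s_{k+1} = F s_k, \qquad z_k = H s_k

    % F is block-diagonal with one 3x3 block per axis; the velocity-only model
    % leaves the acceleration row as identity (unused):
    F_{\text{axis}} = \begin{pmatrix} 1 & dt & 0 \\ 0 & 1 & 0 \\ 0 & 0 & 1 \end{pmatrix},
    \qquad
    H = \begin{pmatrix} 1 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0 & 0 & 1 & 0 & 0 \end{pmatrix}

The disabled 3rd-order variant only changes the per-axis block to include dt and dt^2/2 terms so acceleration is propagated as well.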
diff --git a/intern/libmv/libmv/autotrack/predict_tracks_test.cc b/intern/libmv/libmv/autotrack/predict_tracks_test.cc
index f7c2c68d750..fea93b91bce 100644
--- a/intern/libmv/libmv/autotrack/predict_tracks_test.cc
+++ b/intern/libmv/libmv/autotrack/predict_tracks_test.cc
@@ -35,17 +35,15 @@ static void AddMarker(int frame, float x, float y, Tracks* tracks) {
marker.frame = frame;
marker.center.x() = x;
marker.center.y() = y;
- marker.patch.coordinates << x - 1, y - 1,
- x + 1, y - 1,
- x + 1, y + 1,
- x - 1, y + 1;
+ marker.patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
+ y + 1;
tracks->AddMarker(marker);
}
TEST(PredictMarkerPosition, EasyLinearMotion) {
Tracks tracks;
- AddMarker(0, 1.0, 0.0, &tracks);
- AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -66,10 +64,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
// Check the patch coordinates as well.
double x = 9, y = 40.0;
Quad2Df expected_patch;
- expected_patch.coordinates << x - 1, y - 1,
- x + 1, y - 1,
- x + 1, y + 1,
- x - 1, y + 1;
+ expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
+ y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@@ -78,8 +74,8 @@ TEST(PredictMarkerPosition, EasyLinearMotion) {
TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
Tracks tracks;
- AddMarker(8, 1.0, 0.0, &tracks);
- AddMarker(7, 2.0, 5.0, &tracks);
+ AddMarker(8, 1.0, 0.0, &tracks);
+ AddMarker(7, 2.0, 5.0, &tracks);
AddMarker(6, 3.0, 10.0, &tracks);
AddMarker(5, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -101,10 +97,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
// Check the patch coordinates as well.
double x = 9.0, y = 40.0;
Quad2Df expected_patch;
- expected_patch.coordinates << x - 1, y - 1,
- x + 1, y - 1,
- x + 1, y + 1,
- x - 1, y + 1;
+ expected_patch.coordinates << x - 1, y - 1, x + 1, y - 1, x + 1, y + 1, x - 1,
+ y + 1;
error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
LG << "Patch error: " << error;
@@ -113,8 +107,8 @@ TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
TEST(PredictMarkerPosition, TwoFrameGap) {
Tracks tracks;
- AddMarker(0, 1.0, 0.0, &tracks);
- AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
@@ -135,8 +129,8 @@ TEST(PredictMarkerPosition, TwoFrameGap) {
TEST(PredictMarkerPosition, FourFrameGap) {
Tracks tracks;
- AddMarker(0, 1.0, 0.0, &tracks);
- AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
// Missing frames 4, 5, 6, 7.
@@ -154,13 +148,13 @@ TEST(PredictMarkerPosition, FourFrameGap) {
TEST(PredictMarkerPosition, MultipleGaps) {
Tracks tracks;
- AddMarker(0, 1.0, 0.0, &tracks);
- AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
// AddMarker(3, 4.0, 15.0, &tracks); // Note the 3-frame gap.
// AddMarker(4, 5.0, 20.0, &tracks);
// AddMarker(5, 6.0, 25.0, &tracks);
- AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
+ AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
// AddMarker(7, 8.0, 35.0, &tracks);
Marker predicted;
@@ -178,14 +172,14 @@ TEST(PredictMarkerPosition, MarkersInRandomOrder) {
Tracks tracks;
// This is the same as the easy case, except that the markers are added in random order.
- AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(0, 1.0, 0.0, &tracks);
AddMarker(2, 3.0, 10.0, &tracks);
AddMarker(7, 8.0, 35.0, &tracks);
AddMarker(5, 6.0, 25.0, &tracks);
AddMarker(4, 5.0, 20.0, &tracks);
AddMarker(3, 4.0, 15.0, &tracks);
AddMarker(6, 7.0, 30.0, &tracks);
- AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
Marker predicted;
predicted.clip = 0;
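The expected values in these tests follow directly from the synthetic motion: AddMarker(f, f + 1, 5 * f) places every marker on the line

    x(f) = f + 1, \qquad y(f) = 5f

so the forward prediction for frame 8 must land at (x, y) = (9, 40), which is what the assertions compare against; the backward test uses the same line with the frame axis reversed and therefore expects the same point.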
diff --git a/intern/libmv/libmv/autotrack/quad.h b/intern/libmv/libmv/autotrack/quad.h
index 0c70f9882da..4aeb66f20f7 100644
--- a/intern/libmv/libmv/autotrack/quad.h
+++ b/intern/libmv/libmv/autotrack/quad.h
@@ -27,7 +27,7 @@
namespace mv {
-template<typename T, int D>
+template <typename T, int D>
struct Quad {
// A quad is 4 points; generally in 2D or 3D.
//
@@ -35,7 +35,7 @@ struct Quad {
// |\.
// | \.
// | z (z goes into screen)
- // |
+ // |
// | r0----->r1
// | ^ |
// | | . |
@@ -44,7 +44,7 @@ struct Quad {
// | \.
// | \.
// v normal goes away (right handed).
- // y
+ // y
//
// Each row is one of the corners coordinates; either (x, y) or (x, y, z).
Eigen::Matrix<T, 4, D> coordinates;
diff --git a/intern/libmv/libmv/autotrack/reconstruction.h b/intern/libmv/libmv/autotrack/reconstruction.h
index 732e74063f1..f993b39f79e 100644
--- a/intern/libmv/libmv/autotrack/reconstruction.h
+++ b/intern/libmv/libmv/autotrack/reconstruction.h
@@ -57,17 +57,17 @@ class Reconstruction {
public:
// All methods copy their input reference or take ownership of the pointer.
void AddCameraPose(const CameraPose& pose);
- int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
- int AddPoint(const Point& point);
- int AddModel(Model* model);
+ int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
+ int AddPoint(const Point& point);
+ int AddModel(Model* model);
// Returns the corresponding pose or point or NULL if missing.
- CameraPose* CameraPoseForFrame(int clip, int frame);
+ CameraPose* CameraPoseForFrame(int clip, int frame);
const CameraPose* CameraPoseForFrame(int clip, int frame) const;
- Point* PointForTrack(int track);
+ Point* PointForTrack(int track);
const Point* PointForTrack(int track) const;
- const vector<vector<CameraPose> >& camera_poses() const {
+ const vector<vector<CameraPose>>& camera_poses() const {
return camera_poses_;
}
diff --git a/intern/libmv/libmv/autotrack/region.h b/intern/libmv/libmv/autotrack/region.h
index b35d99eb60d..687a217ab2a 100644
--- a/intern/libmv/libmv/autotrack/region.h
+++ b/intern/libmv/libmv/autotrack/region.h
@@ -46,7 +46,7 @@ struct Region {
Vec2f min;
Vec2f max;
- template<typename T>
+ template <typename T>
void Offset(const T& offset) {
min += offset.template cast<float>();
max += offset.template cast<float>();
diff --git a/intern/libmv/libmv/autotrack/tracks.cc b/intern/libmv/libmv/autotrack/tracks.cc
index 174f264f3f2..8044bb28bb4 100644
--- a/intern/libmv/libmv/autotrack/tracks.cc
+++ b/intern/libmv/libmv/autotrack/tracks.cc
@@ -23,8 +23,8 @@
#include "libmv/autotrack/tracks.h"
#include <algorithm>
-#include <vector>
#include <iterator>
+#include <vector>
#include "libmv/numeric/numeric.h"
@@ -34,12 +34,12 @@ Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
-Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
+Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {
+}
bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
for (int i = 0; i < markers_.size(); ++i) {
- if (markers_[i].clip == clip &&
- markers_[i].frame == frame &&
+ if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
*marker = markers_[i];
return true;
@@ -60,8 +60,7 @@ void Tracks::GetMarkersForTrackInClip(int clip,
int track,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
- if (clip == markers_[i].clip &&
- track == markers_[i].track) {
+ if (clip == markers_[i].clip && track == markers_[i].track) {
markers->push_back(markers_[i]);
}
}
@@ -71,15 +70,16 @@ void Tracks::GetMarkersInFrame(int clip,
int frame,
vector<Marker>* markers) const {
for (int i = 0; i < markers_.size(); ++i) {
- if (markers_[i].clip == clip &&
- markers_[i].frame == frame) {
+ if (markers_[i].clip == clip && markers_[i].frame == frame) {
markers->push_back(markers_[i]);
}
}
}
-void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
- int clip2, int frame2,
+void Tracks::GetMarkersForTracksInBothImages(int clip1,
+ int frame1,
+ int clip2,
+ int frame2,
vector<Marker>* markers) const {
std::vector<int> image1_tracks;
std::vector<int> image2_tracks;
@@ -99,20 +99,19 @@ void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
std::sort(image1_tracks.begin(), image1_tracks.end());
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
- std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
- image2_tracks.begin(), image2_tracks.end(),
+ std::set_intersection(image1_tracks.begin(),
+ image1_tracks.end(),
+ image2_tracks.begin(),
+ image2_tracks.end(),
std::back_inserter(intersection));
// Scan through and get the relevant tracks from the two images.
for (int i = 0; i < markers_.size(); ++i) {
// Save markers that are in either frame and are in our candidate set.
- if (((markers_[i].clip == clip1 &&
- markers_[i].frame == frame1) ||
- (markers_[i].clip == clip2 &&
- markers_[i].frame == frame2)) &&
- std::binary_search(intersection.begin(),
- intersection.end(),
- markers_[i].track)) {
+ if (((markers_[i].clip == clip1 && markers_[i].frame == frame1) ||
+ (markers_[i].clip == clip2 && markers_[i].frame == frame2)) &&
+ std::binary_search(
+ intersection.begin(), intersection.end(), markers_[i].track)) {
markers->push_back(markers_[i]);
}
}
@@ -122,8 +121,7 @@ void Tracks::AddMarker(const Marker& marker) {
// TODO(keir): This is quadratic for repeated insertions. Fix this by adding
// a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
- if (markers_[i].clip == marker.clip &&
- markers_[i].frame == marker.frame &&
+ if (markers_[i].clip == marker.clip && markers_[i].frame == marker.frame &&
markers_[i].track == marker.track) {
markers_[i] = marker;
return;
@@ -139,8 +137,7 @@ void Tracks::SetMarkers(vector<Marker>* markers) {
bool Tracks::RemoveMarker(int clip, int frame, int track) {
int size = markers_.size();
for (int i = 0; i < markers_.size(); ++i) {
- if (markers_[i].clip == clip &&
- markers_[i].frame == frame &&
+ if (markers_[i].clip == clip && markers_[i].frame == frame &&
markers_[i].track == track) {
markers_[i] = markers_[size - 1];
markers_.resize(size - 1);
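GetMarkersForTracksInBothImages() above follows a sort / set_intersection / binary_search pattern: gather the track ids seen in each image, intersect the sorted lists, then keep only markers whose track id is in the intersection. A standalone sketch of the same pattern on bare ids (values made up for illustration):

    #include <algorithm>
    #include <iostream>
    #include <iterator>
    #include <vector>

    int main() {
      const int ids1[] = {4, 1, 7, 3};
      const int ids2[] = {3, 9, 4};
      std::vector<int> image1_tracks(ids1, ids1 + 4);
      std::vector<int> image2_tracks(ids2, ids2 + 3);

      std::sort(image1_tracks.begin(), image1_tracks.end());
      std::sort(image2_tracks.begin(), image2_tracks.end());

      std::vector<int> intersection;
      std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
                            image2_tracks.begin(), image2_tracks.end(),
                            std::back_inserter(intersection));

      // Stand-in for the final scan over markers_: keep ids present in both images.
      for (int track = 0; track < 10; ++track) {
        if (std::binary_search(intersection.begin(), intersection.end(), track)) {
          std::cout << "track " << track << " is visible in both images\n";  // 3 and 4.
        }
      }
      return 0;
    }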
diff --git a/intern/libmv/libmv/autotrack/tracks.h b/intern/libmv/libmv/autotrack/tracks.h
index 0b7de91d211..dd11a2d6fbd 100644
--- a/intern/libmv/libmv/autotrack/tracks.h
+++ b/intern/libmv/libmv/autotrack/tracks.h
@@ -23,8 +23,8 @@
#ifndef LIBMV_AUTOTRACK_TRACKS_H_
#define LIBMV_AUTOTRACK_TRACKS_H_
-#include "libmv/base/vector.h"
#include "libmv/autotrack/marker.h"
+#include "libmv/base/vector.h"
namespace mv {
@@ -33,8 +33,8 @@ using libmv::vector;
// The Tracks container stores correspondences between frames.
class Tracks {
public:
- Tracks() { }
- Tracks(const Tracks &other);
+ Tracks() {}
+ Tracks(const Tracks& other);
// Create a tracks object with markers already initialized. Copies markers.
explicit Tracks(const vector<Marker>& markers);
@@ -51,8 +51,10 @@ class Tracks {
//
// This is not the same as the union of the markers in frame1 and
// frame2; each marker is for a track that appears in both images.
- void GetMarkersForTracksInBothImages(int clip1, int frame1,
- int clip2, int frame2,
+ void GetMarkersForTracksInBothImages(int clip1,
+ int frame1,
+ int clip2,
+ int frame2,
vector<Marker>* markers) const;
void AddMarker(const Marker& marker);
diff --git a/intern/libmv/libmv/autotrack/tracks_test.cc b/intern/libmv/libmv/autotrack/tracks_test.cc
index 028b4a10913..eeefd3714b0 100644
--- a/intern/libmv/libmv/autotrack/tracks_test.cc
+++ b/intern/libmv/libmv/autotrack/tracks_test.cc
@@ -22,8 +22,8 @@
#include "libmv/autotrack/tracks.h"
-#include "testing/testing.h"
#include "libmv/logging/logging.h"
+#include "testing/testing.h"
namespace mv {
diff --git a/intern/libmv/libmv/base/aligned_malloc.cc b/intern/libmv/libmv/base/aligned_malloc.cc
index 5d3e05e9df9..6e327acf928 100644
--- a/intern/libmv/libmv/base/aligned_malloc.cc
+++ b/intern/libmv/libmv/base/aligned_malloc.cc
@@ -41,11 +41,11 @@
namespace libmv {
-void *aligned_malloc(int size, int alignment) {
+void* aligned_malloc(int size, int alignment) {
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__APPLE__)
- void *result;
+ void* result;
if (posix_memalign(&result, alignment, size)) {
// non-zero means allocation error
@@ -58,7 +58,7 @@ void *aligned_malloc(int size, int alignment) {
#endif
}
-void aligned_free(void *ptr) {
+void aligned_free(void* ptr) {
#ifdef _WIN32
_aligned_free(ptr);
#else
diff --git a/intern/libmv/libmv/base/aligned_malloc.h b/intern/libmv/libmv/base/aligned_malloc.h
index 096ff6e2d7c..25583bb6be4 100644
--- a/intern/libmv/libmv/base/aligned_malloc.h
+++ b/intern/libmv/libmv/base/aligned_malloc.h
@@ -24,10 +24,10 @@
namespace libmv {
// Allocate block of size bytes at least aligned to a given value.
-void *aligned_malloc(int size, int alignment);
+void* aligned_malloc(int size, int alignment);
// Free memory allocated by aligned_malloc.
-void aligned_free(void *ptr);
+void aligned_free(void* ptr);
} // namespace libmv
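A minimal usage sketch for the two functions above (assuming the libmv tree is on the include path); a pointer from aligned_malloc() must be released with aligned_free(), never plain free():

    #include <cassert>
    #include <cstdint>
    #include "libmv/base/aligned_malloc.h"

    int main() {
      // Request 1 KiB aligned to a 16-byte boundary (e.g. for SIMD-friendly buffers).
      void* buffer = libmv::aligned_malloc(1024, 16);
      assert(reinterpret_cast<std::uintptr_t>(buffer) % 16 == 0);
      libmv::aligned_free(buffer);
      return 0;
    }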
diff --git a/intern/libmv/libmv/base/id_generator.h b/intern/libmv/libmv/base/id_generator.h
index bf1eafd218e..535c9bd7b38 100644
--- a/intern/libmv/libmv/base/id_generator.h
+++ b/intern/libmv/libmv/base/id_generator.h
@@ -28,6 +28,7 @@ class IdGenerator {
public:
IdGenerator() : next_(0) {}
ID Generate() { return next_++; }
+
private:
ID next_;
};
diff --git a/intern/libmv/libmv/base/map.h b/intern/libmv/libmv/base/map.h
index 88b720f17fe..a91e3561791 100644
--- a/intern/libmv/libmv/base/map.h
+++ b/intern/libmv/libmv/base/map.h
@@ -26,8 +26,8 @@
namespace libmv {
-using std::map;
using std::make_pair;
+using std::map;
} // namespace libmv
diff --git a/intern/libmv/libmv/base/scoped_ptr.h b/intern/libmv/libmv/base/scoped_ptr.h
index b9cd4854213..9bfcfe1d615 100644
--- a/intern/libmv/libmv/base/scoped_ptr.h
+++ b/intern/libmv/libmv/base/scoped_ptr.h
@@ -30,44 +30,44 @@ namespace libmv {
* A handle for a heap-allocated resource that should be freed when it goes out
* of scope. This looks similar to the one found in TR1.
*/
-template<typename T>
+template <typename T>
class scoped_ptr {
public:
- scoped_ptr(T *resource) : resource_(resource) {}
+ scoped_ptr(T* resource) : resource_(resource) {}
~scoped_ptr() { reset(0); }
- T *get() const { return resource_; }
- T *operator->() const { return resource_; }
- T &operator*() const { return *resource_; }
+ T* get() const { return resource_; }
+ T* operator->() const { return resource_; }
+ T& operator*() const { return *resource_; }
- void reset(T *new_resource) {
+ void reset(T* new_resource) {
if (sizeof(T)) {
delete resource_;
}
resource_ = new_resource;
}
- T *release() {
- T *released_resource = resource_;
+ T* release() {
+ T* released_resource = resource_;
resource_ = 0;
return released_resource;
}
private:
// No copying allowed.
- T *resource_;
+ T* resource_;
};
// Same as scoped_ptr but caller must allocate the data
// with new[] and the destructor will free the memory
// using delete[].
-template<typename T>
+template <typename T>
class scoped_array {
public:
- scoped_array(T *array) : array_(array) {}
+ scoped_array(T* array) : array_(array) {}
~scoped_array() { reset(NULL); }
- T *get() const { return array_; }
+ T* get() const { return array_; }
T& operator[](std::ptrdiff_t i) const {
assert(i >= 0);
@@ -75,25 +75,27 @@ class scoped_array {
return array_[i];
}
- void reset(T *new_array) {
+ void reset(T* new_array) {
if (sizeof(T)) {
delete array_;
}
array_ = new_array;
}
- T *release() {
- T *released_array = array_;
+ T* release() {
+ T* released_array = array_;
array_ = NULL;
return released_array;
}
private:
- T *array_;
+ T* array_;
// Forbid comparison of different scoped_array types.
- template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
- template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
+ template <typename T2>
+ bool operator==(scoped_array<T2> const& p2) const;
+ template <typename T2>
+ bool operator!=(scoped_array<T2> const& p2) const;
// Disallow evil constructors
scoped_array(const scoped_array&);
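A short usage sketch of scoped_ptr above (again assuming the libmv tree on the include path): the resource is deleted when the handle leaves scope unless release() has handed ownership back to the caller.

    #include <cstdio>
    #include "libmv/base/scoped_ptr.h"

    struct Blob { int value; };

    int main() {
      libmv::scoped_ptr<Blob> blob(new Blob);
      blob->value = 42;              // operator-> forwards to the owned object.
      std::printf("%d\n", blob->value);

      Blob* raw = blob.release();    // Ownership is handed back to the caller...
      delete raw;                    // ...so the caller must delete it explicitly.
      return 0;                      // Nothing left for the destructor to free.
    }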
diff --git a/intern/libmv/libmv/base/scoped_ptr_test.cc b/intern/libmv/libmv/base/scoped_ptr_test.cc
index ce1d56b500a..e86af6d4516 100644
--- a/intern/libmv/libmv/base/scoped_ptr_test.cc
+++ b/intern/libmv/libmv/base/scoped_ptr_test.cc
@@ -25,9 +25,9 @@ namespace libmv {
namespace {
struct FreeMe {
- FreeMe(int *freed) : freed(freed) {}
+ FreeMe(int* freed) : freed(freed) {}
~FreeMe() { (*freed)++; }
- int *freed;
+ int* freed;
};
TEST(ScopedPtr, NullDoesNothing) {
@@ -61,8 +61,8 @@ TEST(ScopedPtr, Reset) {
TEST(ScopedPtr, ReleaseAndGet) {
int frees = 0;
- FreeMe *allocated = new FreeMe(&frees);
- FreeMe *released = NULL;
+ FreeMe* allocated = new FreeMe(&frees);
+ FreeMe* released = NULL;
{
scoped_ptr<FreeMe> scoped(allocated);
EXPECT_EQ(0, frees);
diff --git a/intern/libmv/libmv/base/vector_test.cc b/intern/libmv/libmv/base/vector_test.cc
index f171e3a18b5..d31bee751cd 100644
--- a/intern/libmv/libmv/base/vector_test.cc
+++ b/intern/libmv/libmv/base/vector_test.cc
@@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/base/vector.h"
+#include <algorithm>
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
-#include <algorithm>
namespace {
using namespace libmv;
@@ -62,7 +62,7 @@ int foo_destruct_calls = 0;
struct Foo {
public:
Foo() : value(5) { foo_construct_calls++; }
- ~Foo() { foo_destruct_calls++; }
+ ~Foo() { foo_destruct_calls++; }
int value;
};
@@ -150,7 +150,7 @@ TEST_F(VectorTest, CopyConstructor) {
a.push_back(3);
vector<int> b(a);
- EXPECT_EQ(a.size(), b.size());
+ EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
@@ -164,7 +164,7 @@ TEST_F(VectorTest, OperatorEquals) {
b = a;
- EXPECT_EQ(a.size(), b.size());
+ EXPECT_EQ(a.size(), b.size());
for (int i = 0; i < a.size(); ++i) {
EXPECT_EQ(a[i], b[i]);
}
diff --git a/intern/libmv/libmv/base/vector_utils.h b/intern/libmv/libmv/base/vector_utils.h
index c71e1bea951..5f69c03d937 100644
--- a/intern/libmv/libmv/base/vector_utils.h
+++ b/intern/libmv/libmv/base/vector_utils.h
@@ -18,14 +18,13 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-
#ifndef LIBMV_BASE_VECTOR_UTILS_H_
#define LIBMV_BASE_VECTOR_UTILS_H_
/// Delete the contents of a container.
template <class Array>
-void DeleteElements(Array *array) {
- for (int i = 0; i < array->size(); ++i) {
+void DeleteElements(Array* array) {
+ for (int i = 0; i < array->size(); ++i) {
delete (*array)[i];
}
array->clear();
diff --git a/intern/libmv/libmv/image/array_nd.cc b/intern/libmv/libmv/image/array_nd.cc
index 469a19aabf1..07feda33e66 100644
--- a/intern/libmv/libmv/image/array_nd.cc
+++ b/intern/libmv/libmv/image/array_nd.cc
@@ -18,18 +18,17 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "libmv/image/image.h"
-#include <iostream>
#include <cmath>
+#include <iostream>
+#include "libmv/image/image.h"
namespace libmv {
-void FloatArrayToScaledByteArray(const Array3Df &float_array,
- Array3Du *byte_array,
- bool automatic_range_detection
- ) {
+void FloatArrayToScaledByteArray(const Array3Df& float_array,
+ Array3Du* byte_array,
+ bool automatic_range_detection) {
byte_array->ResizeLike(float_array);
- float minval = HUGE_VAL;
+ float minval = HUGE_VAL;
float maxval = -HUGE_VAL;
if (automatic_range_detection) {
for (int i = 0; i < float_array.Height(); ++i) {
@@ -54,8 +53,8 @@ void FloatArrayToScaledByteArray(const Array3Df &float_array,
}
}
-void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
- Array3Df *float_array) {
+void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
+ Array3Df* float_array) {
float_array->ResizeLike(byte_array);
for (int i = 0; i < byte_array.Height(); ++i) {
for (int j = 0; j < byte_array.Width(); ++j) {
@@ -66,10 +65,10 @@ void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
}
}
-void SplitChannels(const Array3Df &input,
- Array3Df *channel0,
- Array3Df *channel1,
- Array3Df *channel2) {
+void SplitChannels(const Array3Df& input,
+ Array3Df* channel0,
+ Array3Df* channel1,
+ Array3Df* channel2) {
assert(input.Depth() >= 3);
channel0->Resize(input.Height(), input.Width());
channel1->Resize(input.Height(), input.Width());
@@ -83,7 +82,7 @@ void SplitChannels(const Array3Df &input,
}
}
-void PrintArray(const Array3Df &array) {
+void PrintArray(const Array3Df& array) {
using namespace std;
printf("[\n");
diff --git a/intern/libmv/libmv/image/array_nd.h b/intern/libmv/libmv/image/array_nd.h
index e95e66aa2b3..1a3c39d0461 100644
--- a/intern/libmv/libmv/image/array_nd.h
+++ b/intern/libmv/libmv/image/array_nd.h
@@ -44,13 +44,13 @@ class ArrayND : public BaseArray {
ArrayND() : data_(NULL), own_data_(true) { Resize(Index(0)); }
/// Create an array with the specified shape.
- ArrayND(const Index &shape) : data_(NULL), own_data_(true) { Resize(shape); }
+ ArrayND(const Index& shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Create an array with the specified shape.
- ArrayND(int *shape) : data_(NULL), own_data_(true) { Resize(shape); }
+ ArrayND(int* shape) : data_(NULL), own_data_(true) { Resize(shape); }
/// Copy constructor.
- ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data_(true) {
+ ArrayND(const ArrayND<T, N>& b) : data_(NULL), own_data_(true) {
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
}
@@ -58,7 +58,7 @@ class ArrayND : public BaseArray {
ArrayND(int s0) : data_(NULL), own_data_(true) { Resize(s0); }
ArrayND(int s0, int s1) : data_(NULL), own_data_(true) { Resize(s0, s1); }
ArrayND(int s0, int s1, int s2) : data_(NULL), own_data_(true) {
- Resize(s0, s1, s2);
+ Resize(s0, s1, s2);
}
ArrayND(T* data, int s0, int s1, int s2)
@@ -69,28 +69,24 @@ class ArrayND : public BaseArray {
/// Destructor deletes pixel data.
~ArrayND() {
if (own_data_) {
- delete [] data_;
+ delete[] data_;
}
}
/// Assignation copies pixel data.
- ArrayND &operator=(const ArrayND<T, N> &b) {
+ ArrayND& operator=(const ArrayND<T, N>& b) {
assert(this != &b);
ResizeLike(b);
std::memcpy(Data(), b.Data(), sizeof(T) * Size());
return *this;
}
- const Index &Shapes() const {
- return shape_;
- }
+ const Index& Shapes() const { return shape_; }
- const Index &Strides() const {
- return strides_;
- }
+ const Index& Strides() const { return strides_; }
/// Create an array of shape s.
- void Resize(const Index &new_shape) {
+ void Resize(const Index& new_shape) {
if (data_ != NULL && shape_ == new_shape) {
// Don't bother reallocating if the shapes match.
return;
@@ -101,7 +97,7 @@ class ArrayND : public BaseArray {
strides_(i - 1) = strides_(i) * shape_(i);
}
if (own_data_) {
- delete [] data_;
+ delete[] data_;
data_ = NULL;
if (Size() > 0) {
data_ = new T[Size()];
@@ -109,15 +105,13 @@ class ArrayND : public BaseArray {
}
}
- template<typename D>
- void ResizeLike(const ArrayND<D, N> &other) {
+ template <typename D>
+ void ResizeLike(const ArrayND<D, N>& other) {
Resize(other.Shape());
}
/// Resizes the array to shape s. All data is lost.
- void Resize(const int *new_shape_array) {
- Resize(Index(new_shape_array));
- }
+ void Resize(const int* new_shape_array) { Resize(Index(new_shape_array)); }
/// Resize a 1D array to length s0.
void Resize(int s0) {
@@ -136,9 +130,7 @@ class ArrayND : public BaseArray {
}
// Match Eigen2's API.
- void resize(int rows, int cols) {
- Resize(rows, cols);
- }
+ void resize(int rows, int cols) { Resize(rows, cols); }
/// Resize a 3D array to shape (s0,s1,s2).
void Resize(int s0, int s1, int s2) {
@@ -147,11 +139,11 @@ class ArrayND : public BaseArray {
Resize(shape);
}
- template<typename D>
- void CopyFrom(const ArrayND<D, N> &other) {
+ template <typename D>
+ void CopyFrom(const ArrayND<D, N>& other) {
ResizeLike(other);
- T *data = Data();
- const D *other_data = other.Data();
+ T* data = Data();
+ const D* other_data = other.Data();
for (int i = 0; i < Size(); ++i) {
data[i] = T(other_data[i]);
}
@@ -171,19 +163,13 @@ class ArrayND : public BaseArray {
}
/// Return a tuple containing the length of each axis.
- const Index &Shape() const {
- return shape_;
- }
+ const Index& Shape() const { return shape_; }
/// Return the length of an axis.
- int Shape(int axis) const {
- return shape_(axis);
- }
+ int Shape(int axis) const { return shape_(axis); }
/// Return the distance between neighboring elements along axis.
- int Stride(int axis) const {
- return strides_(axis);
- }
+ int Stride(int axis) const { return strides_(axis); }
/// Return the number of elements of the array.
int Size() const {
@@ -194,18 +180,16 @@ class ArrayND : public BaseArray {
}
/// Return the total amount of memory used by the array.
- int MemorySizeInBytes() const {
- return sizeof(*this) + Size() * sizeof(T);
- }
+ int MemorySizeInBytes() const { return sizeof(*this) + Size() * sizeof(T); }
/// Pointer to the first element of the array.
- T *Data() { return data_; }
+ T* Data() { return data_; }
/// Constant pointer to the first element of the array.
- const T *Data() const { return data_; }
+ const T* Data() const { return data_; }
/// Distance between the first element and the element at position index.
- int Offset(const Index &index) const {
+ int Offset(const Index& index) const {
int offset = 0;
for (int i = 0; i < N; ++i)
offset += index(i) * Stride(i);
@@ -231,25 +215,23 @@ class ArrayND : public BaseArray {
}
/// Return a reference to the element at position index.
- T &operator()(const Index &index) {
+ T& operator()(const Index& index) {
// TODO(pau) Boundary checking in debug mode.
- return *( Data() + Offset(index) );
+ return *(Data() + Offset(index));
}
/// 1D specialization.
- T &operator()(int i0) {
- return *( Data() + Offset(i0) );
- }
+ T& operator()(int i0) { return *(Data() + Offset(i0)); }
/// 2D specialization.
- T &operator()(int i0, int i1) {
+ T& operator()(int i0, int i1) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
- T &operator()(int i0, int i1, int i2) {
+ T& operator()(int i0, int i1, int i2) {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
assert(0 <= i2 && i2 < Shape(2));
@@ -257,29 +239,27 @@ class ArrayND : public BaseArray {
}
/// Return a constant reference to the element at position index.
- const T &operator()(const Index &index) const {
+ const T& operator()(const Index& index) const {
return *(Data() + Offset(index));
}
/// 1D specialization.
- const T &operator()(int i0) const {
- return *(Data() + Offset(i0));
- }
+ const T& operator()(int i0) const { return *(Data() + Offset(i0)); }
/// 2D specialization.
- const T &operator()(int i0, int i1) const {
+ const T& operator()(int i0, int i1) const {
assert(0 <= i0 && i0 < Shape(0));
assert(0 <= i1 && i1 < Shape(1));
return *(Data() + Offset(i0, i1));
}
/// 3D specialization.
- const T &operator()(int i0, int i1, int i2) const {
+ const T& operator()(int i0, int i1, int i2) const {
return *(Data() + Offset(i0, i1, i2));
}
/// True if index is inside array.
- bool Contains(const Index &index) const {
+ bool Contains(const Index& index) const {
for (int i = 0; i < N; ++i)
if (index(i) < 0 || index(i) >= Shape(i))
return false;
@@ -287,26 +267,24 @@ class ArrayND : public BaseArray {
}
/// 1D specialization.
- bool Contains(int i0) const {
- return 0 <= i0 && i0 < Shape(0);
- }
+ bool Contains(int i0) const { return 0 <= i0 && i0 < Shape(0); }
/// 2D specialization.
bool Contains(int i0, int i1) const {
- return 0 <= i0 && i0 < Shape(0)
- && 0 <= i1 && i1 < Shape(1);
+ return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1);
}
/// 3D specialization.
bool Contains(int i0, int i1, int i2) const {
- return 0 <= i0 && i0 < Shape(0)
- && 0 <= i1 && i1 < Shape(1)
- && 0 <= i2 && i2 < Shape(2);
+ return 0 <= i0 && i0 < Shape(0) && 0 <= i1 && i1 < Shape(1) && 0 <= i2 &&
+ i2 < Shape(2);
}
- bool operator==(const ArrayND<T, N> &other) const {
- if (shape_ != other.shape_) return false;
- if (strides_ != other.strides_) return false;
+ bool operator==(const ArrayND<T, N>& other) const {
+ if (shape_ != other.shape_)
+ return false;
+ if (strides_ != other.strides_)
+ return false;
for (int i = 0; i < Size(); ++i) {
if (this->Data()[i] != other.Data()[i])
return false;
@@ -314,11 +292,11 @@ class ArrayND : public BaseArray {
return true;
}
- bool operator!=(const ArrayND<T, N> &other) const {
+ bool operator!=(const ArrayND<T, N>& other) const {
return !(*this == other);
}
- ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
+ ArrayND<T, N> operator*(const ArrayND<T, N>& other) const {
assert(Shape() = other.Shape());
ArrayND<T, N> res;
res.ResizeLike(*this);
@@ -336,7 +314,7 @@ class ArrayND : public BaseArray {
Index strides_;
/// Pointer to the first element of the array.
- T *data_;
+ T* data_;
/// Flag if this Array either own or reference the data
bool own_data_;
@@ -346,30 +324,20 @@ class ArrayND : public BaseArray {
template <typename T>
class Array3D : public ArrayND<T, 3> {
typedef ArrayND<T, 3> Base;
+
public:
- Array3D()
- : Base() {
- }
- Array3D(int height, int width, int depth = 1)
- : Base(height, width, depth) {
- }
+ Array3D() : Base() {}
+ Array3D(int height, int width, int depth = 1) : Base(height, width, depth) {}
Array3D(T* data, int height, int width, int depth = 1)
- : Base(data, height, width, depth) {
- }
+ : Base(data, height, width, depth) {}
void Resize(int height, int width, int depth = 1) {
Base::Resize(height, width, depth);
}
- int Height() const {
- return Base::Shape(0);
- }
- int Width() const {
- return Base::Shape(1);
- }
- int Depth() const {
- return Base::Shape(2);
- }
+ int Height() const { return Base::Shape(0); }
+ int Width() const { return Base::Shape(1); }
+ int Depth() const { return Base::Shape(2); }
// Match Eigen2's API so that Array3D's and Mat*'s can work together via
// template magic.
@@ -377,15 +345,15 @@ class Array3D : public ArrayND<T, 3> {
int cols() const { return Width(); }
int depth() const { return Depth(); }
- int Get_Step() const { return Width()*Depth(); }
+ int Get_Step() const { return Width() * Depth(); }
/// Enable accessing with 2 indices for grayscale images.
- T &operator()(int i0, int i1, int i2 = 0) {
+ T& operator()(int i0, int i1, int i2 = 0) {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
}
- const T &operator()(int i0, int i1, int i2 = 0) const {
+ const T& operator()(int i0, int i1, int i2 = 0) const {
assert(0 <= i0 && i0 < Height());
assert(0 <= i1 && i1 < Width());
return Base::operator()(i0, i1, i2);
@@ -398,31 +366,29 @@ typedef Array3D<int> Array3Di;
typedef Array3D<float> Array3Df;
typedef Array3D<short> Array3Ds;
-void SplitChannels(const Array3Df &input,
- Array3Df *channel0,
- Array3Df *channel1,
- Array3Df *channel2);
+void SplitChannels(const Array3Df& input,
+ Array3Df* channel0,
+ Array3Df* channel1,
+ Array3Df* channel2);
-void PrintArray(const Array3Df &array);
+void PrintArray(const Array3Df& array);
/** Convert a float array into a byte array by scaling values into [0, 255] using 255 / (max - min),
- * where max and min are automatically detected
+ * where max and min are automatically detected
* (if automatic_range_detection = true)
 * \note (TODO) This automatic detection only works when the image contains
 * at least one pixel at each of the two bounds.
**/
-void FloatArrayToScaledByteArray(const Array3Df &float_array,
- Array3Du *byte_array,
+void FloatArrayToScaledByteArray(const Array3Df& float_array,
+ Array3Du* byte_array,
bool automatic_range_detection = false);
//! Convert a byte array into a float array by dividing values by 255.
-void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
- Array3Df *float_array);
+void ByteArrayToScaledFloatArray(const Array3Du& byte_array,
+ Array3Df* float_array);
template <typename AArrayType, typename BArrayType, typename CArrayType>
-void MultiplyElements(const AArrayType &a,
- const BArrayType &b,
- CArrayType *c) {
+void MultiplyElements(const AArrayType& a, const BArrayType& b, CArrayType* c) {
// This function does an element-wise multiply between
// the two Arrays A and B, and stores the result in C.
// A and B must have the same dimensions.
@@ -435,7 +401,7 @@ void MultiplyElements(const AArrayType &a,
// The index starts at the maximum value for each dimension
const typename CArrayType::Index& cShape = c->Shape();
- for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
+ for (int i = 0; i < CArrayType::Index::SIZE; ++i)
index(i) = cShape(i) - 1;
// After each multiplication, the highest-dimensional index is reduced.
@@ -443,12 +409,12 @@ void MultiplyElements(const AArrayType &a,
// and decrements the index of the next lower dimension.
// This ripple-action continues until the entire new array has been
// calculated, indicated by dimension zero having a negative index.
- while ( index(0) >= 0 ) {
+ while (index(0) >= 0) {
(*c)(index) = a(index) * b(index);
int dimension = CArrayType::Index::SIZE - 1;
index(dimension) = index(dimension) - 1;
- while ( dimension > 0 && index(dimension) < 0 ) {
+ while (dimension > 0 && index(dimension) < 0) {
index(dimension) = cShape(dimension) - 1;
index(dimension - 1) = index(dimension - 1) - 1;
--dimension;
@@ -457,9 +423,9 @@ void MultiplyElements(const AArrayType &a,
}
template <typename TA, typename TB, typename TC>
-void MultiplyElements(const ArrayND<TA, 3> &a,
- const ArrayND<TB, 3> &b,
- ArrayND<TC, 3> *c) {
+void MultiplyElements(const ArrayND<TA, 3>& a,
+ const ArrayND<TB, 3>& b,
+ ArrayND<TC, 3>* c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
@@ -475,9 +441,9 @@ void MultiplyElements(const ArrayND<TA, 3> &a,
}
template <typename TA, typename TB, typename TC>
-void MultiplyElements(const Array3D<TA> &a,
- const Array3D<TB> &b,
- Array3D<TC> *c) {
+void MultiplyElements(const Array3D<TA>& a,
+ const Array3D<TB>& b,
+ Array3D<TC>* c) {
// Specialization for N==3
c->ResizeLike(a);
assert(a.Shape(0) == b.Shape(0));
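The generic MultiplyElements() above walks an N-dimensional index like an odometer: decrement the last dimension, and when it underflows, reset it and borrow from the next lower one, until dimension zero goes negative. A standalone sketch of just that iteration over a small 3-D shape (plain arrays instead of ArrayND):

    #include <iostream>

    int main() {
      const int N = 3;
      const int shape[N] = {2, 2, 3};

      // Start at the maximum index in every dimension, as MultiplyElements() does.
      int index[N];
      for (int i = 0; i < N; ++i) index[i] = shape[i] - 1;

      int visited = 0;
      while (index[0] >= 0) {
        ++visited;  // The real code does (*c)(index) = a(index) * b(index) here.
        int dimension = N - 1;
        index[dimension] -= 1;
        // Ripple the borrow into lower dimensions whenever one underflows.
        while (dimension > 0 && index[dimension] < 0) {
          index[dimension] = shape[dimension] - 1;
          index[dimension - 1] -= 1;
          --dimension;
        }
      }
      std::cout << "visited " << visited << " elements\n";  // Prints 12 (= 2 * 2 * 3).
      return 0;
    }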
diff --git a/intern/libmv/libmv/image/array_nd_test.cc b/intern/libmv/libmv/image/array_nd_test.cc
index dc7cfacf90d..67838d34051 100644
--- a/intern/libmv/libmv/image/array_nd_test.cc
+++ b/intern/libmv/libmv/image/array_nd_test.cc
@@ -21,9 +21,9 @@
#include "libmv/image/array_nd.h"
#include "testing/testing.h"
-using libmv::ArrayND;
using libmv::Array3D;
using libmv::Array3Df;
+using libmv::ArrayND;
namespace {
@@ -100,7 +100,7 @@ TEST(ArrayND, Size) {
int l[] = {0, 1, 2};
ArrayND<int, 3>::Index last(l);
- EXPECT_EQ(a.Size(), a.Offset(last)+1);
+ EXPECT_EQ(a.Size(), a.Offset(last) + 1);
EXPECT_TRUE(a.Contains(last));
EXPECT_FALSE(a.Contains(shape));
}
@@ -120,8 +120,8 @@ TEST(ArrayND, Parenthesis) {
int s[] = {3, 3};
ArrayND<int, 2> a(s);
- *(a.Data()+0) = 0;
- *(a.Data()+5) = 5;
+ *(a.Data() + 0) = 0;
+ *(a.Data() + 5) = 5;
int i1[] = {0, 0};
EXPECT_EQ(0, a(Index(i1)));
@@ -210,7 +210,7 @@ TEST(ArrayND, MultiplyElements) {
b(1, 1, 0) = 3;
ArrayND<int, 3> c;
MultiplyElements(a, b, &c);
- EXPECT_FLOAT_EQ(6, c(0, 0, 0));
+ EXPECT_FLOAT_EQ(6, c(0, 0, 0));
EXPECT_FLOAT_EQ(10, c(0, 1, 0));
EXPECT_FLOAT_EQ(12, c(1, 0, 0));
EXPECT_FLOAT_EQ(12, c(1, 1, 0));
diff --git a/intern/libmv/libmv/image/convolve.cc b/intern/libmv/libmv/image/convolve.cc
index 464043581d2..478e320377d 100644
--- a/intern/libmv/libmv/image/convolve.cc
+++ b/intern/libmv/libmv/image/convolve.cc
@@ -29,7 +29,7 @@ namespace libmv {
// Compute a Gaussian kernel and derivative, such that you can take the
// derivative of an image by convolving with the kernel horizontally, then the
// derivative vertically, to get (e.g.) the y derivative.
-void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
+void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative) {
assert(sigma >= 0.0);
// 0.004 implies a 3 pixel kernel with 1 pixel sigma.
@@ -37,7 +37,7 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
// Calculate the kernel size based on sigma such that it is odd.
float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
- int width = lround(2*precisehalfwidth);
+ int width = lround(2 * precisehalfwidth);
if (width % 2 == 0) {
width++;
}
@@ -47,7 +47,7 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
kernel->setZero();
derivative->setZero();
int halfwidth = width / 2;
- for (int i = -halfwidth; i <= halfwidth; ++i) {
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
(*kernel)(i + halfwidth) = Gaussian(i, sigma);
(*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
}
@@ -57,16 +57,21 @@ void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
// Normalize the derivative differently. See
// www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
double factor = 0.;
- for (int i = -halfwidth; i <= halfwidth; ++i) {
- factor -= i*(*derivative)(i+halfwidth);
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
+ factor -= i * (*derivative)(i + halfwidth);
}
*derivative /= factor;
}
template <int size, bool vertical>
-void FastConvolve(const Vec &kernel, int width, int height,
- const float* src, int src_stride, int src_line_stride,
- float* dst, int dst_stride) {
+void FastConvolve(const Vec& kernel,
+ int width,
+ int height,
+ const float* src,
+ int src_stride,
+ int src_line_stride,
+ float* dst,
+ int dst_stride) {
double coefficients[2 * size + 1];
for (int k = 0; k < 2 * size + 1; ++k) {
coefficients[k] = kernel(2 * size - k);
@@ -93,14 +98,14 @@ void FastConvolve(const Vec &kernel, int width, int height,
}
}
-template<bool vertical>
-void Convolve(const Array3Df &in,
- const Vec &kernel,
- Array3Df *out_pointer,
+template <bool vertical>
+void Convolve(const Array3Df& in,
+ const Vec& kernel,
+ Array3Df* out_pointer,
int plane) {
int width = in.Width();
int height = in.Height();
- Array3Df &out = *out_pointer;
+ Array3Df& out = *out_pointer;
if (plane == -1) {
out.ResizeLike(in);
plane = 0;
@@ -119,61 +124,62 @@ void Convolve(const Array3Df &in,
// fast path.
int half_width = kernel.size() / 2;
switch (half_width) {
-#define static_convolution(size) case size: \
- FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
- src_line_stride, dst, dst_stride); break;
- static_convolution(1)
- static_convolution(2)
- static_convolution(3)
- static_convolution(4)
- static_convolution(5)
- static_convolution(6)
- static_convolution(7)
+#define static_convolution(size) \
+ case size: \
+ FastConvolve<size, vertical>(kernel, \
+ width, \
+ height, \
+ src, \
+ src_stride, \
+ src_line_stride, \
+ dst, \
+ dst_stride); \
+ break;
+ static_convolution(1) static_convolution(2) static_convolution(3)
+ static_convolution(4) static_convolution(5) static_convolution(6)
+ static_convolution(7)
#undef static_convolution
- default:
- int dynamic_size = kernel.size() / 2;
- for (int y = 0; y < height; ++y) {
- for (int x = 0; x < width; ++x) {
- double sum = 0;
- // Slow path: this loop cannot be unrolled.
- for (int k = -dynamic_size; k <= dynamic_size; ++k) {
- if (vertical) {
- if (y + k >= 0 && y + k < height) {
- sum += src[k * src_line_stride] *
- kernel(2 * dynamic_size - (k + dynamic_size));
- }
- } else {
- if (x + k >= 0 && x + k < width) {
- sum += src[k * src_stride] *
- kernel(2 * dynamic_size - (k + dynamic_size));
- }
+ default : int dynamic_size = kernel.size() / 2;
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ double sum = 0;
+ // Slow path: this loop cannot be unrolled.
+ for (int k = -dynamic_size; k <= dynamic_size; ++k) {
+ if (vertical) {
+ if (y + k >= 0 && y + k < height) {
+ sum += src[k * src_line_stride] *
+ kernel(2 * dynamic_size - (k + dynamic_size));
+ }
+ } else {
+ if (x + k >= 0 && x + k < width) {
+ sum += src[k * src_stride] *
+ kernel(2 * dynamic_size - (k + dynamic_size));
}
}
- dst[0] = static_cast<float>(sum);
- src += src_stride;
- dst += dst_stride;
}
+ dst[0] = static_cast<float>(sum);
+ src += src_stride;
+ dst += dst_stride;
}
+ }
}
}
-void ConvolveHorizontal(const Array3Df &in,
- const Vec &kernel,
- Array3Df *out_pointer,
+void ConvolveHorizontal(const Array3Df& in,
+ const Vec& kernel,
+ Array3Df* out_pointer,
int plane) {
Convolve<false>(in, kernel, out_pointer, plane);
}
-void ConvolveVertical(const Array3Df &in,
- const Vec &kernel,
- Array3Df *out_pointer,
+void ConvolveVertical(const Array3Df& in,
+ const Vec& kernel,
+ Array3Df* out_pointer,
int plane) {
Convolve<true>(in, kernel, out_pointer, plane);
}
-void ConvolveGaussian(const Array3Df &in,
- double sigma,
- Array3Df *out_pointer) {
+void ConvolveGaussian(const Array3Df& in, double sigma, Array3Df* out_pointer) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
@@ -182,10 +188,10 @@ void ConvolveGaussian(const Array3Df &in,
ConvolveHorizontal(tmp, kernel, out_pointer);
}
-void ImageDerivatives(const Array3Df &in,
+void ImageDerivatives(const Array3Df& in,
double sigma,
- Array3Df *gradient_x,
- Array3Df *gradient_y) {
+ Array3Df* gradient_x,
+ Array3Df* gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@@ -199,11 +205,11 @@ void ImageDerivatives(const Array3Df &in,
ConvolveVertical(tmp, derivative, gradient_y);
}
-void BlurredImageAndDerivatives(const Array3Df &in,
+void BlurredImageAndDerivatives(const Array3Df& in,
double sigma,
- Array3Df *blurred_image,
- Array3Df *gradient_x,
- Array3Df *gradient_y) {
+ Array3Df* blurred_image,
+ Array3Df* gradient_x,
+ Array3Df* gradient_y) {
Vec kernel, derivative;
ComputeGaussianKernel(sigma, &kernel, &derivative);
Array3Df tmp;
@@ -224,9 +230,9 @@ void BlurredImageAndDerivatives(const Array3Df &in,
// image, and store the results in three channels. Since the blurred value and
// gradients are closer in memory, this leads to better performance if all
// three values are needed at the same time.
-void BlurredImageAndDerivativesChannels(const Array3Df &in,
+void BlurredImageAndDerivativesChannels(const Array3Df& in,
double sigma,
- Array3Df *blurred_and_gradxy) {
+ Array3Df* blurred_and_gradxy) {
assert(in.Depth() == 1);
Vec kernel, derivative;
@@ -246,10 +252,10 @@ void BlurredImageAndDerivativesChannels(const Array3Df &in,
ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
}
-void BoxFilterHorizontal(const Array3Df &in,
+void BoxFilterHorizontal(const Array3Df& in,
int window_size,
- Array3Df *out_pointer) {
- Array3Df &out = *out_pointer;
+ Array3Df* out_pointer) {
+ Array3Df& out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@@ -266,7 +272,7 @@ void BoxFilterHorizontal(const Array3Df &in,
out(i, j, k) = sum;
}
// Fill interior.
- for (int j = half_width + 1; j < in.Width()-half_width; ++j) {
+ for (int j = half_width + 1; j < in.Width() - half_width; ++j) {
sum -= in(i, j - half_width - 1, k);
sum += in(i, j + half_width, k);
out(i, j, k) = sum;
@@ -280,10 +286,10 @@ void BoxFilterHorizontal(const Array3Df &in,
}
}
-void BoxFilterVertical(const Array3Df &in,
+void BoxFilterVertical(const Array3Df& in,
int window_size,
- Array3Df *out_pointer) {
- Array3Df &out = *out_pointer;
+ Array3Df* out_pointer) {
+ Array3Df& out = *out_pointer;
out.ResizeLike(in);
int half_width = (window_size - 1) / 2;
@@ -300,7 +306,7 @@ void BoxFilterVertical(const Array3Df &in,
out(i, j, k) = sum;
}
// Fill interior.
- for (int i = half_width + 1; i < in.Height()-half_width; ++i) {
+ for (int i = half_width + 1; i < in.Height() - half_width; ++i) {
sum -= in(i - half_width - 1, j, k);
sum += in(i + half_width, j, k);
out(i, j, k) = sum;
@@ -314,9 +320,7 @@ void BoxFilterVertical(const Array3Df &in,
}
}
-void BoxFilter(const Array3Df &in,
- int box_width,
- Array3Df *out) {
+void BoxFilter(const Array3Df& in, int box_width, Array3Df* out) {
Array3Df tmp;
BoxFilterHorizontal(in, box_width, &tmp);
BoxFilterVertical(tmp, box_width, out);
@@ -327,17 +331,17 @@ void LaplaceFilter(unsigned char* src,
int width,
int height,
int strength) {
- for (int y = 1; y < height-1; y++)
- for (int x = 1; x < width-1; x++) {
- const unsigned char* s = &src[y*width+x];
- int l = 128 +
- s[-width-1] + s[-width] + s[-width+1] +
- s[1] - 8*s[0] + s[1] +
- s[ width-1] + s[ width] + s[ width+1];
- int d = ((256-strength)*s[0] + strength*l) / 256;
- if (d < 0) d=0;
- if (d > 255) d=255;
- dst[y*width+x] = d;
+ for (int y = 1; y < height - 1; y++)
+ for (int x = 1; x < width - 1; x++) {
+ const unsigned char* s = &src[y * width + x];
+ int l = 128 + s[-width - 1] + s[-width] + s[-width + 1] + s[1] -
+ 8 * s[0] + s[1] + s[width - 1] + s[width] + s[width + 1];
+ int d = ((256 - strength) * s[0] + strength * l) / 256;
+ if (d < 0)
+ d = 0;
+ if (d > 255)
+ d = 255;
+ dst[y * width + x] = d;
}
}
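BoxFilterHorizontal() and BoxFilterVertical() above keep a running window sum: once the first full window is summed, each interior output is produced by subtracting the sample that leaves the window and adding the one that enters, i.e. O(1) per pixel regardless of window_size. A 1-D standalone sketch of that interior update (borders left at zero here; the real filters handle them separately):

    #include <iostream>
    #include <vector>

    int main() {
      const float data[] = {1, 2, 3, 4, 5, 6, 7, 8};
      const std::vector<float> in(data, data + 8);
      const int window_size = 3;  // Odd, as assumed by the filters above.
      const int half_width = (window_size - 1) / 2;
      std::vector<float> out(in.size(), 0.0f);

      // Seed the sum with the first full window, centered at half_width.
      float sum = 0.0f;
      for (int j = 0; j <= 2 * half_width; ++j) sum += in[j];
      out[half_width] = sum;

      // Interior: slide the window one sample at a time.
      for (size_t j = half_width + 1; j + half_width < in.size(); ++j) {
        sum -= in[j - half_width - 1];  // Sample leaving the window.
        sum += in[j + half_width];      // Sample entering the window.
        out[j] = sum;
      }

      for (size_t j = 0; j < out.size(); ++j) std::cout << out[j] << " ";
      std::cout << "\n";  // 0 6 9 12 15 18 21 0
      return 0;
    }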
diff --git a/intern/libmv/libmv/image/convolve.h b/intern/libmv/libmv/image/convolve.h
index d3b6da9794b..3794550eb73 100644
--- a/intern/libmv/libmv/image/convolve.h
+++ b/intern/libmv/libmv/image/convolve.h
@@ -30,70 +30,71 @@ namespace libmv {
// Zero mean Gaussian.
inline double Gaussian(double x, double sigma) {
- return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
+ return 1 / sqrt(2 * M_PI * sigma * sigma) * exp(-(x * x / 2 / sigma / sigma));
}
// 2D gaussian (zero mean)
// (9) in http://mathworld.wolfram.com/GaussianFunction.html
inline double Gaussian2D(double x, double y, double sigma) {
- return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
+ return 1.0 / (2.0 * M_PI * sigma * sigma) *
+ exp(-(x * x + y * y) / (2.0 * sigma * sigma));
}
inline double GaussianDerivative(double x, double sigma) {
return -x / sigma / sigma * Gaussian(x, sigma);
}
// Solve the inverse of the Gaussian for positive x.
inline double GaussianInversePositive(double y, double sigma) {
- return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
+ return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2 * M_PI)));
}
-void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
-void ConvolveHorizontal(const FloatImage &in,
- const Vec &kernel,
- FloatImage *out_pointer,
+void ComputeGaussianKernel(double sigma, Vec* kernel, Vec* derivative);
+void ConvolveHorizontal(const FloatImage& in,
+ const Vec& kernel,
+ FloatImage* out_pointer,
int plane = -1);
-void ConvolveVertical(const FloatImage &in,
- const Vec &kernel,
- FloatImage *out_pointer,
+void ConvolveVertical(const FloatImage& in,
+ const Vec& kernel,
+ FloatImage* out_pointer,
int plane = -1);
-void ConvolveGaussian(const FloatImage &in,
+void ConvolveGaussian(const FloatImage& in,
double sigma,
- FloatImage *out_pointer);
+ FloatImage* out_pointer);
-void ImageDerivatives(const FloatImage &in,
+void ImageDerivatives(const FloatImage& in,
double sigma,
- FloatImage *gradient_x,
- FloatImage *gradient_y);
+ FloatImage* gradient_x,
+ FloatImage* gradient_y);
-void BlurredImageAndDerivatives(const FloatImage &in,
+void BlurredImageAndDerivatives(const FloatImage& in,
double sigma,
- FloatImage *blurred_image,
- FloatImage *gradient_x,
- FloatImage *gradient_y);
+ FloatImage* blurred_image,
+ FloatImage* gradient_x,
+ FloatImage* gradient_y);
// Blur and take the gradients of an image, storing the results inside the
// three channels of blurred_and_gradxy.
-void BlurredImageAndDerivativesChannels(const FloatImage &in,
+void BlurredImageAndDerivativesChannels(const FloatImage& in,
double sigma,
- FloatImage *blurred_and_gradxy);
+ FloatImage* blurred_and_gradxy);
-void BoxFilterHorizontal(const FloatImage &in,
+void BoxFilterHorizontal(const FloatImage& in,
int window_size,
- FloatImage *out_pointer);
+ FloatImage* out_pointer);
-void BoxFilterVertical(const FloatImage &in,
+void BoxFilterVertical(const FloatImage& in,
int window_size,
- FloatImage *out_pointer);
+ FloatImage* out_pointer);
-void BoxFilter(const FloatImage &in,
- int box_width,
- FloatImage *out);
+void BoxFilter(const FloatImage& in, int box_width, FloatImage* out);
/*!
Convolve \a src into \a dst with the discrete laplacian operator.
\a src and \a dst should be \a width x \a height images.
- \a strength is an interpolation coefficient (0-256) between original image and the laplacian.
+ \a strength is an interpolation coefficient (0-256) between original image
+ and the laplacian.
- \note Make sure the search region is filtered with the same strength as the pattern.
+ \note Make sure the search region is filtered with the same strength as the
+ pattern.
*/
void LaplaceFilter(unsigned char* src,
unsigned char* dst,
@@ -104,4 +105,3 @@ void LaplaceFilter(unsigned char* src,
} // namespace libmv
#endif // LIBMV_IMAGE_CONVOLVE_H_
-
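The inline helpers above are the standard zero-mean Gaussian, its derivative, and its positive inverse; restating them (my transcription of the code into formulas):

    G(x;\sigma) = \frac{1}{\sqrt{2\pi}\,\sigma}\, e^{-x^2/(2\sigma^2)}, \qquad
    G'(x;\sigma) = -\frac{x}{\sigma^2}\, G(x;\sigma), \qquad
    G_{2D}(x,y;\sigma) = \frac{1}{2\pi\sigma^2}\, e^{-(x^2+y^2)/(2\sigma^2)}

    % GaussianInversePositive solves G(x;\sigma) = y for the positive root:
    x = \sqrt{-2\sigma^2 \ln\!\bigl(y\,\sigma\sqrt{2\pi}\bigr)}

which is how ComputeGaussianKernel() converts the 0.004 truncation factor into the (odd) kernel width.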
diff --git a/intern/libmv/libmv/image/convolve_test.cc b/intern/libmv/libmv/image/convolve_test.cc
index 0cdef8e1e72..1ad4cb9a40e 100644
--- a/intern/libmv/libmv/image/convolve_test.cc
+++ b/intern/libmv/libmv/image/convolve_test.cc
@@ -85,26 +85,26 @@ TEST(Convolve, BlurredImageAndDerivativesChannelsHorizontalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
- image(j, i) = 2*i;
+ image(j, i) = 2 * i;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
- EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
- EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
}
TEST(Convolve, BlurredImageAndDerivativesChannelsVerticalSlope) {
FloatImage image(10, 10), blurred_and_derivatives;
for (int j = 0; j < 10; ++j) {
for (int i = 0; i < 10; ++i) {
- image(j, i) = 2*j;
+ image(j, i) = 2 * j;
}
}
BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
- EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
- EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
}
} // namespace
diff --git a/intern/libmv/libmv/image/correlation.h b/intern/libmv/libmv/image/correlation.h
index c354f7e891e..25123efe8d0 100644
--- a/intern/libmv/libmv/image/correlation.h
+++ b/intern/libmv/libmv/image/correlation.h
@@ -21,14 +21,14 @@
#ifndef LIBMV_IMAGE_CORRELATION_H
#define LIBMV_IMAGE_CORRELATION_H
-#include "libmv/logging/logging.h"
#include "libmv/image/image.h"
+#include "libmv/logging/logging.h"
namespace libmv {
inline double PearsonProductMomentCorrelation(
- const FloatImage &image_and_gradient1_sampled,
- const FloatImage &image_and_gradient2_sampled) {
+ const FloatImage& image_and_gradient1_sampled,
+ const FloatImage& image_and_gradient2_sampled) {
assert(image_and_gradient1_sampled.Width() ==
image_and_gradient2_sampled.Width());
assert(image_and_gradient1_sampled.Height() ==
@@ -63,9 +63,8 @@ inline double PearsonProductMomentCorrelation(
double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
- LG << "Covariance xy: " << covariance_xy
- << ", var 1: " << var_x << ", var 2: " << var_y
- << ", correlation: " << correlation;
+ LG << "Covariance xy: " << covariance_xy << ", var 1: " << var_x
+ << ", var 2: " << var_y << ", correlation: " << correlation;
return correlation;
}
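PearsonProductMomentCorrelation above is the covariance of the two sampled patches divided by the product of their standard deviations; a value near 1 means the two regions vary together almost linearly. A minimal standalone sketch of the same statistic over two plain float sequences (independent of FloatImage and of the LG logging macro; PearsonSketch is a hypothetical helper):

#include <cassert>
#include <cmath>
#include <cstddef>

// Pearson correlation r = cov(A, B) / (sigma_A * sigma_B) of two equal-length
// float sequences, accumulated in one pass.
static double PearsonSketch(const float* a, const float* b, size_t n) {
  assert(n > 1);
  double sum_a = 0, sum_b = 0, sum_aa = 0, sum_bb = 0, sum_ab = 0;
  for (size_t i = 0; i < n; ++i) {
    sum_a += a[i];
    sum_b += b[i];
    sum_aa += double(a[i]) * a[i];
    sum_bb += double(b[i]) * b[i];
    sum_ab += double(a[i]) * b[i];
  }
  const double mean_a = sum_a / n, mean_b = sum_b / n;
  const double var_a = sum_aa / n - mean_a * mean_a;
  const double var_b = sum_bb / n - mean_b * mean_b;
  const double cov_ab = sum_ab / n - mean_a * mean_b;
  return cov_ab / std::sqrt(var_a * var_b);
}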
diff --git a/intern/libmv/libmv/image/image.h b/intern/libmv/libmv/image/image.h
index e0f200a4c93..40d6aa6f70a 100644
--- a/intern/libmv/libmv/image/image.h
+++ b/intern/libmv/libmv/image/image.h
@@ -39,14 +39,11 @@ typedef Array3Ds ShortImage;
// is the best solution after all.
class Image {
public:
-
// Create an image from an array. The image takes ownership of the array.
- Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
- Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
+ Image(Array3Du* array) : array_type_(BYTE), array_(array) {}
+ Image(Array3Df* array) : array_type_(FLOAT), array_(array) {}
- Image(const Image &img): array_type_(NONE), array_(NULL) {
- *this = img;
- }
+ Image(const Image& img) : array_type_(NONE), array_(NULL) { *this = img; }
// Underlying data type.
enum DataType {
@@ -62,20 +59,18 @@ class Image {
int size;
switch (array_type_) {
case BYTE:
- size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
- break;
+ size = reinterpret_cast<Array3Du*>(array_)->MemorySizeInBytes();
+ break;
case FLOAT:
- size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
- break;
+ size = reinterpret_cast<Array3Df*>(array_)->MemorySizeInBytes();
+ break;
case INT:
- size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
- break;
+ size = reinterpret_cast<Array3Di*>(array_)->MemorySizeInBytes();
+ break;
case SHORT:
- size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
- break;
- default :
- size = 0;
- assert(0);
+ size = reinterpret_cast<Array3Ds*>(array_)->MemorySizeInBytes();
+ break;
+ default: size = 0; assert(0);
}
size += sizeof(*this);
return size;
@@ -83,71 +78,57 @@ class Image {
~Image() {
switch (array_type_) {
- case BYTE:
- delete reinterpret_cast<Array3Du *>(array_);
-
- break;
- case FLOAT:
- delete reinterpret_cast<Array3Df *>(array_);
-
- break;
- case INT:
- delete reinterpret_cast<Array3Di *>(array_);
-
- break;
- case SHORT:
- delete reinterpret_cast<Array3Ds *>(array_);
-
- break;
- default:
- assert(0);
- }
+ case BYTE: delete reinterpret_cast<Array3Du*>(array_); break;
+ case FLOAT: delete reinterpret_cast<Array3Df*>(array_); break;
+ case INT: delete reinterpret_cast<Array3Di*>(array_); break;
+ case SHORT: delete reinterpret_cast<Array3Ds*>(array_); break;
+ default: assert(0);
+ }
}
- Image& operator= (const Image& f) {
+ Image& operator=(const Image& f) {
if (this != &f) {
array_type_ = f.array_type_;
switch (array_type_) {
case BYTE:
- delete reinterpret_cast<Array3Du *>(array_);
- array_ = new Array3Du(*(Array3Du *)f.array_);
- break;
+ delete reinterpret_cast<Array3Du*>(array_);
+ array_ = new Array3Du(*(Array3Du*)f.array_);
+ break;
case FLOAT:
- delete reinterpret_cast<Array3Df *>(array_);
- array_ = new Array3Df(*(Array3Df *)f.array_);
- break;
+ delete reinterpret_cast<Array3Df*>(array_);
+ array_ = new Array3Df(*(Array3Df*)f.array_);
+ break;
case INT:
- delete reinterpret_cast<Array3Di *>(array_);
- array_ = new Array3Di(*(Array3Di *)f.array_);
- break;
+ delete reinterpret_cast<Array3Di*>(array_);
+ array_ = new Array3Di(*(Array3Di*)f.array_);
+ break;
case SHORT:
- delete reinterpret_cast<Array3Ds *>(array_);
- array_ = new Array3Ds(*(Array3Ds *)f.array_);
- break;
- default:
- assert(0);
+ delete reinterpret_cast<Array3Ds*>(array_);
+ array_ = new Array3Ds(*(Array3Ds*)f.array_);
+ break;
+ default: assert(0);
}
}
return *this;
}
- Array3Du *AsArray3Du() const {
+ Array3Du* AsArray3Du() const {
if (array_type_ == BYTE) {
- return reinterpret_cast<Array3Du *>(array_);
+ return reinterpret_cast<Array3Du*>(array_);
}
return NULL;
}
- Array3Df *AsArray3Df() const {
+ Array3Df* AsArray3Df() const {
if (array_type_ == FLOAT) {
- return reinterpret_cast<Array3Df *>(array_);
+ return reinterpret_cast<Array3Df*>(array_);
}
return NULL;
}
private:
DataType array_type_;
- BaseArray *array_;
+ BaseArray* array_;
};
} // namespace libmv
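For orientation, a minimal usage sketch of the ownership contract of the Image wrapper above; it mirrors what the image_test.cc cases further down exercise, and ImageOwnershipSketch itself is a hypothetical helper rather than libmv code:

#include "libmv/image/image.h"

// The wrapper takes ownership of the heap-allocated array and frees it in its
// destructor, so the caller must not delete the array itself.
static bool ImageOwnershipSketch() {
  libmv::Array3Df* array = new libmv::Array3Df(2, 3);  // 2 rows, 3 columns.
  libmv::Image image(array);                           // image now owns array.
  // The typed accessors return the array only when the stored type matches.
  return image.AsArray3Df() == array && image.AsArray3Du() == NULL;
}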
diff --git a/intern/libmv/libmv/image/image_converter.h b/intern/libmv/libmv/image/image_converter.h
index b3a3fa2bf8c..41d53be6722 100644
--- a/intern/libmv/libmv/image/image_converter.h
+++ b/intern/libmv/libmv/image/image_converter.h
@@ -28,7 +28,7 @@ namespace libmv {
// The factor comes from http://www.easyrgb.com/
// RGB to XYZ : Y is the luminance channel
// var_R * 0.2126 + var_G * 0.7152 + var_B * 0.0722
-template<typename T>
+template <typename T>
inline T RGB2GRAY(const T r, const T g, const T b) {
return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
}
@@ -42,8 +42,8 @@ inline unsigned char RGB2GRAY<unsigned char>(const unsigned char r,
return (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 +0.5);
}*/
-template<class ImageIn, class ImageOut>
-void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
+template <class ImageIn, class ImageOut>
+void Rgb2Gray(const ImageIn& imaIn, ImageOut* imaOut) {
  // It is all fine to convert RGBA image here as well,
// all the additional channels will be nicely ignored.
assert(imaIn.Depth() >= 3);
@@ -52,21 +52,22 @@ void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
// Convert each RGB pixel into Gray value (luminance)
for (int j = 0; j < imaIn.Height(); ++j) {
- for (int i = 0; i < imaIn.Width(); ++i) {
- (*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
+ for (int i = 0; i < imaIn.Width(); ++i) {
+ (*imaOut)(j, i) =
+ RGB2GRAY(imaIn(j, i, 0), imaIn(j, i, 1), imaIn(j, i, 2));
}
}
}
// Convert given float image to an unsigned char array of pixels.
-template<class Image>
-unsigned char *FloatImageToUCharArray(const Image &image) {
- unsigned char *buffer =
+template <class Image>
+unsigned char* FloatImageToUCharArray(const Image& image) {
+ unsigned char* buffer =
new unsigned char[image.Width() * image.Height() * image.Depth()];
for (int y = 0; y < image.Height(); y++) {
- for (int x = 0; x < image.Width(); x++) {
- for (int d = 0; d < image.Depth(); d++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int d = 0; d < image.Depth(); d++) {
int index = (y * image.Width() + x) * image.Depth() + d;
buffer[index] = 255.0 * image(y, x, d);
}
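The RGB2GRAY comment above quotes the Rec. 709 luminance weights (0.2126, 0.7152, 0.0722). As a small standalone illustration of that conversion on an interleaved 8-bit RGB buffer (RgbToLumaSketch is a hypothetical helper, not libmv API):

#include <vector>

// Convert interleaved 8-bit RGB to a single luminance channel using the same
// Rec. 709 weights; +0.5 rounds to the nearest integer before truncation.
static std::vector<unsigned char> RgbToLumaSketch(
    const std::vector<unsigned char>& rgb, int width, int height) {
  std::vector<unsigned char> gray(width * height);
  for (int i = 0; i < width * height; ++i) {
    const unsigned char r = rgb[3 * i + 0];
    const unsigned char g = rgb[3 * i + 1];
    const unsigned char b = rgb[3 * i + 2];
    gray[i] = (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 + 0.5);
  }
  return gray;
}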
diff --git a/intern/libmv/libmv/image/image_drawing.h b/intern/libmv/libmv/image/image_drawing.h
index f50e48b75a3..dd6a94dd7d4 100644
--- a/intern/libmv/libmv/image/image_drawing.h
+++ b/intern/libmv/libmv/image/image_drawing.h
@@ -34,9 +34,9 @@ namespace libmv {
/// Put the pixel in the image to the given color only if the point (xc,yc)
/// is inside the image.
template <class Image, class Color>
-inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
+inline void safePutPixel(int yc, int xc, const Color& col, Image* pim) {
if (!pim)
- return;
+ return;
if (pim->Contains(yc, xc)) {
(*pim)(yc, xc) = col;
}
@@ -45,9 +45,9 @@ inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
 /// is inside the image. This function supports multi-channel color.
 /// \note The color pointer col must have the same size as the image depth
template <class Image, class Color>
-inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
+inline void safePutPixel(int yc, int xc, const Color* col, Image* pim) {
if (!pim)
- return;
+ return;
if (pim->Contains(yc, xc)) {
for (int i = 0; i < pim->Depth(); ++i)
(*pim)(yc, xc, i) = *(col + i);
@@ -59,19 +59,23 @@ inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
// Add the rotation of the ellipse.
 // As the algorithm uses symmetry, we must use 4 rotations.
template <class Image, class Color>
-void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
- const Color &col, Image *pim, double angle = 0.0) {
+void DrawEllipse(int xc,
+ int yc,
+ int radiusA,
+ int radiusB,
+ const Color& col,
+ Image* pim,
+ double angle = 0.0) {
int a = radiusA;
int b = radiusB;
// Counter Clockwise rotation matrix.
- double matXY[4] = { cos(angle), sin(angle),
- -sin(angle), cos(angle)};
+ double matXY[4] = {cos(angle), sin(angle), -sin(angle), cos(angle)};
int x, y;
double d1, d2;
x = 0;
y = b;
- d1 = b*b - a*a*b + a*a/4;
+ d1 = b * b - a * a * b + a * a / 4;
float rotX = (matXY[0] * x + matXY[1] * y);
float rotY = (matXY[2] * x + matXY[3] * y);
@@ -86,12 +90,12 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
- while (a*a*(y-.5) > b*b*(x+1)) {
+ while (a * a * (y - .5) > b * b * (x + 1)) {
if (d1 < 0) {
- d1 += b*b*(2*x+3);
+ d1 += b * b * (2 * x + 3);
++x;
} else {
- d1 += b*b*(2*x+3) + a*a*(-2*y+2);
+ d1 += b * b * (2 * x + 3) + a * a * (-2 * y + 2);
++x;
--y;
}
@@ -108,14 +112,14 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
rotY = (-matXY[2] * x + matXY[3] * y);
safePutPixel(yc + rotY, xc + rotX, col, pim);
}
- d2 = b*b*(x+.5)*(x+.5) + a*a*(y-1)*(y-1) - a*a*b*b;
+ d2 = b * b * (x + .5) * (x + .5) + a * a * (y - 1) * (y - 1) - a * a * b * b;
while (y > 0) {
if (d2 < 0) {
- d2 += b*b*(2*x+2) + a*a*(-2*y+3);
+ d2 += b * b * (2 * x + 2) + a * a * (-2 * y + 3);
--y;
++x;
} else {
- d2 += a*a*(-2*y+3);
+ d2 += a * a * (-2 * y + 3);
--y;
}
rotX = (matXY[0] * x + matXY[1] * y);
@@ -137,23 +141,23 @@ void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
 // So it's better to use the Andres method.
// http://fr.wikipedia.org/wiki/Algorithme_de_tracé_de_cercle_d'Andres.
template <class Image, class Color>
-void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
- Image &im = *pim;
- if ( im.Contains(y + radius, x + radius)
- || im.Contains(y + radius, x - radius)
- || im.Contains(y - radius, x + radius)
- || im.Contains(y - radius, x - radius)) {
+void DrawCircle(int x, int y, int radius, const Color& col, Image* pim) {
+ Image& im = *pim;
+ if (im.Contains(y + radius, x + radius) ||
+ im.Contains(y + radius, x - radius) ||
+ im.Contains(y - radius, x + radius) ||
+ im.Contains(y - radius, x - radius)) {
int x1 = 0;
int y1 = radius;
int d = radius - 1;
while (y1 >= x1) {
// Draw the point for each octant.
- safePutPixel( y1 + y, x1 + x, col, pim);
- safePutPixel( x1 + y, y1 + x, col, pim);
- safePutPixel( y1 + y, -x1 + x, col, pim);
- safePutPixel( x1 + y, -y1 + x, col, pim);
- safePutPixel(-y1 + y, x1 + x, col, pim);
- safePutPixel(-x1 + y, y1 + x, col, pim);
+ safePutPixel(y1 + y, x1 + x, col, pim);
+ safePutPixel(x1 + y, y1 + x, col, pim);
+ safePutPixel(y1 + y, -x1 + x, col, pim);
+ safePutPixel(x1 + y, -y1 + x, col, pim);
+ safePutPixel(-y1 + y, x1 + x, col, pim);
+ safePutPixel(-x1 + y, y1 + x, col, pim);
safePutPixel(-y1 + y, -x1 + x, col, pim);
safePutPixel(-x1 + y, -y1 + x, col, pim);
if (d >= 2 * x1) {
@@ -163,7 +167,7 @@ void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
if (d <= 2 * (radius - y1)) {
d = d + 2 * y1 - 1;
y1 -= 1;
- } else {
+ } else {
d = d + 2 * (y1 - x1 - 1);
y1 -= 1;
x1 += 1;
@@ -175,8 +179,8 @@ void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
// Bresenham algorithm
template <class Image, class Color>
-void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
- Image &im = *pim;
+void DrawLine(int xa, int ya, int xb, int yb, const Color& col, Image* pim) {
+ Image& im = *pim;
// If one point is outside the image
// Replace the outside point by the intersection of the line and
@@ -185,35 +189,37 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
int width = pim->Width();
int height = pim->Height();
const bool xdir = xa < xb, ydir = ya < yb;
- float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
- &xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
- &xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
- &xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
- &xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
+ float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb, &xleft = xdir ? nx0 : nx1,
+ &yleft = xdir ? ny0 : ny1, &xright = xdir ? nx1 : nx0,
+ &yright = xdir ? ny1 : ny0, &xup = ydir ? nx0 : nx1,
+ &yup = ydir ? ny0 : ny1, &xdown = ydir ? nx1 : nx0,
+ &ydown = ydir ? ny1 : ny0;
- if (xright < 0 || xleft >= width) return;
+ if (xright < 0 || xleft >= width)
+ return;
if (xleft < 0) {
- yleft -= xleft*(yright - yleft)/(xright - xleft);
- xleft = 0;
+ yleft -= xleft * (yright - yleft) / (xright - xleft);
+ xleft = 0;
}
if (xright >= width) {
- yright -= (xright - width)*(yright - yleft)/(xright - xleft);
- xright = width - 1;
+ yright -= (xright - width) * (yright - yleft) / (xright - xleft);
+ xright = width - 1;
}
- if (ydown < 0 || yup >= height) return;
+ if (ydown < 0 || yup >= height)
+ return;
if (yup < 0) {
- xup -= yup*(xdown - xup)/(ydown - yup);
- yup = 0;
+ xup -= yup * (xdown - xup) / (ydown - yup);
+ yup = 0;
}
if (ydown >= height) {
- xdown -= (ydown - height)*(xdown - xup)/(ydown - yup);
- ydown = height - 1;
+ xdown -= (ydown - height) * (xdown - xup) / (ydown - yup);
+ ydown = height - 1;
}
- xa = (int) xleft;
- xb = (int) xright;
- ya = (int) yleft;
- yb = (int) yright;
+ xa = (int)xleft;
+ xb = (int)xright;
+ ya = (int)yleft;
+ yb = (int)yright;
}
int xbas, xhaut, ybas, yhaut;
@@ -241,7 +247,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
}
if (dy > 0) { // Positive slope will increment X.
incrmY = 1;
- } else { // Negative slope.
+ } else { // Negative slope.
incrmY = -1;
}
if (dx >= dy) {
@@ -255,9 +261,9 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
x += incrmX;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
- } else { // Go to the North.
+ } else { // Go to the North.
dp += N;
- y+=incrmY;
+ y += incrmY;
}
}
} else {
@@ -271,7 +277,7 @@ void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
y += incrmY;
if (dp <= 0) { // Go in direction of the South Pixel.
dp += S;
- } else { // Go to the North.
+ } else { // Go to the North.
dp += N;
x += incrmX;
}
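DrawLine above is a Bresenham walk preceded by clipping against the image rectangle. Stripped of the clipping and of the pixel writes, the core integer error-update loop looks roughly like the textbook form below (BresenhamSketch is a standalone illustration that records the visited coordinates instead of drawing into an image):

#include <cstdlib>
#include <utility>
#include <vector>

// Classic integer Bresenham line between (x0, y0) and (x1, y1), all octants.
static std::vector<std::pair<int, int>> BresenhamSketch(int x0, int y0,
                                                        int x1, int y1) {
  std::vector<std::pair<int, int>> pixels;
  const int dx = std::abs(x1 - x0), sx = x0 < x1 ? 1 : -1;
  const int dy = -std::abs(y1 - y0), sy = y0 < y1 ? 1 : -1;
  int err = dx + dy;  // Combined error term for both axes.
  while (true) {
    pixels.emplace_back(x0, y0);
    if (x0 == x1 && y0 == y1)
      break;
    const int e2 = 2 * err;
    if (e2 >= dy) {  // Error favours a horizontal step.
      err += dy;
      x0 += sx;
    }
    if (e2 <= dx) {  // Error favours a vertical step.
      err += dx;
      y0 += sy;
    }
  }
  return pixels;
}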
diff --git a/intern/libmv/libmv/image/image_test.cc b/intern/libmv/libmv/image/image_test.cc
index 241f49f2244..a1730a9a55e 100644
--- a/intern/libmv/libmv/image/image_test.cc
+++ b/intern/libmv/libmv/image/image_test.cc
@@ -23,20 +23,20 @@
#include "libmv/image/image.h"
#include "testing/testing.h"
-using libmv::Image;
using libmv::Array3Df;
+using libmv::Image;
namespace {
TEST(Image, SimpleImageAccessors) {
- Array3Df *array = new Array3Df(2, 3);
+ Array3Df* array = new Array3Df(2, 3);
Image image(array);
EXPECT_EQ(array, image.AsArray3Df());
EXPECT_TRUE(NULL == image.AsArray3Du());
}
TEST(Image, MemorySizeInBytes) {
- Array3Df *array = new Array3Df(2, 3);
+ Array3Df* array = new Array3Df(2, 3);
Image image(array);
int size = sizeof(image) + array->MemorySizeInBytes();
EXPECT_EQ(size, image.MemorySizeInBytes());
diff --git a/intern/libmv/libmv/image/sample.h b/intern/libmv/libmv/image/sample.h
index 24eb9ccd57d..2b57baf27b9 100644
--- a/intern/libmv/libmv/image/sample.h
+++ b/intern/libmv/libmv/image/sample.h
@@ -26,17 +26,14 @@
namespace libmv {
/// Nearest neighbor interpolation.
-template<typename T>
-inline T SampleNearest(const Array3D<T> &image,
- float y, float x, int v = 0) {
+template <typename T>
+inline T SampleNearest(const Array3D<T>& image, float y, float x, int v = 0) {
const int i = int(round(y));
const int j = int(round(x));
return image(i, j, v);
}
-inline void LinearInitAxis(float x, int size,
- int *x1, int *x2,
- float *dx) {
+inline void LinearInitAxis(float x, int size, int* x1, int* x2, float* dx) {
const int ix = static_cast<int>(x);
if (ix < 0) {
*x1 = 0;
@@ -54,32 +51,32 @@ inline void LinearInitAxis(float x, int size,
}
/// Linear interpolation.
-template<typename T>
-inline T SampleLinear(const Array3D<T> &image, float y, float x, int v = 0) {
+template <typename T>
+inline T SampleLinear(const Array3D<T>& image, float y, float x, int v = 0) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
- LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
+ LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
const T im11 = image(y1, x1, v);
const T im12 = image(y1, x2, v);
const T im21 = image(y2, x1, v);
const T im22 = image(y2, x2, v);
- return T( dy * (dx * im11 + (1.0 - dx) * im12) +
+ return T(dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
/// Linear interpolation, of all channels. The sample is assumed to have the
/// same size as the number of channels in image.
-template<typename T>
-inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
+template <typename T>
+inline void SampleLinear(const Array3D<T>& image, float y, float x, T* sample) {
int x1, y1, x2, y2;
float dx, dy;
LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
- LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
+ LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
for (int i = 0; i < image.Depth(); ++i) {
const T im11 = image(y1, x1, i);
@@ -87,7 +84,7 @@ inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
const T im21 = image(y2, x1, i);
const T im22 = image(y2, x2, i);
- sample[i] = T( dy * (dx * im11 + (1.0 - dx) * im12) +
+ sample[i] = T(dy * (dx * im11 + (1.0 - dx) * im12) +
(1 - dy) * (dx * im21 + (1.0 - dx) * im22));
}
}
@@ -95,7 +92,7 @@ inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
// Downsample all channels by 2. If the image has odd width or height, the last
// row or column is ignored.
// FIXME(MatthiasF): this implementation shouldn't be in an interface file
-inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
+inline void DownsampleChannelsBy2(const Array3Df& in, Array3Df* out) {
int height = in.Height() / 2;
int width = in.Width() / 2;
int depth = in.Depth();
@@ -106,10 +103,12 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
for (int r = 0; r < height; ++r) {
for (int c = 0; c < width; ++c) {
for (int k = 0; k < depth; ++k) {
+ // clang-format off
(*out)(r, c, k) = (in(2 * r, 2 * c, k) +
in(2 * r + 1, 2 * c, k) +
in(2 * r, 2 * c + 1, k) +
in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
+ // clang-format on
}
}
}
@@ -117,11 +116,12 @@ inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
// Sample a region centered at x,y in image with size extending by half_width
// from x,y. Channels specifies the number of channels to sample from.
-inline void SamplePattern(const FloatImage &image,
- double x, double y,
- int half_width,
- int channels,
- FloatImage *sampled) {
+inline void SamplePattern(const FloatImage& image,
+ double x,
+ double y,
+ int half_width,
+ int channels,
+ FloatImage* sampled) {
sampled->Resize(2 * half_width + 1, 2 * half_width + 1, channels);
for (int r = -half_width; r <= half_width; ++r) {
for (int c = -half_width; c <= half_width; ++c) {
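SampleLinear above is plain bilinear sampling: LinearInitAxis clamps the two neighbouring integer coordinates to the image bounds and produces a blending weight, and the four surrounding texels are then mixed. A rough standalone sketch of the same idea on a row-major single-channel float buffer (SampleBilinearSketch and its clamping details are assumptions for illustration, not the libmv code):

#include <algorithm>
#include <cmath>

// Bilinear sample of a row-major single-channel float image at (y, x),
// clamping both the neighbour indices and the fractional weights.
static float SampleBilinearSketch(const float* image, int width, int height,
                                  float y, float x) {
  const int y1 = std::max(0, std::min(height - 1, (int)std::floor(y)));
  const int x1 = std::max(0, std::min(width - 1, (int)std::floor(x)));
  const int y2 = std::min(height - 1, y1 + 1);
  const int x2 = std::min(width - 1, x1 + 1);
  const float dy = std::min(1.0f, std::max(0.0f, y - y1));  // Row fraction.
  const float dx = std::min(1.0f, std::max(0.0f, x - x1));  // Column fraction.
  const float im11 = image[y1 * width + x1];
  const float im12 = image[y1 * width + x2];
  const float im21 = image[y2 * width + x1];
  const float im22 = image[y2 * width + x2];
  return (1 - dy) * ((1 - dx) * im11 + dx * im12) +
         dy * ((1 - dx) * im21 + dx * im22);
}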
diff --git a/intern/libmv/libmv/image/sample_test.cc b/intern/libmv/libmv/image/sample_test.cc
index c8a0ce470c2..f1fb4c42c67 100644
--- a/intern/libmv/libmv/image/sample_test.cc
+++ b/intern/libmv/libmv/image/sample_test.cc
@@ -32,9 +32,9 @@ TEST(Image, Nearest) {
image(1, 0) = 2;
image(1, 1) = 3;
EXPECT_EQ(0, SampleNearest(image, -0.4f, -0.4f));
- EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
- EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
- EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
+ EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
+ EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
+ EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
}
TEST(Image, Linear) {
@@ -57,7 +57,7 @@ TEST(Image, DownsampleBy2) {
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(1, resampled_image.Depth());
- EXPECT_FLOAT_EQ(6./4., resampled_image(0, 0));
+ EXPECT_FLOAT_EQ(6. / 4., resampled_image(0, 0));
}
TEST(Image, DownsampleBy2MultiChannel) {
@@ -82,8 +82,8 @@ TEST(Image, DownsampleBy2MultiChannel) {
ASSERT_EQ(1, resampled_image.Height());
ASSERT_EQ(1, resampled_image.Width());
ASSERT_EQ(3, resampled_image.Depth());
- EXPECT_FLOAT_EQ((0+1+2+3)/4., resampled_image(0, 0, 0));
- EXPECT_FLOAT_EQ((5+6+7+8)/4., resampled_image(0, 0, 1));
- EXPECT_FLOAT_EQ((9+10+11+12)/4., resampled_image(0, 0, 2));
+ EXPECT_FLOAT_EQ((0 + 1 + 2 + 3) / 4., resampled_image(0, 0, 0));
+ EXPECT_FLOAT_EQ((5 + 6 + 7 + 8) / 4., resampled_image(0, 0, 1));
+ EXPECT_FLOAT_EQ((9 + 10 + 11 + 12) / 4., resampled_image(0, 0, 2));
}
} // namespace
diff --git a/intern/libmv/libmv/image/tuple.h b/intern/libmv/libmv/image/tuple.h
index c8dc36f2e18..447bf0cc81c 100644
--- a/intern/libmv/libmv/image/tuple.h
+++ b/intern/libmv/libmv/image/tuple.h
@@ -34,10 +34,14 @@ class Tuple {
Tuple(T initial_value) { Reset(initial_value); }
template <typename D>
- Tuple(D *values) { Reset(values); }
+ Tuple(D* values) {
+ Reset(values);
+ }
template <typename D>
- Tuple(const Tuple<D, N> &b) { Reset(b); }
+ Tuple(const Tuple<D, N>& b) {
+ Reset(b);
+ }
template <typename D>
Tuple& operator=(const Tuple<D, N>& b) {
@@ -46,30 +50,32 @@ class Tuple {
}
template <typename D>
- void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
+ void Reset(const Tuple<D, N>& b) {
+ Reset(b.Data());
+ }
template <typename D>
- void Reset(D *values) {
- for (int i = 0;i < N; i++) {
+ void Reset(D* values) {
+ for (int i = 0; i < N; i++) {
data_[i] = T(values[i]);
}
}
// Set all tuple values to the same thing.
void Reset(T value) {
- for (int i = 0;i < N; i++) {
+ for (int i = 0; i < N; i++) {
data_[i] = value;
}
}
// Pointer to the first element.
- T *Data() { return &data_[0]; }
- const T *Data() const { return &data_[0]; }
+ T* Data() { return &data_[0]; }
+ const T* Data() const { return &data_[0]; }
- T &operator()(int i) { return data_[i]; }
- const T &operator()(int i) const { return data_[i]; }
+ T& operator()(int i) { return data_[i]; }
+ const T& operator()(int i) const { return data_[i]; }
- bool operator==(const Tuple<T, N> &other) const {
+ bool operator==(const Tuple<T, N>& other) const {
for (int i = 0; i < N; ++i) {
if ((*this)(i) != other(i)) {
return false;
@@ -77,9 +83,7 @@ class Tuple {
}
return true;
}
- bool operator!=(const Tuple<T, N> &other) const {
- return !(*this == other);
- }
+ bool operator!=(const Tuple<T, N>& other) const { return !(*this == other); }
private:
T data_[N];
diff --git a/intern/libmv/libmv/multiview/conditioning.cc b/intern/libmv/libmv/multiview/conditioning.cc
index 0afbf119ea3..2f4bf653ca0 100644
--- a/intern/libmv/libmv/multiview/conditioning.cc
+++ b/intern/libmv/libmv/multiview/conditioning.cc
@@ -24,7 +24,7 @@
namespace libmv {
// HZ 4.4.4 pag.109: Point conditioning (non isotropic)
-void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
+void PreconditionerFromPoints(const Mat& points, Mat3* T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@@ -38,12 +38,14 @@ void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
if (variance(1) < 1e-8)
yfactor = mean(1) = 1.0;
+ // clang-format off
*T << xfactor, 0, -xfactor * mean(0),
0, yfactor, -yfactor * mean(1),
0, 0, 1;
+ // clang-format on
}
// HZ 4.4.4 pag.107: Point conditioning (isotropic)
-void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
+void IsotropicPreconditionerFromPoints(const Mat& points, Mat3* T) {
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@@ -57,14 +59,16 @@ void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
mean.setOnes();
}
+ // clang-format off
*T << factor, 0, -factor * mean(0),
0, factor, -factor * mean(1),
0, 0, 1;
+ // clang-format on
}
-void ApplyTransformationToPoints(const Mat &points,
- const Mat3 &T,
- Mat *transformed_points) {
+void ApplyTransformationToPoints(const Mat& points,
+ const Mat3& T,
+ Mat* transformed_points) {
int n = points.cols();
transformed_points->resize(2, n);
Mat3X p(3, n);
@@ -73,26 +77,24 @@ void ApplyTransformationToPoints(const Mat &points,
HomogeneousToEuclidean(p, transformed_points);
}
-void NormalizePoints(const Mat &points,
- Mat *normalized_points,
- Mat3 *T) {
+void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T) {
PreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
-void NormalizeIsotropicPoints(const Mat &points,
- Mat *normalized_points,
- Mat3 *T) {
+void NormalizeIsotropicPoints(const Mat& points,
+ Mat* normalized_points,
+ Mat3* T) {
IsotropicPreconditionerFromPoints(points, T);
ApplyTransformationToPoints(points, *T, normalized_points);
}
// Denormalize the results. See HZ page 109.
-void UnnormalizerT::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+void UnnormalizerT::Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H) {
*H = T2.transpose() * (*H) * T1;
}
-void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+void UnnormalizerI::Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H) {
*H = T2.inverse() * (*H) * T1;
}
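The preconditioner above (HZ 4.4.4) translates the points onto their centroid and rescales each axis by sqrt(2 / variance), so the conditioned coordinates have zero mean and a variance of 2. A standalone sketch of the non-isotropic variant using plain structs (PreconditionerSketch is illustrative only; the near-zero-variance guard of the real code is omitted, and T is returned row-major):

#include <cmath>
#include <vector>

struct Point2 { double x, y; };

// Build the 3x3 conditioning transform T: translate by the centroid, then
// scale each axis so that its variance becomes 2.
static void PreconditionerSketch(const std::vector<Point2>& points,
                                 double T[9]) {
  double mean_x = 0, mean_y = 0;
  for (const Point2& p : points) {
    mean_x += p.x;
    mean_y += p.y;
  }
  mean_x /= points.size();
  mean_y /= points.size();
  double var_x = 0, var_y = 0;
  for (const Point2& p : points) {
    var_x += (p.x - mean_x) * (p.x - mean_x);
    var_y += (p.y - mean_y) * (p.y - mean_y);
  }
  var_x /= points.size();
  var_y /= points.size();
  const double xfactor = std::sqrt(2.0 / var_x);
  const double yfactor = std::sqrt(2.0 / var_y);
  T[0] = xfactor; T[1] = 0;       T[2] = -xfactor * mean_x;
  T[3] = 0;       T[4] = yfactor; T[5] = -yfactor * mean_y;
  T[6] = 0;       T[7] = 0;       T[8] = 1;
}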
diff --git a/intern/libmv/libmv/multiview/conditioning.h b/intern/libmv/libmv/multiview/conditioning.h
index 8f3e3a76070..876c5af48e6 100644
--- a/intern/libmv/libmv/multiview/conditioning.h
+++ b/intern/libmv/libmv/multiview/conditioning.h
@@ -26,32 +26,30 @@
namespace libmv {
// Point conditioning (non isotropic)
-void PreconditionerFromPoints(const Mat &points, Mat3 *T);
+void PreconditionerFromPoints(const Mat& points, Mat3* T);
// Point conditioning (isotropic)
-void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T);
+void IsotropicPreconditionerFromPoints(const Mat& points, Mat3* T);
-void ApplyTransformationToPoints(const Mat &points,
- const Mat3 &T,
- Mat *transformed_points);
+void ApplyTransformationToPoints(const Mat& points,
+ const Mat3& T,
+ Mat* transformed_points);
-void NormalizePoints(const Mat &points,
- Mat *normalized_points,
- Mat3 *T);
+void NormalizePoints(const Mat& points, Mat* normalized_points, Mat3* T);
-void NormalizeIsotropicPoints(const Mat &points,
- Mat *normalized_points,
- Mat3 *T);
+void NormalizeIsotropicPoints(const Mat& points,
+ Mat* normalized_points,
+ Mat3* T);
/// Use inverse for unnormalize
struct UnnormalizerI {
// Denormalize the results. See HZ page 109.
- static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+ static void Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H);
};
/// Use transpose for unnormalize
struct UnnormalizerT {
// Denormalize the results. See HZ page 109.
- static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+ static void Unnormalize(const Mat3& T1, const Mat3& T2, Mat3* H);
};
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/euclidean_resection.cc b/intern/libmv/libmv/multiview/euclidean_resection.cc
index 16a1a0caafa..249d7ebef3d 100644
--- a/intern/libmv/libmv/multiview/euclidean_resection.cc
+++ b/intern/libmv/libmv/multiview/euclidean_resection.cc
@@ -23,8 +23,8 @@
#include <cmath>
#include <limits>
-#include <Eigen/SVD>
#include <Eigen/Geometry>
+#include <Eigen/SVD>
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
@@ -35,9 +35,10 @@ namespace euclidean_resection {
typedef unsigned int uint;
-bool EuclideanResection(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t,
+bool EuclideanResection(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t,
ResectionMethod method) {
switch (method) {
case RESECTION_ANSAR_DANIILIDIS:
@@ -49,20 +50,20 @@ bool EuclideanResection(const Mat2X &x_camera,
case RESECTION_PPNP:
return EuclideanResectionPPnP(x_camera, X_world, R, t);
break;
- default:
- LOG(FATAL) << "Unknown resection method.";
+ default: LOG(FATAL) << "Unknown resection method.";
}
return false;
}
-bool EuclideanResection(const Mat &x_image,
- const Mat3X &X_world,
- const Mat3 &K,
- Mat3 *R, Vec3 *t,
+bool EuclideanResection(const Mat& x_image,
+ const Mat3X& X_world,
+ const Mat3& K,
+ Mat3* R,
+ Vec3* t,
ResectionMethod method) {
CHECK(x_image.rows() == 2 || x_image.rows() == 3)
- << "Invalid size for x_image: "
- << x_image.rows() << "x" << x_image.cols();
+ << "Invalid size for x_image: " << x_image.rows() << "x"
+ << x_image.cols();
Mat2X x_camera;
if (x_image.rows() == 2) {
@@ -73,18 +74,15 @@ bool EuclideanResection(const Mat &x_image,
return EuclideanResection(x_camera, X_world, R, t, method);
}
-void AbsoluteOrientation(const Mat3X &X,
- const Mat3X &Xp,
- Mat3 *R,
- Vec3 *t) {
+void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t) {
int num_points = X.cols();
- Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
+ Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
// Normalize the two point sets.
Mat3X Xn(3, num_points), Xpn(3, num_points);
for (int i = 0; i < num_points; ++i) {
- Xn.col(i) = X.col(i) - C;
+ Xn.col(i) = X.col(i) - C;
Xpn.col(i) = Xp.col(i) - Cp;
}
@@ -100,10 +98,12 @@ void AbsoluteOrientation(const Mat3X &X,
double Szy = Xn.row(2).dot(Xpn.row(1));
Mat4 N;
+ // clang-format off
N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
+ // clang-format on
// Find the unit quaternion q that maximizes qNq. It is the eigenvector
  // corresponding to the largest eigenvalue.
@@ -118,6 +118,7 @@ void AbsoluteOrientation(const Mat3X &X,
double q1q3 = q(1) * q(3);
double q2q3 = q(2) * q(3);
+ // clang-format off
(*R) << qq(0) + qq(1) - qq(2) - qq(3),
2 * (q1q2 - q0q3),
2 * (q1q3 + q0q2),
@@ -127,6 +128,7 @@ void AbsoluteOrientation(const Mat3X &X,
2 * (q1q3 - q0q2),
2 * (q2q3 + q0q1),
qq(0) - qq(1) - qq(2) + qq(3);
+ // clang-format on
// Fix the handedness of the R matrix.
if (R->determinant() < 0) {
@@ -176,9 +178,7 @@ static int Sign(double value) {
// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
// 5.
-static Vec MatrixToConstraint(const Mat &A,
- int num_k_columns,
- int num_lambda) {
+static Vec MatrixToConstraint(const Mat& A, int num_k_columns, int num_lambda) {
Vec C(num_k_columns);
C.setZero();
int idx = 0;
@@ -195,17 +195,17 @@ static Vec MatrixToConstraint(const Mat &A,
}
// Normalizes the columns of vectors.
-static void NormalizeColumnVectors(Mat3X *vectors) {
+static void NormalizeColumnVectors(Mat3X* vectors) {
int num_columns = vectors->cols();
for (int i = 0; i < num_columns; ++i) {
vectors->col(i).normalize();
}
}
-void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R,
- Vec3 *t) {
+void EuclideanResectionAnsarDaniilidis(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
@@ -229,14 +229,14 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
// them into the M matrix (8). Also store the initial (i, j) indices.
int row = 0;
for (int i = 0; i < num_points; ++i) {
- for (int j = i+1; j < num_points; ++j) {
+ for (int j = i + 1; j < num_points; ++j) {
M(row, row) = -2 * x_camera_unit.col(i).dot(x_camera_unit.col(j));
M(row, num_m_rows + i) = x_camera_unit.col(i).dot(x_camera_unit.col(i));
M(row, num_m_rows + j) = x_camera_unit.col(j).dot(x_camera_unit.col(j));
Vec3 Xdiff = X_world.col(i) - X_world.col(j);
double center_to_point_distance = Xdiff.norm();
M(row, num_m_columns - 1) =
- - center_to_point_distance * center_to_point_distance;
+ -center_to_point_distance * center_to_point_distance;
ij_index(row, 0) = i;
ij_index(row, 1) = j;
++row;
@@ -246,17 +246,17 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
int num_lambda = num_points + 1; // Dimension of the null space of M.
- Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
- num_m_rows,
- num_m_columns,
- num_lambda);
+ Mat V = M.jacobiSvd(Eigen::ComputeFullV)
+ .matrixV()
+ .block(0, num_m_rows, num_m_columns, num_lambda);
// TODO(vess): The number of constraint equations in K (num_k_rows) must be
// (num_points + 1) * (num_points + 2)/2. This creates a performance issue
// for more than 4 points. It is fine for 4 points at the moment with 18
// instead of 15 equations.
- int num_k_rows = num_m_rows + num_points *
- (num_points*(num_points-1)/2 - num_points+1);
+ int num_k_rows =
+ num_m_rows +
+ num_points * (num_points * (num_points - 1) / 2 - num_points + 1);
int num_k_columns = num_lambda * (num_lambda + 1) / 2;
Mat K(num_k_rows, num_k_columns);
K.setZero();
@@ -275,8 +275,8 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
- MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
- V.row(idx3).transpose() * V.row(idx4),
+ MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
+ V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
@@ -296,8 +296,8 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
int idx4 = IJToPointIndex(i, k, num_points);
K.row(counter_k_row) =
- MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
- V.row(idx3).transpose() * V.row(idx4),
+ MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2) -
+ V.row(idx3).transpose() * V.row(idx4),
num_k_columns,
num_lambda);
++counter_k_row;
@@ -317,14 +317,12 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
}
// Ensure positiveness of the largest value corresponding to lambda_ii.
- L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
- max_L_sq_index,
- num_lambda)));
+ L_sq =
+ L_sq * Sign(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
Vec L(num_lambda);
- L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
- max_L_sq_index,
- num_lambda)));
+ L(max_L_sq_index) =
+ sqrt(L_sq(IJToIndex(max_L_sq_index, max_L_sq_index, num_lambda)));
for (int i = 0; i < num_lambda; ++i) {
if (i != max_L_sq_index) {
@@ -353,9 +351,9 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
}
// Selects 4 virtual control points using mean and PCA.
-static void SelectControlPoints(const Mat3X &X_world,
- Mat *X_centered,
- Mat34 *X_control_points) {
+static void SelectControlPoints(const Mat3X& X_world,
+ Mat* X_centered,
+ Mat34* X_control_points) {
size_t num_points = X_world.cols();
// The first virtual control point, C0, is the centroid.
@@ -379,13 +377,13 @@ static void SelectControlPoints(const Mat3X &X_world,
}
// Computes the barycentric coordinates for all real points
-static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
- const Mat34 &X_control_points,
- Mat4X *alphas) {
+static void ComputeBarycentricCoordinates(const Mat3X& X_world_centered,
+ const Mat34& X_control_points,
+ Mat4X* alphas) {
size_t num_points = X_world_centered.cols();
Mat3 C2;
for (size_t c = 1; c < 4; c++) {
- C2.col(c-1) = X_control_points.col(c) - X_control_points.col(0);
+ C2.col(c - 1) = X_control_points.col(c) - X_control_points.col(0);
}
Mat3 C2inv = C2.inverse();
@@ -401,14 +399,15 @@ static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
// Estimates the coordinates of all real points in the camera coordinate frame
static void ComputePointsCoordinatesInCameraFrame(
- const Mat4X &alphas,
- const Vec4 &betas,
- const Eigen::Matrix<double, 12, 12> &U,
- Mat3X *X_camera) {
+ const Mat4X& alphas,
+ const Vec4& betas,
+ const Eigen::Matrix<double, 12, 12>& U,
+ Mat3X* X_camera) {
size_t num_points = alphas.cols();
// Estimates the control points in the camera reference frame.
- Mat34 C2b; C2b.setZero();
+ Mat34 C2b;
+ C2b.setZero();
for (size_t cu = 0; cu < 4; cu++) {
for (size_t c = 0; c < 4; c++) {
C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
@@ -436,9 +435,10 @@ static void ComputePointsCoordinatesInCameraFrame(
}
}
-bool EuclideanResectionEPnP(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t) {
+bool EuclideanResectionEPnP(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t) {
CHECK(x_camera.cols() == X_world.cols());
CHECK(x_camera.cols() > 3);
size_t num_points = X_world.cols();
@@ -462,6 +462,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
double a3 = alphas(3, c);
double ui = x_camera(0, c);
double vi = x_camera(1, c);
+ // clang-format off
M.block(2*c, 0, 2, 12) << a0, 0,
a0*(-ui), a1, 0,
a1*(-ui), a2, 0,
@@ -471,10 +472,11 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
a1, a1*(-vi), 0,
a2, a2*(-vi), 0,
a3, a3*(-vi);
+ // clang-format on
}
// TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
- Eigen::JacobiSVD<Mat> MtMsvd(M.transpose()*M, Eigen::ComputeFullU);
+ Eigen::JacobiSVD<Mat> MtMsvd(M.transpose() * M, Eigen::ComputeFullU);
Eigen::Matrix<double, 12, 12> u2 = MtMsvd.matrixU().transpose();
// Estimate the L matrix.
@@ -495,21 +497,22 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
dv2.row(3) = u2.block(10, 3, 1, 3) - u2.block(10, 6, 1, 3);
dv2.row(4) = u2.block(10, 3, 1, 3) - u2.block(10, 9, 1, 3);
dv2.row(5) = u2.block(10, 6, 1, 3) - u2.block(10, 9, 1, 3);
- dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
- dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
- dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
- dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
- dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
- dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
- dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
- dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
- dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
- dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
- dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
- dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
+ dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
+ dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
+ dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
+ dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
+ dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
+ dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
+ dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
+ dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
+ dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
+ dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
+ dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
+ dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
Eigen::Matrix<double, 6, 10> L;
for (size_t r = 0; r < 6; r++) {
+ // clang-format off
L.row(r) << dv1.row(r).dot(dv1.row(r)),
2.0 * dv1.row(r).dot(dv2.row(r)),
dv2.row(r).dot(dv2.row(r)),
@@ -520,19 +523,23 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
2.0 * dv2.row(r).dot(dv4.row(r)),
2.0 * dv3.row(r).dot(dv4.row(r)),
dv4.row(r).dot(dv4.row(r));
+ // clang-format on
}
Vec6 rho;
+ // clang-format off
rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
(X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
(X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
+ // clang-format on
// There are three possible solutions based on the three approximations of L
// (betas). Below, each one is solved for then the best one is chosen.
Mat3X X_camera;
- Mat3 K; K.setIdentity();
+ Mat3 K;
+ K.setIdentity();
vector<Mat3> Rs(3);
vector<Vec3> ts(3);
Vec rmse(3);
@@ -546,7 +553,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
//
// TODO(keir): Decide if setting this to infinity, effectively disabling the
// check, is the right approach. So far this seems the case.
- double kSuccessThreshold = std::numeric_limits<double>::max();
+ double kSuccessThreshold = std::numeric_limits<double>::max();
// Find the first possible solution for R, t corresponding to:
// Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
@@ -563,7 +570,7 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
if (b4(0) < 0) {
b4 = -b4;
}
- b4(0) = std::sqrt(b4(0));
+ b4(0) = std::sqrt(b4(0));
betas << b4(0), b4(1) / b4(0), b4(2) / b4(0), b4(3) / b4(0);
ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
AbsoluteOrientation(X_world, X_camera, &Rs[0], &ts[0]);
@@ -669,12 +676,12 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
// TODO(julien): Improve the solutions with non-linear refinement.
return true;
}
-
+
/*
-
+
Straight from the paper:
http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
-
+
function [R T] = ppnp(P,S,tol)
% input
% P : matrix (nx3) image coordinates in camera reference [u v 1]
@@ -708,33 +715,34 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
end
T = -R*c;
end
-
+
*/
// TODO(keir): Re-do all the variable names and add comments matching the paper.
// This implementation has too much of the terseness of the original. On the
// other hand, it did work on the first try.
-bool EuclideanResectionPPnP(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t) {
+bool EuclideanResectionPPnP(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t) {
int n = x_camera.cols();
Mat Z = Mat::Zero(n, n);
Vec e = Vec::Ones(n);
Mat A = Mat::Identity(n, n) - (e * e.transpose() / n);
Vec II = e / n;
-
+
Mat P(n, 3);
P.col(0) = x_camera.row(0);
P.col(1) = x_camera.row(1);
P.col(2).setConstant(1.0);
-
+
Mat S = X_world.transpose();
-
+
double error = std::numeric_limits<double>::infinity();
Mat E_old = 1000 * Mat::Ones(n, 3);
-
+
Vec3 c;
Mat E(n, 3);
-
+
int iteration = 0;
double tolerance = 1e-5;
// TODO(keir): The limit of 100 can probably be reduced, but this will require
@@ -748,20 +756,21 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
s << 1, 1, (U * VT).determinant();
*R = U * s.asDiagonal() * VT;
Mat PR = P * *R; // n x 3
- c = (S - Z*PR).transpose() * II;
- Mat Y = S - e*c.transpose(); // n x 3
- Vec Zmindiag = (PR * Y.transpose()).diagonal()
- .cwiseQuotient(P.rowwise().squaredNorm());
+ c = (S - Z * PR).transpose() * II;
+ Mat Y = S - e * c.transpose(); // n x 3
+ Vec Zmindiag = (PR * Y.transpose())
+ .diagonal()
+ .cwiseQuotient(P.rowwise().squaredNorm());
for (int i = 0; i < n; ++i) {
Zmindiag[i] = std::max(Zmindiag[i], 0.0);
}
Z = Zmindiag.asDiagonal();
- E = Y - Z*PR;
+ E = Y - Z * PR;
error = (E - E_old).norm();
LG << "PPnP error(" << (iteration++) << "): " << error;
E_old = E;
}
- *t = -*R*c;
+ *t = -*R * c;
// TODO(keir): Figure out what the failure cases are. Is it too many
// iterations? Spend some time going through the math figuring out if there
@@ -769,6 +778,5 @@ bool EuclideanResectionPPnP(const Mat2X &x_camera,
return true;
}
-
-} // namespace resection
+} // namespace euclidean_resection
} // namespace libmv
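AbsoluteOrientation above follows Horn's closed-form method: build the 4x4 matrix N from the cross-covariance sums, take the eigenvector of its largest eigenvalue as a unit quaternion, and expand that quaternion into a rotation matrix (the (*R) << ... block). That expansion step on its own, as a standalone sketch where q = (w, x, y, z) is assumed to be a unit quaternion and R is written row-major:

// Expand a unit quaternion into the corresponding 3x3 rotation matrix.
static void QuaternionToRotationSketch(const double q[4], double R[9]) {
  const double w = q[0], x = q[1], y = q[2], z = q[3];
  R[0] = w * w + x * x - y * y - z * z;
  R[1] = 2 * (x * y - w * z);
  R[2] = 2 * (x * z + w * y);
  R[3] = 2 * (x * y + w * z);
  R[4] = w * w - x * x + y * y - z * z;
  R[5] = 2 * (y * z - w * x);
  R[6] = 2 * (x * z - w * y);
  R[7] = 2 * (y * z + w * x);
  R[8] = w * w - x * x - y * y + z * z;
}

In the full routine the translation then follows from the two centroids, roughly t = Cp - R * C in the notation of the code above.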
diff --git a/intern/libmv/libmv/multiview/euclidean_resection.h b/intern/libmv/libmv/multiview/euclidean_resection.h
index 28eae92611c..3c4c3979ff6 100644
--- a/intern/libmv/libmv/multiview/euclidean_resection.h
+++ b/intern/libmv/libmv/multiview/euclidean_resection.h
@@ -21,8 +21,8 @@
#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
-#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
namespace euclidean_resection {
@@ -33,7 +33,7 @@ enum ResectionMethod {
// The "EPnP" algorithm by Lepetit et al.
// http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
RESECTION_EPNP,
-
+
// The Procrustes PNP algorithm ("PPnP")
// http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
RESECTION_PPNP
@@ -50,9 +50,10 @@ enum ResectionMethod {
* \param t Solution for the camera translation vector
* \param method The resection method to use.
*/
-bool EuclideanResection(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t,
+bool EuclideanResection(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@@ -68,10 +69,11 @@ bool EuclideanResection(const Mat2X &x_camera,
* \param t Solution for the camera translation vector
* \param method Resection method
*/
-bool EuclideanResection(const Mat &x_image,
- const Mat3X &X_world,
- const Mat3 &K,
- Mat3 *R, Vec3 *t,
+bool EuclideanResection(const Mat& x_image,
+ const Mat3X& X_world,
+ const Mat3& K,
+ Mat3* R,
+ Vec3* t,
ResectionMethod method = RESECTION_EPNP);
/**
@@ -84,10 +86,7 @@ bool EuclideanResection(const Mat &x_image,
* Horn, Hilden, "Closed-form solution of absolute orientation using
* orthonormal matrices"
*/
-void AbsoluteOrientation(const Mat3X &X,
- const Mat3X &Xp,
- Mat3 *R,
- Vec3 *t);
+void AbsoluteOrientation(const Mat3X& X, const Mat3X& Xp, Mat3* R, Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@@ -102,9 +101,10 @@ void AbsoluteOrientation(const Mat3X &X,
* This is the algorithm described in: "Linear Pose Estimation from Points or
* Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no. 5.
*/
-void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t);
+void EuclideanResectionAnsarDaniilidis(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
* more 3D points and their images.
@@ -120,9 +120,10 @@ void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
* and F. Moreno-Noguer and P. Fua, IJCV 2009. vol. 81, no. 2
* \note: the non-linear optimization is not implemented here.
*/
-bool EuclideanResectionEPnP(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t);
+bool EuclideanResectionEPnP(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t);
/**
* Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
@@ -137,12 +138,12 @@ bool EuclideanResectionEPnP(const Mat2X &x_camera,
* Straight from the paper:
* http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
*/
-bool EuclideanResectionPPnP(const Mat2X &x_camera,
- const Mat3X &X_world,
- Mat3 *R, Vec3 *t);
+bool EuclideanResectionPPnP(const Mat2X& x_camera,
+ const Mat3X& X_world,
+ Mat3* R,
+ Vec3* t);
} // namespace euclidean_resection
} // namespace libmv
-
#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
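For reference, a minimal call-site sketch for the resection entry point declared above, assuming the correspondences are already expressed in normalized camera coordinates (the calibration K has been applied) and using the libmv typedefs pulled in by this header; RecoverPoseSketch is hypothetical, not part of libmv:

#include "libmv/multiview/euclidean_resection.h"

// Recover the camera pose R, t from 2D-3D correspondences given in
// normalized camera coordinates.
static bool RecoverPoseSketch(const libmv::Mat2X& x_camera,
                              const libmv::Mat3X& X_world,
                              libmv::Mat3* R,
                              libmv::Vec3* t) {
  using libmv::euclidean_resection::EuclideanResection;
  using libmv::euclidean_resection::RESECTION_EPNP;
  // EPnP is the default method and expects at least 4 points.
  return EuclideanResection(x_camera, X_world, R, t, RESECTION_EPNP);
}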
diff --git a/intern/libmv/libmv/multiview/euclidean_resection_test.cc b/intern/libmv/libmv/multiview/euclidean_resection_test.cc
index 378837d3d2d..3bb8e6e1710 100644
--- a/intern/libmv/libmv/multiview/euclidean_resection_test.cc
+++ b/intern/libmv/libmv/multiview/euclidean_resection_test.cc
@@ -19,9 +19,9 @@
// IN THE SOFTWARE.
#include "libmv/multiview/euclidean_resection.h"
-#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv::euclidean_resection;
@@ -33,10 +33,10 @@ static void CreateCameraSystem(const Mat3& KK,
const Vec& X_distances,
const Mat3& R_input,
const Vec3& T_input,
- Mat2X *x_camera,
- Mat3X *X_world,
- Mat3 *R_expected,
- Vec3 *T_expected) {
+ Mat2X* x_camera,
+ Mat3X* X_world,
+ Mat3* R_expected,
+ Vec3* T_expected) {
int num_points = x_image.cols();
Mat3X x_unit_cam(3, num_points);
@@ -76,9 +76,9 @@ TEST(AbsoluteOrientation, QuaternionSolution) {
// Create a random translation and rotation.
Mat3 R_input;
- R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 t_input;
t_input.setRandom();
@@ -109,26 +109,29 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
image_dimensions << 1600, 1200;
Mat3 KK;
+ // clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
+ // clang-format on
// The real image points.
int num_points = 4;
Mat3X x_image(3, num_points);
+ // clang-format off
x_image << 1164.06, 734.948, 749.599, 430.727,
681.386, 844.59, 496.315, 580.775,
1, 1, 1, 1;
-
+ // clang-format on
// A vector of the 4 distances to the 3D points.
Vec X_distances = 100 * Vec::Random(num_points).array().abs();
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
- R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@@ -140,15 +143,21 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
Vec3 T_expected;
Mat3X X_world;
Mat2X x_camera;
- CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
- &x_camera, &X_world, &R_expected, &T_expected);
+ CreateCameraSystem(KK,
+ x_image,
+ X_distances,
+ R_input,
+ T_input,
+ &x_camera,
+ &X_world,
+ &R_expected,
+ &T_expected);
// Finally, run the code under test.
Mat3 R_output;
Vec3 T_output;
- EuclideanResection(x_camera, X_world,
- &R_output, &T_output,
- RESECTION_ANSAR_DANIILIDIS);
+ EuclideanResection(
+ x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
@@ -173,9 +182,11 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
// TODO(jmichot): Reduce the code duplication here with the code above.
TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 KK;
+ // clang-format off
KK << 2796, 0, 804,
0 , 2796, 641,
0, 0, 1;
+ // clang-format on
// Create random image points for a 1600x1200 image.
int w = 1600;
@@ -192,9 +203,9 @@ TEST(EuclideanResection, Points6AllRandomInput) {
// Create the random camera motion R and t that resection should recover.
Mat3 R_input;
- R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
- * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY()) *
+ Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
Vec3 T_input;
T_input.setRandom();
@@ -204,33 +215,36 @@ TEST(EuclideanResection, Points6AllRandomInput) {
Mat3 R_expected;
Vec3 T_expected;
Mat3X X_world;
- CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
- &x_camera, &X_world, &R_expected, &T_expected);
+ CreateCameraSystem(KK,
+ x_image,
+ X_distances,
+ R_input,
+ T_input,
+ &x_camera,
+ &X_world,
+ &R_expected,
+ &T_expected);
// Test each of the resection methods.
{
Mat3 R_output;
Vec3 T_output;
- EuclideanResection(x_camera, X_world,
- &R_output, &T_output,
- RESECTION_ANSAR_DANIILIDIS);
+ EuclideanResection(
+ x_camera, X_world, &R_output, &T_output, RESECTION_ANSAR_DANIILIDIS);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
- EuclideanResection(x_camera, X_world,
- &R_output, &T_output,
- RESECTION_EPNP);
+ EuclideanResection(x_camera, X_world, &R_output, &T_output, RESECTION_EPNP);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
{
Mat3 R_output;
Vec3 T_output;
- EuclideanResection(x_image, X_world, KK,
- &R_output, &T_output);
+ EuclideanResection(x_image, X_world, KK, &R_output, &T_output);
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
}
diff --git a/intern/libmv/libmv/multiview/fundamental.cc b/intern/libmv/libmv/multiview/fundamental.cc
index ea8594c8cc0..c8c94ecd7bb 100644
--- a/intern/libmv/libmv/multiview/fundamental.cc
+++ b/intern/libmv/libmv/multiview/fundamental.cc
@@ -22,15 +22,15 @@
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
-#include "libmv/numeric/numeric.h"
-#include "libmv/numeric/poly.h"
#include "libmv/multiview/conditioning.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/poly.h"
namespace libmv {
-static void EliminateRow(const Mat34 &P, int row, Mat *X) {
+static void EliminateRow(const Mat34& P, int row, Mat* X) {
X->resize(2, 4);
int first_row = (row + 1) % 3;
@@ -42,7 +42,7 @@ static void EliminateRow(const Mat34 &P, int row, Mat *X) {
}
}
-void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
+void ProjectionsFromFundamental(const Mat3& F, Mat34* P1, Mat34* P2) {
*P1 << Mat3::Identity(), Vec3::Zero();
Vec3 e2;
Mat3 Ft = F.transpose();
@@ -51,7 +51,7 @@ void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
}
 // Adapted from vgg_F_from_P.
-void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
+void FundamentalFromProjections(const Mat34& P1, const Mat34& P2, Mat3* F) {
Mat X[3];
Mat Y[3];
Mat XY;
@@ -71,7 +71,7 @@ void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
// HZ 11.1 pag.279 (x1 = x, x2 = x')
// http://www.cs.unc.edu/~marc/tutorial/node54.html
-static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
+static double EightPointSolver(const Mat& x1, const Mat& x2, Mat3* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
@@ -98,7 +98,7 @@ static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
}
// HZ 11.1.1 pag.280
-void EnforceFundamentalRank2Constraint(Mat3 *F) {
+void EnforceFundamentalRank2Constraint(Mat3* F) {
Eigen::JacobiSVD<Mat3> USV(*F, Eigen::ComputeFullU | Eigen::ComputeFullV);
Vec3 d = USV.singularValues();
d(2) = 0.0;
@@ -106,9 +106,7 @@ void EnforceFundamentalRank2Constraint(Mat3 *F) {
}
// HZ 11.2 pag.281 (x1 = x, x2 = x')
-double NormalizedEightPointSolver(const Mat &x1,
- const Mat &x2,
- Mat3 *F) {
+double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 8);
DCHECK_EQ(x1.rows(), x2.rows());
@@ -135,9 +133,9 @@ double NormalizedEightPointSolver(const Mat &x1,
// Seven-point algorithm.
// http://www.cs.unc.edu/~marc/tutorial/node55.html
-double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- std::vector<Mat3> *F) {
+double FundamentalFrom7CorrespondencesLinear(const Mat& x1,
+ const Mat& x2,
+ std::vector<Mat3>* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_EQ(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
@@ -169,25 +167,29 @@ double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
// Then, use the condition det(F) = 0 to determine F. In other words, solve
// det(F1 + a*F2) = 0 for a.
- double a = F1(0, 0), j = F2(0, 0),
- b = F1(0, 1), k = F2(0, 1),
- c = F1(0, 2), l = F2(0, 2),
- d = F1(1, 0), m = F2(1, 0),
- e = F1(1, 1), n = F2(1, 1),
- f = F1(1, 2), o = F2(1, 2),
- g = F1(2, 0), p = F2(2, 0),
- h = F1(2, 1), q = F2(2, 1),
- i = F1(2, 2), r = F2(2, 2);
+ double a = F1(0, 0), j = F2(0, 0);
+ double b = F1(0, 1), k = F2(0, 1);
+ double c = F1(0, 2), l = F2(0, 2);
+ double d = F1(1, 0), m = F2(1, 0);
+ double e = F1(1, 1), n = F2(1, 1);
+ double f = F1(1, 2), o = F2(1, 2);
+ double g = F1(2, 0), p = F2(2, 0);
+ double h = F1(2, 1), q = F2(2, 1);
+ double i = F1(2, 2), r = F2(2, 2);
// Run fundamental_7point_coeffs.py to get the below coefficients.
// The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
double P[4] = {
- a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g,
- a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
- a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
- a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
- a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
- j*n*r + k*o*p + l*m*q - j*o*q - k*m*r - l*n*p,
+ a * e * i + b * f * g + c * d * h - a * f * h - b * d * i - c * e * g,
+ a * e * r + a * i * n + b * f * p + b * g * o + c * d * q + c * h * m +
+ d * h * l + e * i * j + f * g * k - a * f * q - a * h * o -
+ b * d * r - b * i * m - c * e * p - c * g * n - d * i * k -
+ e * g * l - f * h * j,
+ a * n * r + b * o * p + c * m * q + d * l * q + e * j * r + f * k * p +
+ g * k * o + h * l * m + i * j * n - a * o * q - b * m * r -
+ c * n * p - d * k * r - e * l * p - f * j * q - g * l * n -
+ h * j * o - i * k * m,
+ j * n * r + k * o * p + l * m * q - j * o * q - k * m * r - l * n * p,
};
// Solve for the roots of P[3]*x^3 + P[2]*x^2 + P[1]*x + P[0] = 0.
@@ -195,15 +197,15 @@ double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
int num_roots = SolveCubicPolynomial(P, roots);
// Build the fundamental matrix for each solution.
- for (int kk = 0; kk < num_roots; ++kk) {
+ for (int kk = 0; kk < num_roots; ++kk) {
F->push_back(F1 + roots[kk] * F2);
}
return s;
}
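As a cross-check of the hand-expanded determinant coefficients above, here is a minimal standalone sketch in plain Eigen (libmv's Mat3 is assumed to be an Eigen::Matrix3d typedef) that recovers the same cubic numerically: sample det(F1 + a*F2) at four values of a and solve the resulting Vandermonde system. The recovered coefficients, in ascending powers, can be fed straight into SolveCubicPolynomial just as the code above does.

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Two arbitrary 3x3 matrices standing in for the nullspace basis F1, F2.
  Eigen::Matrix3d F1 = Eigen::Matrix3d::Random();
  Eigen::Matrix3d F2 = Eigen::Matrix3d::Random();

  // det(F1 + a*F2) is a cubic in a: P0 + P1*a + P2*a^2 + P3*a^3.
  // Sample it at four distinct values of a and solve the 4x4 Vandermonde system.
  const double samples[4] = {0.0, 1.0, -1.0, 2.0};
  Eigen::Matrix4d V;
  Eigen::Vector4d d;
  for (int k = 0; k < 4; ++k) {
    const double a = samples[k];
    V.row(k) << 1.0, a, a * a, a * a * a;
    d(k) = (F1 + a * F2).determinant();
  }
  const Eigen::Vector4d P = V.fullPivLu().solve(d);  // Ascending powers of a.
  std::cout << "cubic coefficients: " << P.transpose() << "\n";
  return 0;
}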
-double FundamentalFromCorrespondences7Point(const Mat &x1,
- const Mat &x2,
- std::vector<Mat3> *F) {
+double FundamentalFromCorrespondences7Point(const Mat& x1,
+ const Mat& x2,
+ std::vector<Mat3>* F) {
DCHECK_EQ(x1.rows(), 2);
DCHECK_GE(x1.cols(), 7);
DCHECK_EQ(x1.rows(), x2.rows());
@@ -218,25 +220,25 @@ double FundamentalFromCorrespondences7Point(const Mat &x1,
ApplyTransformationToPoints(x2, T2, &x2_normalized);
// Estimate the fundamental matrix.
- double smaller_singular_value =
- FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, &(*F));
+ double smaller_singular_value = FundamentalFrom7CorrespondencesLinear(
+ x1_normalized, x2_normalized, &(*F));
for (int k = 0; k < F->size(); ++k) {
- Mat3 & Fmat = (*F)[k];
+ Mat3& Fmat = (*F)[k];
// Denormalize the fundamental matrix.
Fmat = T2.transpose() * Fmat * T1;
}
return smaller_singular_value;
}
-void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized) {
+void NormalizeFundamental(const Mat3& F, Mat3* F_normalized) {
*F_normalized = F / FrobeniusNorm(F);
if ((*F_normalized)(2, 2) < 0) {
*F_normalized *= -1;
}
}
-double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+double SampsonDistance(const Mat& F, const Vec2& x1, const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
@@ -244,11 +246,11 @@ double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
- return Square(y_F_x) / ( F_x.head<2>().squaredNorm()
- + Ft_y.head<2>().squaredNorm());
+ return Square(y_F_x) /
+ (F_x.head<2>().squaredNorm() + Ft_y.head<2>().squaredNorm());
}
-double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+double SymmetricEpipolarDistance(const Mat& F, const Vec2& x1, const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
@@ -256,43 +258,40 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
Vec3 Ft_y = F.transpose() * y;
double y_F_x = y.dot(F_x);
- return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
- + 1 / Ft_y.head<2>().squaredNorm());
+ return Square(y_F_x) *
+ (1 / F_x.head<2>().squaredNorm() + 1 / Ft_y.head<2>().squaredNorm());
}
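For reference, a self-contained sketch of the two residuals above for a single correspondence, written against plain Eigen types rather than the libmv Mat/Vec typedefs (the helper names Sampson and SymmetricEpipolar are illustrative only). The Sampson distance is a first-order approximation of the squared reprojection error, while the symmetric epipolar distance sums the squared distances of each point to the epipolar line induced by the other.

#include <Eigen/Dense>
#include <iostream>

static double Sampson(const Eigen::Matrix3d& F,
                      const Eigen::Vector2d& x1,
                      const Eigen::Vector2d& x2) {
  const Eigen::Vector3d x(x1(0), x1(1), 1.0);
  const Eigen::Vector3d y(x2(0), x2(1), 1.0);
  const Eigen::Vector3d F_x = F * x;
  const Eigen::Vector3d Ft_y = F.transpose() * y;
  const double y_F_x = y.dot(F_x);
  return y_F_x * y_F_x /
         (F_x.head<2>().squaredNorm() + Ft_y.head<2>().squaredNorm());
}

static double SymmetricEpipolar(const Eigen::Matrix3d& F,
                                const Eigen::Vector2d& x1,
                                const Eigen::Vector2d& x2) {
  const Eigen::Vector3d x(x1(0), x1(1), 1.0);
  const Eigen::Vector3d y(x2(0), x2(1), 1.0);
  const Eigen::Vector3d F_x = F * x;
  const Eigen::Vector3d Ft_y = F.transpose() * y;
  const double y_F_x = y.dot(F_x);
  return y_F_x * y_F_x * (1.0 / F_x.head<2>().squaredNorm() +
                          1.0 / Ft_y.head<2>().squaredNorm());
}

int main() {
  const Eigen::Matrix3d F = Eigen::Matrix3d::Random();  // Placeholder F only.
  const Eigen::Vector2d x1(10.0, 20.0), x2(11.0, 19.5);
  std::cout << "Sampson: " << Sampson(F, x1, x2)
            << "  symmetric epipolar: " << SymmetricEpipolar(F, x1, x2) << "\n";
  return 0;
}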
// HZ 9.6 pag 257 (formula 9.12)
-void EssentialFromFundamental(const Mat3 &F,
- const Mat3 &K1,
- const Mat3 &K2,
- Mat3 *E) {
+void EssentialFromFundamental(const Mat3& F,
+ const Mat3& K1,
+ const Mat3& K2,
+ Mat3* E) {
*E = K2.transpose() * F * K1;
}
// HZ 9.6 pag 257 (formula 9.12)
// Or http://ai.stanford.edu/~birch/projective/node20.html
-void FundamentalFromEssential(const Mat3 &E,
- const Mat3 &K1,
- const Mat3 &K2,
- Mat3 *F) {
+void FundamentalFromEssential(const Mat3& E,
+ const Mat3& K1,
+ const Mat3& K2,
+ Mat3* F) {
*F = K2.inverse().transpose() * E * K1.inverse();
}
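The two conversions above are inverses of one another up to scale, which a quick standalone check in plain Eigen makes visible (the calibration values below are arbitrary placeholders):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Arbitrary calibration matrices and an arbitrary 3x3 stand-in for F.
  Eigen::Matrix3d K1, K2;
  K1 << 800, 0, 320,
          0, 800, 240,
          0, 0, 1;
  K2 << 650, 0, 360,
          0, 650, 280,
          0, 0, 1;
  const Eigen::Matrix3d F = Eigen::Matrix3d::Random();

  // HZ formula 9.12 and its inverse, exactly as in the two functions above.
  const Eigen::Matrix3d E = K2.transpose() * F * K1;
  const Eigen::Matrix3d F_back = K2.inverse().transpose() * E * K1.inverse();

  // The difference is floating-point round-off only.
  std::cout << "round-trip error: " << (F_back - F).norm() << "\n";
  return 0;
}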
-void RelativeCameraMotion(const Mat3 &R1,
- const Vec3 &t1,
- const Mat3 &R2,
- const Vec3 &t2,
- Mat3 *R,
- Vec3 *t) {
+void RelativeCameraMotion(const Mat3& R1,
+ const Vec3& t1,
+ const Mat3& R2,
+ const Vec3& t2,
+ Mat3* R,
+ Vec3* t) {
*R = R2 * R1.transpose();
*t = t2 - (*R) * t1;
}
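To see why R = R2 * R1^T and t = t2 - R * t1 is the relative motion, note that a world point X expressed in the two camera frames as x_i = R_i * X + t_i must satisfy x2 = R * x1 + t. A minimal plain-Eigen check of that identity:

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Two arbitrary camera poses (rotation + translation).
  const Eigen::Matrix3d R1 =
      Eigen::AngleAxisd(0.2, Eigen::Vector3d::UnitY()).toRotationMatrix();
  const Eigen::Matrix3d R2 =
      Eigen::AngleAxisd(-0.4, Eigen::Vector3d(1, 1, 0).normalized())
          .toRotationMatrix();
  const Eigen::Vector3d t1(0.1, -0.3, 2.0), t2(1.0, 0.2, 1.5);

  // Relative motion, as computed above.
  const Eigen::Matrix3d R = R2 * R1.transpose();
  const Eigen::Vector3d t = t2 - R * t1;

  // Any world point seen in both camera frames must be related by (R, t).
  const Eigen::Vector3d X(0.7, -1.1, 4.0);
  const Eigen::Vector3d x1 = R1 * X + t1;
  const Eigen::Vector3d x2 = R2 * X + t2;
  std::cout << "consistency error: " << (x2 - (R * x1 + t)).norm() << "\n";  // ~0
  return 0;
}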
// HZ 9.6 pag 257
-void EssentialFromRt(const Mat3 &R1,
- const Vec3 &t1,
- const Mat3 &R2,
- const Vec3 &t2,
- Mat3 *E) {
+void EssentialFromRt(
+ const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E) {
Mat3 R;
Vec3 t;
RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
@@ -301,11 +300,11 @@ void EssentialFromRt(const Mat3 &R1,
}
// HZ 9.6 pag 259 (Result 9.19)
-void MotionFromEssential(const Mat3 &E,
- std::vector<Mat3> *Rs,
- std::vector<Vec3> *ts) {
+void MotionFromEssential(const Mat3& E,
+ std::vector<Mat3>* Rs,
+ std::vector<Vec3>* ts) {
Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
- Mat3 U = USV.matrixU();
+ Mat3 U = USV.matrixU();
Mat3 Vt = USV.matrixV().transpose();
// Last column of U is undetermined since d = (a a 0).
@@ -318,9 +317,11 @@ void MotionFromEssential(const Mat3 &E,
}
Mat3 W;
+ // clang-format off
W << 0, -1, 0,
1, 0, 0,
0, 0, 1;
+ // clang-format on
Mat3 U_W_Vt = U * W * Vt;
Mat3 U_Wt_Vt = U * W.transpose() * Vt;
@@ -332,18 +333,18 @@ void MotionFromEssential(const Mat3 &E,
(*Rs)[3] = U_Wt_Vt;
ts->resize(4);
- (*ts)[0] = U.col(2);
+ (*ts)[0] = U.col(2);
(*ts)[1] = -U.col(2);
- (*ts)[2] = U.col(2);
+ (*ts)[2] = U.col(2);
(*ts)[3] = -U.col(2);
}
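A standalone numerical check of this construction in plain Eigen (the sign fix on the last columns of U and V below mirrors what the unshown context of MotionFromEssential does; the Skew helper is illustrative): build E from a known (R, t), decompose it, and confirm that the true rotation is one of the two candidates and that the translation direction comes back up to sign. The remaining ambiguity is exactly what the positive-depth test in MotionFromEssentialChooseSolution below resolves.

#include <Eigen/Dense>
#include <algorithm>
#include <cmath>
#include <iostream>

// Cross-product (skew-symmetric) matrix, so that Skew(t) * v == t.cross(v).
static Eigen::Matrix3d Skew(const Eigen::Vector3d& t) {
  Eigen::Matrix3d S;
  S <<     0, -t(2),  t(1),
        t(2),     0, -t(0),
       -t(1),  t(0),     0;
  return S;
}

int main() {
  // Ground-truth relative motion; E = [t]x * R.
  const Eigen::Matrix3d R_gt =
      Eigen::AngleAxisd(0.3, Eigen::Vector3d(1, 2, 3).normalized())
          .toRotationMatrix();
  const Eigen::Vector3d t_gt = Eigen::Vector3d(0.5, -0.2, 1.0).normalized();
  const Eigen::Matrix3d E = Skew(t_gt) * R_gt;

  Eigen::JacobiSVD<Eigen::Matrix3d> svd(
      E, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d U = svd.matrixU();
  Eigen::Matrix3d V = svd.matrixV();
  // The last singular value is zero, so the last columns are determined only
  // up to sign; flip them so that U and V are proper rotations.
  if (U.determinant() < 0) U.col(2) *= -1;
  if (V.determinant() < 0) V.col(2) *= -1;

  Eigen::Matrix3d W;
  W << 0, -1, 0,
       1,  0, 0,
       0,  0, 1;
  const Eigen::Matrix3d Ra = U * W * V.transpose();
  const Eigen::Matrix3d Rb = U * W.transpose() * V.transpose();
  const Eigen::Vector3d t = U.col(2);  // Recovered only up to sign.

  std::cout << "min rotation error: "
            << std::min((Ra - R_gt).norm(), (Rb - R_gt).norm()) << "\n";  // ~0
  std::cout << "|t . t_gt| = " << std::abs(t.dot(t_gt)) << "\n";          // ~1
  return 0;
}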
-int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
- const std::vector<Vec3> &ts,
- const Mat3 &K1,
- const Vec2 &x1,
- const Mat3 &K2,
- const Vec2 &x2) {
+int MotionFromEssentialChooseSolution(const std::vector<Mat3>& Rs,
+ const std::vector<Vec3>& ts,
+ const Mat3& K1,
+ const Vec2& x1,
+ const Mat3& K2,
+ const Vec2& x2) {
DCHECK_EQ(4, Rs.size());
DCHECK_EQ(4, ts.size());
@@ -354,8 +355,8 @@ int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
t1.setZero();
P_From_KRt(K1, R1, t1, &P1);
for (int i = 0; i < 4; ++i) {
- const Mat3 &R2 = Rs[i];
- const Vec3 &t2 = ts[i];
+ const Mat3& R2 = Rs[i];
+ const Vec3& t2 = ts[i];
P_From_KRt(K2, R2, t2, &P2);
Vec3 X;
TriangulateDLT(P1, x1, P2, x2, &X);
@@ -369,13 +370,13 @@ int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
return -1;
}
-bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
- const Mat3 &K1,
- const Vec2 &x1,
- const Mat3 &K2,
- const Vec2 &x2,
- Mat3 *R,
- Vec3 *t) {
+bool MotionFromEssentialAndCorrespondence(const Mat3& E,
+ const Mat3& K1,
+ const Vec2& x1,
+ const Mat3& K2,
+ const Vec2& x2,
+ Mat3* R,
+ Vec3* t) {
std::vector<Mat3> Rs;
std::vector<Vec3> ts;
MotionFromEssential(E, &Rs, &ts);
@@ -389,7 +390,7 @@ bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
}
}
-void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
+void FundamentalToEssential(const Mat3& F, Mat3* E) {
Eigen::JacobiSVD<Mat3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
// See Hartley & Zisserman page 294, result 11.1, which shows how to get the
@@ -399,8 +400,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
double s = (a + b) / 2.0;
LG << "Initial reconstruction's rotation is non-euclidean by "
- << (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
- << svd.singularValues().transpose();
+ << (((a - b) / std::max(a, b)) * 100)
+ << "%; singular values:" << svd.singularValues().transpose();
Vec3 diag;
diag << s, s, 0;
@@ -410,9 +411,8 @@ void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
// Default settings for fundamental estimation which should be suitable
// for a wide range of use cases.
-EstimateFundamentalOptions::EstimateFundamentalOptions(void) :
- max_num_iterations(50),
- expected_average_symmetric_distance(1e-16) {
+EstimateFundamentalOptions::EstimateFundamentalOptions(void)
+ : max_num_iterations(50), expected_average_symmetric_distance(1e-16) {
}
namespace {
@@ -420,12 +420,11 @@ namespace {
// used for fundamental matrix refinement.
class FundamentalSymmetricEpipolarCostFunctor {
public:
- FundamentalSymmetricEpipolarCostFunctor(const Vec2 &x,
- const Vec2 &y)
- : x_(x), y_(y) {}
+ FundamentalSymmetricEpipolarCostFunctor(const Vec2& x, const Vec2& y)
+ : x_(x), y_(y) {}
- template<typename T>
- bool operator()(const T *fundamental_parameters, T *residuals) const {
+ template <typename T>
+ bool operator()(const T* fundamental_parameters, T* residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
@@ -454,9 +453,10 @@ class FundamentalSymmetricEpipolarCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
- TerminationCheckingCallback(const Mat &x1, const Mat &x2,
- const EstimateFundamentalOptions &options,
- Mat3 *F)
+ TerminationCheckingCallback(const Mat& x1,
+ const Mat& x2,
+ const EstimateFundamentalOptions& options,
+ Mat3* F)
: options_(options), x1_(x1), x2_(x2), F_(F) {}
virtual ceres::CallbackReturnType operator()(
@@ -469,9 +469,7 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric epipolar distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
- average_distance = SymmetricEpipolarDistance(*F_,
- x1_.col(i),
- x2_.col(i));
+ average_distance = SymmetricEpipolarDistance(*F_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@@ -483,19 +481,19 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
}
private:
- const EstimateFundamentalOptions &options_;
- const Mat &x1_;
- const Mat &x2_;
- Mat3 *F_;
+ const EstimateFundamentalOptions& options_;
+ const Mat& x1_;
+ const Mat& x2_;
+ Mat3* F_;
};
} // namespace
/* Fundamental transformation estimation. */
bool EstimateFundamentalFromCorrespondences(
- const Mat &x1,
- const Mat &x2,
- const EstimateFundamentalOptions &options,
- Mat3 *F) {
+ const Mat& x1,
+ const Mat& x2,
+ const EstimateFundamentalOptions& options,
+ Mat3* F) {
// Step 1: Algebraic fundamental estimation.
// Assume algebraic estimation always succeeds,
@@ -506,16 +504,15 @@ bool EstimateFundamentalFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
- FundamentalSymmetricEpipolarCostFunctor
- *fundamental_symmetric_epipolar_cost_function =
- new FundamentalSymmetricEpipolarCostFunctor(x1.col(i),
- x2.col(i));
+ FundamentalSymmetricEpipolarCostFunctor*
+ fundamental_symmetric_epipolar_cost_function =
+ new FundamentalSymmetricEpipolarCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
- new ceres::AutoDiffCostFunction<
- FundamentalSymmetricEpipolarCostFunctor,
- 2, // num_residuals
- 9>(fundamental_symmetric_epipolar_cost_function),
+ new ceres::AutoDiffCostFunction<FundamentalSymmetricEpipolarCostFunctor,
+ 2, // num_residuals
+ 9>(
+ fundamental_symmetric_epipolar_cost_function),
NULL,
F->data());
}
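For completeness, a hedged usage sketch of the entry point being reformatted here. It relies only on the signature and option fields visible in this diff; Mat and Mat3 are assumed to be libmv's dynamic and 3x3 Eigen double typedefs, and the point matrices are 2xN with one correspondence per column, as the DCHECKs above require. The data below is a placeholder; real correspondences come from the tracker.

#include <iostream>

#include "libmv/multiview/fundamental.h"

int main() {
  using namespace libmv;

  // Eight or more correspondences, one per column, in image coordinates.
  Mat x1(2, 8), x2(2, 8);
  x1.setRandom();            // Placeholder data only.
  x2 = x1;
  x2.row(0).array() += 0.5;  // Fake a small horizontal disparity.

  EstimateFundamentalOptions options;
  options.max_num_iterations = 100;
  options.expected_average_symmetric_distance = 1e-12;

  Mat3 F;
  const bool ok = EstimateFundamentalFromCorrespondences(x1, x2, options, &F);
  std::cout << "usable: " << ok << "\nF =\n" << F << "\n";
  return 0;
}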
diff --git a/intern/libmv/libmv/multiview/fundamental.h b/intern/libmv/libmv/multiview/fundamental.h
index a6c7a6802fe..6d25691c4a3 100644
--- a/intern/libmv/libmv/multiview/fundamental.h
+++ b/intern/libmv/libmv/multiview/fundamental.h
@@ -27,36 +27,34 @@
namespace libmv {
-void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2);
-void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F);
+void ProjectionsFromFundamental(const Mat3& F, Mat34* P1, Mat34* P2);
+void FundamentalFromProjections(const Mat34& P1, const Mat34& P2, Mat3* F);
/**
* 7 points (minimal case, point coordinates must be normalized beforehand):
*/
-double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- std::vector<Mat3> *F);
+double FundamentalFrom7CorrespondencesLinear(const Mat& x1,
+ const Mat& x2,
+ std::vector<Mat3>* F);
/**
* 7 points (point coordinates must be in image space):
*/
-double FundamentalFromCorrespondences7Point(const Mat &x1,
- const Mat &x2,
- std::vector<Mat3> *F);
+double FundamentalFromCorrespondences7Point(const Mat& x1,
+ const Mat& x2,
+ std::vector<Mat3>* F);
/**
* 8 points (points coordinates must be in image space):
*/
-double NormalizedEightPointSolver(const Mat &x1,
- const Mat &x2,
- Mat3 *F);
+double NormalizedEightPointSolver(const Mat& x1, const Mat& x2, Mat3* F);
/**
* Fundamental matrix utility function:
*/
-void EnforceFundamentalRank2Constraint(Mat3 *F);
+void EnforceFundamentalRank2Constraint(Mat3* F);
-void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
+void NormalizeFundamental(const Mat3& F, Mat3* F_normalized);
/**
* Approximate squared reprojection error.
@@ -64,14 +62,14 @@ void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
* See page 287 of HZ equation 11.9. This avoids triangulating the point,
* relying only on the entries in F.
*/
-double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
+double SampsonDistance(const Mat& F, const Vec2& x1, const Vec2& x2);
/**
* Calculates the sum of the distances from the points to the epipolar lines.
*
* See page 288 of HZ equation 11.10.
*/
-double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
+double SymmetricEpipolarDistance(const Mat& F, const Vec2& x1, const Vec2& x2);
/**
* Compute the relative camera motion between two cameras.
@@ -81,32 +79,29 @@ double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
* If T1 and T2 are the camera motions, the computed relative motion is
* T = T2 T1^{-1}
*/
-void RelativeCameraMotion(const Mat3 &R1,
- const Vec3 &t1,
- const Mat3 &R2,
- const Vec3 &t2,
- Mat3 *R,
- Vec3 *t);
-
-void EssentialFromFundamental(const Mat3 &F,
- const Mat3 &K1,
- const Mat3 &K2,
- Mat3 *E);
-
-void FundamentalFromEssential(const Mat3 &E,
- const Mat3 &K1,
- const Mat3 &K2,
- Mat3 *F);
-
-void EssentialFromRt(const Mat3 &R1,
- const Vec3 &t1,
- const Mat3 &R2,
- const Vec3 &t2,
- Mat3 *E);
-
-void MotionFromEssential(const Mat3 &E,
- std::vector<Mat3> *Rs,
- std::vector<Vec3> *ts);
+void RelativeCameraMotion(const Mat3& R1,
+ const Vec3& t1,
+ const Mat3& R2,
+ const Vec3& t2,
+ Mat3* R,
+ Vec3* t);
+
+void EssentialFromFundamental(const Mat3& F,
+ const Mat3& K1,
+ const Mat3& K2,
+ Mat3* E);
+
+void FundamentalFromEssential(const Mat3& E,
+ const Mat3& K1,
+ const Mat3& K2,
+ Mat3* F);
+
+void EssentialFromRt(
+ const Mat3& R1, const Vec3& t1, const Mat3& R2, const Vec3& t2, Mat3* E);
+
+void MotionFromEssential(const Mat3& E,
+ std::vector<Mat3>* Rs,
+ std::vector<Vec3>* ts);
/**
* Choose one of the four possible motion solutions from an essential matrix.
@@ -117,25 +112,25 @@ void MotionFromEssential(const Mat3 &E,
*
* \return index of the right solution or -1 if no solution.
*/
-int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
- const std::vector<Vec3> &ts,
- const Mat3 &K1,
- const Vec2 &x1,
- const Mat3 &K2,
- const Vec2 &x2);
-
-bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
- const Mat3 &K1,
- const Vec2 &x1,
- const Mat3 &K2,
- const Vec2 &x2,
- Mat3 *R,
- Vec3 *t);
+int MotionFromEssentialChooseSolution(const std::vector<Mat3>& Rs,
+ const std::vector<Vec3>& ts,
+ const Mat3& K1,
+ const Vec2& x1,
+ const Mat3& K2,
+ const Vec2& x2);
+
+bool MotionFromEssentialAndCorrespondence(const Mat3& E,
+ const Mat3& K1,
+ const Vec2& x1,
+ const Mat3& K2,
+ const Vec2& x2,
+ Mat3* R,
+ Vec3* t);
/**
* Find closest essential matrix E to fundamental F
*/
-void FundamentalToEssential(const Mat3 &F, Mat3 *E);
+void FundamentalToEssential(const Mat3& F, Mat3* E);
/**
* This structure contains options that controls how the fundamental
@@ -170,10 +165,10 @@ struct EstimateFundamentalOptions {
* refinement.
*/
bool EstimateFundamentalFromCorrespondences(
- const Mat &x1,
- const Mat &x2,
- const EstimateFundamentalOptions &options,
- Mat3 *F);
+ const Mat& x1,
+ const Mat& x2,
+ const EstimateFundamentalOptions& options,
+ Mat3* F);
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/fundamental_test.cc b/intern/libmv/libmv/multiview/fundamental_test.cc
index da0eb449b8f..0ec91ca8d19 100644
--- a/intern/libmv/libmv/multiview/fundamental_test.cc
+++ b/intern/libmv/libmv/multiview/fundamental_test.cc
@@ -34,12 +34,14 @@ using namespace libmv;
TEST(Fundamental, FundamentalFromProjections) {
Mat34 P1_gt, P2_gt;
+ // clang-format off
P1_gt << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2_gt << 1, 1, 1, 3,
0, 2, 0, 3,
0, 1, 1, 0;
+ // clang-format on
Mat3 F_gt;
FundamentalFromProjections(P1_gt, P2_gt, &F_gt);
@@ -55,8 +57,10 @@ TEST(Fundamental, FundamentalFromProjections) {
TEST(Fundamental, PreconditionerFromPoints) {
int n = 4;
Mat points(2, n);
+ // clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
+ // clang-format on
Mat3 T;
PreconditionerFromPoints(points, &T);
@@ -152,8 +156,8 @@ TEST(Fundamental, MotionFromEssentialAndCorrespondence) {
Mat3 R_estimated;
Vec3 t_estimated;
- MotionFromEssentialAndCorrespondence(E, d.K1, x1, d.K2, x2,
- &R_estimated, &t_estimated);
+ MotionFromEssentialAndCorrespondence(
+ E, d.K1, x1, d.K2, x2, &R_estimated, &t_estimated);
EXPECT_LE(FrobeniusDistance(R_estimated, R), 1e-8);
EXPECT_LE(DistanceL2(t_estimated, t), 1e-8);
diff --git a/intern/libmv/libmv/multiview/homography.cc b/intern/libmv/libmv/multiview/homography.cc
index 69177743f94..2db2c0cd3d5 100644
--- a/intern/libmv/libmv/multiview/homography.cc
+++ b/intern/libmv/libmv/multiview/homography.cc
@@ -26,7 +26,7 @@
#include "libmv/multiview/homography_parameterization.h"
namespace libmv {
-/** 2D Homography transformation estimation in the case that points are in
+/** 2D Homography transformation estimation in the case that points are in
* euclidean coordinates.
*
* x = H y
@@ -44,10 +44,7 @@ namespace libmv {
* (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
*/
static bool Homography2DFromCorrespondencesLinearEuc(
- const Mat &x1,
- const Mat &x2,
- Mat3 *H,
- double expected_precision) {
+ const Mat& x1, const Mat& x2, Mat3* H, double expected_precision) {
assert(2 == x1.rows());
assert(4 <= x1.cols());
assert(x1.rows() == x2.rows());
@@ -58,27 +55,27 @@ static bool Homography2DFromCorrespondencesLinearEuc(
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
- L(j, 0) = x1(0, i); // a
- L(j, 1) = x1(1, i); // b
- L(j, 2) = 1.0; // c
+ L(j, 0) = x1(0, i); // a
+ L(j, 1) = x1(1, i); // b
+ L(j, 2) = 1.0; // c
L(j, 6) = -x2(0, i) * x1(0, i); // g
L(j, 7) = -x2(0, i) * x1(1, i); // h
- b(j, 0) = x2(0, i); // i
+ b(j, 0) = x2(0, i); // i
++j;
- L(j, 3) = x1(0, i); // d
- L(j, 4) = x1(1, i); // e
- L(j, 5) = 1.0; // f
+ L(j, 3) = x1(0, i); // d
+ L(j, 4) = x1(1, i); // e
+ L(j, 5) = 1.0; // f
L(j, 6) = -x2(1, i) * x1(0, i); // g
L(j, 7) = -x2(1, i) * x1(1, i); // h
- b(j, 0) = x2(1, i); // i
+ b(j, 0) = x2(1, i); // i
// This ensures better stability
// TODO(julien) make a lite version without this 3rd set
++j;
- L(j, 0) = x2(1, i) * x1(0, i); // a
- L(j, 1) = x2(1, i) * x1(1, i); // b
- L(j, 2) = x2(1, i); // c
+ L(j, 0) = x2(1, i) * x1(0, i); // a
+ L(j, 1) = x2(1, i) * x1(1, i); // b
+ L(j, 2) = x2(1, i); // c
L(j, 3) = -x2(0, i) * x1(0, i); // d
L(j, 4) = -x2(0, i) * x1(1, i); // e
L(j, 5) = -x2(0, i); // f
@@ -86,14 +83,15 @@ static bool Homography2DFromCorrespondencesLinearEuc(
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
Homography2DNormalizedParameterization<double>::To(h, H);
- if ((L * h).isApprox(b, expected_precision)) {
+ if ((L * h).isApprox(b, expected_precision)) {
return true;
} else {
return false;
}
}
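To make the derivation in the comment block above concrete, a small standalone check in plain Eigen: for a ground-truth H with H(2,2) = 1 and a correspondence x2 ~ H * x1, the first two rows built per point in the loop above are satisfied exactly (the third row only adds stability and is omitted here).

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Ground-truth homography, normalized so that H(2,2) == 1.
  Eigen::Matrix3d H;
  H << 3, -1,  4,
       6, -2, -3,
       1, -3,  1;

  // One euclidean correspondence x2 ~ H * x1.
  const Eigen::Vector2d x1(0.7, 1.3);
  const Eigen::Vector3d x2h = H * Eigen::Vector3d(x1(0), x1(1), 1.0);
  const Eigen::Vector2d x2 = x2h.head<2>() / x2h(2);

  // Stack the first eight entries of H as the unknown vector (a b c d e f g h).
  Eigen::Matrix<double, 8, 1> h;
  h << H(0, 0), H(0, 1), H(0, 2), H(1, 0), H(1, 1), H(1, 2), H(2, 0), H(2, 1);

  // The two rows built per correspondence above, with b on the right-hand side.
  Eigen::Matrix<double, 2, 8> L = Eigen::Matrix<double, 2, 8>::Zero();
  Eigen::Vector2d b;
  L(0, 0) = x1(0);  L(0, 1) = x1(1);  L(0, 2) = 1.0;
  L(0, 6) = -x2(0) * x1(0);  L(0, 7) = -x2(0) * x1(1);  b(0) = x2(0);
  L(1, 3) = x1(0);  L(1, 4) = x1(1);  L(1, 5) = 1.0;
  L(1, 6) = -x2(1) * x1(0);  L(1, 7) = -x2(1) * x1(1);  b(1) = x2(1);

  std::cout << "row residuals: " << (L * h - b).transpose() << "\n";  // ~0
  return 0;
}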
-/** 2D Homography transformation estimation in the case that points are in
+// clang-format off
+/** 2D Homography transformation estimation in the case that points are in
* homogeneous coordinates.
*
* | 0 -x3 x2| |a b c| |y1| -x3*d+x2*g -x3*e+x2*h -x3*f+x2*1 |y1| (-x3*d+x2*g)*y1 (-x3*e+x2*h)*y2 (-x3*f+x2*1)*y3 |0|
@@ -101,13 +99,14 @@ static bool Homography2DFromCorrespondencesLinearEuc(
* |-x2 x1 0| |g h 1| |y3| -x2*a+x1*d -x2*b+x1*e -x2*c+x1*f |y3| (-x2*a+x1*d)*y1 (-x2*b+x1*e)*y2 (-x2*c+x1*f)*y3 |0|
* X = |a b c d e f g h|^t
*/
-bool Homography2DFromCorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- Mat3 *H,
+// clang-format on
+bool Homography2DFromCorrespondencesLinear(const Mat& x1,
+ const Mat& x2,
+ Mat3* H,
double expected_precision) {
if (x1.rows() == 2) {
- return Homography2DFromCorrespondencesLinearEuc(x1, x2, H,
- expected_precision);
+ return Homography2DFromCorrespondencesLinearEuc(
+ x1, x2, H, expected_precision);
}
assert(3 == x1.rows());
assert(4 <= x1.cols());
@@ -122,33 +121,33 @@ bool Homography2DFromCorrespondencesLinear(const Mat &x1,
Mat b = Mat::Zero(n * 3, 1);
for (int i = 0; i < n; ++i) {
int j = 3 * i;
- L(j, 0) = x2(w, i) * x1(x, i); // a
- L(j, 1) = x2(w, i) * x1(y, i); // b
- L(j, 2) = x2(w, i) * x1(w, i); // c
+ L(j, 0) = x2(w, i) * x1(x, i); // a
+ L(j, 1) = x2(w, i) * x1(y, i); // b
+ L(j, 2) = x2(w, i) * x1(w, i); // c
L(j, 6) = -x2(x, i) * x1(x, i); // g
L(j, 7) = -x2(x, i) * x1(y, i); // h
- b(j, 0) = x2(x, i) * x1(w, i);
+ b(j, 0) = x2(x, i) * x1(w, i);
++j;
- L(j, 3) = x2(w, i) * x1(x, i); // d
- L(j, 4) = x2(w, i) * x1(y, i); // e
- L(j, 5) = x2(w, i) * x1(w, i); // f
+ L(j, 3) = x2(w, i) * x1(x, i); // d
+ L(j, 4) = x2(w, i) * x1(y, i); // e
+ L(j, 5) = x2(w, i) * x1(w, i); // f
L(j, 6) = -x2(y, i) * x1(x, i); // g
L(j, 7) = -x2(y, i) * x1(y, i); // h
- b(j, 0) = x2(y, i) * x1(w, i);
+ b(j, 0) = x2(y, i) * x1(w, i);
// This ensures better stability
++j;
- L(j, 0) = x2(y, i) * x1(x, i); // a
- L(j, 1) = x2(y, i) * x1(y, i); // b
- L(j, 2) = x2(y, i) * x1(w, i); // c
+ L(j, 0) = x2(y, i) * x1(x, i); // a
+ L(j, 1) = x2(y, i) * x1(y, i); // b
+ L(j, 2) = x2(y, i) * x1(w, i); // c
L(j, 3) = -x2(x, i) * x1(x, i); // d
L(j, 4) = -x2(x, i) * x1(y, i); // e
L(j, 5) = -x2(x, i) * x1(w, i); // f
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
- if ((L * h).isApprox(b, expected_precision)) {
+ if ((L * h).isApprox(b, expected_precision)) {
Homography2DNormalizedParameterization<double>::To(h, H);
return true;
} else {
@@ -158,32 +157,30 @@ bool Homography2DFromCorrespondencesLinear(const Mat &x1,
// Default settings for homography estimation which should be suitable
// for a wide range of use cases.
-EstimateHomographyOptions::EstimateHomographyOptions(void) :
- use_normalization(true),
- max_num_iterations(50),
- expected_average_symmetric_distance(1e-16) {
+EstimateHomographyOptions::EstimateHomographyOptions(void)
+ : use_normalization(true),
+ max_num_iterations(50),
+ expected_average_symmetric_distance(1e-16) {
}
namespace {
-void GetNormalizedPoints(const Mat &original_points,
- Mat *normalized_points,
- Mat3 *normalization_matrix) {
+void GetNormalizedPoints(const Mat& original_points,
+ Mat* normalized_points,
+ Mat3* normalization_matrix) {
IsotropicPreconditionerFromPoints(original_points, normalization_matrix);
- ApplyTransformationToPoints(original_points,
- *normalization_matrix,
- normalized_points);
+ ApplyTransformationToPoints(
+ original_points, *normalization_matrix, normalized_points);
}
// Cost functor which computes symmetric geometric distance
// used for homography matrix refinement.
class HomographySymmetricGeometricCostFunctor {
public:
- HomographySymmetricGeometricCostFunctor(const Vec2 &x,
- const Vec2 &y)
- : x_(x), y_(y) { }
+ HomographySymmetricGeometricCostFunctor(const Vec2& x, const Vec2& y)
+ : x_(x), y_(y) {}
- template<typename T>
- bool operator()(const T *homography_parameters, T *residuals) const {
+ template <typename T>
+ bool operator()(const T* homography_parameters, T* residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
@@ -221,9 +218,10 @@ class HomographySymmetricGeometricCostFunctor {
// average value.
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
- TerminationCheckingCallback(const Mat &x1, const Mat &x2,
- const EstimateHomographyOptions &options,
- Mat3 *H)
+ TerminationCheckingCallback(const Mat& x1,
+ const Mat& x2,
+ const EstimateHomographyOptions& options,
+ Mat3* H)
: options_(options), x1_(x1), x2_(x2), H_(H) {}
virtual ceres::CallbackReturnType operator()(
@@ -236,9 +234,8 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
// Calculate average of symmetric geometric distance.
double average_distance = 0.0;
for (int i = 0; i < x1_.cols(); i++) {
- average_distance = SymmetricGeometricDistance(*H_,
- x1_.col(i),
- x2_.col(i));
+ average_distance =
+ SymmetricGeometricDistance(*H_, x1_.col(i), x2_.col(i));
}
average_distance /= x1_.cols();
@@ -250,10 +247,10 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
}
private:
- const EstimateHomographyOptions &options_;
- const Mat &x1_;
- const Mat &x2_;
- Mat3 *H_;
+ const EstimateHomographyOptions& options_;
+ const Mat& x1_;
+ const Mat& x2_;
+ Mat3* H_;
};
} // namespace
@@ -261,10 +258,10 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
* euclidean coordinates.
*/
bool EstimateHomography2DFromCorrespondences(
- const Mat &x1,
- const Mat &x2,
- const EstimateHomographyOptions &options,
- Mat3 *H) {
+ const Mat& x1,
+ const Mat& x2,
+ const EstimateHomographyOptions& options,
+ Mat3* H) {
// TODO(sergey): Support homogeneous coordinates, not just euclidean.
assert(2 == x1.rows());
@@ -272,8 +269,7 @@ bool EstimateHomography2DFromCorrespondences(
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
- Mat3 T1 = Mat3::Identity(),
- T2 = Mat3::Identity();
+ Mat3 T1 = Mat3::Identity(), T2 = Mat3::Identity();
// Step 1: Algebraic homography estimation.
Mat x1_normalized, x2_normalized;
@@ -300,16 +296,15 @@ bool EstimateHomography2DFromCorrespondences(
// Step 2: Refine matrix using Ceres minimizer.
ceres::Problem problem;
for (int i = 0; i < x1.cols(); i++) {
- HomographySymmetricGeometricCostFunctor
- *homography_symmetric_geometric_cost_function =
- new HomographySymmetricGeometricCostFunctor(x1.col(i),
- x2.col(i));
+ HomographySymmetricGeometricCostFunctor*
+ homography_symmetric_geometric_cost_function =
+ new HomographySymmetricGeometricCostFunctor(x1.col(i), x2.col(i));
problem.AddResidualBlock(
- new ceres::AutoDiffCostFunction<
- HomographySymmetricGeometricCostFunctor,
- 4, // num_residuals
- 9>(homography_symmetric_geometric_cost_function),
+ new ceres::AutoDiffCostFunction<HomographySymmetricGeometricCostFunctor,
+ 4, // num_residuals
+ 9>(
+ homography_symmetric_geometric_cost_function),
NULL,
H->data());
}
@@ -335,15 +330,16 @@ bool EstimateHomography2DFromCorrespondences(
return summary.IsSolutionUsable();
}
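As with the fundamental-matrix estimator earlier in this diff, a hedged usage sketch of this entry point, using only the signature and option fields shown here (Mat and Mat3 are assumed to be libmv's Eigen typedefs; points are euclidean 2xN, one correspondence per column). The correspondences below are a toy translation and serve only to demonstrate the call shape.

#include <iostream>

#include "libmv/multiview/homography.h"

int main() {
  using namespace libmv;

  // Four (or more) euclidean correspondences, one per column.
  Mat x1(2, 4), x2(2, 4);
  x1 << 0, 1, 0, 5,
        0, 0, 2, 3;
  x2 = x1;
  x2.row(0).array() += 2.0;  // A pure translation in x ...
  x2.row(1).array() -= 1.0;  // ... and in y.

  EstimateHomographyOptions options;
  options.use_normalization = true;
  options.max_num_iterations = 50;

  Mat3 H;
  const bool ok = EstimateHomography2DFromCorrespondences(x1, x2, options, &H);
  std::cout << "usable: " << ok << "\nH =\n" << H << "\n";
  return 0;
}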
+// clang-format off
/**
* x2 ~ A * x1
* x2^t * Hi * A *x1 = 0
- * H1 = H2 = H3 =
+ * H1 = H2 = H3 =
* | 0 0 0 1| |-x2w| |0 0 0 0| | 0 | | 0 0 1 0| |-x2z|
* | 0 0 0 0| -> | 0 | |0 0 1 0| -> |-x2z| | 0 0 0 0| -> | 0 |
* | 0 0 0 0| | 0 | |0-1 0 0| | x2y| |-1 0 0 0| | x2x|
* |-1 0 0 0| | x2x| |0 0 0 0| | 0 | | 0 0 0 0| | 0 |
- * H4 = H5 = H6 =
+ * H4 = H5 = H6 =
* |0 0 0 0| | 0 | | 0 1 0 0| |-x2y| |0 0 0 0| | 0 |
* |0 0 0 1| -> |-x2w| |-1 0 0 0| -> | x2x| |0 0 0 0| -> | 0 |
* |0 0 0 0| | 0 | | 0 0 0 0| | 0 | |0 0 0 1| |-x2w|
@@ -361,10 +357,11 @@ bool EstimateHomography2DFromCorrespondences(
* x2^t * H6 * A *x1 = (-x2w*i +x2z*m )*x1x + (-x2w*j +x2z*n )*x1y + (-x2w*k +x2z*o )*x1z + (-x2w*l +x2z*1 )*x1w = 0
*
* X = |a b c d e f g h i j k l m n o|^t
-*/
-bool Homography3DFromCorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- Mat4 *H,
+ */
+// clang-format on
+bool Homography3DFromCorrespondencesLinear(const Mat& x1,
+ const Mat& x2,
+ Mat4* H,
double expected_precision) {
assert(4 == x1.rows());
assert(5 <= x1.cols());
@@ -379,68 +376,68 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
Mat b = Mat::Zero(n * 6, 1);
for (int i = 0; i < n; ++i) {
int j = 6 * i;
- L(j, 0) = -x2(w, i) * x1(x, i); // a
- L(j, 1) = -x2(w, i) * x1(y, i); // b
- L(j, 2) = -x2(w, i) * x1(z, i); // c
- L(j, 3) = -x2(w, i) * x1(w, i); // d
- L(j, 12) = x2(x, i) * x1(x, i); // m
- L(j, 13) = x2(x, i) * x1(y, i); // n
- L(j, 14) = x2(x, i) * x1(z, i); // o
- b(j, 0) = -x2(x, i) * x1(w, i);
+ L(j, 0) = -x2(w, i) * x1(x, i); // a
+ L(j, 1) = -x2(w, i) * x1(y, i); // b
+ L(j, 2) = -x2(w, i) * x1(z, i); // c
+ L(j, 3) = -x2(w, i) * x1(w, i); // d
+ L(j, 12) = x2(x, i) * x1(x, i); // m
+ L(j, 13) = x2(x, i) * x1(y, i); // n
+ L(j, 14) = x2(x, i) * x1(z, i); // o
+ b(j, 0) = -x2(x, i) * x1(w, i);
++j;
- L(j, 4) = -x2(z, i) * x1(x, i); // e
- L(j, 5) = -x2(z, i) * x1(y, i); // f
- L(j, 6) = -x2(z, i) * x1(z, i); // g
- L(j, 7) = -x2(z, i) * x1(w, i); // h
- L(j, 8) = x2(y, i) * x1(x, i); // i
- L(j, 9) = x2(y, i) * x1(y, i); // j
- L(j, 10) = x2(y, i) * x1(z, i); // k
- L(j, 11) = x2(y, i) * x1(w, i); // l
+ L(j, 4) = -x2(z, i) * x1(x, i); // e
+ L(j, 5) = -x2(z, i) * x1(y, i); // f
+ L(j, 6) = -x2(z, i) * x1(z, i); // g
+ L(j, 7) = -x2(z, i) * x1(w, i); // h
+ L(j, 8) = x2(y, i) * x1(x, i); // i
+ L(j, 9) = x2(y, i) * x1(y, i); // j
+ L(j, 10) = x2(y, i) * x1(z, i); // k
+ L(j, 11) = x2(y, i) * x1(w, i); // l
++j;
- L(j, 0) = -x2(z, i) * x1(x, i); // a
- L(j, 1) = -x2(z, i) * x1(y, i); // b
- L(j, 2) = -x2(z, i) * x1(z, i); // c
- L(j, 3) = -x2(z, i) * x1(w, i); // d
- L(j, 8) = x2(x, i) * x1(x, i); // i
- L(j, 9) = x2(x, i) * x1(y, i); // j
- L(j, 10) = x2(x, i) * x1(z, i); // k
- L(j, 11) = x2(x, i) * x1(w, i); // l
+ L(j, 0) = -x2(z, i) * x1(x, i); // a
+ L(j, 1) = -x2(z, i) * x1(y, i); // b
+ L(j, 2) = -x2(z, i) * x1(z, i); // c
+ L(j, 3) = -x2(z, i) * x1(w, i); // d
+ L(j, 8) = x2(x, i) * x1(x, i); // i
+ L(j, 9) = x2(x, i) * x1(y, i); // j
+ L(j, 10) = x2(x, i) * x1(z, i); // k
+ L(j, 11) = x2(x, i) * x1(w, i); // l
++j;
- L(j, 4) = -x2(w, i) * x1(x, i); // e
- L(j, 5) = -x2(w, i) * x1(y, i); // f
- L(j, 6) = -x2(w, i) * x1(z, i); // g
- L(j, 7) = -x2(w, i) * x1(w, i); // h
- L(j, 12) = x2(y, i) * x1(x, i); // m
- L(j, 13) = x2(y, i) * x1(y, i); // n
- L(j, 14) = x2(y, i) * x1(z, i); // o
- b(j, 0) = -x2(y, i) * x1(w, i);
+ L(j, 4) = -x2(w, i) * x1(x, i); // e
+ L(j, 5) = -x2(w, i) * x1(y, i); // f
+ L(j, 6) = -x2(w, i) * x1(z, i); // g
+ L(j, 7) = -x2(w, i) * x1(w, i); // h
+ L(j, 12) = x2(y, i) * x1(x, i); // m
+ L(j, 13) = x2(y, i) * x1(y, i); // n
+ L(j, 14) = x2(y, i) * x1(z, i); // o
+ b(j, 0) = -x2(y, i) * x1(w, i);
++j;
L(j, 0) = -x2(y, i) * x1(x, i); // a
L(j, 1) = -x2(y, i) * x1(y, i); // b
L(j, 2) = -x2(y, i) * x1(z, i); // c
L(j, 3) = -x2(y, i) * x1(w, i); // d
- L(j, 4) = x2(x, i) * x1(x, i); // e
- L(j, 5) = x2(x, i) * x1(y, i); // f
- L(j, 6) = x2(x, i) * x1(z, i); // g
- L(j, 7) = x2(x, i) * x1(w, i); // h
+ L(j, 4) = x2(x, i) * x1(x, i); // e
+ L(j, 5) = x2(x, i) * x1(y, i); // f
+ L(j, 6) = x2(x, i) * x1(z, i); // g
+ L(j, 7) = x2(x, i) * x1(w, i); // h
++j;
- L(j, 8) = -x2(w, i) * x1(x, i); // i
- L(j, 9) = -x2(w, i) * x1(y, i); // j
+ L(j, 8) = -x2(w, i) * x1(x, i); // i
+ L(j, 9) = -x2(w, i) * x1(y, i); // j
L(j, 10) = -x2(w, i) * x1(z, i); // k
L(j, 11) = -x2(w, i) * x1(w, i); // l
- L(j, 12) = x2(z, i) * x1(x, i); // m
- L(j, 13) = x2(z, i) * x1(y, i); // n
- L(j, 14) = x2(z, i) * x1(z, i); // o
- b(j, 0) = -x2(z, i) * x1(w, i);
+ L(j, 12) = x2(z, i) * x1(x, i); // m
+ L(j, 13) = x2(z, i) * x1(y, i); // n
+ L(j, 14) = x2(z, i) * x1(z, i); // o
+ b(j, 0) = -x2(z, i) * x1(w, i);
}
// Solve Lx=B
Vec h = L.fullPivLu().solve(b);
- if ((L * h).isApprox(b, expected_precision)) {
+ if ((L * h).isApprox(b, expected_precision)) {
Homography3DNormalizedParameterization<double>::To(h, H);
return true;
} else {
@@ -448,9 +445,9 @@ bool Homography3DFromCorrespondencesLinear(const Mat &x1,
}
}
-double SymmetricGeometricDistance(const Mat3 &H,
- const Vec2 &x1,
- const Vec2 &x2) {
+double SymmetricGeometricDistance(const Mat3& H,
+ const Vec2& x1,
+ const Vec2& x2) {
Vec3 x(x1(0), x1(1), 1.0);
Vec3 y(x2(0), x2(1), 1.0);
diff --git a/intern/libmv/libmv/multiview/homography.h b/intern/libmv/libmv/multiview/homography.h
index a76aa9405a5..0742c6f7c70 100644
--- a/intern/libmv/libmv/multiview/homography.h
+++ b/intern/libmv/libmv/multiview/homography.h
@@ -49,11 +49,11 @@ namespace libmv {
* \return True if the transformation estimation has succeeded.
* \note There must be at least 4 non-collinear points.
*/
-bool Homography2DFromCorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- Mat3 *H,
- double expected_precision =
- EigenDouble::dummy_precision());
+bool Homography2DFromCorrespondencesLinear(
+ const Mat& x1,
+ const Mat& x2,
+ Mat3* H,
+ double expected_precision = EigenDouble::dummy_precision());
/**
* This structure contains options that controls how the homography
@@ -101,10 +101,10 @@ struct EstimateHomographyOptions {
* refinement.
*/
bool EstimateHomography2DFromCorrespondences(
- const Mat &x1,
- const Mat &x2,
- const EstimateHomographyOptions &options,
- Mat3 *H);
+ const Mat& x1,
+ const Mat& x2,
+ const EstimateHomographyOptions& options,
+ Mat3* H);
/**
* 3D Homography transformation estimation.
@@ -129,20 +129,20 @@ bool EstimateHomography2DFromCorrespondences(
* \note Need at least 5 non-coplanar points
* \note Point coordinates must be in homogeneous coordinates
*/
-bool Homography3DFromCorrespondencesLinear(const Mat &x1,
- const Mat &x2,
- Mat4 *H,
- double expected_precision =
- EigenDouble::dummy_precision());
+bool Homography3DFromCorrespondencesLinear(
+ const Mat& x1,
+ const Mat& x2,
+ Mat4* H,
+ double expected_precision = EigenDouble::dummy_precision());
/**
* Calculate symmetric geometric cost:
*
* D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
*/
-double SymmetricGeometricDistance(const Mat3 &H,
- const Vec2 &x1,
- const Vec2 &x2);
+double SymmetricGeometricDistance(const Mat3& H,
+ const Vec2& x1,
+ const Vec2& x2);
} // namespace libmv
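A standalone sketch of the symmetric geometric (transfer) cost declared above, in plain Eigen. Both the forward and the backward transfer errors are squared here, matching the SymmetricError convention documented in homography_error.h below; the helper name is illustrative, not the libmv API.

#include <Eigen/Dense>
#include <iostream>

static double SymmetricTransfer(const Eigen::Matrix3d& H,
                                const Eigen::Vector2d& x1,
                                const Eigen::Vector2d& x2) {
  const Eigen::Vector3d x1h(x1(0), x1(1), 1.0);
  const Eigen::Vector3d x2h(x2(0), x2(1), 1.0);
  const Eigen::Vector3d H_x1 = H * x1h;
  const Eigen::Vector3d Hinv_x2 = H.inverse() * x2h;
  const Eigen::Vector2d forward = x2 - H_x1.head<2>() / H_x1(2);
  const Eigen::Vector2d backward = x1 - Hinv_x2.head<2>() / Hinv_x2(2);
  return forward.squaredNorm() + backward.squaredNorm();
}

int main() {
  Eigen::Matrix3d H;
  H << 1, 0,  2,
       0, 1, -1,
       0, 0,  1;  // A pure translation, for a readable check.
  const Eigen::Vector2d x1(3.0, 4.0);
  const Eigen::Vector2d x2(5.0, 3.5);  // Exact image is (5, 3); y is off by 0.5.
  std::cout << "cost: " << SymmetricTransfer(H, x1, x2) << "\n";  // 0.25 + 0.25
  return 0;
}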
diff --git a/intern/libmv/libmv/multiview/homography_error.h b/intern/libmv/libmv/multiview/homography_error.h
index f8b9d45e73c..786ca245ea6 100644
--- a/intern/libmv/libmv/multiview/homography_error.h
+++ b/intern/libmv/libmv/multiview/homography_error.h
@@ -27,18 +27,18 @@ namespace libmv {
namespace homography {
namespace homography2D {
- /**
- * Structure for estimating the asymmetric error between a vector x2 and the
- * transformed x1 such that
- * Error = ||x2 - Psi(H * x1)||^2
- * where Psi is the function that transforms homogeneous to euclidean coords.
- * \note It should be distributed as Chi-squared with k = 2.
- */
+/**
+ * Structure for estimating the asymmetric error between a vector x2 and the
+ * transformed x1 such that
+ * Error = ||x2 - Psi(H * x1)||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ * \note It should be distributed as Chi-squared with k = 2.
+ */
struct AsymmetricError {
/**
- * Computes the asymmetric residuals between a set of 2D points x2 and the
+ * Computes the asymmetric residuals between a set of 2D points x2 and the
* transformed 2D point set x1 such that
- * Residuals_i = x2_i - Psi(H * x1_i)
+ * Residuals_i = x2_i - Psi(H * x1_i)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
@@ -47,8 +47,7 @@ struct AsymmetricError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 2xN matrix of column vectors of residuals errors
*/
- static void Residuals(const Mat &H, const Mat &x1,
- const Mat &x2, Mat2X *dx) {
+ static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat2X* dx) {
dx->resize(2, x1.cols());
Mat3X x2h_est;
if (x1.rows() == 2)
@@ -63,19 +62,18 @@ struct AsymmetricError {
*dx = HomogeneousToEuclidean(static_cast<Mat3X>(x2)) - *dx;
}
/**
- * Computes the asymmetric residuals between a 2D point x2 and the transformed
+ * Computes the asymmetric residuals between a 2D point x2 and the transformed
* 2D point x1 such that
- * Residuals = x2 - Psi(H * x1)
+ * Residuals = x2 - Psi(H * x1)
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
- * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
- * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 2 of the residual error
*/
- static void Residuals(const Mat &H, const Vec &x1,
- const Vec &x2, Vec2 *dx) {
+ static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec2* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
@@ -85,10 +83,10 @@ struct AsymmetricError {
*dx = x2 - x2h_est.head<2>() / x2h_est[2];
else
*dx = HomogeneousToEuclidean(static_cast<Vec3>(x2)) -
- x2h_est.head<2>() / x2h_est[2];
+ x2h_est.head<2>() / x2h_est[2];
}
/**
- * Computes the squared norm of the residuals between a set of 2D points x2
+ * Computes the squared norm of the residuals between a set of 2D points x2
* and the transformed 2D point set x1 such that
* Error = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
@@ -99,70 +97,70 @@ struct AsymmetricError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residuals errors
*/
- static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
+ static double Error(const Mat& H, const Mat& x1, const Mat& x2) {
Mat2X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
- * Computes the squared norm of the residuals between a 2D point x2 and the
- * transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
+ * Computes the squared norm of the residuals between a 2D point x2 and the
+ * transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
- * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
- * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
- static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
Vec2 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
};
- /**
- * Structure for estimating the symmetric error
- * between a vector x2 and the transformed x1 such that
- * Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
- * where Psi is the function that transforms homogeneous to euclidean coords.
- * \note It should be distributed as Chi-squared with k = 4.
- */
+/**
+ * Structure for estimating the symmetric error
+ * between a vector x2 and the transformed x1 such that
+ * Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ * \note It should be distributed as Chi-squared with k = 4.
+ */
struct SymmetricError {
/**
- * Computes the squared norm of the residuals between x2 and the
- * transformed x1 such that
+ * Computes the squared norm of the residuals between x2 and the
+ * transformed x1 such that
* Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
* where Psi is the function that transforms homogeneous to euclidean coords.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
- * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
- * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the symmetric residuals errors
*/
- static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
// TODO(keir): This is awesomely inefficient because it does a 3x3
// inversion for each evaluation.
Mat3 Hinv = H.inverse();
- return AsymmetricError::Error(H, x1, x2) +
+ return AsymmetricError::Error(H, x1, x2) +
AsymmetricError::Error(Hinv, x2, x1);
}
// TODO(julien) Add residuals function \see AsymmetricError
};
- /**
- * Structure for estimating the algebraic error (cross product)
- * between a vector x2 and the transformed x1 such that
- * Error = ||[x2] * H * x1||^^2
- * where [x2] is the skew matrix of x2.
- */
+/**
+ * Structure for estimating the algebraic error (cross product)
+ * between a vector x2 and the transformed x1 such that
+ * Error = ||[x2] * H * x1||^2
+ * where [x2] is the skew matrix of x2.
+ */
struct AlgebraicError {
// TODO(julien) Make an AlgebraicError2Rows and AlgebraicError3Rows
/**
- * Computes the algebraic residuals (cross product) between a set of 2D
- * points x2 and the transformed 2D point set x1 such that
+ * Computes the algebraic residuals (cross product) between a set of 2D
+ * points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
@@ -171,8 +169,7 @@ struct AlgebraicError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \param[out] dx A 3xN matrix of column vectors of residuals errors
*/
- static void Residuals(const Mat &H, const Mat &x1,
- const Mat &x2, Mat3X *dx) {
+ static void Residuals(const Mat& H, const Mat& x1, const Mat& x2, Mat3X* dx) {
dx->resize(3, x1.cols());
Vec3 col;
for (int i = 0; i < x1.cols(); ++i) {
@@ -181,18 +178,17 @@ struct AlgebraicError {
}
}
/**
- * Computes the algebraic residuals (cross product) between a 2D point x2
- * and the transformed 2D point x1 such that
+ * Computes the algebraic residuals (cross product) between a 2D point x2
+ * and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
- * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
- * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \param[out] dx A vector of size 3 of the residual error
*/
- static void Residuals(const Mat &H, const Vec &x1,
- const Vec &x2, Vec3 *dx) {
+ static void Residuals(const Mat& H, const Vec& x1, const Vec& x2, Vec3* dx) {
Vec3 x2h_est;
if (x1.rows() == 2)
x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
@@ -206,8 +202,8 @@ struct AlgebraicError {
// identical 3x3 skew matrix for each evaluation.
}
/**
- * Computes the squared norm of the algebraic residuals between a set of 2D
- * points x2 and the transformed 2D point set x1 such that
+ * Computes the squared norm of the algebraic residuals between a set of 2D
+ * points x2 and the transformed 2D point set x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
@@ -216,23 +212,23 @@ struct AlgebraicError {
* \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
* \return The squared norm of the asymmetric residuals errors
*/
- static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
+ static double Error(const Mat& H, const Mat& x1, const Mat& x2) {
Mat3X dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
}
/**
- * Computes the squared norm of the algebraic residuals between a 2D point x2
- * and the transformed 2D point x1 such that
+ * Computes the squared norm of the algebraic residuals between a 2D point x2
+ * and the transformed 2D point x1 such that
* [x2] * H * x1 where [x2] is the skew matrix of x2.
*
* \param[in] H The 3x3 homography matrix.
* The estimated homography should approximately hold the condition y = H x.
- * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
- * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
* \return The squared norm of the asymmetric residual error
*/
- static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ static double Error(const Mat& H, const Vec& x1, const Vec& x2) {
Vec3 dx;
Residuals(H, x1, x2, &dx);
return dx.squaredNorm();
diff --git a/intern/libmv/libmv/multiview/homography_parameterization.h b/intern/libmv/libmv/multiview/homography_parameterization.h
index ca8fbd8066e..ae2d74da9ff 100644
--- a/intern/libmv/libmv/multiview/homography_parameterization.h
+++ b/intern/libmv/libmv/multiview/homography_parameterization.h
@@ -25,64 +25,72 @@
namespace libmv {
-/** A parameterization of the 2D homography matrix that uses 8 parameters so
+/** A parameterization of the 2D homography matrix that uses 8 parameters so
* that the matrix is normalized (H(2,2) == 1).
* The homography matrix H is built from a list of 8 parameters (a, b,...g, h)
* as follows
- * |a b c|
+ * |a b c|
* H = |d e f|
- * |g h 1|
+ * |g h 1|
*/
-template<typename T = double>
+template <typename T = double>
class Homography2DNormalizedParameterization {
public:
typedef Eigen::Matrix<T, 8, 1> Parameters; // a, b, ... g, h
typedef Eigen::Matrix<T, 3, 3> Parameterized; // H
/// Convert from the 8 parameters to a H matrix.
- static void To(const Parameters &p, Parameterized *h) {
+ static void To(const Parameters& p, Parameterized* h) {
+ // clang-format off
*h << p(0), p(1), p(2),
p(3), p(4), p(5),
p(6), p(7), 1.0;
+ // clang-format on
}
/// Convert from a H matrix to the 8 parameters.
- static void From(const Parameterized &h, Parameters *p) {
+ static void From(const Parameterized& h, Parameters* p) {
+ // clang-format off
*p << h(0, 0), h(0, 1), h(0, 2),
h(1, 0), h(1, 1), h(1, 2),
h(2, 0), h(2, 1);
+ // clang-format on
}
};
-/** A parameterization of the 2D homography matrix that uses 15 parameters so
+/** A parameterization of the 2D homography matrix that uses 15 parameters so
* that the matrix is normalized (H(3,3) == 1).
* The homography matrix H is built from a list of 15 parameters (a, b,...n, o)
* as follows
- * |a b c d|
+ * |a b c d|
* H = |e f g h|
* |i j k l|
- * |m n o 1|
+ * |m n o 1|
*/
-template<typename T = double>
+template <typename T = double>
class Homography3DNormalizedParameterization {
public:
- typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
- typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
+ typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
+ typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
/// Convert from the 15 parameters to a H matrix.
- static void To(const Parameters &p, Parameterized *h) {
+ static void To(const Parameters& p, Parameterized* h) {
+ // clang-format off
*h << p(0), p(1), p(2), p(3),
p(4), p(5), p(6), p(7),
p(8), p(9), p(10), p(11),
p(12), p(13), p(14), 1.0;
+ // clang-format on
}
/// Convert from a H matrix to the 15 parameters.
- static void From(const Parameterized &h, Parameters *p) {
+ static void From(const Parameterized& h, Parameters* p) {
+ // clang-format off
*p << h(0, 0), h(0, 1), h(0, 2), h(0, 3),
h(1, 0), h(1, 1), h(1, 2), h(1, 3),
h(2, 0), h(2, 1), h(2, 2), h(2, 3),
h(3, 0), h(3, 1), h(3, 2);
+ // clang-format on
}
};
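These parameterizations are plain pack/unpack helpers used by the Ceres cost functors; a quick round-trip check of the 2D case in plain Eigen (8 parameters to a 3x3 matrix with H(2,2) fixed to 1, and back):

#include <Eigen/Dense>
#include <iostream>

int main() {
  // Pack: 8 parameters (a, b, ..., g, h) to a normalized homography.
  Eigen::Matrix<double, 8, 1> p;
  p << 3, -1, 4, 6, -2, -3, 1, -3;
  Eigen::Matrix3d H;
  H << p(0), p(1), p(2),
       p(3), p(4), p(5),
       p(6), p(7), 1.0;

  // Unpack: back to the 8 parameters, dropping the fixed H(2,2) == 1.
  Eigen::Matrix<double, 8, 1> p_back;
  p_back << H(0, 0), H(0, 1), H(0, 2),
            H(1, 0), H(1, 1), H(1, 2),
            H(2, 0), H(2, 1);

  std::cout << "round-trip error: " << (p_back - p).norm() << "\n";  // exactly 0
  return 0;
}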
diff --git a/intern/libmv/libmv/multiview/homography_test.cc b/intern/libmv/libmv/multiview/homography_test.cc
index 8d7266e3d11..87d1c85028d 100644
--- a/intern/libmv/libmv/multiview/homography_test.cc
+++ b/intern/libmv/libmv/multiview/homography_test.cc
@@ -18,10 +18,10 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "testing/testing.h"
+#include "libmv/multiview/homography.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
-#include "libmv/multiview/homography.h"
+#include "testing/testing.h"
namespace {
using namespace libmv;
@@ -34,9 +34,7 @@ namespace {
// TODO(sergey): Consider using this in all tests since possible homography
// matrix is not fixed to a single value and different-looking matrices
// might actually correspond to the same exact transform.
-void CheckHomography2DTransform(const Mat3 &H,
- const Mat &x1,
- const Mat &x2) {
+void CheckHomography2DTransform(const Mat3& H, const Mat& x1, const Mat& x2) {
for (int i = 0; i < x2.cols(); ++i) {
Vec3 x2_expected = x2.col(i);
Vec3 x2_observed = H * x1.col(i);
@@ -49,15 +47,19 @@ void CheckHomography2DTransform(const Mat3 &H,
TEST(Homography2DTest, Rotation45AndTranslationXY) {
Mat x1(3, 4);
+ // clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
+ // clang-format on
double angle = 45.0;
Mat3 m;
+ // clang-format off
m << cos(angle), -sin(angle), -2,
sin(angle), cos(angle), 5,
0, 0, 1;
+ // clang-format on
Mat x2 = x1;
// Transform point from ground truth matrix
@@ -76,13 +78,17 @@ TEST(Homography2DTest, Rotation45AndTranslationXY) {
TEST(Homography2DTest, AffineGeneral4) {
// TODO(julien) find why it doesn't work with 4 points!!!
Mat x1(3, 4);
+ // clang-format off
x1 << 0, 1, 0, 2,
0, 0, 1, 2,
1, 1, 1, 1;
+ // clang-format on
Mat3 m;
+ // clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
+ // clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@@ -109,13 +115,17 @@ TEST(Homography2DTest, AffineGeneral4) {
TEST(Homography2DTest, AffineGeneral5) {
Mat x1(3, 5);
+ // clang-format off
x1 << 0, 1, 0, 2, 5,
0, 0, 1, 2, 2,
1, 1, 1, 1, 1;
+ // clang-format on
Mat3 m;
+ // clang-format off
m << 3, -1, 4,
6, -2, -3,
0, 0, 1;
+ // clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@@ -142,13 +152,17 @@ TEST(Homography2DTest, AffineGeneral5) {
TEST(Homography2DTest, HomographyGeneral) {
Mat x1(3, 4);
+ // clang-format off
x1 << 0, 1, 0, 5,
0, 0, 2, 3,
1, 1, 1, 1;
+ // clang-format on
Mat3 m;
+ // clang-format off
m << 3, -1, 4,
6, -2, -3,
1, -3, 1;
+ // clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i)
@@ -164,10 +178,12 @@ TEST(Homography2DTest, HomographyGeneral) {
TEST(Homography3DTest, RotationAndTranslationXYZ) {
Mat x1(4, 5);
+ // clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
+ // clang-format on
Mat4 M;
M.setIdentity();
/*
@@ -178,24 +194,30 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
// Rotation on x + translation
double angle = 45.0;
Mat4 rot;
+ // clang-format off
rot << 1, 0, 0, 1,
0, cos(angle), -sin(angle), 3,
0, sin(angle), cos(angle), -2,
0, 0, 0, 1;
+ // clang-format on
M *= rot;
// Rotation on y
angle = 25.0;
+ // clang-format off
rot << cos(angle), 0, sin(angle), 0,
0, 1, 0, 0,
-sin(angle), 0, cos(angle), 0,
0, 0, 0, 1;
+ // clang-format on
M *= rot;
// Rotation on z
angle = 5.0;
+ // clang-format off
rot << cos(angle), -sin(angle), 0, 0,
sin(angle), cos(angle), 0, 0,
0, 0, 1, 0,
0, 0, 0, 1;
+ // clang-format on
M *= rot;
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@@ -212,15 +234,19 @@ TEST(Homography3DTest, RotationAndTranslationXYZ) {
TEST(Homography3DTest, AffineGeneral) {
Mat x1(4, 5);
+ // clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
+ // clang-format on
Mat4 m;
+ // clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
0, 0, 0, 1;
+ // clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
@@ -236,15 +262,19 @@ TEST(Homography3DTest, AffineGeneral) {
TEST(Homography3DTest, HomographyGeneral) {
Mat x1(4, 5);
+ // clang-format off
x1 << 0, 0, 1, 5, 2,
0, 1, 2, 3, 5,
0, 2, 0, 1, 5,
1, 1, 1, 1, 1;
+ // clang-format on
Mat4 m;
+ // clang-format off
m << 3, -1, 4, 1,
6, -2, -3, -6,
1, 0, 1, 2,
-3, 1, 0, 1;
+ // clang-format on
Mat x2 = x1;
for (int i = 0; i < x2.cols(); ++i) {
diff --git a/intern/libmv/libmv/multiview/nviewtriangulation.h b/intern/libmv/libmv/multiview/nviewtriangulation.h
index f4614ab1a5c..b2a320953a7 100644
--- a/intern/libmv/libmv/multiview/nviewtriangulation.h
+++ b/intern/libmv/libmv/multiview/nviewtriangulation.h
@@ -34,22 +34,22 @@ namespace libmv {
// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
// output, X, is a homogeneous four vectors.
-template<typename T>
-void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
- const vector<Matrix<T, 3, 4> > &Ps,
- Matrix<T, 4, 1> *X) {
+template <typename T>
+void NViewTriangulate(const Matrix<T, 2, Dynamic>& x,
+ const vector<Matrix<T, 3, 4>>& Ps,
+ Matrix<T, 4, 1>* X) {
int nviews = x.cols();
assert(nviews == Ps.size());
- Matrix<T, Dynamic, Dynamic> design(3*nviews, 4 + nviews);
+ Matrix<T, Dynamic, Dynamic> design(3 * nviews, 4 + nviews);
design.setConstant(0.0);
for (int i = 0; i < nviews; i++) {
- design.template block<3, 4>(3*i, 0) = -Ps[i];
- design(3*i + 0, 4 + i) = x(0, i);
- design(3*i + 1, 4 + i) = x(1, i);
- design(3*i + 2, 4 + i) = 1.0;
+ design.template block<3, 4>(3 * i, 0) = -Ps[i];
+ design(3 * i + 0, 4 + i) = x(0, i);
+ design(3 * i + 1, 4 + i) = x(1, i);
+ design(3 * i + 2, 4 + i) = 1.0;
}
- Matrix<T, Dynamic, 1> X_and_alphas;
+ Matrix<T, Dynamic, 1> X_and_alphas;
Nullspace(&design, &X_and_alphas);
X->resize(4);
*X = X_and_alphas.head(4);
@@ -60,16 +60,16 @@ void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
// This method uses the algebraic distance approximation.
// Note that this method works better when the 2D points are normalized
// with an isotropic normalization.
-template<typename T>
-void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic> &x,
- const vector<Matrix<T, 3, 4> > &Ps,
- Matrix<T, 4, 1> *X) {
+template <typename T>
+void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic>& x,
+ const vector<Matrix<T, 3, 4>>& Ps,
+ Matrix<T, 4, 1>* X) {
int nviews = x.cols();
assert(nviews == Ps.size());
- Matrix<T, Dynamic, 4> design(2*nviews, 4);
+ Matrix<T, Dynamic, 4> design(2 * nviews, 4);
for (int i = 0; i < nviews; i++) {
- design.template block<2, 4>(2*i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
+ design.template block<2, 4>(2 * i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
}
X->resize(4);
Nullspace(&design, X);
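To illustrate the design matrix assembled above, a standalone plain-Eigen sketch of the same linear triangulation for three synthetic cameras: each view contributes the block [-P_i | (x_i, y_i, 1) in its own column], the right null vector is taken via an SVD (the role played by libmv's Nullspace here), and its first four entries are the homogeneous point while the remaining entries are the per-view projective depths.

#include <Eigen/Dense>
#include <iostream>

int main() {
  const int kViews = 3;

  // Synthetic cameras P_i = [R_i | t_i] (identity intrinsics) and a world point.
  Eigen::Matrix<double, 3, 4> Ps[kViews];
  Eigen::Vector2d xs[kViews];
  const Eigen::Vector4d X_gt(0.3, -0.2, 5.0, 1.0);
  for (int i = 0; i < kViews; ++i) {
    const Eigen::Matrix3d R =
        Eigen::AngleAxisd(0.1 * i, Eigen::Vector3d::UnitY()).toRotationMatrix();
    Ps[i].leftCols<3>() = R;
    Ps[i].col(3) = Eigen::Vector3d(0.5 * i, 0.0, 0.0);
    const Eigen::Vector3d x = Ps[i] * X_gt;
    xs[i] = x.head<2>() / x(2);
  }

  // Design matrix: three rows per view, unknowns are (X, alpha_1..alpha_n).
  Eigen::MatrixXd design = Eigen::MatrixXd::Zero(3 * kViews, 4 + kViews);
  for (int i = 0; i < kViews; ++i) {
    design.block<3, 4>(3 * i, 0) = -Ps[i];
    design(3 * i + 0, 4 + i) = xs[i](0);
    design(3 * i + 1, 4 + i) = xs[i](1);
    design(3 * i + 2, 4 + i) = 1.0;
  }

  // Right null vector = last column of V (smallest singular value).
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(design, Eigen::ComputeFullV);
  const Eigen::VectorXd X_and_alphas = svd.matrixV().rightCols<1>();
  Eigen::Vector4d X = X_and_alphas.head<4>();
  X /= X(3);  // Dehomogenize for comparison with X_gt.

  std::cout << "triangulated X: " << X.transpose() << "\n";  // ~ (0.3 -0.2 5 1)
  return 0;
}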
diff --git a/intern/libmv/libmv/multiview/nviewtriangulation_test.cc b/intern/libmv/libmv/multiview/nviewtriangulation_test.cc
index 5a4d8499753..dba5fd07d5c 100644
--- a/intern/libmv/libmv/multiview/nviewtriangulation_test.cc
+++ b/intern/libmv/libmv/multiview/nviewtriangulation_test.cc
@@ -54,7 +54,7 @@ TEST(NViewTriangulate, FiveViews) {
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
- Vec3 x_reprojected = Ps[j]*X;
+ Vec3 x_reprojected = Ps[j] * X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head(2) - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
@@ -84,7 +84,7 @@ TEST(NViewTriangulateAlgebraic, FiveViews) {
// Check reprojection error. Should be nearly zero.
for (int j = 0; j < nviews; ++j) {
- Vec3 x_reprojected = Ps[j]*X;
+ Vec3 x_reprojected = Ps[j] * X;
x_reprojected /= x_reprojected(2);
double error = (x_reprojected.head<2>() - xs.col(j)).norm();
EXPECT_NEAR(error, 0.0, 1e-9);
diff --git a/intern/libmv/libmv/multiview/panography.cc b/intern/libmv/libmv/multiview/panography.cc
index b62802948c4..42b1c19d65e 100644
--- a/intern/libmv/libmv/multiview/panography.cc
+++ b/intern/libmv/libmv/multiview/panography.cc
@@ -24,8 +24,9 @@
namespace libmv {
static bool Build_Minimal2Point_PolynomialFactor(
- const Mat & x1, const Mat & x2,
- double * P) { // P must be a double[4]
+ const Mat& x1,
+ const Mat& x2,
+ double* P) { // P must be a double[4]
assert(2 == x1.rows());
assert(2 == x1.cols());
assert(x1.rows() == x2.rows());
@@ -40,11 +41,11 @@ static bool Build_Minimal2Point_PolynomialFactor(
Vec yx2 = (x2.col(1)).transpose();
double b12 = xx2.dot(yx2);
- double a1 = xx1.squaredNorm();
- double a2 = yx1.squaredNorm();
+ double a1 = xx1.squaredNorm();
+ double a2 = yx1.squaredNorm();
- double b1 = xx2.squaredNorm();
- double b2 = yx2.squaredNorm();
+ double b1 = xx2.squaredNorm();
+ double b2 = yx2.squaredNorm();
// Build the 3rd degree polynomial in F^2.
//
@@ -52,10 +53,12 @@ static bool Build_Minimal2Point_PolynomialFactor(
//
// Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
// Run panography_coeffs.py to get the below coefficients.
- P[0] = b1*b2*a12*a12-a1*a2*b12*b12;
- P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
- P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
- P[3] = b1+b2-2*b12-a1-a2+2*a12;
+ P[0] = b1 * b2 * a12 * a12 - a1 * a2 * b12 * b12;
+ P[1] = -2 * a1 * a2 * b12 + 2 * a12 * b1 * b2 + b1 * a12 * a12 +
+ b2 * a12 * a12 - a1 * b12 * b12 - a2 * b12 * b12;
+ P[2] = b1 * b2 - a1 * a2 - 2 * a1 * b12 - 2 * a2 * b12 + 2 * a12 * b1 +
+ 2 * a12 * b2 + a12 * a12 - b12 * b12;
+ P[3] = b1 + b2 - 2 * b12 - a1 - a2 + 2 * a12;
// If P[3] is equal to 0 we get ill-conditioned data.
return (P[3] != 0.0);
@@ -67,8 +70,9 @@ static bool Build_Minimal2Point_PolynomialFactor(
//
// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
// Stitching. CVPR07.
-void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
- vector<double> *fs) {
+void F_FromCorrespondance_2points(const Mat& x1,
+ const Mat& x2,
+ vector<double>* fs) {
// Build Polynomial factor to get squared focal value.
double P[4];
Build_Minimal2Point_PolynomialFactor(x1, x2, &P[0]);
@@ -79,8 +83,8 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
//
double roots[3];
int num_roots = SolveCubicPolynomial(P, roots);
- for (int i = 0; i < num_roots; ++i) {
- if (roots[i] > 0.0) {
+ for (int i = 0; i < num_roots; ++i) {
+ if (roots[i] > 0.0) {
fs->push_back(sqrt(roots[i]));
}
}
@@ -92,17 +96,18 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
// K. Arun, T. Huang and D. Blostein. Least-squares fitting of two 3-D point
// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
// 9:698-700, 1987.
-void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
+void GetR_FixedCameraCenter(const Mat& x1,
+ const Mat& x2,
const double focal,
- Mat3 *R) {
+ Mat3* R) {
assert(3 == x1.rows());
assert(2 <= x1.cols());
assert(x1.rows() == x2.rows());
assert(x1.cols() == x2.cols());
// Build simplified K matrix
- Mat3 K(Mat3::Identity() * 1.0/focal);
- K(2, 2)= 1.0;
+ Mat3 K(Mat3::Identity() * 1.0 / focal);
+ K(2, 2) = 1.0;
// Build the correlation matrix; equation (22) in [1].
Mat3 C = Mat3::Zero();
@@ -115,9 +120,9 @@ void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
// Solve for rotation. Equations (24) and (25) in [1].
Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
Mat3 scale = Mat3::Identity();
- scale(2, 2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
- ? 1.0
- : -1.0;
+ scale(2, 2) =
+ ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0) ? 1.0
+ : -1.0;
(*R) = svd.matrixU() * scale * svd.matrixV().transpose();
}
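GetR_FixedCameraCenter above is the classic orthogonal-Procrustes rotation fit: accumulate the correlation matrix C = sum_i x2_i x1_i^T, take its SVD, and fix the determinant sign so the result is a proper rotation rather than a reflection. A self-contained Eigen sketch of just that core step follows; FitRotation is an illustrative name and the simplified K handling above is omitted.

#include <Eigen/Dense>

// Least-squares rotation R minimizing sum_i || x2_i - R * x1_i || for rays
// sharing a fixed camera center (equations (22), (24), (25) cited above).
Eigen::Matrix3d FitRotation(const Eigen::Matrix3Xd& x1,
                            const Eigen::Matrix3Xd& x2) {
  Eigen::Matrix3d C = Eigen::Matrix3d::Zero();
  for (int i = 0; i < x1.cols(); ++i) {
    C += x2.col(i) * x1.col(i).transpose();  // Correlation matrix.
  }
  Eigen::JacobiSVD<Eigen::Matrix3d> svd(
      C, Eigen::ComputeFullU | Eigen::ComputeFullV);
  Eigen::Matrix3d S = Eigen::Matrix3d::Identity();
  // Guard against a reflection: force det(R) = +1.
  if ((svd.matrixU() * svd.matrixV().transpose()).determinant() < 0.0) {
    S(2, 2) = -1.0;
  }
  return svd.matrixU() * S * svd.matrixV().transpose();
}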
diff --git a/intern/libmv/libmv/multiview/panography.h b/intern/libmv/libmv/multiview/panography.h
index 6e87bd71304..5860a34d1fd 100644
--- a/intern/libmv/libmv/multiview/panography.h
+++ b/intern/libmv/libmv/multiview/panography.h
@@ -22,9 +22,9 @@
#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
#define LIBMV_MULTIVIEW_PANOGRAPHY_H
+#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
-#include "libmv/base/vector.h"
namespace libmv {
@@ -53,8 +53,9 @@ namespace libmv {
// K = [0 f 0]
// [0 0 1]
//
-void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
- vector<double> *fs);
+void F_FromCorrespondance_2points(const Mat& x1,
+ const Mat& x2,
+ vector<double>* fs);
// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
// square sense. The method is from:
@@ -90,9 +91,10 @@ void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
//
// R = arg min || X2 - R * x1 ||
//
-void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
+void GetR_FixedCameraCenter(const Mat& x1,
+ const Mat& x2,
const double focal,
- Mat3 *R);
+ Mat3* R);
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/panography_kernel.cc b/intern/libmv/libmv/multiview/panography_kernel.cc
index 8fdc9e79aed..e8ba648e352 100644
--- a/intern/libmv/libmv/multiview/panography_kernel.cc
+++ b/intern/libmv/libmv/multiview/panography_kernel.cc
@@ -25,7 +25,7 @@ namespace libmv {
namespace panography {
namespace kernel {
-void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
+void TwoPointSolver::Solve(const Mat& x1, const Mat& x2, vector<Mat3>* Hs) {
// Solve for the focal lengths.
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
@@ -34,7 +34,7 @@ void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
Mat x1h, x2h;
EuclideanToHomogeneous(x1, &x1h);
EuclideanToHomogeneous(x2, &x2h);
- for (int i = 0; i < fs.size(); ++i) {
+ for (int i = 0; i < fs.size(); ++i) {
Mat3 K1 = Mat3::Identity() * fs[i];
K1(2, 2) = 1.0;
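For orientation: the part of TwoPointSolver that falls outside this hunk pairs each focal candidate with a rotation from GetR_FixedCameraCenter and composes a 2D homography between the two images. Under the pure-rotation camera model this solver assumes, that homography has the form H = K2 * R * K1^{-1}; a minimal Eigen sketch of the composition for a shared focal length is shown below (RotationHomography is an illustrative name, not part of this patch).

#include <Eigen/Dense>

// For a camera rotating about its optical center, matching homogeneous image
// points satisfy x2 ~ K * R * K^{-1} * x1 when both views share the focal f.
Eigen::Matrix3d RotationHomography(double focal, const Eigen::Matrix3d& R) {
  Eigen::Matrix3d K = Eigen::Matrix3d::Identity() * focal;
  K(2, 2) = 1.0;  // Simplified intrinsics: square pixels, principal point 0.
  return K * R * K.inverse();
}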
diff --git a/intern/libmv/libmv/multiview/panography_kernel.h b/intern/libmv/libmv/multiview/panography_kernel.h
index a6adbd54b20..d50d3ea0789 100644
--- a/intern/libmv/libmv/multiview/panography_kernel.h
+++ b/intern/libmv/libmv/multiview/panography_kernel.h
@@ -23,9 +23,9 @@
#include "libmv/base/vector.h"
#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/homography_error.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/two_view_kernel.h"
-#include "libmv/multiview/homography_error.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@@ -34,18 +34,18 @@ namespace kernel {
struct TwoPointSolver {
enum { MINIMUM_SAMPLES = 2 };
- static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs);
+ static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* Hs);
};
-typedef two_view::kernel::Kernel<
- TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
- UnnormalizedKernel;
+typedef two_view::kernel::
+ Kernel<TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
+ UnnormalizedKernel;
typedef two_view::kernel::Kernel<
- two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
- homography::homography2D::AsymmetricError,
- Mat3>
- Kernel;
+ two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
+ homography::homography2D::AsymmetricError,
+ Mat3>
+ Kernel;
} // namespace kernel
} // namespace panography
diff --git a/intern/libmv/libmv/multiview/panography_test.cc b/intern/libmv/libmv/multiview/panography_test.cc
index 96d52acfc3c..a7cbd371d40 100644
--- a/intern/libmv/libmv/multiview/panography_test.cc
+++ b/intern/libmv/libmv/multiview/panography_test.cc
@@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "libmv/logging/logging.h"
#include "libmv/multiview/panography.h"
+#include "libmv/logging/logging.h"
#include "libmv/multiview/panography_kernel.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
@@ -30,18 +30,16 @@ namespace {
TEST(Panography, PrintSomeSharedFocalEstimationValues) {
Mat x1(2, 2), x2(2, 2);
- x1<< 158, 78,
- 124, 113;
- x2<< 300, 214,
- 125, 114;
+ x1 << 158, 78, 124, 113;
+ x2 << 300, 214, 125, 114;
// Normalize data (set principal point 0,0 and image border to 1.0).
x1.block<1, 2>(0, 0) /= 320;
x1.block<1, 2>(1, 0) /= 240;
x2.block<1, 2>(0, 0) /= 320;
x2.block<1, 2>(1, 0) /= 240;
- x1+=Mat2::Constant(0.5);
- x2+=Mat2::Constant(0.5);
+ x1 += Mat2::Constant(0.5);
+ x2 += Mat2::Constant(0.5);
vector<double> fs;
F_FromCorrespondance_2points(x1, x2, &fs);
@@ -53,9 +51,11 @@ TEST(Panography, PrintSomeSharedFocalEstimationValues) {
TEST(Panography, GetR_FixedCameraCenterWithIdentity) {
Mat x1(3, 3);
+ // clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10.0, 10.0, 10.0;
+ // clang-format on
Mat3 R;
GetR_FixedCameraCenter(x1, x1, 1.0, &R);
@@ -68,16 +68,20 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
int n = 3;
Mat x1(3, n);
+ // clang-format off
x1 << 0.5, 0.6, 0.7,
0.5, 0.5, 0.4,
10, 10, 10;
+ // clang-format on
Mat x2 = x1;
const double alpha = 30.0 * M_PI / 180.0;
Mat3 rotY;
+ // clang-format off
rotY << cos(alpha), 0, -sin(alpha),
0, 1, 0,
sin(alpha), 0, cos(alpha);
+ // clang-format on
for (int i = 0; i < n; ++i) {
x2.block<3, 1>(0, i) = rotY * x1.col(i);
@@ -101,17 +105,23 @@ TEST(Panography, Homography_GetR_Test_PitchY30) {
TEST(MinimalPanoramic, Real_Case_Kernel) {
const int n = 2;
Mat x1(2, n); // From image 0.jpg
+ // clang-format off
x1<< 158, 78,
124, 113;
+ // clang-format on
Mat x2(2, n); // From image 3.jpg
+ // clang-format off
x2<< 300, 214,
125, 114;
+ // clang-format on
Mat3 Ground_TruthHomography;
+ // clang-format off
Ground_TruthHomography<< 1, 0.02, 129.83,
-0.02, 1.012, 0.07823,
0, 0, 1;
+ // clang-format on
vector<Mat3> Hs;
@@ -130,7 +140,7 @@ TEST(MinimalPanoramic, Real_Case_Kernel) {
// Assert that residuals are small enough
for (int i = 0; i < n; ++i) {
Vec x1p = H * x1h.col(i);
- Vec residuals = x1p/x1p(2) - x2h.col(i);
+ Vec residuals = x1p / x1p(2) - x2h.col(i);
EXPECT_MATRIX_NEAR_ZERO(residuals, 1e-5);
}
}
diff --git a/intern/libmv/libmv/multiview/projection.cc b/intern/libmv/libmv/multiview/projection.cc
index f8bece3de68..001da89e127 100644
--- a/intern/libmv/libmv/multiview/projection.cc
+++ b/intern/libmv/libmv/multiview/projection.cc
@@ -23,13 +23,13 @@
namespace libmv {
-void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P) {
+void P_From_KRt(const Mat3& K, const Mat3& R, const Vec3& t, Mat34* P) {
P->block<3, 3>(0, 0) = R;
P->col(3) = t;
(*P) = K * (*P);
}
-void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
+void KRt_From_P(const Mat34& P, Mat3* Kp, Mat3* Rp, Vec3* tp) {
// Decompose using the RQ decomposition, HZ A4.1.1, p. 579.
Mat3 K = P.block(0, 0, 3, 3);
@@ -44,9 +44,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qx;
+ // clang-format off
Qx << 1, 0, 0,
0, c, -s,
0, s, c;
+ // clang-format on
K = K * Qx;
Q = Qx.transpose() * Q;
}
@@ -58,9 +60,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qy;
+ // clang-format off
Qy << c, 0, s,
0, 1, 0,
-s, 0, c;
+ // clang-format on
K = K * Qy;
Q = Qy.transpose() * Q;
}
@@ -72,9 +76,11 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
c /= l;
s /= l;
Mat3 Qz;
+ // clang-format off
Qz << c, -s, 0,
s, c, 0,
0, 0, 1;
+ // clang-format on
K = K * Qz;
Q = Qz.transpose() * Q;
}
@@ -92,17 +98,21 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
}
if (K(1, 1) < 0) {
Mat3 S;
+ // clang-format off
S << 1, 0, 0,
0, -1, 0,
0, 0, 1;
+ // clang-format on
K = K * S;
R = S * R;
}
if (K(0, 0) < 0) {
Mat3 S;
+ // clang-format off
S << -1, 0, 0,
0, 1, 0,
0, 0, 1;
+ // clang-format on
K = K * S;
R = S * R;
}
@@ -122,26 +132,30 @@ void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
*tp = t;
}
-void ProjectionShiftPrincipalPoint(const Mat34 &P,
- const Vec2 &principal_point,
- const Vec2 &principal_point_new,
- Mat34 *P_new) {
+void ProjectionShiftPrincipalPoint(const Mat34& P,
+ const Vec2& principal_point,
+ const Vec2& principal_point_new,
+ Mat34* P_new) {
Mat3 T;
+ // clang-format off
T << 1, 0, principal_point_new(0) - principal_point(0),
0, 1, principal_point_new(1) - principal_point(1),
0, 0, 1;
+ // clang-format on
*P_new = T * P;
}
-void ProjectionChangeAspectRatio(const Mat34 &P,
- const Vec2 &principal_point,
+void ProjectionChangeAspectRatio(const Mat34& P,
+ const Vec2& principal_point,
double aspect_ratio,
double aspect_ratio_new,
- Mat34 *P_new) {
+ Mat34* P_new) {
Mat3 T;
+ // clang-format off
T << 1, 0, 0,
0, aspect_ratio_new / aspect_ratio, 0,
0, 0, 1;
+ // clang-format on
Mat34 P_temp;
ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0, 0), &P_temp);
@@ -149,7 +163,7 @@ void ProjectionChangeAspectRatio(const Mat34 &P,
ProjectionShiftPrincipalPoint(P_temp, Vec2(0, 0), principal_point, P_new);
}
-void HomogeneousToEuclidean(const Mat &H, Mat *X) {
+void HomogeneousToEuclidean(const Mat& H, Mat* X) {
int d = H.rows() - 1;
int n = H.cols();
X->resize(d, n);
@@ -161,29 +175,29 @@ void HomogeneousToEuclidean(const Mat &H, Mat *X) {
}
}
-void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e) {
+void HomogeneousToEuclidean(const Mat3X& h, Mat2X* e) {
e->resize(2, h.cols());
e->row(0) = h.row(0).array() / h.row(2).array();
e->row(1) = h.row(1).array() / h.row(2).array();
}
-void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e) {
+void HomogeneousToEuclidean(const Mat4X& h, Mat3X* e) {
e->resize(3, h.cols());
e->row(0) = h.row(0).array() / h.row(3).array();
e->row(1) = h.row(1).array() / h.row(3).array();
e->row(2) = h.row(2).array() / h.row(3).array();
}
-void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X) {
+void HomogeneousToEuclidean(const Vec3& H, Vec2* X) {
double w = H(2);
*X << H(0) / w, H(1) / w;
}
-void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X) {
+void HomogeneousToEuclidean(const Vec4& H, Vec3* X) {
double w = H(3);
*X << H(0) / w, H(1) / w, H(2) / w;
}
-void EuclideanToHomogeneous(const Mat &X, Mat *H) {
+void EuclideanToHomogeneous(const Mat& X, Mat* H) {
int d = X.rows();
int n = X.cols();
H->resize(d + 1, n);
@@ -191,32 +205,32 @@ void EuclideanToHomogeneous(const Mat &X, Mat *H) {
H->row(d).setOnes();
}
-void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H) {
+void EuclideanToHomogeneous(const Vec2& X, Vec3* H) {
*H << X(0), X(1), 1;
}
-void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H) {
+void EuclideanToHomogeneous(const Vec3& X, Vec4* H) {
*H << X(0), X(1), X(2), 1;
}
// TODO(julien) Call conditioning.h/ApplyTransformationToPoints ?
-void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
+void EuclideanToNormalizedCamera(const Mat2X& x, const Mat3& K, Mat2X* n) {
Mat3X x_image_h;
EuclideanToHomogeneous(x, &x_image_h);
Mat3X x_camera_h = K.inverse() * x_image_h;
HomogeneousToEuclidean(x_camera_h, n);
}
-void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
+void HomogeneousToNormalizedCamera(const Mat3X& x, const Mat3& K, Mat2X* n) {
Mat3X x_camera_h = K.inverse() * x;
HomogeneousToEuclidean(x_camera_h, n);
}
-double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X) {
- return (R*X)(2) + t(2);
+double Depth(const Mat3& R, const Vec3& t, const Vec3& X) {
+ return (R * X)(2) + t(2);
}
-double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X) {
+double Depth(const Mat3& R, const Vec3& t, const Vec4& X) {
Vec3 Xe = X.head<3>() / X(3);
return Depth(R, t, Xe);
}
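The projection helpers reformatted above are the building blocks the tests below rely on: compose P = K[R|t], project a point, and use Depth() as a cheirality check. A compact standalone Eigen sketch of those three operations follows (plain Eigen types instead of libmv's Mat34/Vec3 typedefs; the function names are illustrative).

#include <Eigen/Dense>

using Mat34d = Eigen::Matrix<double, 3, 4>;

// P = K [R | t].
Mat34d ComposeP(const Eigen::Matrix3d& K,
                const Eigen::Matrix3d& R,
                const Eigen::Vector3d& t) {
  Mat34d P;
  P.leftCols<3>() = R;
  P.col(3) = t;
  return K * P;
}

// Pinhole projection of a Euclidean 3D point.
Eigen::Vector2d ProjectPoint(const Mat34d& P, const Eigen::Vector3d& X) {
  const Eigen::Vector3d hx = P * X.homogeneous();
  return hx.hnormalized();
}

// Depth of X in the camera frame; positive means in front of the camera.
double PointDepth(const Eigen::Matrix3d& R,
                  const Eigen::Vector3d& t,
                  const Eigen::Vector3d& X) {
  const Eigen::Vector3d Xc = R * X + t;
  return Xc(2);
}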
diff --git a/intern/libmv/libmv/multiview/projection.h b/intern/libmv/libmv/multiview/projection.h
index 8f304f31ec6..ba8fc5d8303 100644
--- a/intern/libmv/libmv/multiview/projection.h
+++ b/intern/libmv/libmv/multiview/projection.h
@@ -25,108 +25,108 @@
namespace libmv {
-void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P);
-void KRt_From_P(const Mat34 &P, Mat3 *K, Mat3 *R, Vec3 *t);
+void P_From_KRt(const Mat3& K, const Mat3& R, const Vec3& t, Mat34* P);
+void KRt_From_P(const Mat34& P, Mat3* K, Mat3* R, Vec3* t);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the principal point becomes principal_point_new.
-void ProjectionShiftPrincipalPoint(const Mat34 &P,
- const Vec2 &principal_point,
- const Vec2 &principal_point_new,
- Mat34 *P_new);
+void ProjectionShiftPrincipalPoint(const Mat34& P,
+ const Vec2& principal_point,
+ const Vec2& principal_point_new,
+ Mat34* P_new);
// Applies a change of basis to the image coordinates of the projection matrix
// so that the aspect ratio becomes aspect_ratio_new. This is done by
// stretching the y axis. The aspect ratio is defined as the ratio between
// the focal lengths along the y and x axes.
-void ProjectionChangeAspectRatio(const Mat34 &P,
- const Vec2 &principal_point,
+void ProjectionChangeAspectRatio(const Mat34& P,
+ const Vec2& principal_point,
double aspect_ratio,
double aspect_ratio_new,
- Mat34 *P_new);
-
-void HomogeneousToEuclidean(const Mat &H, Mat *X);
-void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e);
-void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e);
-void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X);
-void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X);
-inline Vec2 HomogeneousToEuclidean(const Vec3 &h) {
+ Mat34* P_new);
+
+void HomogeneousToEuclidean(const Mat& H, Mat* X);
+void HomogeneousToEuclidean(const Mat3X& h, Mat2X* e);
+void HomogeneousToEuclidean(const Mat4X& h, Mat3X* e);
+void HomogeneousToEuclidean(const Vec3& H, Vec2* X);
+void HomogeneousToEuclidean(const Vec4& H, Vec3* X);
+inline Vec2 HomogeneousToEuclidean(const Vec3& h) {
return h.head<2>() / h(2);
}
-inline Vec3 HomogeneousToEuclidean(const Vec4 &h) {
+inline Vec3 HomogeneousToEuclidean(const Vec4& h) {
return h.head<3>() / h(3);
}
-inline Mat2X HomogeneousToEuclidean(const Mat3X &h) {
+inline Mat2X HomogeneousToEuclidean(const Mat3X& h) {
Mat2X e(2, h.cols());
e.row(0) = h.row(0).array() / h.row(2).array();
e.row(1) = h.row(1).array() / h.row(2).array();
return e;
}
-void EuclideanToHomogeneous(const Mat &X, Mat *H);
-inline Mat3X EuclideanToHomogeneous(const Mat2X &x) {
+void EuclideanToHomogeneous(const Mat& X, Mat* H);
+inline Mat3X EuclideanToHomogeneous(const Mat2X& x) {
Mat3X h(3, x.cols());
h.block(0, 0, 2, x.cols()) = x;
h.row(2).setOnes();
return h;
}
-inline void EuclideanToHomogeneous(const Mat2X &x, Mat3X *h) {
+inline void EuclideanToHomogeneous(const Mat2X& x, Mat3X* h) {
h->resize(3, x.cols());
h->block(0, 0, 2, x.cols()) = x;
h->row(2).setOnes();
}
-inline Mat4X EuclideanToHomogeneous(const Mat3X &x) {
+inline Mat4X EuclideanToHomogeneous(const Mat3X& x) {
Mat4X h(4, x.cols());
h.block(0, 0, 3, x.cols()) = x;
h.row(3).setOnes();
return h;
}
-inline void EuclideanToHomogeneous(const Mat3X &x, Mat4X *h) {
+inline void EuclideanToHomogeneous(const Mat3X& x, Mat4X* h) {
h->resize(4, x.cols());
h->block(0, 0, 3, x.cols()) = x;
h->row(3).setOnes();
}
-void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H);
-void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H);
-inline Vec3 EuclideanToHomogeneous(const Vec2 &x) {
+void EuclideanToHomogeneous(const Vec2& X, Vec3* H);
+void EuclideanToHomogeneous(const Vec3& X, Vec4* H);
+inline Vec3 EuclideanToHomogeneous(const Vec2& x) {
return Vec3(x(0), x(1), 1);
}
-inline Vec4 EuclideanToHomogeneous(const Vec3 &x) {
+inline Vec4 EuclideanToHomogeneous(const Vec3& x) {
return Vec4(x(0), x(1), x(2), 1);
}
// Conversion from image coordinates to normalized camera coordinates
-void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n);
-void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n);
+void EuclideanToNormalizedCamera(const Mat2X& x, const Mat3& K, Mat2X* n);
+void HomogeneousToNormalizedCamera(const Mat3X& x, const Mat3& K, Mat2X* n);
-inline Vec2 Project(const Mat34 &P, const Vec3 &X) {
+inline Vec2 Project(const Mat34& P, const Vec3& X) {
Vec4 HX;
HX << X, 1.0;
Vec3 hx = P * HX;
return hx.head<2>() / hx(2);
}
-inline void Project(const Mat34 &P, const Vec4 &X, Vec3 *x) {
+inline void Project(const Mat34& P, const Vec4& X, Vec3* x) {
*x = P * X;
}
-inline void Project(const Mat34 &P, const Vec4 &X, Vec2 *x) {
+inline void Project(const Mat34& P, const Vec4& X, Vec2* x) {
Vec3 hx = P * X;
*x = hx.head<2>() / hx(2);
}
-inline void Project(const Mat34 &P, const Vec3 &X, Vec3 *x) {
+inline void Project(const Mat34& P, const Vec3& X, Vec3* x) {
Vec4 HX;
HX << X, 1.0;
Project(P, HX, x);
}
-inline void Project(const Mat34 &P, const Vec3 &X, Vec2 *x) {
+inline void Project(const Mat34& P, const Vec3& X, Vec2* x) {
Vec3 hx;
Project(P, X, &hx);
*x = hx.head<2>() / hx(2);
}
-inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
+inline void Project(const Mat34& P, const Mat4X& X, Mat2X* x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec3 hx = P * X.col(c);
@@ -134,13 +134,13 @@ inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
}
}
-inline Mat2X Project(const Mat34 &P, const Mat4X &X) {
+inline Mat2X Project(const Mat34& P, const Mat4X& X) {
Mat2X x;
Project(P, X, &x);
return x;
}
-inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
+inline void Project(const Mat34& P, const Mat3X& X, Mat2X* x) {
x->resize(2, X.cols());
for (int c = 0; c < X.cols(); ++c) {
Vec4 HX;
@@ -150,7 +150,7 @@ inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
}
}
-inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
+inline void Project(const Mat34& P, const Mat3X& X, const Vecu& ids, Mat2X* x) {
x->resize(2, ids.size());
Vec4 HX;
Vec3 hx;
@@ -161,26 +161,26 @@ inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
}
}
-inline Mat2X Project(const Mat34 &P, const Mat3X &X) {
+inline Mat2X Project(const Mat34& P, const Mat3X& X) {
Mat2X x(2, X.cols());
Project(P, X, &x);
return x;
}
-inline Mat2X Project(const Mat34 &P, const Mat3X &X, const Vecu &ids) {
+inline Mat2X Project(const Mat34& P, const Mat3X& X, const Vecu& ids) {
Mat2X x(2, ids.size());
Project(P, X, ids, &x);
return x;
}
-double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X);
-double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X);
+double Depth(const Mat3& R, const Vec3& t, const Vec3& X);
+double Depth(const Mat3& R, const Vec3& t, const Vec4& X);
/**
-* Returns true if the homogenious 3D point X is in front of
-* the camera P.
-*/
-inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
+ * Returns true if the homogeneous 3D point X is in front of
+ * the camera P.
+ */
+inline bool isInFrontOfCamera(const Mat34& P, const Vec4& X) {
double condition_1 = P.row(2).dot(X) * X[3];
double condition_2 = X[2] * X[3];
if (condition_1 > 0 && condition_2 > 0)
@@ -189,37 +189,37 @@ inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
return false;
}
-inline bool isInFrontOfCamera(const Mat34 &P, const Vec3 &X) {
+inline bool isInFrontOfCamera(const Mat34& P, const Vec3& X) {
Vec4 X_homo;
X_homo.segment<3>(0) = X;
X_homo(3) = 1;
- return isInFrontOfCamera( P, X_homo);
+ return isInFrontOfCamera(P, X_homo);
}
/**
-* Transforms a 2D point from pixel image coordinates to a 2D point in
-* normalized image coordinates.
-*/
-inline Vec2 ImageToNormImageCoordinates(Mat3 &Kinverse, Vec2 &x) {
- Vec3 x_h = Kinverse*EuclideanToHomogeneous(x);
- return HomogeneousToEuclidean( x_h );
+ * Transforms a 2D point from pixel image coordinates to a 2D point in
+ * normalized image coordinates.
+ */
+inline Vec2 ImageToNormImageCoordinates(Mat3& Kinverse, Vec2& x) {
+ Vec3 x_h = Kinverse * EuclideanToHomogeneous(x);
+ return HomogeneousToEuclidean(x_h);
}
/// Estimates the root mean square error (2D)
-inline double RootMeanSquareError(const Mat2X &x_image,
- const Mat4X &X_world,
- const Mat34 &P) {
+inline double RootMeanSquareError(const Mat2X& x_image,
+ const Mat4X& X_world,
+ const Mat34& P) {
size_t num_points = x_image.cols();
Mat2X dx = Project(P, X_world) - x_image;
return dx.norm() / num_points;
}
/// Estimates the root mean square error (2D)
-inline double RootMeanSquareError(const Mat2X &x_image,
- const Mat3X &X_world,
- const Mat3 &K,
- const Mat3 &R,
- const Vec3 &t) {
+inline double RootMeanSquareError(const Mat2X& x_image,
+ const Mat3X& X_world,
+ const Mat3& K,
+ const Mat3& R,
+ const Vec3& t) {
Mat34 P;
P_From_KRt(K, R, t, &P);
size_t num_points = x_image.cols();
diff --git a/intern/libmv/libmv/multiview/projection_test.cc b/intern/libmv/libmv/multiview/projection_test.cc
index 40e766bfae7..683edefa99c 100644
--- a/intern/libmv/libmv/multiview/projection_test.cc
+++ b/intern/libmv/libmv/multiview/projection_test.cc
@@ -29,14 +29,18 @@ using namespace libmv;
TEST(Projection, P_From_KRt) {
Mat3 K, Kp;
+ // clang-format off
K << 10, 1, 30,
0, 20, 40,
0, 0, 1;
+ // clang-format on
Mat3 R, Rp;
+ // clang-format off
R << 1, 0, 0,
0, 1, 0,
0, 0, 1;
+ // clang-format on
Vec3 t, tp;
t << 1, 2, 3;
@@ -62,16 +66,18 @@ Vec4 GetRandomPoint() {
TEST(Projection, isInFrontOfCamera) {
Mat34 P;
+ // clang-format off
P << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
+ // clang-format on
Vec4 X_front = GetRandomPoint();
Vec4 X_back = GetRandomPoint();
- X_front(2) = 10; /* Any point in the positive Z direction
- * where Z > 1 is in front of the camera. */
- X_back(2) = -10; /* Any point in the negative Z direction
- * is behind the camera. */
+ X_front(2) = 10; /* Any point in the positive Z direction
+ * where Z > 1 is in front of the camera. */
+ X_back(2) = -10; /* Any point in the negative Z direction
+ * is behind the camera. */
bool res_front = isInFrontOfCamera(P, X_front);
bool res_back = isInFrontOfCamera(P, X_back);
@@ -82,12 +88,14 @@ TEST(Projection, isInFrontOfCamera) {
TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
Mat34 P1, P2;
+ // clang-format off
P1 << 1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
+ // clang-format on
Mat34 P1_computed, P2_computed;
ProjectionShiftPrincipalPoint(P1, Vec2(0, 0), Vec2(3, 4), &P2_computed);
ProjectionShiftPrincipalPoint(P2, Vec2(3, 4), Vec2(0, 0), &P1_computed);
@@ -98,12 +106,14 @@ TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
TEST(AutoCalibration, ProjectionChangeAspectRatio) {
Mat34 P1, P2;
+ // clang-format off
P1 << 1, 0, 3, 0,
0, 1, 4, 0,
0, 0, 1, 0;
P2 << 1, 0, 3, 0,
0, 2, 4, 0,
0, 0, 1, 0;
+ // clang-format on
Mat34 P1_computed, P2_computed;
ProjectionChangeAspectRatio(P1, Vec2(3, 4), 1, 2, &P2_computed);
ProjectionChangeAspectRatio(P2, Vec2(3, 4), 2, 1, &P1_computed);
diff --git a/intern/libmv/libmv/multiview/resection.h b/intern/libmv/libmv/multiview/resection.h
index c142d6deeb2..e543827c932 100644
--- a/intern/libmv/libmv/multiview/resection.h
+++ b/intern/libmv/libmv/multiview/resection.h
@@ -33,23 +33,23 @@ namespace libmv {
namespace resection {
// x's are 2D image points (implicitly (x, y, 1) in homogeneous form) and X's are homogeneous four-vectors.
-template<typename T>
-void Resection(const Matrix<T, 2, Dynamic> &x,
- const Matrix<T, 4, Dynamic> &X,
- Matrix<T, 3, 4> *P) {
+template <typename T>
+void Resection(const Matrix<T, 2, Dynamic>& x,
+ const Matrix<T, 4, Dynamic>& X,
+ Matrix<T, 3, 4>* P) {
int N = x.cols();
assert(X.cols() == N);
- Matrix<T, Dynamic, 12> design(2*N, 12);
+ Matrix<T, Dynamic, 12> design(2 * N, 12);
design.setZero();
for (int i = 0; i < N; i++) {
T xi = x(0, i);
T yi = x(1, i);
// See equation (7.2) on page 179 of H&Z.
- design.template block<1, 4>(2*i, 4) = -X.col(i).transpose();
- design.template block<1, 4>(2*i, 8) = yi*X.col(i).transpose();
- design.template block<1, 4>(2*i + 1, 0) = X.col(i).transpose();
- design.template block<1, 4>(2*i + 1, 8) = -xi*X.col(i).transpose();
+ design.template block<1, 4>(2 * i, 4) = -X.col(i).transpose();
+ design.template block<1, 4>(2 * i, 8) = yi * X.col(i).transpose();
+ design.template block<1, 4>(2 * i + 1, 0) = X.col(i).transpose();
+ design.template block<1, 4>(2 * i + 1, 8) = -xi * X.col(i).transpose();
}
Matrix<T, 12, 1> p;
Nullspace(&design, &p);
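Resection() above is the DLT camera solve of H&Z eq. (7.2): every 2D-3D correspondence contributes two rows that are linear in the 12 entries of P, and the nullspace vector of the 2N x 12 design is reshaped into the 3x4 camera. The reshape itself lies outside this hunk; the row-stacked layout below follows from the design-matrix blocks shown above. A standalone Eigen sketch, with JacobiSVD standing in for libmv's Nullspace() and ResectionDLT as an illustrative name:

#include <Eigen/Dense>

// DLT resection: x_i ~ P * X_i written as two linear equations per point.
Eigen::Matrix<double, 3, 4> ResectionDLT(
    const Eigen::Matrix2Xd& x,    // 2D image points, one per column.
    const Eigen::Matrix4Xd& X) {  // Homogeneous 3D points, one per column.
  const int N = static_cast<int>(x.cols());
  Eigen::MatrixXd design = Eigen::MatrixXd::Zero(2 * N, 12);
  for (int i = 0; i < N; ++i) {
    const double xi = x(0, i);
    const double yi = x(1, i);
    design.block<1, 4>(2 * i + 0, 4) = -X.col(i).transpose();
    design.block<1, 4>(2 * i + 0, 8) = yi * X.col(i).transpose();
    design.block<1, 4>(2 * i + 1, 0) = X.col(i).transpose();
    design.block<1, 4>(2 * i + 1, 8) = -xi * X.col(i).transpose();
  }
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(design, Eigen::ComputeFullV);
  const Eigen::VectorXd p = svd.matrixV().col(11);
  Eigen::Matrix<double, 3, 4> P;
  P.row(0) = p.segment<4>(0).transpose();  // Row-stacked parameterization.
  P.row(1) = p.segment<4>(4).transpose();
  P.row(2) = p.segment<4>(8).transpose();
  return P;
}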
diff --git a/intern/libmv/libmv/multiview/resection_test.cc b/intern/libmv/libmv/multiview/resection_test.cc
index 368e2281cfa..fb075d02d69 100644
--- a/intern/libmv/libmv/multiview/resection_test.cc
+++ b/intern/libmv/libmv/multiview/resection_test.cc
@@ -20,12 +20,12 @@
#include <iostream>
+#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/resection.h"
#include "libmv/multiview/test_data_sets.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
-#include "libmv/logging/logging.h"
namespace {
@@ -40,15 +40,15 @@ TEST(Resection, ThreeViews) {
Mat4X X(4, npoints);
X.block(0, 0, 3, npoints) = d.X;
X.row(3).setOnes();
- const Mat2X &x = d.x[i];
+ const Mat2X& x = d.x[i];
Mat34 P;
Resection(x, X, &P);
Mat34 P_expected = d.P(i);
// Because the P matrices are homogeneous, it is necessary to be tricky
// about the scale factor to make them match.
- P_expected *= 1/P_expected.array().abs().sum();
- P *= 1/P.array().abs().sum();
+ P_expected *= 1 / P_expected.array().abs().sum();
+ P *= 1 / P.array().abs().sum();
if (!((P(0, 0) > 0 && P_expected(0, 0) > 0) ||
(P(0, 0) < 0 && P_expected(0, 0) < 0))) {
P *= -1;
diff --git a/intern/libmv/libmv/multiview/test_data_sets.cc b/intern/libmv/libmv/multiview/test_data_sets.cc
index 110bde6f762..a927c166d19 100644
--- a/intern/libmv/libmv/multiview/test_data_sets.cc
+++ b/intern/libmv/libmv/multiview/test_data_sets.cc
@@ -22,24 +22,28 @@
#include <cmath>
-#include "libmv/numeric/numeric.h"
-#include "libmv/multiview/projection.h"
#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
TwoViewDataSet TwoRealisticCameras(bool same_K) {
TwoViewDataSet d;
+ // clang-format off
d.K1 << 320, 0, 160,
0, 320, 120,
0, 0, 1;
+ // clang-format on
if (same_K) {
d.K2 = d.K1;
} else {
+ // clang-format off
d.K2 << 360, 0, 170,
0, 360, 110,
0, 0, 1;
+ // clang-format on
}
d.R1 = RotationAroundZ(-0.1);
d.R2 = RotationAroundX(-0.1);
@@ -59,10 +63,8 @@ TwoViewDataSet TwoRealisticCameras(bool same_K) {
return d;
}
-nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
- int cx, int cy,
- double distance,
- double jitter_amount) {
+nViewDatasetConfigator::nViewDatasetConfigator(
+ int fx, int fy, int cx, int cy, double distance, double jitter_amount) {
_fx = fx;
_fy = fy;
_cx = cx;
@@ -71,7 +73,8 @@ nViewDatasetConfigator::nViewDatasetConfigator(int fx , int fy,
_jitter_amount = jitter_amount;
}
-NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
+NViewDataSet NRealisticCamerasFull(int nviews,
+ int npoints,
const nViewDatasetConfigator config) {
NViewDataSet d;
d.n = nviews;
@@ -102,9 +105,11 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
+ // clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
+ // clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
d.x[i] = Project(d.P(i), d.X);
@@ -113,9 +118,10 @@ NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
return d;
}
-
-NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
- float view_ratio, unsigned min_projections,
+NViewDataSet NRealisticCamerasSparse(int nviews,
+ int npoints,
+ float view_ratio,
+ unsigned min_projections,
const nViewDatasetConfigator config) {
assert(view_ratio <= 1.0);
assert(view_ratio > 0.0);
@@ -137,7 +143,7 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
visibility.setZero();
Mat randoms(nviews, npoints);
randoms.setRandom();
- randoms = (randoms.array() + 1)/2.0;
+ randoms = (randoms.array() + 1) / 2.0;
unsigned num_visibles = 0;
for (size_t i = 0; i < nviews; ++i) {
num_visibles = 0;
@@ -174,15 +180,17 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
jitter *= config._jitter_amount / camera_center.norm();
lookdir = -camera_center + jitter;
+ // clang-format off
d.K[i] << config._fx, 0, config._cx,
0, config._fy, config._cy,
0, 0, 1;
+ // clang-format on
d.R[i] = LookAt(lookdir);
d.t[i] = -d.R[i] * camera_center;
j_visible = 0;
for (size_t j = 0; j < npoints; j++) {
if (visibility(i, j)) {
- X = d.X.col(j);
+ X = d.X.col(j);
d.x[i].col(j_visible) = Project(d.P(i), X);
d.x_ids[i][j_visible] = j;
j_visible++;
@@ -192,5 +200,4 @@ NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
return d;
}
-
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/test_data_sets.h b/intern/libmv/libmv/multiview/test_data_sets.h
index cf01663ca02..0c8785728bd 100644
--- a/intern/libmv/libmv/multiview/test_data_sets.h
+++ b/intern/libmv/libmv/multiview/test_data_sets.h
@@ -34,8 +34,8 @@ struct TwoViewDataSet {
Vec3 t1, t2; // Translation.
Mat34 P1, P2; // Projection matrix, P = K(R|t)
Mat3 F; // Fundamental matrix.
- Mat3X X; // 3D points.
- Mat2X x1, x2; // Projected points.
+ Mat3X X; // 3D points.
+ Mat2X x1, x2; // Projected points.
};
// Two cameras at (-1,-1,-10) and (2,1,-10) looking approximately towards z+.
@@ -45,13 +45,13 @@ TwoViewDataSet TwoRealisticCameras(bool same_K = false);
// and the other reconstruction data types is that all points are seen by all
// cameras.
struct NViewDataSet {
- vector<Mat3> K; // Internal parameters (fx, fy, etc).
- vector<Mat3> R; // Rotation.
- vector<Vec3> t; // Translation.
- vector<Vec3> C; // Camera centers.
- Mat3X X; // 3D points.
- vector<Mat2X> x; // Projected points; may have noise added.
- vector<Vecu> x_ids; // Indexes of points corresponding to the projections
+ vector<Mat3> K; // Internal parameters (fx, fy, etc).
+ vector<Mat3> R; // Rotation.
+ vector<Vec3> t; // Translation.
+ vector<Vec3> C; // Camera centers.
+ Mat3X X; // 3D points.
+ vector<Mat2X> x; // Projected points; may have noise added.
+ vector<Vecu> x_ids; // Indexes of points corresponding to the projections
int n; // Actual number of cameras.
@@ -83,22 +83,26 @@ struct nViewDatasetConfigator {
double _dist;
double _jitter_amount;
- nViewDatasetConfigator(int fx = 1000, int fy = 1000,
- int cx = 500, int cy = 500,
+ nViewDatasetConfigator(int fx = 1000,
+ int fy = 1000,
+ int cx = 500,
+ int cy = 500,
double distance = 1.5,
double jitter_amount = 0.01);
};
-NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
- const nViewDatasetConfigator
- config = nViewDatasetConfigator());
+NViewDataSet NRealisticCamerasFull(
+ int nviews,
+ int npoints,
+ const nViewDatasetConfigator config = nViewDatasetConfigator());
// Generates sparse projections (not all points are projected)
-NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
- float view_ratio = 0.6,
- unsigned min_projections = 3,
- const nViewDatasetConfigator
- config = nViewDatasetConfigator());
+NViewDataSet NRealisticCamerasSparse(
+ int nviews,
+ int npoints,
+ float view_ratio = 0.6,
+ unsigned min_projections = 3,
+ const nViewDatasetConfigator config = nViewDatasetConfigator());
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/triangulation.cc b/intern/libmv/libmv/multiview/triangulation.cc
index 4d146c8f21b..568625de19d 100644
--- a/intern/libmv/libmv/multiview/triangulation.cc
+++ b/intern/libmv/libmv/multiview/triangulation.cc
@@ -20,15 +20,17 @@
#include "libmv/multiview/triangulation.h"
-#include "libmv/numeric/numeric.h"
#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
// HZ 12.2, p. 312.
-void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
- const Mat34 &P2, const Vec2 &x2,
- Vec4 *X_homogeneous) {
+void TriangulateDLT(const Mat34& P1,
+ const Vec2& x1,
+ const Mat34& P2,
+ const Vec2& x2,
+ Vec4* X_homogeneous) {
Mat4 design;
for (int i = 0; i < 4; ++i) {
design(0, i) = x1(0) * P1(2, i) - P1(0, i);
@@ -39,9 +41,11 @@ void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
Nullspace(&design, X_homogeneous);
}
-void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
- const Mat34 &P2, const Vec2 &x2,
- Vec3 *X_euclidean) {
+void TriangulateDLT(const Mat34& P1,
+ const Vec2& x1,
+ const Mat34& P2,
+ const Vec2& x2,
+ Vec3* X_euclidean) {
Vec4 X_homogeneous;
TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
HomogeneousToEuclidean(X_homogeneous, X_euclidean);
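Usage of the two-view DLT mirrors the tests further down: triangulate, then check the reprojection residual. A short sketch against the signatures declared in this file; TriangulateAndCheck is an illustrative helper, not part of the patch, and it simply reports the residual rather than asserting on it.

#include <algorithm>

#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"

namespace libmv {

// Triangulate one correspondence and report the worse of the two
// reprojection errors, which should be near zero for clean, consistent data.
double TriangulateAndCheck(const Mat34& P1, const Vec2& x1,
                           const Mat34& P2, const Vec2& x2,
                           Vec3* X) {
  TriangulateDLT(P1, x1, P2, x2, X);
  const double e1 = (Project(P1, *X) - x1).norm();
  const double e2 = (Project(P2, *X) - x2).norm();
  return std::max(e1, e2);
}

}  // namespace libmv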
diff --git a/intern/libmv/libmv/multiview/triangulation.h b/intern/libmv/libmv/multiview/triangulation.h
index be878890242..c538433eb8b 100644
--- a/intern/libmv/libmv/multiview/triangulation.h
+++ b/intern/libmv/libmv/multiview/triangulation.h
@@ -25,13 +25,17 @@
namespace libmv {
-void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
- const Mat34 &P2, const Vec2 &x2,
- Vec4 *X_homogeneous);
+void TriangulateDLT(const Mat34& P1,
+ const Vec2& x1,
+ const Mat34& P2,
+ const Vec2& x2,
+ Vec4* X_homogeneous);
-void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
- const Mat34 &P2, const Vec2 &x2,
- Vec3 *X_euclidean);
+void TriangulateDLT(const Mat34& P1,
+ const Vec2& x1,
+ const Mat34& P2,
+ const Vec2& x2,
+ Vec3* X_euclidean);
} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/triangulation_test.cc b/intern/libmv/libmv/multiview/triangulation_test.cc
index 66d1ee25a62..54d20f4ee17 100644
--- a/intern/libmv/libmv/multiview/triangulation_test.cc
+++ b/intern/libmv/libmv/multiview/triangulation_test.cc
@@ -20,10 +20,10 @@
#include <iostream>
-#include "libmv/multiview/triangulation.h"
#include "libmv/multiview/fundamental.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/test_data_sets.h"
+#include "libmv/multiview/triangulation.h"
#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
diff --git a/intern/libmv/libmv/multiview/two_view_kernel.h b/intern/libmv/libmv/multiview/two_view_kernel.h
index 7af0ed5ddab..4df99183ee0 100644
--- a/intern/libmv/libmv/multiview/two_view_kernel.h
+++ b/intern/libmv/libmv/multiview/two_view_kernel.h
@@ -30,10 +30,10 @@ namespace libmv {
namespace two_view {
namespace kernel {
-template<typename Solver, typename Unnormalizer>
+template <typename Solver, typename Unnormalizer>
struct NormalizedSolver {
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
- static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
+ static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* models) {
assert(2 == x1.rows());
assert(MINIMUM_SAMPLES <= x1.cols());
assert(x1.rows() == x2.rows());
@@ -53,10 +53,10 @@ struct NormalizedSolver {
}
};
-template<typename Solver, typename Unnormalizer>
+template <typename Solver, typename Unnormalizer>
struct IsotropicNormalizedSolver {
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
- static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
+ static void Solve(const Mat& x1, const Mat& x2, vector<Mat3>* models) {
assert(2 == x1.rows());
assert(MINIMUM_SAMPLES <= x1.cols());
assert(x1.rows() == x2.rows());
@@ -99,35 +99,32 @@ struct IsotropicNormalizedSolver {
//
// The fit routine must not clear existing entries in the vector of models; it
// should append new solutions to the end.
-template<typename SolverArg,
- typename ErrorArg,
- typename ModelArg = Mat3>
+template <typename SolverArg, typename ErrorArg, typename ModelArg = Mat3>
class Kernel {
public:
- Kernel(const Mat &x1, const Mat &x2) : x1_(x1), x2_(x2) {}
+ Kernel(const Mat& x1, const Mat& x2) : x1_(x1), x2_(x2) {}
typedef SolverArg Solver;
- typedef ModelArg Model;
+ typedef ModelArg Model;
enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
- void Fit(const vector<int> &samples, vector<Model> *models) const {
+ void Fit(const vector<int>& samples, vector<Model>* models) const {
Mat x1 = ExtractColumns(x1_, samples);
Mat x2 = ExtractColumns(x2_, samples);
Solver::Solve(x1, x2, models);
}
- double Error(int sample, const Model &model) const {
+ double Error(int sample, const Model& model) const {
return ErrorArg::Error(model,
static_cast<Vec>(x1_.col(sample)),
static_cast<Vec>(x2_.col(sample)));
}
- int NumSamples() const {
- return x1_.cols();
- }
- static void Solve(const Mat &x1, const Mat &x2, vector<Model> *models) {
+ int NumSamples() const { return x1_.cols(); }
+ static void Solve(const Mat& x1, const Mat& x2, vector<Model>* models) {
// By offering this, Kernel types can be passed to templates.
Solver::Solve(x1, x2, models);
}
+
protected:
- const Mat &x1_;
- const Mat &x2_;
+ const Mat& x1_;
+ const Mat& x2_;
};
} // namespace kernel
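The Kernel class above is the contract libmv's robust estimators program against: Fit() on a minimal sample appends candidate models, Error() scores one sample against one model, and NumSamples() bounds the scoring loop. The toy consumer below exercises exactly that interface; it is not libmv's actual estimator (there is no random sampling or inlier threshold here), and it assumes libmv::vector exposes the usual size()/operator[] accessors.

#include "libmv/base/vector.h"

// Score every model produced from one minimal sample over all samples and
// return the model with the smallest accumulated error. Assumes Fit()
// produced at least one model.
template <typename Kernel>
typename Kernel::Model BestModelFromSample(const Kernel& kernel,
                                           const libmv::vector<int>& sample) {
  libmv::vector<typename Kernel::Model> models;
  kernel.Fit(sample, &models);
  int best = 0;
  double best_cost = -1.0;
  for (int m = 0; m < models.size(); ++m) {
    double cost = 0.0;
    for (int i = 0; i < kernel.NumSamples(); ++i) {
      cost += kernel.Error(i, models[m]);
    }
    if (best_cost < 0.0 || cost < best_cost) {
      best_cost = cost;
      best = m;
    }
  }
  return models[best];
}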
diff --git a/intern/libmv/libmv/numeric/dogleg.h b/intern/libmv/libmv/numeric/dogleg.h
index bf6f996ddaf..62abfbdcd4b 100644
--- a/intern/libmv/libmv/numeric/dogleg.h
+++ b/intern/libmv/libmv/numeric/dogleg.h
@@ -32,18 +32,18 @@
#include <cmath>
#include <cstdio>
-#include "libmv/numeric/numeric.h"
-#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
-template<typename Function,
- typename Jacobian = NumericJacobian<Function>,
- typename Solver = Eigen::PartialPivLU<
- Matrix<typename Function::FMatrixType::RealScalar,
- Function::XMatrixType::RowsAtCompileTime,
- Function::XMatrixType::RowsAtCompileTime> > >
+template <typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime>>>
class Dogleg {
public:
typedef typename Function::XMatrixType::RealScalar Scalar;
@@ -51,10 +51,12 @@ class Dogleg {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
- Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ Function::XMatrixType::RowsAtCompileTime>
+ JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
- JMatrixType::ColsAtCompileTime> AMatrixType;
+ JMatrixType::ColsAtCompileTime>
+ AMatrixType;
enum Status {
RUNNING,
@@ -71,34 +73,38 @@ class Dogleg {
STEEPEST_DESCENT,
};
- Dogleg(const Function &f)
- : f_(f), df_(f) {}
+ Dogleg(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
- : gradient_threshold(1e-16),
- relative_step_threshold(1e-16),
- error_threshold(1e-16),
- initial_trust_radius(1e0),
- max_iterations(500) {}
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_trust_radius(1e0),
+ max_iterations(500) {}
Scalar gradient_threshold; // eps > max(J'*f(x))
Scalar relative_step_threshold; // eps > ||dx|| / ||x||
Scalar error_threshold; // eps > ||f(x)||
Scalar initial_trust_radius; // Initial u for solving normal equations.
- int max_iterations; // Maximum number of solver iterations.
+ int max_iterations; // Maximum number of solver iterations.
};
struct Results {
Scalar error_magnitude; // ||f(x)||
Scalar gradient_magnitude; // ||J'f(x)||
- int iterations;
+ int iterations;
Status status;
};
- Status Update(const Parameters &x, const SolverParameters &params,
- JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ Status Update(const Parameters& x,
+ const SolverParameters& params,
+ JMatrixType* J,
+ AMatrixType* A,
+ FVec* error,
+ Parameters* g) {
*J = df_(x);
- // TODO(keir): In the case of m = n, avoid computing A and just do J^-1 directly.
+ // TODO(keir): In the case of m = n, avoid computing A and just do J^-1
+ // directly.
*A = (*J).transpose() * (*J);
*error = f_(x);
*g = (*J).transpose() * *error;
@@ -110,12 +116,12 @@ class Dogleg {
return RUNNING;
}
- Step SolveDoglegDirection(const Parameters &dx_sd,
- const Parameters &dx_gn,
+ Step SolveDoglegDirection(const Parameters& dx_sd,
+ const Parameters& dx_gn,
Scalar radius,
Scalar alpha,
- Parameters *dx_dl,
- Scalar *beta) {
+ Parameters* dx_dl,
+ Scalar* beta) {
Parameters a, b_minus_a;
// Solve for Dogleg step dx_dl.
if (dx_gn.norm() < radius) {
@@ -128,30 +134,29 @@ class Dogleg {
} else {
Parameters a = alpha * dx_sd;
- const Parameters &b = dx_gn;
+ const Parameters& b = dx_gn;
b_minus_a = a - b;
Scalar Mbma2 = b_minus_a.squaredNorm();
Scalar Ma2 = a.squaredNorm();
Scalar c = a.dot(b_minus_a);
- Scalar radius2 = radius*radius;
+ Scalar radius2 = radius * radius;
if (c <= 0) {
- *beta = (-c + sqrt(c*c + Mbma2*(radius2 - Ma2)))/(Mbma2);
+ *beta = (-c + sqrt(c * c + Mbma2 * (radius2 - Ma2))) / (Mbma2);
} else {
- *beta = (radius2 - Ma2) /
- (c + sqrt(c*c + Mbma2*(radius2 - Ma2)));
+ *beta = (radius2 - Ma2) / (c + sqrt(c * c + Mbma2 * (radius2 - Ma2)));
}
- *dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha*dx_sd);
+ *dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha * dx_sd);
return DOGLEG;
}
}
- Results minimize(Parameters *x_and_min) {
+ Results minimize(Parameters* x_and_min) {
SolverParameters params;
return minimize(params, x_and_min);
}
- Results minimize(const SolverParameters &params, Parameters *x_and_min) {
- Parameters &x = *x_and_min;
+ Results minimize(const SolverParameters& params, Parameters* x_and_min) {
+ Parameters& x = *x_and_min;
JMatrixType J;
AMatrixType A;
FVec error;
@@ -167,18 +172,21 @@ class Dogleg {
Parameters dx_sd; // Steepest descent step.
Parameters dx_dl; // Dogleg step.
Parameters dx_gn; // Gauss-Newton step.
- printf("iteration ||f(x)|| max(g) radius\n");
+ printf("iteration ||f(x)|| max(g) radius\n");
int i = 0;
for (; results.status == RUNNING && i < params.max_iterations; ++i) {
printf("%9d %12g %12g %12g",
- i, f_(x).norm(), g.array().abs().maxCoeff(), radius);
+ i,
+ f_(x).norm(),
+ g.array().abs().maxCoeff(),
+ radius);
- //LG << "iteration: " << i;
- //LG << "||f(x)||: " << f_(x).norm();
- //LG << "max(g): " << g.cwise().abs().maxCoeff();
- //LG << "radius: " << radius;
+ // LG << "iteration: " << i;
+ // LG << "||f(x)||: " << f_(x).norm();
+ // LG << "max(g): " << g.cwise().abs().maxCoeff();
+ // LG << "radius: " << radius;
// Eqn 3.19 from [1]
- Scalar alpha = g.squaredNorm() / (J*g).squaredNorm();
+ Scalar alpha = g.squaredNorm() / (J * g).squaredNorm();
// Solve for steepest descent direction dx_sd.
dx_sd = -g;
@@ -199,11 +207,11 @@ class Dogleg {
// Solve for dogleg direction dx_dl.
Scalar beta = 0;
- Step step = SolveDoglegDirection(dx_sd, dx_gn, radius, alpha,
- &dx_dl, &beta);
+ Step step =
+ SolveDoglegDirection(dx_sd, dx_gn, radius, alpha, &dx_dl, &beta);
Scalar e3 = params.relative_step_threshold;
- if (dx_dl.norm() < e3*(x.norm() + e3)) {
+ if (dx_dl.norm() < e3 * (x.norm() + e3)) {
results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
break;
}
@@ -214,16 +222,19 @@ class Dogleg {
if (step == GAUSS_NEWTON) {
predicted = f_(x).squaredNorm();
} else if (step == STEEPEST_DESCENT) {
- predicted = radius * (2*alpha*g.norm() - radius) / 2 / alpha;
+ predicted = radius * (2 * alpha * g.norm() - radius) / 2 / alpha;
} else if (step == DOGLEG) {
- predicted = 0.5 * alpha * (1-beta)*(1-beta)*g.squaredNorm() +
- beta*(2-beta)*f_(x).squaredNorm();
+ predicted = 0.5 * alpha * (1 - beta) * (1 - beta) * g.squaredNorm() +
+ beta * (2 - beta) * f_(x).squaredNorm();
}
Scalar rho = actual / predicted;
- if (step == GAUSS_NEWTON) printf(" GAUSS");
- if (step == STEEPEST_DESCENT) printf(" STEE");
- if (step == DOGLEG) printf(" DOGL");
+ if (step == GAUSS_NEWTON)
+ printf(" GAUSS");
+ if (step == STEEPEST_DESCENT)
+ printf(" STEE");
+ if (step == DOGLEG)
+ printf(" DOGL");
printf(" %12g %12g %12g\n", rho, actual, predicted);
@@ -234,7 +245,7 @@ class Dogleg {
x_updated = true;
}
if (rho > 0.75) {
- radius = std::max(radius, 3*dx_dl.norm());
+ radius = std::max(radius, 3 * dx_dl.norm());
} else if (rho < 0.25) {
radius /= 2;
if (radius < e3 * (x.norm() + e3)) {
@@ -252,10 +263,10 @@ class Dogleg {
}
private:
- const Function &f_;
+ const Function& f_;
Jacobian df_;
};
-} // namespace mv
+} // namespace libmv
#endif // LIBMV_NUMERIC_DOGLEG_H
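For readers cross-checking the reformatted solver against reference [1] cited in its comments, the quantities involved are, in the notation of the code (g = J'f(x), Delta the trust radius):

\[
\alpha = \frac{\lVert g \rVert^2}{\lVert J g \rVert^2},
\qquad
\Delta x_{\mathrm{dl}} = \alpha\,\Delta x_{\mathrm{sd}}
  + \beta\left(\Delta x_{\mathrm{gn}} - \alpha\,\Delta x_{\mathrm{sd}}\right),
\quad \beta \in [0, 1] \ \text{such that}\ \lVert \Delta x_{\mathrm{dl}} \rVert = \Delta,
\]
\[
\text{predicted reduction} =
\begin{cases}
\lVert f(x) \rVert^2 & \text{Gauss--Newton step,}\\[2pt]
\Delta \bigl(2\alpha \lVert g \rVert - \Delta\bigr) / (2\alpha) & \text{steepest-descent step,}\\[2pt]
\tfrac{1}{2}\,\alpha (1-\beta)^2 \lVert g \rVert^2 + \beta (2-\beta) \lVert f(x) \rVert^2 & \text{dogleg step,}
\end{cases}
\]

and the gain ratio rho = actual / predicted drives the radius update: grow to max(radius, 3 * ||dx_dl||) when rho > 0.75, halve when rho < 0.25, exactly as in the branches above.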
diff --git a/intern/libmv/libmv/numeric/dogleg_test.cc b/intern/libmv/libmv/numeric/dogleg_test.cc
index 90c46c31672..a5ab1957632 100644
--- a/intern/libmv/libmv/numeric/dogleg_test.cc
+++ b/intern/libmv/libmv/numeric/dogleg_test.cc
@@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "testing/testing.h"
#include "libmv/numeric/dogleg.h"
+#include "testing/testing.h"
using namespace libmv;
@@ -29,26 +29,26 @@ class F {
public:
typedef Vec4 FMatrixType;
typedef Vec3 XMatrixType;
- Vec4 operator()(const Vec3 &x) const {
+ Vec4 operator()(const Vec3& x) const {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
- Vec4 fx; fx << x1*x1 + z1*z1,
- y1*y1 + z1*z1,
- z1*z1,
- x1*x1;
+ Vec4 fx;
+ fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};
TEST(Dogleg, SimpleCase) {
- Vec3 x; x << 0.76026643, -30.01799744, 0.55192142;
+ Vec3 x;
+ x << 0.76026643, -30.01799744, 0.55192142;
F f;
Dogleg<F>::SolverParameters params;
Dogleg<F> lm(f);
/* TODO(sergey): Better error handling. */
/* Dogleg<F>::Results results = */ lm.minimize(params, &x);
- Vec3 expected_min_x; expected_min_x << 2, 5, 0;
+ Vec3 expected_min_x;
+ expected_min_x << 2, 5, 0;
EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
}
@@ -59,20 +59,21 @@ class F32 {
public:
typedef Vec2 FMatrixType;
typedef Vec2 XMatrixType;
- Vec2 operator()(const Vec2 &x) const {
+ Vec2 operator()(const Vec2& x) const {
double x1 = x(0);
- double x2 = 10*x(0)/(x(0) + 0.1) + 2*x(1)*x(1);
- Vec2 fx; fx << x1, x2;
+ double x2 = 10 * x(0) / (x(0) + 0.1) + 2 * x(1) * x(1);
+ Vec2 fx;
+ fx << x1, x2;
return fx;
}
};
class JF32 {
public:
- JF32(const F32 &f) { (void) f; }
- Mat2 operator()(const Vec2 &x) {
- Mat2 J; J << 1, 0,
- 1./pow(x(0) + 0.1, 2), 4*x(1)*x(1);
+ JF32(const F32& f) { (void)f; }
+ Mat2 operator()(const Vec2& x) {
+ Mat2 J;
+ J << 1, 0, 1. / pow(x(0) + 0.1, 2), 4 * x(1) * x(1);
return J;
}
};
diff --git a/intern/libmv/libmv/numeric/function_derivative.h b/intern/libmv/libmv/numeric/function_derivative.h
index 9820885f04e..30d7c4d34e9 100644
--- a/intern/libmv/libmv/numeric/function_derivative.h
+++ b/intern/libmv/libmv/numeric/function_derivative.h
@@ -23,8 +23,8 @@
#include <cmath>
-#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
@@ -36,7 +36,7 @@ enum NumericJacobianMode {
FORWARD,
};
-template<typename Function, NumericJacobianMode mode = CENTRAL>
+template <typename Function, NumericJacobianMode mode = CENTRAL>
class NumericJacobian {
public:
typedef typename Function::XMatrixType Parameters;
@@ -45,12 +45,12 @@ class NumericJacobian {
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
Function::XMatrixType::RowsAtCompileTime>
- JMatrixType;
+ JMatrixType;
- NumericJacobian(const Function &f) : f_(f) {}
+ NumericJacobian(const Function& f) : f_(f) {}
// TODO(keir): Perhaps passing the jacobian back by value is not a good idea.
- JMatrixType operator()(const Parameters &x) {
+ JMatrixType operator()(const Parameters& x) {
// Empirically determined constant.
Parameters eps = x.array().abs() * XScalar(1e-5);
// To handle cases where a parameter is exactly zero, instead use the mean
@@ -87,12 +87,13 @@ class NumericJacobian {
}
return jacobian;
}
+
private:
- const Function &f_;
+ const Function& f_;
};
-template<typename Function, typename Jacobian>
-bool CheckJacobian(const Function &f, const typename Function::XMatrixType &x) {
+template <typename Function, typename Jacobian>
+bool CheckJacobian(const Function& f, const typename Function::XMatrixType& x) {
Jacobian j_analytic(f);
NumericJacobian<Function> j_numeric(f);
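NumericJacobian builds the Jacobian column by column with finite differences, central by default, and scales the step per parameter (falling back to the mean step when a parameter is exactly zero, as the comment above notes). Below is a stripped-down dynamic-size sketch of the central scheme in plain Eigen, without the per-parameter epsilon logic; NumericJacobianCentral is an illustrative name.

#include <functional>

#include <Eigen/Dense>

// Central differences: column j of J is (f(x + h e_j) - f(x - h e_j)) / (2h).
Eigen::MatrixXd NumericJacobianCentral(
    const std::function<Eigen::VectorXd(const Eigen::VectorXd&)>& f,
    const Eigen::VectorXd& x,
    double h = 1e-5) {
  const Eigen::VectorXd f0 = f(x);  // Only used to size the Jacobian.
  Eigen::MatrixXd J(f0.size(), x.size());
  for (Eigen::Index j = 0; j < x.size(); ++j) {
    Eigen::VectorXd x_plus = x;
    Eigen::VectorXd x_minus = x;
    x_plus(j) += h;
    x_minus(j) -= h;
    J.col(j) = (f(x_plus) - f(x_minus)) / (2.0 * h);
  }
  return J;
}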
diff --git a/intern/libmv/libmv/numeric/function_derivative_test.cc b/intern/libmv/libmv/numeric/function_derivative_test.cc
index 8d976d3e9a0..defeedaa8a4 100644
--- a/intern/libmv/libmv/numeric/function_derivative_test.cc
+++ b/intern/libmv/libmv/numeric/function_derivative_test.cc
@@ -18,9 +18,9 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "testing/testing.h"
-#include "libmv/numeric/numeric.h"
#include "libmv/numeric/function_derivative.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
using namespace libmv;
@@ -30,22 +30,22 @@ class F {
public:
typedef Vec2 FMatrixType;
typedef Vec3 XMatrixType;
- Vec2 operator()(const Vec3 &x) const {
+ Vec2 operator()(const Vec3& x) const {
Vec2 fx;
- fx << 0.19*x(0) + 0.19*x(1)*x(1) + x(2),
- 3*sin(x(0)) + 2*cos(x(1));
+ fx << 0.19 * x(0) + 0.19 * x(1) * x(1) + x(2),
+ 3 * sin(x(0)) + 2 * cos(x(1));
return fx;
}
- Mat23 J(const Vec3 &x) const {
+ Mat23 J(const Vec3& x) const {
Mat23 jacobian;
- jacobian << 0.19, 2*0.19*x(1), 1.0,
- 3*cos(x(0)), -2*sin(x(1)), 0;
+ jacobian << 0.19, 2 * 0.19 * x(1), 1.0, 3 * cos(x(0)), -2 * sin(x(1)), 0;
return jacobian;
}
};
TEST(FunctionDerivative, SimpleCase) {
- Vec3 x; x << 0.76026643, 0.01799744, 0.55192142;
+ Vec3 x;
+ x << 0.76026643, 0.01799744, 0.55192142;
F f;
NumericJacobian<F, CENTRAL> J(f);
EXPECT_MATRIX_NEAR(f.J(x), J(x), 1e-8);
diff --git a/intern/libmv/libmv/numeric/levenberg_marquardt.h b/intern/libmv/libmv/numeric/levenberg_marquardt.h
index 2af9a62cf7b..30c04a5ad5c 100644
--- a/intern/libmv/libmv/numeric/levenberg_marquardt.h
+++ b/intern/libmv/libmv/numeric/levenberg_marquardt.h
@@ -31,18 +31,18 @@
#include <cmath>
-#include "libmv/numeric/numeric.h"
-#include "libmv/numeric/function_derivative.h"
#include "libmv/logging/logging.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
-template<typename Function,
- typename Jacobian = NumericJacobian<Function>,
- typename Solver = Eigen::PartialPivLU<
- Matrix<typename Function::FMatrixType::RealScalar,
- Function::XMatrixType::RowsAtCompileTime,
- Function::XMatrixType::RowsAtCompileTime> > >
+template <typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime>>>
class LevenbergMarquardt {
public:
typedef typename Function::XMatrixType::RealScalar Scalar;
@@ -50,10 +50,12 @@ class LevenbergMarquardt {
typedef typename Function::XMatrixType Parameters;
typedef Matrix<typename Function::FMatrixType::RealScalar,
Function::FMatrixType::RowsAtCompileTime,
- Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ Function::XMatrixType::RowsAtCompileTime>
+ JMatrixType;
typedef Matrix<typename JMatrixType::RealScalar,
JMatrixType::ColsAtCompileTime,
- JMatrixType::ColsAtCompileTime> AMatrixType;
+ JMatrixType::ColsAtCompileTime>
+ AMatrixType;
// TODO(keir): Some of these knobs can be derived from each other and
// removed, instead of requiring the user to set them.
@@ -65,32 +67,35 @@ class LevenbergMarquardt {
HIT_MAX_ITERATIONS,
};
- LevenbergMarquardt(const Function &f)
- : f_(f), df_(f) {}
+ LevenbergMarquardt(const Function& f) : f_(f), df_(f) {}
struct SolverParameters {
SolverParameters()
- : gradient_threshold(1e-16),
- relative_step_threshold(1e-16),
- error_threshold(1e-16),
- initial_scale_factor(1e-3),
- max_iterations(100) {}
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_scale_factor(1e-3),
+ max_iterations(100) {}
Scalar gradient_threshold; // eps > max(J'*f(x))
Scalar relative_step_threshold; // eps > ||dx|| / ||x||
Scalar error_threshold; // eps > ||f(x)||
Scalar initial_scale_factor; // Initial u for solving normal equations.
- int max_iterations; // Maximum number of solver iterations.
+ int max_iterations; // Maximum number of solver iterations.
};
struct Results {
Scalar error_magnitude; // ||f(x)||
Scalar gradient_magnitude; // ||J'f(x)||
- int iterations;
+ int iterations;
Status status;
};
- Status Update(const Parameters &x, const SolverParameters &params,
- JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ Status Update(const Parameters& x,
+ const SolverParameters& params,
+ JMatrixType* J,
+ AMatrixType* A,
+ FVec* error,
+ Parameters* g) {
*J = df_(x);
*A = (*J).transpose() * (*J);
*error = -f_(x);
@@ -103,13 +108,13 @@ class LevenbergMarquardt {
return RUNNING;
}
- Results minimize(Parameters *x_and_min) {
+ Results minimize(Parameters* x_and_min) {
SolverParameters params;
minimize(params, x_and_min);
}
- Results minimize(const SolverParameters &params, Parameters *x_and_min) {
- Parameters &x = *x_and_min;
+ Results minimize(const SolverParameters& params, Parameters* x_and_min) {
+ Parameters& x = *x_and_min;
JMatrixType J;
AMatrixType A;
FVec error;
@@ -118,7 +123,7 @@ class LevenbergMarquardt {
Results results;
results.status = Update(x, params, &J, &A, &error, &g);
- Scalar u = Scalar(params.initial_scale_factor*A.diagonal().maxCoeff());
+ Scalar u = Scalar(params.initial_scale_factor * A.diagonal().maxCoeff());
Scalar v = 2;
Parameters dx, x_new;
@@ -130,7 +135,8 @@ class LevenbergMarquardt {
VLOG(3) << "u: " << u;
VLOG(3) << "v: " << v;
- AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
+ AMatrixType A_augmented =
+ A + u * AMatrixType::Identity(J.cols(), J.cols());
Solver solver(A_augmented);
dx = solver.solve(g);
bool solved = (A_augmented * dx).isApprox(g);
@@ -146,14 +152,14 @@ class LevenbergMarquardt {
// Rho is the ratio of the actual reduction in error to the reduction
// in error that would be obtained if the problem was linear.
// See [1] for details.
- Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
- / dx.dot(u*dx + g));
+ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm()) /
+ dx.dot(u * dx + g));
if (rho > 0) {
// Accept the Gauss-Newton step because the linear model fits well.
x = x_new;
results.status = Update(x, params, &J, &A, &error, &g);
- Scalar tmp = Scalar(2*rho-1);
- u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+ Scalar tmp = Scalar(2 * rho - 1);
+ u = u * std::max(1 / 3., 1 - (tmp * tmp * tmp));
v = 2;
continue;
}
@@ -174,10 +180,10 @@ class LevenbergMarquardt {
}
private:
- const Function &f_;
+ const Function& f_;
Jacobian df_;
};
-} // namespace mv
+} // namespace libmv
#endif // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
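In the notation of the reformatted code above (A = J'J, error = -f(x), g = J'error), each accepted Levenberg-Marquardt iteration solves the damped normal equations and rescales the damping by the gain ratio:

\[
\bigl(J^\top J + \mu I\bigr)\,\Delta x = -J^\top f(x),
\qquad
\rho = \frac{\lVert f(x) \rVert^2 - \lVert f(x + \Delta x) \rVert^2}
            {\Delta x^\top \bigl(\mu\,\Delta x + g\bigr)},
\qquad
\mu \leftarrow \mu \cdot \max\!\left(\tfrac{1}{3},\ 1 - (2\rho - 1)^3\right),\ \nu \leftarrow 2,
\]

with the initial damping mu = initial_scale_factor * max(diag(J'J)) as set above. The rejection branch falls outside this hunk; in the standard Nielsen scheme it multiplies mu by nu and doubles nu.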
diff --git a/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc b/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc
index fc3f9ebbb29..805aac275b4 100644
--- a/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc
+++ b/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc
@@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "testing/testing.h"
#include "libmv/numeric/levenberg_marquardt.h"
+#include "testing/testing.h"
using namespace libmv;
@@ -29,14 +29,12 @@ class F {
public:
typedef Vec4 FMatrixType;
typedef Vec3 XMatrixType;
- Vec4 operator()(const Vec3 &x) const {
+ Vec4 operator()(const Vec3& x) const {
double x1 = x.x() - 2;
double y1 = x.y() - 5;
double z1 = x.z();
- Vec4 fx; fx << x1*x1 + z1*z1,
- y1*y1 + z1*z1,
- z1*z1,
- x1*x1;
+ Vec4 fx;
+ fx << x1 * x1 + z1 * z1, y1 * y1 + z1 * z1, z1 * z1, x1 * x1;
return fx;
}
};
diff --git a/intern/libmv/libmv/numeric/numeric.cc b/intern/libmv/libmv/numeric/numeric.cc
index 3fc1e3b2bfd..b7660950c2a 100644
--- a/intern/libmv/libmv/numeric/numeric.cc
+++ b/intern/libmv/libmv/numeric/numeric.cc
@@ -18,7 +18,6 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-
#include "libmv/numeric/numeric.h"
namespace libmv {
@@ -27,9 +26,11 @@ Mat3 RotationAroundX(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
+ // clang-format off
R << 1, 0, 0,
0, c, -s,
0, s, c;
+ // clang-format on
return R;
}
@@ -37,9 +38,11 @@ Mat3 RotationAroundY(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
+ // clang-format off
R << c, 0, s,
0, 1, 0,
-s, 0, c;
+ // clang-format on
return R;
}
@@ -47,14 +50,15 @@ Mat3 RotationAroundZ(double angle) {
double c, s;
sincos(angle, &s, &c);
Mat3 R;
+ // clang-format off
R << c, -s, 0,
s, c, 0,
0, 0, 1;
+ // clang-format on
return R;
}
-
-Mat3 RotationRodrigues(const Vec3 &axis) {
+Mat3 RotationRodrigues(const Vec3& axis) {
double theta = axis.norm();
Vec3 w = axis / theta;
Mat3 W = CrossProductMatrix(w);
@@ -62,7 +66,6 @@ Mat3 RotationRodrigues(const Vec3 &axis) {
return Mat3::Identity() + sin(theta) * W + (1 - cos(theta)) * W * W;
}
-
Mat3 LookAt(Vec3 center) {
Vec3 zc = center.normalized();
Vec3 xc = Vec3::UnitY().cross(zc).normalized();
@@ -74,19 +77,21 @@ Mat3 LookAt(Vec3 center) {
return R;
}
-Mat3 CrossProductMatrix(const Vec3 &x) {
+Mat3 CrossProductMatrix(const Vec3& x) {
Mat3 X;
+ // clang-format off
X << 0, -x(2), x(1),
x(2), 0, -x(0),
-x(1), x(0), 0;
+ // clang-format on
return X;
}
-void MeanAndVarianceAlongRows(const Mat &A,
- Vec *mean_pointer,
- Vec *variance_pointer) {
- Vec &mean = *mean_pointer;
- Vec &variance = *variance_pointer;
+void MeanAndVarianceAlongRows(const Mat& A,
+ Vec* mean_pointer,
+ Vec* variance_pointer) {
+ Vec& mean = *mean_pointer;
+ Vec& variance = *variance_pointer;
int n = A.rows();
int m = A.cols();
mean.resize(n);
@@ -108,29 +113,28 @@ void MeanAndVarianceAlongRows(const Mat &A,
}
}
-void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked) {
+void HorizontalStack(const Mat& left, const Mat& right, Mat* stacked) {
assert(left.rows() == right.rows());
int n = left.rows();
int m1 = left.cols();
int m2 = right.cols();
stacked->resize(n, m1 + m2);
- stacked->block(0, 0, n, m1) = left;
+ stacked->block(0, 0, n, m1) = left;
stacked->block(0, m1, n, m2) = right;
}
-void MatrixColumn(const Mat &A, int i, Vec2 *v) {
+void MatrixColumn(const Mat& A, int i, Vec2* v) {
assert(A.rows() == 2);
*v << A(0, i), A(1, i);
}
-void MatrixColumn(const Mat &A, int i, Vec3 *v) {
+void MatrixColumn(const Mat& A, int i, Vec3* v) {
assert(A.rows() == 3);
*v << A(0, i), A(1, i), A(2, i);
}
-void MatrixColumn(const Mat &A, int i, Vec4 *v) {
+void MatrixColumn(const Mat& A, int i, Vec4* v) {
assert(A.rows() == 4);
*v << A(0, i), A(1, i), A(2, i), A(3, i);
}
} // namespace libmv
-
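As a quick illustration of the Rodrigues formula used by RotationRodrigues() above, R = I + sin(theta)*W + (1 - cos(theta))*W*W, a rotation vector of length theta along +Z should reproduce RotationAroundZ(theta). A small check sketch, not part of the patch:

#include "libmv/numeric/numeric.h"

using libmv::Mat3;
using libmv::Vec3;

// Returns true when the Rodrigues construction agrees with the axis-aligned
// rotation for a rotation of `theta` radians about Z (theta must be non-zero).
bool RodriguesMatchesAroundZ(double theta) {
  Vec3 axis;
  axis << 0, 0, theta;  // the angle is encoded as the vector's length
  Mat3 R_rodrigues = libmv::RotationRodrigues(axis);
  Mat3 R_z = libmv::RotationAroundZ(theta);
  return (R_rodrigues - R_z).norm() < 1e-12;  // Frobenius norm of difference
}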
diff --git a/intern/libmv/libmv/numeric/numeric.h b/intern/libmv/libmv/numeric/numeric.h
index f5478bee6ab..e3d44226338 100644
--- a/intern/libmv/libmv/numeric/numeric.h
+++ b/intern/libmv/libmv/numeric/numeric.h
@@ -34,10 +34,9 @@
#include <Eigen/SVD>
#if !defined(__MINGW64__)
-# if defined(_WIN32) || defined(__APPLE__) || \
- defined(__FreeBSD__) || defined(__NetBSD__) || \
- defined(__HAIKU__)
-inline void sincos(double x, double *sinx, double *cosx) {
+# if defined(_WIN32) || defined(__APPLE__) || defined(__FreeBSD__) || \
+ defined(__NetBSD__) || defined(__HAIKU__)
+inline void sincos(double x, double* sinx, double* cosx) {
*sinx = sin(x);
*cosx = cos(x);
}
@@ -46,11 +45,11 @@ inline void sincos(double x, double *sinx, double *cosx) {
#if (defined(WIN32) || defined(WIN64)) && !defined(__MINGW32__)
inline long lround(double d) {
- return (long)(d>0 ? d+0.5 : ceil(d-0.5));
+ return (long)(d > 0 ? d + 0.5 : ceil(d - 0.5));
}
# if _MSC_VER < 1800
inline int round(double d) {
- return (d>0) ? int(d+0.5) : int(d-0.5);
+ return (d > 0) ? int(d + 0.5) : int(d - 0.5);
}
# endif // _MSC_VER < 1800
typedef unsigned int uint;
@@ -92,25 +91,25 @@ typedef Eigen::Matrix<double, 4, 4, Eigen::RowMajor> RMat4;
typedef Eigen::Matrix<double, 2, Eigen::Dynamic> Mat2X;
typedef Eigen::Matrix<double, 3, Eigen::Dynamic> Mat3X;
typedef Eigen::Matrix<double, 4, Eigen::Dynamic> Mat4X;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
-typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
typedef Eigen::Matrix<double, Eigen::Dynamic, 15> MatX15;
typedef Eigen::Matrix<double, Eigen::Dynamic, 16> MatX16;
typedef Eigen::Vector2d Vec2;
typedef Eigen::Vector3d Vec3;
typedef Eigen::Vector4d Vec4;
-typedef Eigen::Matrix<double, 5, 1> Vec5;
-typedef Eigen::Matrix<double, 6, 1> Vec6;
-typedef Eigen::Matrix<double, 7, 1> Vec7;
-typedef Eigen::Matrix<double, 8, 1> Vec8;
-typedef Eigen::Matrix<double, 9, 1> Vec9;
+typedef Eigen::Matrix<double, 5, 1> Vec5;
+typedef Eigen::Matrix<double, 6, 1> Vec6;
+typedef Eigen::Matrix<double, 7, 1> Vec7;
+typedef Eigen::Matrix<double, 8, 1> Vec8;
+typedef Eigen::Matrix<double, 9, 1> Vec9;
typedef Eigen::Matrix<double, 10, 1> Vec10;
typedef Eigen::Matrix<double, 11, 1> Vec11;
typedef Eigen::Matrix<double, 12, 1> Vec12;
@@ -133,15 +132,13 @@ typedef Eigen::Vector2i Vec2i;
typedef Eigen::Vector3i Vec3i;
typedef Eigen::Vector4i Vec4i;
-typedef Eigen::Matrix<float,
- Eigen::Dynamic,
- Eigen::Dynamic,
- Eigen::RowMajor> RMatf;
+typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
+ RMatf;
typedef Eigen::NumTraits<double> EigenDouble;
-using Eigen::Map;
using Eigen::Dynamic;
+using Eigen::Map;
using Eigen::Matrix;
// Find U, s, and VT such that
@@ -149,7 +146,7 @@ using Eigen::Matrix;
// A = U * diag(s) * VT
//
template <typename TMat, typename TVec>
-inline void SVD(TMat * /*A*/, Vec * /*s*/, Mat * /*U*/, Mat * /*VT*/) {
+inline void SVD(TMat* /*A*/, Vec* /*s*/, Mat* /*U*/, Mat* /*VT*/) {
assert(0);
}
@@ -158,11 +155,11 @@ inline void SVD(TMat * /*A*/, Vec * /*s*/, Mat * /*U*/, Mat * /*VT*/) {
// Destroys A and resizes x if necessary.
// TODO(maclean): Take the SVD of the transpose instead of this zero padding.
template <typename TMat, typename TVec>
-double Nullspace(TMat *A, TVec *nullspace) {
+double Nullspace(TMat* A, TVec* nullspace) {
Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
- (*nullspace) = svd.matrixV().col(A->cols()-1);
+ (*nullspace) = svd.matrixV().col(A->cols() - 1);
if (A->rows() >= A->cols())
- return svd.singularValues()(A->cols()-1);
+ return svd.singularValues()(A->cols() - 1);
else
return 0.0;
}
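The Nullspace() helper reformatted above relies on a standard property of the SVD: the right singular vector paired with the smallest singular value minimizes ||Ax|| over unit vectors x. A self-contained restatement with dynamic Eigen types, for illustration only:

#include <Eigen/Core>
#include <Eigen/SVD>

// Returns the smallest singular value and writes the corresponding right
// singular vector (the approximate nullspace direction) into *x.
double NullspaceSketch(const Eigen::MatrixXd& A, Eigen::VectorXd* x) {
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullV);
  *x = svd.matrixV().col(A.cols() - 1);
  // As in the code above, the smallest singular value is only reported when
  // there are at least as many rows as columns.
  return (A.rows() >= A.cols()) ? svd.singularValues()(A.cols() - 1) : 0.0;
}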
@@ -173,55 +170,55 @@ double Nullspace(TMat *A, TVec *nullspace) {
// the singular value corresponding to the solution x1. Destroys A and resizes
// x if necessary.
template <typename TMat, typename TVec1, typename TVec2>
-double Nullspace2(TMat *A, TVec1 *x1, TVec2 *x2) {
+double Nullspace2(TMat* A, TVec1* x1, TVec2* x2) {
Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
*x1 = svd.matrixV().col(A->cols() - 1);
*x2 = svd.matrixV().col(A->cols() - 2);
if (A->rows() >= A->cols())
- return svd.singularValues()(A->cols()-1);
+ return svd.singularValues()(A->cols() - 1);
else
return 0.0;
}
// In place transpose for square matrices.
-template<class TA>
-inline void TransposeInPlace(TA *A) {
+template <class TA>
+inline void TransposeInPlace(TA* A) {
*A = A->transpose().eval();
}
-template<typename TVec>
-inline double NormL1(const TVec &x) {
+template <typename TVec>
+inline double NormL1(const TVec& x) {
return x.array().abs().sum();
}
-template<typename TVec>
-inline double NormL2(const TVec &x) {
+template <typename TVec>
+inline double NormL2(const TVec& x) {
return x.norm();
}
-template<typename TVec>
-inline double NormLInfinity(const TVec &x) {
+template <typename TVec>
+inline double NormLInfinity(const TVec& x) {
return x.array().abs().maxCoeff();
}
-template<typename TVec>
-inline double DistanceL1(const TVec &x, const TVec &y) {
+template <typename TVec>
+inline double DistanceL1(const TVec& x, const TVec& y) {
return (x - y).array().abs().sum();
}
-template<typename TVec>
-inline double DistanceL2(const TVec &x, const TVec &y) {
+template <typename TVec>
+inline double DistanceL2(const TVec& x, const TVec& y) {
return (x - y).norm();
}
-template<typename TVec>
-inline double DistanceLInfinity(const TVec &x, const TVec &y) {
+template <typename TVec>
+inline double DistanceLInfinity(const TVec& x, const TVec& y) {
return (x - y).array().abs().maxCoeff();
}
// Normalize a vector with the L1 norm, and return the norm before it was
// normalized.
-template<typename TVec>
-inline double NormalizeL1(TVec *x) {
+template <typename TVec>
+inline double NormalizeL1(TVec* x) {
double norm = NormL1(*x);
*x /= norm;
return norm;
@@ -229,8 +226,8 @@ inline double NormalizeL1(TVec *x) {
// Normalize a vector with the L2 norm, and return the norm before it was
// normalized.
-template<typename TVec>
-inline double NormalizeL2(TVec *x) {
+template <typename TVec>
+inline double NormalizeL2(TVec* x) {
double norm = NormL2(*x);
*x /= norm;
return norm;
@@ -238,15 +235,15 @@ inline double NormalizeL2(TVec *x) {
// Normalize a vector with the L^Infinity norm, and return the norm before it
// was normalized.
-template<typename TVec>
-inline double NormalizeLInfinity(TVec *x) {
+template <typename TVec>
+inline double NormalizeLInfinity(TVec* x) {
double norm = NormLInfinity(*x);
*x /= norm;
return norm;
}
// Return the square of a number.
-template<typename T>
+template <typename T>
inline T Square(T x) {
return x * x;
}
@@ -258,7 +255,7 @@ Mat3 RotationAroundZ(double angle);
// Returns the rotation matrix of a rotation of angle |axis| around axis.
// This is computed using the Rodrigues formula, see:
// http://mathworld.wolfram.com/RodriguesRotationFormula.html
-Mat3 RotationRodrigues(const Vec3 &axis);
+Mat3 RotationRodrigues(const Vec3& axis);
// Make a rotation matrix such that center becomes the direction of the
// positive z-axis, and y is oriented close to up.
@@ -266,177 +263,173 @@ Mat3 LookAt(Vec3 center);
// Return a diagonal matrix from a vector containing the diagonal values.
template <typename TVec>
-inline Mat Diag(const TVec &x) {
+inline Mat Diag(const TVec& x) {
return x.asDiagonal();
}
-template<typename TMat>
-inline double FrobeniusNorm(const TMat &A) {
+template <typename TMat>
+inline double FrobeniusNorm(const TMat& A) {
return sqrt(A.array().abs2().sum());
}
-template<typename TMat>
-inline double FrobeniusDistance(const TMat &A, const TMat &B) {
+template <typename TMat>
+inline double FrobeniusDistance(const TMat& A, const TMat& B) {
return FrobeniusNorm(A - B);
}
-inline Vec3 CrossProduct(const Vec3 &x, const Vec3 &y) {
+inline Vec3 CrossProduct(const Vec3& x, const Vec3& y) {
return x.cross(y);
}
-Mat3 CrossProductMatrix(const Vec3 &x);
+Mat3 CrossProductMatrix(const Vec3& x);
-void MeanAndVarianceAlongRows(const Mat &A,
- Vec *mean_pointer,
- Vec *variance_pointer);
+void MeanAndVarianceAlongRows(const Mat& A,
+ Vec* mean_pointer,
+ Vec* variance_pointer);
#if defined(_WIN32)
- // TODO(bomboze): un-#if this for both platforms once tested under Windows
- /* This solution was extensively discussed here
- http://forum.kde.org/viewtopic.php?f=74&t=61940 */
- #define SUM_OR_DYNAMIC(x, y) (x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x+y)
-
- template<typename Derived1, typename Derived2>
- struct hstack_return {
- typedef typename Derived1::Scalar Scalar;
- enum {
- RowsAtCompileTime = Derived1::RowsAtCompileTime,
- ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime,
- Derived2::ColsAtCompileTime),
- Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
- MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
- MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime,
- Derived2::MaxColsAtCompileTime)
- };
- typedef Eigen::Matrix<Scalar,
- RowsAtCompileTime,
- ColsAtCompileTime,
- Options,
- MaxRowsAtCompileTime,
- MaxColsAtCompileTime> type;
+// TODO(bomboze): un-#if this for both platforms once tested under Windows
+/* This solution was extensively discussed here
+ http://forum.kde.org/viewtopic.php?f=74&t=61940 */
+# define SUM_OR_DYNAMIC(x, y) \
+ (x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x + y)
+
+template <typename Derived1, typename Derived2>
+struct hstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Derived1::RowsAtCompileTime,
+ ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime,
+ Derived2::ColsAtCompileTime),
+ Options = Derived1::Flags & Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime,
+ Derived2::MaxColsAtCompileTime)
};
-
- template<typename Derived1, typename Derived2>
- typename hstack_return<Derived1, Derived2>::type
- HStack(const Eigen::MatrixBase<Derived1>& lhs,
- const Eigen::MatrixBase<Derived2>& rhs) {
- typename hstack_return<Derived1, Derived2>::type res;
- res.resize(lhs.rows(), lhs.cols()+rhs.cols());
- res << lhs, rhs;
- return res;
- };
-
-
- template<typename Derived1, typename Derived2>
- struct vstack_return {
- typedef typename Derived1::Scalar Scalar;
- enum {
- RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime,
- Derived2::RowsAtCompileTime),
- ColsAtCompileTime = Derived1::ColsAtCompileTime,
- Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
- MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime,
- Derived2::MaxRowsAtCompileTime),
- MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
- };
- typedef Eigen::Matrix<Scalar,
- RowsAtCompileTime,
- ColsAtCompileTime,
- Options,
- MaxRowsAtCompileTime,
- MaxColsAtCompileTime> type;
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>
+ type;
+};
+
+template <typename Derived1, typename Derived2>
+typename hstack_return<Derived1, Derived2>::type HStack(
+ const Eigen::MatrixBase<Derived1>& lhs,
+ const Eigen::MatrixBase<Derived2>& rhs) {
+ typename hstack_return<Derived1, Derived2>::type res;
+ res.resize(lhs.rows(), lhs.cols() + rhs.cols());
+ res << lhs, rhs;
+ return res;
+};
+
+template <typename Derived1, typename Derived2>
+struct vstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime,
+ Derived2::RowsAtCompileTime),
+ ColsAtCompileTime = Derived1::ColsAtCompileTime,
+ Options = Derived1::Flags & Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime,
+ Derived2::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
};
-
- template<typename Derived1, typename Derived2>
- typename vstack_return<Derived1, Derived2>::type
- VStack(const Eigen::MatrixBase<Derived1>& lhs,
- const Eigen::MatrixBase<Derived2>& rhs) {
- typename vstack_return<Derived1, Derived2>::type res;
- res.resize(lhs.rows()+rhs.rows(), lhs.cols());
- res << lhs, rhs;
- return res;
- };
-
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>
+ type;
+};
+
+template <typename Derived1, typename Derived2>
+typename vstack_return<Derived1, Derived2>::type VStack(
+ const Eigen::MatrixBase<Derived1>& lhs,
+ const Eigen::MatrixBase<Derived2>& rhs) {
+ typename vstack_return<Derived1, Derived2>::type res;
+ res.resize(lhs.rows() + rhs.rows(), lhs.cols());
+ res << lhs, rhs;
+ return res;
+};
#else // _WIN32
- // Since it is not possible to typedef privately here, use a macro.
- // Always take dynamic columns if either side is dynamic.
- #define COLS \
- ((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
- ? Eigen::Dynamic : (ColsLeft + ColsRight))
-
- // Same as above, except that prefer fixed size if either is fixed.
- #define ROWS \
- ((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
- ? Eigen::Dynamic \
- : ((RowsLeft == Eigen::Dynamic) \
- ? RowsRight \
- : RowsLeft \
- ) \
- )
-
- // TODO(keir): Add a static assert if both rows are at compiletime.
- template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
- Eigen::Matrix<T, ROWS, COLS>
- HStack(const Eigen::Matrix<T, RowsLeft, ColsLeft> &left,
- const Eigen::Matrix<T, RowsRight, ColsRight> &right) {
- assert(left.rows() == right.rows());
- int n = left.rows();
- int m1 = left.cols();
- int m2 = right.cols();
-
- Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
- stacked.block(0, 0, n, m1) = left;
- stacked.block(0, m1, n, m2) = right;
- return stacked;
- }
-
- // Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
- // the duplication is worse.
- // TODO(keir): Add a static assert if both rows are at compiletime.
- // TODO(keir): Mail eigen list about making this work for general expressions
- // rather than only matrix types.
- template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
- Eigen::Matrix<T, COLS, ROWS>
- VStack(const Eigen::Matrix<T, ColsLeft, RowsLeft> &top,
- const Eigen::Matrix<T, ColsRight, RowsRight> &bottom) {
- assert(top.cols() == bottom.cols());
- int n1 = top.rows();
- int n2 = bottom.rows();
- int m = top.cols();
-
- Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
- stacked.block(0, 0, n1, m) = top;
- stacked.block(n1, 0, n2, m) = bottom;
- return stacked;
- }
- #undef COLS
- #undef ROWS
-#endif // _WIN32
+// Since it is not possible to typedef privately here, use a macro.
+// Always take dynamic columns if either side is dynamic.
+# define COLS \
+ ((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic \
+ : (ColsLeft + ColsRight))
+
+// Same as above, except that it prefers fixed size if either is fixed.
+# define ROWS \
+ ((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic \
+ : ((RowsLeft == Eigen::Dynamic) ? RowsRight : RowsLeft))
+
+// TODO(keir): Add a static assert if both rows are at compiletime.
+template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+Eigen::Matrix<T, ROWS, COLS> HStack(
+ const Eigen::Matrix<T, RowsLeft, ColsLeft>& left,
+ const Eigen::Matrix<T, RowsRight, ColsRight>& right) {
+ assert(left.rows() == right.rows());
+ int n = left.rows();
+ int m1 = left.cols();
+ int m2 = right.cols();
+
+ Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
+ stacked.block(0, 0, n, m1) = left;
+ stacked.block(0, m1, n, m2) = right;
+ return stacked;
+}
+// Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
+// the duplication is worse.
+// TODO(keir): Add a static assert if both rows are at compiletime.
+// TODO(keir): Mail eigen list about making this work for general expressions
+// rather than only matrix types.
+template <typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+Eigen::Matrix<T, COLS, ROWS> VStack(
+ const Eigen::Matrix<T, ColsLeft, RowsLeft>& top,
+ const Eigen::Matrix<T, ColsRight, RowsRight>& bottom) {
+ assert(top.cols() == bottom.cols());
+ int n1 = top.rows();
+ int n2 = bottom.rows();
+ int m = top.cols();
+ Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
+ stacked.block(0, 0, n1, m) = top;
+ stacked.block(n1, 0, n2, m) = bottom;
+ return stacked;
+}
+# undef COLS
+# undef ROWS
+#endif // _WIN32
-void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked);
+void HorizontalStack(const Mat& left, const Mat& right, Mat* stacked);
-template<typename TTop, typename TBot, typename TStacked>
-void VerticalStack(const TTop &top, const TBot &bottom, TStacked *stacked) {
+template <typename TTop, typename TBot, typename TStacked>
+void VerticalStack(const TTop& top, const TBot& bottom, TStacked* stacked) {
assert(top.cols() == bottom.cols());
int n1 = top.rows();
int n2 = bottom.rows();
int m = top.cols();
stacked->resize(n1 + n2, m);
- stacked->block(0, 0, n1, m) = top;
+ stacked->block(0, 0, n1, m) = top;
stacked->block(n1, 0, n2, m) = bottom;
}
-void MatrixColumn(const Mat &A, int i, Vec2 *v);
-void MatrixColumn(const Mat &A, int i, Vec3 *v);
-void MatrixColumn(const Mat &A, int i, Vec4 *v);
+void MatrixColumn(const Mat& A, int i, Vec2* v);
+void MatrixColumn(const Mat& A, int i, Vec3* v);
+void MatrixColumn(const Mat& A, int i, Vec4* v);
template <typename TMat, typename TCols>
-TMat ExtractColumns(const TMat &A, const TCols &columns) {
+TMat ExtractColumns(const TMat& A, const TCols& columns) {
TMat compressed(A.rows(), columns.size());
for (int i = 0; i < columns.size(); ++i) {
compressed.col(i) = A.col(columns[i]);
@@ -445,12 +438,12 @@ TMat ExtractColumns(const TMat &A, const TCols &columns) {
}
template <typename TMat, typename TDest>
-void reshape(const TMat &a, int rows, int cols, TDest *b) {
- assert(a.rows()*a.cols() == rows*cols);
+void reshape(const TMat& a, int rows, int cols, TDest* b) {
+ assert(a.rows() * a.cols() == rows * cols);
b->resize(rows, cols);
for (int i = 0; i < rows; i++) {
for (int j = 0; j < cols; j++) {
- (*b)(i, j) = a[cols*i + j];
+ (*b)(i, j) = a[cols * i + j];
}
}
}
@@ -467,24 +460,21 @@ inline bool isnan(double i) {
/// and negative values
template <typename FloatType>
FloatType ceil0(const FloatType& value) {
- FloatType result = std::ceil(std::fabs(value));
- return (value < 0.0) ? -result : result;
+ FloatType result = std::ceil(std::fabs(value));
+ return (value < 0.0) ? -result : result;
}
/// Returns the skew anti-symmetric matrix of a vector
-inline Mat3 SkewMat(const Vec3 &x) {
+inline Mat3 SkewMat(const Vec3& x) {
Mat3 skew;
- skew << 0 , -x(2), x(1),
- x(2), 0 , -x(0),
- -x(1), x(0), 0;
+ skew << 0, -x(2), x(1), x(2), 0, -x(0), -x(1), x(0), 0;
return skew;
}
/// Returns the skew anti-symmetric matrix of a vector with only
/// the first two (independent) lines
-inline Mat23 SkewMatMinimal(const Vec2 &x) {
+inline Mat23 SkewMatMinimal(const Vec2& x) {
Mat23 skew;
- skew << 0, -1, x(1),
- 1, 0, -x(0);
+ skew << 0, -1, x(1), 1, 0, -x(0);
return skew;
}
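The skew matrix built by SkewMat() above is the matrix form of the cross product: SkewMat(x) * y equals x cross y for any y. A short check sketch using plain Eigen types, for illustration only:

#include <Eigen/Dense>

// Verifies the defining property of the skew-symmetric (cross product) matrix.
bool SkewMatchesCrossProduct(const Eigen::Vector3d& x,
                             const Eigen::Vector3d& y) {
  Eigen::Matrix3d skew;
  skew <<     0, -x(2),  x(1),
           x(2),     0, -x(0),
          -x(1),  x(0),     0;
  return (skew * y - x.cross(y)).norm() < 1e-12;
}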
@@ -496,7 +486,8 @@ inline Mat3 RotationFromEulerVector(Vec3 euler_vector) {
}
Vec3 w = euler_vector / theta;
Mat3 w_hat = CrossProductMatrix(w);
- return Mat3::Identity() + w_hat*sin(theta) + w_hat*w_hat*(1 - cos(theta));
+ return Mat3::Identity() + w_hat * sin(theta) +
+ w_hat * w_hat * (1 - cos(theta));
}
} // namespace libmv
diff --git a/intern/libmv/libmv/numeric/numeric_test.cc b/intern/libmv/libmv/numeric/numeric_test.cc
index 0cdfaf33ab2..b2650a04658 100644
--- a/intern/libmv/libmv/numeric/numeric_test.cc
+++ b/intern/libmv/libmv/numeric/numeric_test.cc
@@ -27,9 +27,11 @@ namespace {
TEST(Numeric, DynamicSizedNullspace) {
Mat A(3, 4);
+ // clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
+ // clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@@ -39,9 +41,11 @@ TEST(Numeric, DynamicSizedNullspace) {
TEST(Numeric, FixedSizeMatrixNullspace) {
Mat34 A;
+ // clang-format off
A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
0.42016166, 0.97863392, 0.33711682, 0.14479271,
0.51016811, 0.66528302, 0.54395496, 0.57794893;
+ // clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(0.0, s, 1e-15);
@@ -51,10 +55,12 @@ TEST(Numeric, FixedSizeMatrixNullspace) {
TEST(Numeric, NullspaceMatchesLapackSVD) {
Mat43 A;
+ // clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
+ // clang-format on
Vec x;
double s = Nullspace(&A, &x);
EXPECT_NEAR(1.0, x.norm(), 1e-15);
@@ -68,10 +74,12 @@ TEST(Numeric, NullspaceMatchesLapackSVD) {
TEST(Numeric, Nullspace2) {
Mat43 A;
+ // clang-format off
A << 0.76026643, 0.01799744, 0.55192142,
0.8699745, 0.42016166, 0.97863392,
0.33711682, 0.14479271, 0.51016811,
0.66528302, 0.54395496, 0.57794893;
+ // clang-format on
Vec3 x1, x2;
double s = Nullspace2(&A, &x1, &x2);
EXPECT_NEAR(1.0, x1.norm(), 1e-15);
@@ -80,14 +88,14 @@ TEST(Numeric, Nullspace2) {
EXPECT_NEAR(-0.64999717, x1(0), 1e-8);
EXPECT_NEAR(-0.18452646, x1(1), 1e-8);
- EXPECT_NEAR( 0.7371931, x1(2), 1e-8);
+ EXPECT_NEAR(0.7371931, x1(2), 1e-8);
if (x2(0) < 0) {
x2 *= -1;
}
- EXPECT_NEAR( 0.34679618, x2(0), 1e-8);
+ EXPECT_NEAR(0.34679618, x2(0), 1e-8);
EXPECT_NEAR(-0.93519689, x2(1), 1e-8);
- EXPECT_NEAR( 0.07168809, x2(2), 1e-8);
+ EXPECT_NEAR(0.07168809, x2(2), 1e-8);
}
TEST(Numeric, TinyMatrixSquareTranspose) {
@@ -105,8 +113,8 @@ TEST(Numeric, NormalizeL1) {
x << 1, 2;
double l1 = NormalizeL1(&x);
EXPECT_DOUBLE_EQ(3., l1);
- EXPECT_DOUBLE_EQ(1./3., x(0));
- EXPECT_DOUBLE_EQ(2./3., x(1));
+ EXPECT_DOUBLE_EQ(1. / 3., x(0));
+ EXPECT_DOUBLE_EQ(2. / 3., x(1));
}
TEST(Numeric, NormalizeL2) {
@@ -114,8 +122,8 @@ TEST(Numeric, NormalizeL2) {
x << 1, 2;
double l2 = NormalizeL2(&x);
EXPECT_DOUBLE_EQ(sqrt(5.0), l2);
- EXPECT_DOUBLE_EQ(1./sqrt(5.), x(0));
- EXPECT_DOUBLE_EQ(2./sqrt(5.), x(1));
+ EXPECT_DOUBLE_EQ(1. / sqrt(5.), x(0));
+ EXPECT_DOUBLE_EQ(2. / sqrt(5.), x(1));
}
TEST(Numeric, Diag) {
@@ -130,31 +138,32 @@ TEST(Numeric, Diag) {
TEST(Numeric, Determinant) {
Mat A(2, 2);
- A << 1, 2,
- -1, 3;
+ A << 1, 2, -1, 3;
double detA = A.determinant();
EXPECT_NEAR(5, detA, 1e-8);
Mat B(4, 4);
+ // clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15;
+ // clang-format on
double detB = B.determinant();
EXPECT_NEAR(0, detB, 1e-8);
Mat3 C;
- C << 0, 1, 2,
- 3, 4, 5,
- 6, 7, 1;
+ C << 0, 1, 2, 3, 4, 5, 6, 7, 1;
double detC = C.determinant();
EXPECT_NEAR(21, detC, 1e-8);
}
TEST(Numeric, Inverse) {
Mat A(2, 2), A1;
+ // clang-format off
A << 1, 2,
-1, 3;
+ // clang-format on
Mat I = A * A.inverse();
EXPECT_NEAR(1, I(0, 0), 1e-8);
@@ -163,10 +172,12 @@ TEST(Numeric, Inverse) {
EXPECT_NEAR(1, I(1, 1), 1e-8);
Mat B(4, 4), B1;
+ // clang-format off
B << 0, 1, 2, 3,
4, 5, 6, 7,
8, 9, 2, 11,
12, 13, 14, 4;
+ // clang-format on
Mat I2 = B * B.inverse();
EXPECT_NEAR(1, I2(0, 0), 1e-8);
EXPECT_NEAR(0, I2(0, 1), 1e-8);
@@ -182,8 +193,10 @@ TEST(Numeric, Inverse) {
TEST(Numeric, MeanAndVarianceAlongRows) {
int n = 4;
Mat points(2, n);
+ // clang-format off
points << 0, 0, 1, 1,
0, 2, 1, 3;
+ // clang-format on
Vec mean, variance;
MeanAndVarianceAlongRows(points, &mean, &variance);
@@ -213,15 +226,17 @@ TEST(Numeric, HStack) {
Mat x(2, 1), y(2, 1), z(2, 2);
x << 1, 2;
y << 3, 4;
+ // clang-format off
z << 1, 3,
2, 4;
+ // clang-format on
Vec2 xC = x, yC = y;
- Mat2 xy = HStack(x, y);
+ Mat2 xy = HStack(x, y);
EXPECT_MATRIX_EQ(z, xy);
- EXPECT_MATRIX_EQ(z, HStack(x, y));
- EXPECT_MATRIX_EQ(z, HStack(x, yC));
+ EXPECT_MATRIX_EQ(z, HStack(x, y));
+ EXPECT_MATRIX_EQ(z, HStack(x, yC));
EXPECT_MATRIX_EQ(z, HStack(xC, y));
EXPECT_MATRIX_EQ(z, HStack(xC, yC));
}
@@ -230,6 +245,7 @@ TEST(Numeric, HStack) {
// resulting stacked matrices properly propagate the fixed dimensions.
TEST(Numeric, VStack) {
Mat x(2, 2), y(2, 2), z(4, 2);
+ // clang-format off
x << 1, 2,
3, 4;
y << 10, 20,
@@ -238,13 +254,14 @@ TEST(Numeric, VStack) {
3, 4,
10, 20,
30, 40;
+ // clang-format on
Mat2 xC = x, yC = y;
- Mat xy = VStack(x, y);
+ Mat xy = VStack(x, y);
EXPECT_MATRIX_EQ(z, xy);
- EXPECT_MATRIX_EQ(z, VStack(x, y));
- EXPECT_MATRIX_EQ(z, VStack(x, yC));
+ EXPECT_MATRIX_EQ(z, VStack(x, y));
+ EXPECT_MATRIX_EQ(z, VStack(x, yC));
EXPECT_MATRIX_EQ(z, VStack(xC, y));
EXPECT_MATRIX_EQ(z, VStack(xC, yC));
}
@@ -293,17 +310,21 @@ TEST(Numeric, CrossProductMatrix) {
TEST(Numeric, MatrixColumn) {
Mat A2(2, 3);
Vec2 v2;
+ // clang-format off
A2 << 1, 2, 3,
4, 5, 6;
+ // clang-format on
MatrixColumn(A2, 1, &v2);
EXPECT_EQ(2, v2(0));
EXPECT_EQ(5, v2(1));
Mat A3(3, 3);
Vec3 v3;
+ // clang-format off
A3 << 1, 2, 3,
4, 5, 6,
7, 8, 9;
+ // clang-format on
MatrixColumn(A3, 1, &v3);
EXPECT_EQ(2, v3(0));
EXPECT_EQ(5, v3(1));
@@ -311,14 +332,16 @@ TEST(Numeric, MatrixColumn) {
Mat A4(4, 3);
Vec4 v4;
+ // clang-format off
A4 << 1, 2, 3,
4, 5, 6,
7, 8, 9,
10, 11, 12;
+ // clang-format on
MatrixColumn(A4, 1, &v4);
- EXPECT_EQ( 2, v4(0));
- EXPECT_EQ( 5, v4(1));
- EXPECT_EQ( 8, v4(2));
+ EXPECT_EQ(2, v4(0));
+ EXPECT_EQ(5, v4(1));
+ EXPECT_EQ(8, v4(2));
EXPECT_EQ(11, v4(3));
}
@@ -337,7 +360,8 @@ TEST(Numeric, Mat3MatProduct) {
// This gives a compile error.
TEST(Numeric, Vec3Negative) {
- Vec3 y; y << 1, 2, 3;
+ Vec3 y;
+ y << 1, 2, 3;
Vec3 x = -y;
EXPECT_EQ(-1, x(0));
EXPECT_EQ(-2, x(1));
@@ -357,19 +381,23 @@ TEST(Numeric, Vec3VecInteroperability) {
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU7) {
Mat A(5, 5);
+ // clang-format off
A << 1, 0, 0, 0, 0,
0, 1, 0, 0, 0,
0, 0, 1, 0, 0,
0, 0, 0, 1, 0,
0, 0, 0, 0, 1;
+ // clang-format on
EXPECT_NEAR(1, A.determinant(), 1e-8);
}
// This segfaults inside lapack.
TEST(Numeric, DeterminantLU) {
Mat A(2, 2);
+ // clang-format off
A << 1, 2,
-1, 3;
+ // clang-format on
EXPECT_NEAR(5, A.determinant(), 1e-8);
}
@@ -377,19 +405,24 @@ TEST(Numeric, DeterminantLU) {
// Keir: Not with eigen2!
TEST(Numeric, InplaceProduct) {
Mat2 K, S;
+ // clang-format off
K << 1, 0,
0, 1;
S << 1, 0,
0, 1;
K = K * S;
+ // clang-format on
EXPECT_MATRIX_NEAR(Mat2::Identity(), K, 1e-8);
}
TEST(Numeric, ExtractColumns) {
Mat2X A(2, 5);
+ // clang-format off
A << 1, 2, 3, 4, 5,
6, 7, 8, 9, 10;
- Vec2i columns; columns << 0, 2;
+ // clang-format on
+ Vec2i columns;
+ columns << 0, 2;
Mat2X extracted = ExtractColumns(A, columns);
EXPECT_NEAR(1, extracted(0, 0), 1e-15);
EXPECT_NEAR(3, extracted(0, 1), 1e-15);
@@ -418,21 +451,22 @@ TEST(Numeric, RotationRodrigues) {
TEST(Numeric, LookAt) {
// Simple orthogonality check.
- Vec3 e; e << 1, 2, 3;
+ Vec3 e;
+ e << 1, 2, 3;
Mat3 R = LookAt(e), I = Mat3::Identity();
- Mat3 RRT = R*R.transpose();
- Mat3 RTR = R.transpose()*R;
+ Mat3 RRT = R * R.transpose();
+ Mat3 RTR = R.transpose() * R;
EXPECT_MATRIX_NEAR(I, RRT, 1e-15);
EXPECT_MATRIX_NEAR(I, RTR, 1e-15);
}
TEST(Numeric, Reshape) {
- Vec4 x; x << 1, 2, 3, 4;
+ Vec4 x;
+ x << 1, 2, 3, 4;
Mat2 M, M_expected;
reshape(x, 2, 2, &M);
- M_expected << 1, 2,
- 3, 4;
+ M_expected << 1, 2, 3, 4;
EXPECT_MATRIX_NEAR(M_expected, M, 1e-15);
}
diff --git a/intern/libmv/libmv/numeric/poly.h b/intern/libmv/libmv/numeric/poly.h
index 76ba062d475..a3d3801a399 100644
--- a/intern/libmv/libmv/numeric/poly.h
+++ b/intern/libmv/libmv/numeric/poly.h
@@ -21,8 +21,8 @@
#ifndef LIBMV_NUMERIC_POLY_H_
#define LIBMV_NUMERIC_POLY_H_
-#include <cmath>
#include <stdio.h>
+#include <cmath>
namespace libmv {
@@ -35,9 +35,8 @@ namespace libmv {
// if there are 2 roots, only x0 and x1 are set.
//
// The GSL cubic solver was used as a reference for this routine.
-template<typename Real>
-int SolveCubicPolynomial(Real a, Real b, Real c,
- Real *x0, Real *x1, Real *x2) {
+template <typename Real>
+int SolveCubicPolynomial(Real a, Real b, Real c, Real* x0, Real* x1, Real* x2) {
Real q = a * a - 3 * b;
Real r = 2 * a * a * a - 9 * a * b + 27 * c;
@@ -65,12 +64,12 @@ int SolveCubicPolynomial(Real a, Real b, Real c,
Real sqrtQ = sqrt(Q);
if (R > 0) {
*x0 = -2 * sqrtQ - a / 3;
- *x1 = sqrtQ - a / 3;
- *x2 = sqrtQ - a / 3;
+ *x1 = sqrtQ - a / 3;
+ *x2 = sqrtQ - a / 3;
} else {
- *x0 = -sqrtQ - a / 3;
- *x1 = -sqrtQ - a / 3;
- *x2 = 2 * sqrtQ - a / 3;
+ *x0 = -sqrtQ - a / 3;
+ *x1 = -sqrtQ - a / 3;
+ *x2 = 2 * sqrtQ - a / 3;
}
return 3;
@@ -97,15 +96,15 @@ int SolveCubicPolynomial(Real a, Real b, Real c,
return 3;
}
Real sgnR = (R >= 0 ? 1 : -1);
- Real A = -sgnR * pow(fabs(R) + sqrt(R2 - Q3), 1.0/3.0);
+ Real A = -sgnR * pow(fabs(R) + sqrt(R2 - Q3), 1.0 / 3.0);
Real B = Q / A;
*x0 = A + B - a / 3;
return 1;
}
// The coefficients are in ascending powers, i.e. coeffs[N]*x^N.
-template<typename Real>
-int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
+template <typename Real>
+int SolveCubicPolynomial(const Real* coeffs, Real* solutions) {
if (coeffs[0] == 0.0) {
// TODO(keir): This is a quadratic not a cubic. Implement a quadratic
// solver!
@@ -114,10 +113,8 @@ int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
Real a = coeffs[2] / coeffs[3];
Real b = coeffs[1] / coeffs[3];
Real c = coeffs[0] / coeffs[3];
- return SolveCubicPolynomial(a, b, c,
- solutions + 0,
- solutions + 1,
- solutions + 2);
+ return SolveCubicPolynomial(
+ a, b, c, solutions + 0, solutions + 1, solutions + 2);
}
} // namespace libmv
#endif // LIBMV_NUMERIC_POLY_H_
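As a usage sketch for the ascending-coefficient overload above (coeffs[N] multiplies x^N): the cubic (x - 1)(x - 2)(x - 3) = x^3 - 6x^2 + 11x - 6 should yield three real roots. Illustration only, not part of the patch; root ordering is not assumed.

#include "libmv/numeric/poly.h"

// Expects three real roots 1, 2 and 3 (in some order, up to round-off).
int CubicExample(double roots[3]) {
  const double coeffs[4] = {-6.0, 11.0, -6.0, 1.0};  // lowest power first
  return libmv::SolveCubicPolynomial(coeffs, roots);
}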
diff --git a/intern/libmv/libmv/numeric/poly_test.cc b/intern/libmv/libmv/numeric/poly_test.cc
index 69f887b416c..cb85c068468 100644
--- a/intern/libmv/libmv/numeric/poly_test.cc
+++ b/intern/libmv/libmv/numeric/poly_test.cc
@@ -18,8 +18,8 @@
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
// IN THE SOFTWARE.
-#include "libmv/numeric/numeric.h"
#include "libmv/numeric/poly.h"
+#include "libmv/numeric/numeric.h"
#include "testing/testing.h"
using namespace libmv;
@@ -34,8 +34,8 @@ namespace {
//
// x^3 - (c+b+a) * x^2 + (a*b+(b+a)*c) * x - a*b*c = 0.
// = p = q = r
-void CoeffsForCubicZeros(double a, double b, double c,
- double *p, double *q, double *r) {
+void CoeffsForCubicZeros(
+ double a, double b, double c, double* p, double* q, double* r) {
*p = -(c + b + a);
*q = (a * b + (b + a) * c);
*r = -a * b * c;
@@ -45,35 +45,45 @@ TEST(Poly, SolveCubicPolynomial) {
double a, b, c, aa, bb, cc;
double p, q, r;
- a = 1; b = 2; c = 3;
+ a = 1;
+ b = 2;
+ c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
- a = 0; b = 1; c = 3;
+ a = 0;
+ b = 1;
+ c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
- a = -10; b = 0; c = 1;
+ a = -10;
+ b = 0;
+ c = 1;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
- a = -8; b = 1; c = 3;
+ a = -8;
+ b = 1;
+ c = 3;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
EXPECT_NEAR(b, bb, 1e-10);
EXPECT_NEAR(c, cc, 1e-10);
- a = 28; b = 28; c = 105;
+ a = 28;
+ b = 28;
+ c = 105;
CoeffsForCubicZeros(a, b, c, &p, &q, &r);
ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
EXPECT_NEAR(a, aa, 1e-10);
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.cc b/intern/libmv/libmv/simple_pipeline/bundle.cc
index 25a63c87e1b..e86c3bca57f 100644
--- a/intern/libmv/libmv/simple_pipeline/bundle.cc
+++ b/intern/libmv/libmv/simple_pipeline/bundle.cc
@@ -32,16 +32,16 @@
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
-#include "libmv/simple_pipeline/reconstruction.h"
-#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/distortion_models.h"
#include "libmv/simple_pipeline/packed_intrinsics.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
namespace {
-bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
+bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics* intrinsics) {
const DistortionModelType distortion_model =
intrinsics->GetDistortionModelType();
return (distortion_model == DISTORTION_MODEL_NUKE);
@@ -59,12 +59,14 @@ bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
// The invariant_intrinsics are used to access intrinsics which are never
// packed into parameter block: for example, distortion model type and image
// dimension.
-template<typename T>
+template <typename T>
void ApplyDistortionModelUsingIntrinsicsBlock(
- const CameraIntrinsics *invariant_intrinsics,
+ const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
- const T& normalized_x, const T& normalized_y,
- T* distorted_x, T* distorted_y) {
+ const T& normalized_x,
+ const T& normalized_y,
+ T* distorted_x,
+ T* distorted_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@@ -76,65 +78,75 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
switch (invariant_intrinsics->GetDistortionModelType()) {
- case DISTORTION_MODEL_POLYNOMIAL:
- {
- const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
- const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
- const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
- const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
- const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
-
- ApplyPolynomialDistortionModel(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- k1, k2, k3,
- p1, p2,
- normalized_x, normalized_y,
- distorted_x, distorted_y);
- return;
- }
-
- case DISTORTION_MODEL_DIVISION:
- {
- const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
- const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
-
- ApplyDivisionDistortionModel(focal_length,
+ case DISTORTION_MODEL_POLYNOMIAL: {
+ const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
+ const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
+ const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
+ const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
+ const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
+
+ ApplyPolynomialDistortionModel(focal_length,
focal_length,
principal_point_x,
principal_point_y,
- k1, k2,
- normalized_x, normalized_y,
- distorted_x, distorted_y);
- return;
- }
+ k1,
+ k2,
+ k3,
+ p1,
+ p2,
+ normalized_x,
+ normalized_y,
+ distorted_x,
+ distorted_y);
+ return;
+ }
- case DISTORTION_MODEL_NUKE:
- {
- LOG(FATAL) << "Unsupported distortion model.";
- return;
- }
+ case DISTORTION_MODEL_DIVISION: {
+ const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
+ const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
+
+ ApplyDivisionDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1,
+ k2,
+ normalized_x,
+ normalized_y,
+ distorted_x,
+ distorted_y);
+ return;
+ }
- case DISTORTION_MODEL_BROWN:
- {
- const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
- const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
- const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
- const T& k4 = intrinsics_block[PackedIntrinsics::OFFSET_K4];
- const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
- const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
-
- ApplyBrownDistortionModel(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- k1, k2, k3, k4,
- p1, p2,
- normalized_x, normalized_y,
- distorted_x, distorted_y);
- return;
- }
+ case DISTORTION_MODEL_NUKE: {
+ LOG(FATAL) << "Unsupported distortion model.";
+ return;
+ }
+
+ case DISTORTION_MODEL_BROWN: {
+ const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
+ const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
+ const T& k3 = intrinsics_block[PackedIntrinsics::OFFSET_K3];
+ const T& k4 = intrinsics_block[PackedIntrinsics::OFFSET_K4];
+ const T& p1 = intrinsics_block[PackedIntrinsics::OFFSET_P1];
+ const T& p2 = intrinsics_block[PackedIntrinsics::OFFSET_P2];
+
+ ApplyBrownDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1,
+ k2,
+ k3,
+ k4,
+ p1,
+ p2,
+ normalized_x,
+ normalized_y,
+ distorted_x,
+ distorted_y);
+ return;
+ }
}
LOG(FATAL) << "Unknown distortion model.";
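For orientation, the polynomial branch above evaluates a Brown–Conrady style radial (k1, k2, k3) plus tangential (p1, p2) distortion on normalized coordinates and then maps back to pixels with the focal length and principal point. The sketch below shows the common convention only; the authoritative formula is ApplyPolynomialDistortionModel() in distortion_models.h, which this diff does not touch, so treat the exact p1/p2 terms here as an assumption.

// Sketch of a polynomial radial + tangential distortion (common convention).
void ApplyPolynomialDistortionSketch(double focal, double cx, double cy,
                                     double k1, double k2, double k3,
                                     double p1, double p2,
                                     double x, double y,  // normalized coords
                                     double* image_x, double* image_y) {
  const double r2 = x * x + y * y;
  const double r4 = r2 * r2;
  const double r6 = r4 * r2;
  const double radial = 1.0 + k1 * r2 + k2 * r4 + k3 * r6;
  const double xd = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x);
  const double yd = y * radial + 2.0 * p2 * x * y + p1 * (r2 + 2.0 * y * y);
  *image_x = focal * xd + cx;  // back to pixel coordinates
  *image_y = focal * yd + cy;
}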
@@ -152,12 +164,14 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
// The invariant_intrinsics are used to access intrinsics which are never
// packed into parameter block: for example, distortion model type and image
// dimension.
-template<typename T>
+template <typename T>
void InvertDistortionModelUsingIntrinsicsBlock(
- const CameraIntrinsics *invariant_intrinsics,
+ const CameraIntrinsics* invariant_intrinsics,
const T* const intrinsics_block,
- const T& image_x, const T& image_y,
- T* normalized_x, T* normalized_y) {
+ const T& image_x,
+ const T& image_y,
+ T* normalized_x,
+ T* normalized_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@@ -175,31 +189,35 @@ void InvertDistortionModelUsingIntrinsicsBlock(
LOG(FATAL) << "Unsupported distortion model.";
return;
- case DISTORTION_MODEL_NUKE:
- {
- const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
- const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
-
- InvertNukeDistortionModel(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- invariant_intrinsics->image_width(),
- invariant_intrinsics->image_height(),
- k1, k2,
- image_x, image_y,
- normalized_x, normalized_y);
- return;
- }
+ case DISTORTION_MODEL_NUKE: {
+ const T& k1 = intrinsics_block[PackedIntrinsics::OFFSET_K1];
+ const T& k2 = intrinsics_block[PackedIntrinsics::OFFSET_K2];
+
+ InvertNukeDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ invariant_intrinsics->image_width(),
+ invariant_intrinsics->image_height(),
+ k1,
+ k2,
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
+ return;
+ }
}
LOG(FATAL) << "Unknown distortion model.";
}
-template<typename T>
+template <typename T>
void NormalizedToImageSpace(const T* const intrinsics_block,
- const T& normalized_x, const T& normalized_y,
- T* image_x, T* image_y) {
+ const T& normalized_x,
+ const T& normalized_y,
+ T* image_x,
+ T* image_y) {
// Unpack the intrinsics.
const T& focal_length =
intrinsics_block[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
@@ -219,11 +237,10 @@ void NormalizedToImageSpace(const T* const intrinsics_block,
// This functor can only be used for distortion models which have analytically
// defined Apply() function.
struct ReprojectionErrorApplyIntrinsics {
- ReprojectionErrorApplyIntrinsics(
- const CameraIntrinsics *invariant_intrinsics,
- const double observed_distorted_x,
- const double observed_distorted_y,
- const double weight)
+ ReprojectionErrorApplyIntrinsics(const CameraIntrinsics* invariant_intrinsics,
+ const double observed_distorted_x,
+ const double observed_distorted_y,
+ const double weight)
: invariant_intrinsics_(invariant_intrinsics),
observed_distorted_x_(observed_distorted_x),
observed_distorted_y_(observed_distorted_y),
@@ -253,11 +270,12 @@ struct ReprojectionErrorApplyIntrinsics {
T yn = x[1] / x[2];
T predicted_distorted_x, predicted_distorted_y;
- ApplyDistortionModelUsingIntrinsicsBlock(
- invariant_intrinsics_,
- intrinsics,
- xn, yn,
- &predicted_distorted_x, &predicted_distorted_y);
+ ApplyDistortionModelUsingIntrinsicsBlock(invariant_intrinsics_,
+ intrinsics,
+ xn,
+ yn,
+ &predicted_distorted_x,
+ &predicted_distorted_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_distorted_x - T(observed_distorted_x_)) * weight_;
@@ -265,7 +283,7 @@ struct ReprojectionErrorApplyIntrinsics {
return true;
}
- const CameraIntrinsics *invariant_intrinsics_;
+ const CameraIntrinsics* invariant_intrinsics_;
const double observed_distorted_x_;
const double observed_distorted_y_;
const double weight_;
@@ -279,7 +297,7 @@ struct ReprojectionErrorApplyIntrinsics {
// defined Invert() function.
struct ReprojectionErrorInvertIntrinsics {
ReprojectionErrorInvertIntrinsics(
- const CameraIntrinsics *invariant_intrinsics,
+ const CameraIntrinsics* invariant_intrinsics,
const double observed_distorted_x,
const double observed_distorted_y,
const double weight)
@@ -295,8 +313,7 @@ struct ReprojectionErrorInvertIntrinsics {
const T* const X, // Point coordinates 3x1.
T* residuals) const {
// Unpack the intrinsics.
- const T& focal_length =
- intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
+ const T& focal_length = intrinsics[PackedIntrinsics::OFFSET_FOCAL_LENGTH];
const T& principal_point_x =
intrinsics[PackedIntrinsics::OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y =
@@ -327,14 +344,17 @@ struct ReprojectionErrorInvertIntrinsics {
InvertDistortionModelUsingIntrinsicsBlock(
invariant_intrinsics_,
intrinsics,
- T(observed_distorted_x_), T(observed_distorted_y_),
- &observed_undistorted_normalized_x, &observed_undistorted_normalized_y);
+ T(observed_distorted_x_),
+ T(observed_distorted_y_),
+ &observed_undistorted_normalized_x,
+ &observed_undistorted_normalized_y);
T observed_undistorted_image_x, observed_undistorted_image_y;
- NormalizedToImageSpace(
- intrinsics,
- observed_undistorted_normalized_x, observed_undistorted_normalized_y,
- &observed_undistorted_image_x, &observed_undistorted_image_y);
+ NormalizedToImageSpace(intrinsics,
+ observed_undistorted_normalized_x,
+ observed_undistorted_normalized_y,
+ &observed_undistorted_image_x,
+ &observed_undistorted_image_y);
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
@@ -343,7 +363,7 @@ struct ReprojectionErrorInvertIntrinsics {
return true;
}
- const CameraIntrinsics *invariant_intrinsics_;
+ const CameraIntrinsics* invariant_intrinsics_;
const double observed_distorted_x_;
const double observed_distorted_y_;
const double weight_;
@@ -356,22 +376,23 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
} else {
std::string bundling_message = "";
-#define APPEND_BUNDLING_INTRINSICS(name, flag) \
- if (bundle_intrinsics & flag) { \
- if (!bundling_message.empty()) { \
- bundling_message += ", "; \
- } \
- bundling_message += name; \
- } (void)0
+#define APPEND_BUNDLING_INTRINSICS(name, flag) \
+ if (bundle_intrinsics & flag) { \
+ if (!bundling_message.empty()) { \
+ bundling_message += ", "; \
+ } \
+ bundling_message += name; \
+ } \
+ (void)0
- APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
+ APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
- APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
- APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
- APPEND_BUNDLING_INTRINSICS("k3", BUNDLE_RADIAL_K3);
- APPEND_BUNDLING_INTRINSICS("k4", BUNDLE_RADIAL_K4);
- APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
- APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
+ APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
+ APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
+ APPEND_BUNDLING_INTRINSICS("k3", BUNDLE_RADIAL_K3);
+ APPEND_BUNDLING_INTRINSICS("k4", BUNDLE_RADIAL_K4);
+ APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
+ APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
LG << "Bundling " << bundling_message << ".";
}
@@ -383,7 +404,7 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
// Element with key i maps to the rotation+translation for
// camera at image i.
map<int, Vec6> PackCamerasRotationAndTranslation(
- const EuclideanReconstruction &reconstruction) {
+ const EuclideanReconstruction& reconstruction) {
map<int, Vec6> all_cameras_R_t;
vector<EuclideanCamera> all_cameras = reconstruction.AllCameras();
@@ -399,14 +420,13 @@ map<int, Vec6> PackCamerasRotationAndTranslation(
// Convert camera rotations from angle axis back to rotation matrices.
void UnpackCamerasRotationAndTranslation(
- const map<int, Vec6> &all_cameras_R_t,
- EuclideanReconstruction *reconstruction) {
-
+ const map<int, Vec6>& all_cameras_R_t,
+ EuclideanReconstruction* reconstruction) {
for (map<int, Vec6>::value_type image_and_camera_R_T : all_cameras_R_t) {
const int image = image_and_camera_R_T.first;
const Vec6& camera_R_t = image_and_camera_R_T.second;
- EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ EuclideanCamera* camera = reconstruction->CameraForImage(image);
if (!camera) {
continue;
}
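The packing convention used by these two helpers is a 6-vector per camera: the first three entries hold the rotation as an angle-axis vector, the last three the translation. A minimal sketch of the packing direction, using Eigen's AngleAxis purely for illustration (the actual conversion in this file happens outside the quoted hunks):

#include <Eigen/Geometry>

// Packs rotation matrix R and translation t into the <angle-axis | t> layout.
Eigen::Matrix<double, 6, 1> PackRtSketch(const Eigen::Matrix3d& R,
                                         const Eigen::Vector3d& t) {
  const Eigen::AngleAxisd angle_axis(R);
  Eigen::Matrix<double, 6, 1> packed;
  packed.head<3>() = angle_axis.angle() * angle_axis.axis();  // rotation
  packed.tail<3>() = t;                                       // translation
  return packed;
}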
@@ -421,8 +441,8 @@ void UnpackCamerasRotationAndTranslation(
//
// TODO(sergey): currently uses dense Eigen matrices, best would
// be to use sparse Eigen matrices
-void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
- Mat *eigen_matrix) {
+void CRSMatrixToEigenMatrix(const ceres::CRSMatrix& crs_matrix,
+ Mat* eigen_matrix) {
eigen_matrix->resize(crs_matrix.num_rows, crs_matrix.num_cols);
eigen_matrix->setZero();
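The loop that fills the dense matrix continues past this hunk. For reference, compressed row storage keeps three arrays, and expanding it looks roughly like the sketch below; generic containers are used for illustration rather than the exact ceres::CRSMatrix fields.

#include <vector>
#include <Eigen/Core>

// Expand compressed row storage: row i owns values[rows[i] .. rows[i + 1]),
// and cols[k] gives the column index of values[k].
Eigen::MatrixXd CrsToDenseSketch(int num_rows, int num_cols,
                                 const std::vector<int>& rows,
                                 const std::vector<int>& cols,
                                 const std::vector<double>& values) {
  Eigen::MatrixXd dense = Eigen::MatrixXd::Zero(num_rows, num_cols);
  for (int i = 0; i < num_rows; ++i) {
    for (int k = rows[i]; k < rows[i + 1]; ++k) {
      dense(i, cols[k]) = values[k];
    }
  }
  return dense;
}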
@@ -439,11 +459,11 @@ void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
}
}
-void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
- EuclideanReconstruction *reconstruction,
- map<int, Vec6> *all_cameras_R_t,
- ceres::Problem *problem,
- BundleEvaluation *evaluation) {
+void EuclideanBundlerPerformEvaluation(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction,
+ map<int, Vec6>* all_cameras_R_t,
+ ceres::Problem* problem,
+ BundleEvaluation* evaluation) {
int max_track = tracks.MaxTrack();
// The number of camera rotations equals the number of translations.
int num_cameras = all_cameras_R_t->size();
@@ -451,7 +471,7 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
vector<EuclideanPoint*> minimized_points;
for (int i = 0; i <= max_track; i++) {
- EuclideanPoint *point = reconstruction->PointForTrack(i);
+ EuclideanPoint* point = reconstruction->PointForTrack(i);
if (point) {
// We need to know whether the track is a constant zero weight.
// If so, it wouldn't have a parameter block in the problem.
@@ -477,16 +497,16 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
evaluation->num_cameras = num_cameras;
evaluation->num_points = num_points;
- if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
+ if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
ceres::CRSMatrix evaluated_jacobian;
ceres::Problem::EvaluateOptions eval_options;
// Cameras go first in the ordering.
int max_image = tracks.MaxImage();
for (int i = 0; i <= max_image; i++) {
- const EuclideanCamera *camera = reconstruction->CameraForImage(i);
+ const EuclideanCamera* camera = reconstruction->CameraForImage(i);
if (camera) {
- double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
+ double* current_camera_R_t = &(*all_cameras_R_t)[i](0);
// All cameras are variable now.
problem->SetParameterBlockVariable(current_camera_R_t);
@@ -497,63 +517,65 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
// Points go at the end of the ordering.
for (int i = 0; i < minimized_points.size(); i++) {
- EuclideanPoint *point = minimized_points.at(i);
+ EuclideanPoint* point = minimized_points.at(i);
eval_options.parameter_blocks.push_back(&point->X(0));
}
- problem->Evaluate(eval_options,
- NULL, NULL, NULL,
- &evaluated_jacobian);
+ problem->Evaluate(eval_options, NULL, NULL, NULL, &evaluated_jacobian);
CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
}
}
-template<typename CostFunction>
-void AddResidualBlockToProblemImpl(const CameraIntrinsics *invariant_intrinsics,
- double observed_x, double observed_y,
+template <typename CostFunction>
+void AddResidualBlockToProblemImpl(const CameraIntrinsics* invariant_intrinsics,
+ double observed_x,
+ double observed_y,
double weight,
- double *intrinsics_block,
- double *camera_R_t,
- EuclideanPoint *point,
+ double* intrinsics_block,
+ double* camera_R_t,
+ EuclideanPoint* point,
ceres::Problem* problem) {
- problem->AddResidualBlock(new ceres::AutoDiffCostFunction<
- CostFunction, 2, PackedIntrinsics::NUM_PARAMETERS, 6, 3>(
- new CostFunction(
- invariant_intrinsics,
- observed_x, observed_y,
- weight)),
+ problem->AddResidualBlock(
+ new ceres::AutoDiffCostFunction<CostFunction,
+ 2,
+ PackedIntrinsics::NUM_PARAMETERS,
+ 6,
+ 3>(new CostFunction(
+ invariant_intrinsics, observed_x, observed_y, weight)),
NULL,
intrinsics_block,
camera_R_t,
&point->X(0));
}
-void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
- const Marker &marker,
+void AddResidualBlockToProblem(const CameraIntrinsics* invariant_intrinsics,
+ const Marker& marker,
double marker_weight,
double* intrinsics_block,
- double *camera_R_t,
- EuclideanPoint *point,
+ double* camera_R_t,
+ EuclideanPoint* point,
ceres::Problem* problem) {
if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
- invariant_intrinsics,
- marker.x, marker.y,
- marker_weight,
- intrinsics_block,
- camera_R_t,
- point,
- problem);
+ invariant_intrinsics,
+ marker.x,
+ marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
} else {
AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
- invariant_intrinsics,
- marker.x, marker.y,
- marker_weight,
- intrinsics_block,
- camera_R_t,
- point,
- problem);
+ invariant_intrinsics,
+ marker.x,
+ marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
}
}
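The AutoDiffCostFunction instantiation above expects a functor with two residuals and three parameter blocks: the packed intrinsics, the 6-vector camera R|t, and the 3D point. Below is a hypothetical, heavily simplified functor of that shape; it assumes a plain pinhole projection with intrinsics laid out as [focal, cx, cy, ...] and ignores distortion, so it only illustrates the block layout, not the patch's actual cost functions.

#include "ceres/rotation.h"

struct PinholeReprojectionSketch {
  PinholeReprojectionSketch(double observed_x, double observed_y)
      : observed_x_(observed_x), observed_y_(observed_y) {}

  template <typename T>
  bool operator()(const T* const intrinsics,   // assumed [focal, cx, cy, ...]
                  const T* const camera_R_t,   // angle-axis then translation
                  const T* const point,        // 3D point, 3 values
                  T* residuals) const {
    T p[3];
    ceres::AngleAxisRotatePoint(camera_R_t, point, p);  // rotate the point
    p[0] += camera_R_t[3];                              // then translate it
    p[1] += camera_R_t[4];
    p[2] += camera_R_t[5];
    const T xn = p[0] / p[2];                           // perspective divide
    const T yn = p[1] / p[2];
    residuals[0] = intrinsics[0] * xn + intrinsics[1] - T(observed_x_);
    residuals[1] = intrinsics[0] * yn + intrinsics[2] - T(observed_y_);
    return true;
  }

  double observed_x_, observed_y_;
};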
@@ -566,25 +588,25 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
//
// At this point we only need to bundle points positions, cameras
// are to be totally still here.
-void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
- const vector<Marker> &markers,
- map<int, Vec6> &all_cameras_R_t,
+void EuclideanBundlePointsOnly(const CameraIntrinsics* invariant_intrinsics,
+ const vector<Marker>& markers,
+ map<int, Vec6>& all_cameras_R_t,
double* intrinsics_block,
- EuclideanReconstruction *reconstruction) {
+ EuclideanReconstruction* reconstruction) {
ceres::Problem::Options problem_options;
ceres::Problem problem(problem_options);
int num_residuals = 0;
for (int i = 0; i < markers.size(); ++i) {
- const Marker &marker = markers[i];
- EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
- EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ const Marker& marker = markers[i];
+ EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
+ EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (camera == NULL || point == NULL) {
continue;
}
// Rotation of the camera denoted in angle-axis form, followed by
// the camera translation.
- double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
+ double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
AddResidualBlockToProblem(invariant_intrinsics,
marker,
@@ -625,8 +647,8 @@ void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
} // namespace
-void EuclideanBundle(const Tracks &tracks,
- EuclideanReconstruction *reconstruction) {
+void EuclideanBundle(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction) {
PolynomialCameraIntrinsics empty_intrinsics;
EuclideanBundleCommonIntrinsics(tracks,
BUNDLE_NO_INTRINSICS,
@@ -636,13 +658,12 @@ void EuclideanBundle(const Tracks &tracks,
NULL);
}
-void EuclideanBundleCommonIntrinsics(
- const Tracks &tracks,
- const int bundle_intrinsics,
- const int bundle_constraints,
- EuclideanReconstruction *reconstruction,
- CameraIntrinsics *intrinsics,
- BundleEvaluation *evaluation) {
+void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction* reconstruction,
+ CameraIntrinsics* intrinsics,
+ BundleEvaluation* evaluation) {
LG << "Original intrinsics: " << *intrinsics;
vector<Marker> markers = tracks.AllMarkers();
@@ -661,19 +682,19 @@ void EuclideanBundleCommonIntrinsics(
// Block for minimization has got the following structure:
// <3 elements for angle-axis> <3 elements for translation>
map<int, Vec6> all_cameras_R_t =
- PackCamerasRotationAndTranslation(*reconstruction);
+ PackCamerasRotationAndTranslation(*reconstruction);
// Parameterization used to restrict camera motion for modal solvers.
- ceres::SubsetParameterization *constant_translation_parameterization = NULL;
+ ceres::SubsetParameterization* constant_translation_parameterization = NULL;
if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
- std::vector<int> constant_translation;
+ std::vector<int> constant_translation;
- // First three elements are rotation, ast three are translation.
- constant_translation.push_back(3);
- constant_translation.push_back(4);
- constant_translation.push_back(5);
+    // First three elements are rotation, last three are translation.
+ constant_translation.push_back(3);
+ constant_translation.push_back(4);
+ constant_translation.push_back(5);
- constant_translation_parameterization =
+ constant_translation_parameterization =
new ceres::SubsetParameterization(6, constant_translation);
}
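
For reference, the BUNDLE_NO_TRANSLATION branch above boils down to a single Ceres call: mark indices 3..5 of the packed <angle-axis, translation> block as constant. A minimal sketch, assuming the same pre-Manifold ceres::SubsetParameterization API this file already uses:

    #include <vector>
    #include <ceres/ceres.h>

    // Keep the translation half of a packed camera block constant so a modal
    // (tripod) solve only refines rotation. The problem takes ownership of the
    // parameterization by default.
    void HoldTranslationConstant(ceres::Problem* problem, double* camera_R_t) {
      const std::vector<int> constant_indices = {3, 4, 5};
      problem->AddParameterBlock(camera_R_t, 6);
      problem->SetParameterization(
          camera_R_t, new ceres::SubsetParameterization(6, constant_indices));
    }
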
@@ -683,16 +704,16 @@ void EuclideanBundleCommonIntrinsics(
int num_residuals = 0;
bool have_locked_camera = false;
for (int i = 0; i < markers.size(); ++i) {
- const Marker &marker = markers[i];
- EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
- EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ const Marker& marker = markers[i];
+ EuclideanCamera* camera = reconstruction->CameraForImage(marker.image);
+ EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (camera == NULL || point == NULL) {
continue;
}
     // Rotation of camera, denoted in angle-axis form, followed by the
     // camera translation.
- double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
+ double* current_camera_R_t = &all_cameras_R_t[camera->image](0);
     // Skip residual blocks for markers which have absolutely
     // no effect on the final solution.
@@ -706,7 +727,8 @@ void EuclideanBundleCommonIntrinsics(
point,
&problem);
- // We lock the first camera to better deal with scene orientation ambiguity.
+ // We lock the first camera to better deal with scene orientation
+ // ambiguity.
if (!have_locked_camera) {
problem.SetParameterBlockConstant(current_camera_R_t);
have_locked_camera = true;
@@ -729,7 +751,7 @@ void EuclideanBundleCommonIntrinsics(
}
if (intrinsics->GetDistortionModelType() == DISTORTION_MODEL_DIVISION &&
- (bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
+ (bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
LOG(FATAL) << "Division model doesn't support bundling "
"of tangential distortion";
}
@@ -745,29 +767,29 @@ void EuclideanBundleCommonIntrinsics(
// constant using some macro trickery.
std::vector<int> constant_intrinsics;
-#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
- if (!(bundle_intrinsics & bundle_enum) || \
- !packed_intrinsics.IsParameterDefined(offset)) { \
- constant_intrinsics.push_back(offset); \
- }
+#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
+ if (!(bundle_intrinsics & bundle_enum) || \
+ !packed_intrinsics.IsParameterDefined(offset)) { \
+ constant_intrinsics.push_back(offset); \
+ }
MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH,
PackedIntrinsics::OFFSET_FOCAL_LENGTH);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT,
PackedIntrinsics::OFFSET_PRINCIPAL_POINT_X);
MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT,
PackedIntrinsics::OFFSET_PRINCIPAL_POINT_Y);
- MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, PackedIntrinsics::OFFSET_K1);
- MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, PackedIntrinsics::OFFSET_K2);
- MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K3, PackedIntrinsics::OFFSET_K3);
- MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K4, PackedIntrinsics::OFFSET_K4);
- MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, PackedIntrinsics::OFFSET_P1);
- MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, PackedIntrinsics::OFFSET_P2);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, PackedIntrinsics::OFFSET_K1);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, PackedIntrinsics::OFFSET_K2);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K3, PackedIntrinsics::OFFSET_K3);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K4, PackedIntrinsics::OFFSET_K4);
+ MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, PackedIntrinsics::OFFSET_P1);
+ MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, PackedIntrinsics::OFFSET_P2);
#undef MAYBE_SET_CONSTANT
if (!constant_intrinsics.empty()) {
- ceres::SubsetParameterization *subset_parameterization =
- new ceres::SubsetParameterization(PackedIntrinsics::NUM_PARAMETERS,
- constant_intrinsics);
+ ceres::SubsetParameterization* subset_parameterization =
+ new ceres::SubsetParameterization(PackedIntrinsics::NUM_PARAMETERS,
+ constant_intrinsics);
problem.SetParameterization(intrinsics_block, subset_parameterization);
}
@@ -800,8 +822,8 @@ void EuclideanBundleCommonIntrinsics(
LG << "Final intrinsics: " << *intrinsics;
if (evaluation) {
- EuclideanBundlerPerformEvaluation(tracks, reconstruction, &all_cameras_R_t,
- &problem, evaluation);
+ EuclideanBundlerPerformEvaluation(
+ tracks, reconstruction, &all_cameras_R_t, &problem, evaluation);
}
// Separate step to adjust positions of tracks which are
@@ -828,8 +850,8 @@ void EuclideanBundleCommonIntrinsics(
}
}
-void ProjectiveBundle(const Tracks & /*tracks*/,
- ProjectiveReconstruction * /*reconstruction*/) {
+void ProjectiveBundle(const Tracks& /*tracks*/,
+ ProjectiveReconstruction* /*reconstruction*/) {
// TODO(keir): Implement this! This can't work until we have a better bundler
// than SSBA, since SSBA has no support for projective bundling.
}
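
The bundlers above all operate on the packed camera blocks described earlier: three angle-axis values followed by three translation values per camera. A minimal sketch of that packing, assuming Eigen's default column-major storage (which is what ceres::RotationMatrixToAngleAxis expects for its raw-pointer overload):

    #include <Eigen/Core>
    #include <ceres/rotation.h>

    typedef Eigen::Matrix<double, 6, 1> Vec6;

    // Pack a camera pose into the <3 angle-axis, 3 translation> block layout
    // used by the bundler above.
    Vec6 PackPose(const Eigen::Matrix3d& R, const Eigen::Vector3d& t) {
      Vec6 camera_R_t;
      ceres::RotationMatrixToAngleAxis(R.data(), camera_R_t.data());
      camera_R_t.tail<3>() = t;
      return camera_R_t;
    }
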
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.h b/intern/libmv/libmv/simple_pipeline/bundle.h
index 5f420da0045..662313b396a 100644
--- a/intern/libmv/libmv/simple_pipeline/bundle.h
+++ b/intern/libmv/libmv/simple_pipeline/bundle.h
@@ -31,11 +31,8 @@ class ProjectiveReconstruction;
class Tracks;
struct BundleEvaluation {
- BundleEvaluation() :
- num_cameras(0),
- num_points(0),
- evaluate_jacobian(false) {
- }
+ BundleEvaluation()
+ : num_cameras(0), num_points(0), evaluate_jacobian(false) {}
   // Number of cameras that appeared in the bundle adjustment problem
int num_cameras;
@@ -72,8 +69,8 @@ struct BundleEvaluation {
\sa EuclideanResect, EuclideanIntersect, EuclideanReconstructTwoFrames
*/
-void EuclideanBundle(const Tracks &tracks,
- EuclideanReconstruction *reconstruction);
+void EuclideanBundle(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction);
/*!
Refine camera poses and 3D coordinates using bundle adjustment.
@@ -109,9 +106,7 @@ enum BundleIntrinsics {
BUNDLE_RADIAL_K2 = (1 << 3),
BUNDLE_RADIAL_K3 = (1 << 4),
BUNDLE_RADIAL_K4 = (1 << 5),
- BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 |
- BUNDLE_RADIAL_K2 |
- BUNDLE_RADIAL_K3 |
+ BUNDLE_RADIAL = (BUNDLE_RADIAL_K1 | BUNDLE_RADIAL_K2 | BUNDLE_RADIAL_K3 |
BUNDLE_RADIAL_K4),
BUNDLE_TANGENTIAL_P1 = (1 << 6),
@@ -122,13 +117,12 @@ enum BundleConstraints {
BUNDLE_NO_CONSTRAINTS = 0,
BUNDLE_NO_TRANSLATION = 1,
};
-void EuclideanBundleCommonIntrinsics(
- const Tracks &tracks,
- const int bundle_intrinsics,
- const int bundle_constraints,
- EuclideanReconstruction *reconstruction,
- CameraIntrinsics *intrinsics,
- BundleEvaluation *evaluation = NULL);
+void EuclideanBundleCommonIntrinsics(const Tracks& tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction* reconstruction,
+ CameraIntrinsics* intrinsics,
+ BundleEvaluation* evaluation = NULL);
/*!
Refine camera poses and 3D coordinates using bundle adjustment.
@@ -147,10 +141,9 @@ void EuclideanBundleCommonIntrinsics(
\sa ProjectiveResect, ProjectiveIntersect, ProjectiveReconstructTwoFrames
*/
-void ProjectiveBundle(const Tracks &tracks,
- ProjectiveReconstruction *reconstruction);
+void ProjectiveBundle(const Tracks& tracks,
+ ProjectiveReconstruction* reconstruction);
} // namespace libmv
-#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H
-
+#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H
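
An illustrative call of the refinement entry point declared above, composing the BundleIntrinsics bits as a mask; the wrapper function name and the choice of refined parameters are made up for this example.

    #include "libmv/simple_pipeline/bundle.h"

    // Refine focal length plus the first two radial coefficients, leaving
    // camera motion unconstrained. The parameter choice is illustrative only.
    void RefineFocalAndRadial(const libmv::Tracks& tracks,
                              libmv::EuclideanReconstruction* reconstruction,
                              libmv::CameraIntrinsics* intrinsics) {
      const int bundle_intrinsics = libmv::BUNDLE_FOCAL_LENGTH |
                                    libmv::BUNDLE_RADIAL_K1 |
                                    libmv::BUNDLE_RADIAL_K2;
      libmv::EuclideanBundleCommonIntrinsics(tracks,
                                             bundle_intrinsics,
                                             libmv::BUNDLE_NO_CONSTRAINTS,
                                             reconstruction,
                                             intrinsics);
    }
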
diff --git a/intern/libmv/libmv/simple_pipeline/callbacks.h b/intern/libmv/libmv/simple_pipeline/callbacks.h
index 58f7b0d3cc9..a6855a9e5e7 100644
--- a/intern/libmv/libmv/simple_pipeline/callbacks.h
+++ b/intern/libmv/libmv/simple_pipeline/callbacks.h
@@ -26,7 +26,7 @@ namespace libmv {
class ProgressUpdateCallback {
public:
virtual ~ProgressUpdateCallback() {}
- virtual void invoke(double progress, const char *message) = 0;
+ virtual void invoke(double progress, const char* message) = 0;
};
} // namespace libmv
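
A minimal hypothetical implementation of the callback interface above; it assumes progress is reported in the 0..1 range, which the header does not state explicitly.

    #include <cstdio>
    #include "libmv/simple_pipeline/callbacks.h"

    // Print progress updates to stdout.
    class PrintfProgressCallback : public libmv::ProgressUpdateCallback {
     public:
      void invoke(double progress, const char* message) override {
        std::printf("[%3d%%] %s\n", static_cast<int>(progress * 100.0), message);
      }
    };
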
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
index ccb6e3d34c8..b86e316b139 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -29,13 +29,10 @@ namespace libmv {
namespace internal {
LookupWarpGrid::LookupWarpGrid()
- : offset_(NULL),
- width_(0),
- height_(0),
- overscan_(0.0),
- threads_(1) {}
+ : offset_(NULL), width_(0), height_(0), overscan_(0.0), threads_(1) {
+}
-LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid &from)
+LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid& from)
: offset_(NULL),
width_(from.width_),
height_(from.height_),
@@ -48,11 +45,11 @@ LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid &from)
}
LookupWarpGrid::~LookupWarpGrid() {
- delete [] offset_;
+ delete[] offset_;
}
void LookupWarpGrid::Reset() {
- delete [] offset_;
+ delete[] offset_;
offset_ = NULL;
}
@@ -64,16 +61,16 @@ void LookupWarpGrid::SetThreads(int threads) {
} // namespace internal
CameraIntrinsics::CameraIntrinsics()
- : image_width_(0),
- image_height_(0),
- K_(Mat3::Identity()) {}
+ : image_width_(0), image_height_(0), K_(Mat3::Identity()) {
+}
-CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics &from)
+CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics& from)
: image_width_(from.image_width_),
image_height_(from.image_height_),
K_(from.K_),
distort_(from.distort_),
- undistort_(from.undistort_) {}
+ undistort_(from.undistort_) {
+}
// Set the image size in pixels.
void CameraIntrinsics::SetImageSize(int width, int height) {
@@ -89,16 +86,14 @@ void CameraIntrinsics::SetK(const Mat3 new_k) {
}
// Set both x and y focal length in pixels.
-void CameraIntrinsics::SetFocalLength(double focal_x,
- double focal_y) {
+void CameraIntrinsics::SetFocalLength(double focal_x, double focal_y) {
K_(0, 0) = focal_x;
K_(1, 1) = focal_y;
ResetLookupGrids();
}
// Set principal point in pixels.
-void CameraIntrinsics::SetPrincipalPoint(double cx,
- double cy) {
+void CameraIntrinsics::SetPrincipalPoint(double cx, double cy) {
K_(0, 2) = cx;
K_(1, 2) = cy;
ResetLookupGrids();
@@ -112,16 +107,16 @@ void CameraIntrinsics::SetThreads(int threads) {
void CameraIntrinsics::ImageSpaceToNormalized(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const {
+ double* normalized_x,
+ double* normalized_y) const {
*normalized_x = (image_x - principal_point_x()) / focal_length_x();
*normalized_y = (image_y - principal_point_y()) / focal_length_y();
}
void CameraIntrinsics::NormalizedToImageSpace(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const {
+ double* image_x,
+ double* image_y) const {
*image_x = normalized_x * focal_length_x() + principal_point_x();
*image_y = normalized_y * focal_length_y() + principal_point_y();
}
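
The two conversions above are just the pinhole calibration matrix K and its inverse applied to a single point; written out as standalone functions they are:

    // Normalized -> image: scale by the focal length, shift by the principal point.
    void NormalizedToImage(double fx, double fy, double cx, double cy,
                           double nx, double ny,
                           double* image_x, double* image_y) {
      *image_x = nx * fx + cx;
      *image_y = ny * fy + cy;
    }

    // Image -> normalized: the exact inverse of the above.
    void ImageToNormalized(double fx, double fy, double cx, double cy,
                           double image_x, double image_y,
                           double* nx, double* ny) {
      *nx = (image_x - cx) / fx;
      *ny = (image_y - cy) / fy;
    }
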
@@ -148,14 +143,13 @@ void CameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
// Polynomial model.
-PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
- : CameraIntrinsics() {
+PolynomialCameraIntrinsics::PolynomialCameraIntrinsics() : CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0);
SetTangentialDistortion(0.0, 0.0);
}
PolynomialCameraIntrinsics::PolynomialCameraIntrinsics(
- const PolynomialCameraIntrinsics &from)
+ const PolynomialCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetRadialDistortion(from.k1(), from.k2(), from.k3());
SetTangentialDistortion(from.p1(), from.p2());
@@ -170,8 +164,7 @@ void PolynomialCameraIntrinsics::SetRadialDistortion(double k1,
ResetLookupGrids();
}
-void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1,
- double p2) {
+void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
parameters_[OFFSET_P1] = p1;
parameters_[OFFSET_P2] = p2;
ResetLookupGrids();
@@ -179,31 +172,36 @@ void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1,
void PolynomialCameraIntrinsics::ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const {
+ double* image_x,
+ double* image_y) const {
ApplyPolynomialDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(), k3(),
- p1(), p2(),
+ k1(),
+ k2(),
+ k3(),
+ p1(),
+ p2(),
normalized_x,
normalized_y,
image_x,
image_y);
}
-void PolynomialCameraIntrinsics::InvertIntrinsics(
- double image_x,
- double image_y,
- double *normalized_x,
- double *normalized_y) const {
+void PolynomialCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double* normalized_x,
+ double* normalized_y) const {
InvertPolynomialDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(), k3(),
- p1(), p2(),
+ k1(),
+ k2(),
+ k3(),
+ p1(),
+ p2(),
image_x,
image_y,
normalized_x,
@@ -230,25 +228,22 @@ void PolynomialCameraIntrinsics::Unpack(
packed_intrinsics.GetK2(),
packed_intrinsics.GetK3());
- SetTangentialDistortion(packed_intrinsics.GetP1(),
- packed_intrinsics.GetP2());
+ SetTangentialDistortion(packed_intrinsics.GetP1(), packed_intrinsics.GetP2());
}
// Division model.
-DivisionCameraIntrinsics::DivisionCameraIntrinsics()
- : CameraIntrinsics() {
+DivisionCameraIntrinsics::DivisionCameraIntrinsics() : CameraIntrinsics() {
SetDistortion(0.0, 0.0);
}
DivisionCameraIntrinsics::DivisionCameraIntrinsics(
- const DivisionCameraIntrinsics &from)
+ const DivisionCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetDistortion(from.k1(), from.k1());
}
-void DivisionCameraIntrinsics::SetDistortion(double k1,
- double k2) {
+void DivisionCameraIntrinsics::SetDistortion(double k1, double k2) {
parameters_[OFFSET_K1] = k1;
parameters_[OFFSET_K2] = k2;
ResetLookupGrids();
@@ -256,13 +251,14 @@ void DivisionCameraIntrinsics::SetDistortion(double k1,
void DivisionCameraIntrinsics::ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const {
+ double* image_x,
+ double* image_y) const {
ApplyDivisionDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(),
+ k1(),
+ k2(),
normalized_x,
normalized_y,
image_x,
@@ -271,21 +267,21 @@ void DivisionCameraIntrinsics::ApplyIntrinsics(double normalized_x,
void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const {
+ double* normalized_x,
+ double* normalized_y) const {
InvertDivisionDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(),
+ k1(),
+ k2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
-void DivisionCameraIntrinsics::Pack(
- PackedIntrinsics* packed_intrinsics) const {
+void DivisionCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
@@ -301,13 +297,11 @@ void DivisionCameraIntrinsics::Unpack(
// Nuke model.
-NukeCameraIntrinsics::NukeCameraIntrinsics()
- : CameraIntrinsics() {
+NukeCameraIntrinsics::NukeCameraIntrinsics() : CameraIntrinsics() {
SetDistortion(0.0, 0.0);
}
-NukeCameraIntrinsics::NukeCameraIntrinsics(
- const NukeCameraIntrinsics &from)
+NukeCameraIntrinsics::NukeCameraIntrinsics(const NukeCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetDistortion(from.k1(), from.k2());
}
@@ -320,14 +314,16 @@ void NukeCameraIntrinsics::SetDistortion(double k1, double k2) {
void NukeCameraIntrinsics::ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const {
+ double* image_x,
+ double* image_y) const {
ApplyNukeDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- image_width(), image_height(),
- k1(), k2(),
+ image_width(),
+ image_height(),
+ k1(),
+ k2(),
normalized_x,
normalized_y,
image_x,
@@ -335,31 +331,31 @@ void NukeCameraIntrinsics::ApplyIntrinsics(double normalized_x,
}
void NukeCameraIntrinsics::InvertIntrinsics(double image_x,
- double image_y,
- double *normalized_x,
- double *normalized_y) const {
+ double image_y,
+ double* normalized_x,
+ double* normalized_y) const {
InvertNukeDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- image_width(), image_height(),
- k1(), k2(),
+ image_width(),
+ image_height(),
+ k1(),
+ k2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
-void NukeCameraIntrinsics::Pack(
- PackedIntrinsics* packed_intrinsics) const {
+void NukeCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
packed_intrinsics->SetK2(k2());
}
-void NukeCameraIntrinsics::Unpack(
- const PackedIntrinsics& packed_intrinsics) {
+void NukeCameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
CameraIntrinsics::Unpack(packed_intrinsics);
SetDistortion(packed_intrinsics.GetK1(), packed_intrinsics.GetK2());
@@ -367,14 +363,12 @@ void NukeCameraIntrinsics::Unpack(
// Brown model.
-BrownCameraIntrinsics::BrownCameraIntrinsics()
- : CameraIntrinsics() {
+BrownCameraIntrinsics::BrownCameraIntrinsics() : CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0, 0.0);
SetTangentialDistortion(0.0, 0.0);
}
-BrownCameraIntrinsics::BrownCameraIntrinsics(
- const BrownCameraIntrinsics &from)
+BrownCameraIntrinsics::BrownCameraIntrinsics(const BrownCameraIntrinsics& from)
: CameraIntrinsics(from) {
SetRadialDistortion(from.k1(), from.k2(), from.k3(), from.k4());
SetTangentialDistortion(from.p1(), from.p2());
@@ -391,8 +385,7 @@ void BrownCameraIntrinsics::SetRadialDistortion(double k1,
ResetLookupGrids();
}
-void BrownCameraIntrinsics::SetTangentialDistortion(double p1,
- double p2) {
+void BrownCameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
parameters_[OFFSET_P1] = p1;
parameters_[OFFSET_P2] = p2;
ResetLookupGrids();
@@ -400,39 +393,45 @@ void BrownCameraIntrinsics::SetTangentialDistortion(double p1,
void BrownCameraIntrinsics::ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const {
+ double* image_x,
+ double* image_y) const {
ApplyBrownDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(), k3(), k4(),
- p1(), p2(),
+ k1(),
+ k2(),
+ k3(),
+ k4(),
+ p1(),
+ p2(),
normalized_x,
normalized_y,
image_x,
image_y);
}
-void BrownCameraIntrinsics::InvertIntrinsics(
- double image_x,
- double image_y,
- double *normalized_x,
- double *normalized_y) const {
+void BrownCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double* normalized_x,
+ double* normalized_y) const {
InvertBrownDistortionModel(focal_length_x(),
focal_length_y(),
principal_point_x(),
principal_point_y(),
- k1(), k2(), k3(), k4(),
- p1(), p2(),
+ k1(),
+ k2(),
+ k3(),
+ k4(),
+ p1(),
+ p2(),
image_x,
image_y,
normalized_x,
normalized_y);
}
-void BrownCameraIntrinsics::Pack(
- PackedIntrinsics* packed_intrinsics) const {
+void BrownCameraIntrinsics::Pack(PackedIntrinsics* packed_intrinsics) const {
CameraIntrinsics::Pack(packed_intrinsics);
packed_intrinsics->SetK1(k1());
@@ -444,8 +443,7 @@ void BrownCameraIntrinsics::Pack(
packed_intrinsics->SetP2(p2());
}
-void BrownCameraIntrinsics::Unpack(
- const PackedIntrinsics& packed_intrinsics) {
+void BrownCameraIntrinsics::Unpack(const PackedIntrinsics& packed_intrinsics) {
CameraIntrinsics::Unpack(packed_intrinsics);
SetRadialDistortion(packed_intrinsics.GetK1(),
@@ -453,72 +451,65 @@ void BrownCameraIntrinsics::Unpack(
packed_intrinsics.GetK3(),
packed_intrinsics.GetK4());
- SetTangentialDistortion(packed_intrinsics.GetP1(),
- packed_intrinsics.GetP2());
+ SetTangentialDistortion(packed_intrinsics.GetP1(), packed_intrinsics.GetP2());
}
-std::ostream& operator <<(std::ostream &os,
- const CameraIntrinsics &intrinsics) {
+std::ostream& operator<<(std::ostream& os, const CameraIntrinsics& intrinsics) {
   if (intrinsics.focal_length_x() == intrinsics.focal_length_y()) {
os << "f=" << intrinsics.focal_length();
} else {
- os << "fx=" << intrinsics.focal_length_x()
+ os << "fx=" << intrinsics.focal_length_x()
<< " fy=" << intrinsics.focal_length_y();
}
os << " cx=" << intrinsics.principal_point_x()
<< " cy=" << intrinsics.principal_point_y()
- << " w=" << intrinsics.image_width()
- << " h=" << intrinsics.image_height();
+ << " w=" << intrinsics.image_width() << " h=" << intrinsics.image_height();
-#define PRINT_NONZERO_COEFFICIENT(intrinsics, coeff) \
- { \
- if (intrinsics->coeff() != 0.0) { \
- os << " " #coeff "=" << intrinsics->coeff(); \
- } \
- } (void) 0
+#define PRINT_NONZERO_COEFFICIENT(intrinsics, coeff) \
+ { \
+ if (intrinsics->coeff() != 0.0) { \
+ os << " " #coeff "=" << intrinsics->coeff(); \
+ } \
+ } \
+ (void)0
switch (intrinsics.GetDistortionModelType()) {
- case DISTORTION_MODEL_POLYNOMIAL:
- {
- const PolynomialCameraIntrinsics *polynomial_intrinsics =
- static_cast<const PolynomialCameraIntrinsics *>(&intrinsics);
- PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k1);
- PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k2);
- PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k3);
- PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p1);
- PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p2);
- break;
- }
- case DISTORTION_MODEL_DIVISION:
- {
- const DivisionCameraIntrinsics *division_intrinsics =
- static_cast<const DivisionCameraIntrinsics *>(&intrinsics);
- PRINT_NONZERO_COEFFICIENT(division_intrinsics, k1);
- PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
- break;
- }
- case DISTORTION_MODEL_NUKE:
- {
- const NukeCameraIntrinsics *nuke_intrinsics =
- static_cast<const NukeCameraIntrinsics *>(&intrinsics);
- PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k1);
- PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k2);
- break;
- }
- case DISTORTION_MODEL_BROWN:
- {
- const BrownCameraIntrinsics *brown_intrinsics =
- static_cast<const BrownCameraIntrinsics *>(&intrinsics);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k1);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k2);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k3);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k4);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, p1);
- PRINT_NONZERO_COEFFICIENT(brown_intrinsics, p2);
- break;
- }
- default:
- LOG(FATAL) << "Unknown distortion model.";
+ case DISTORTION_MODEL_POLYNOMIAL: {
+ const PolynomialCameraIntrinsics* polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics*>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k2);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k3);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p2);
+ break;
+ }
+ case DISTORTION_MODEL_DIVISION: {
+ const DivisionCameraIntrinsics* division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics*>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
+ break;
+ }
+ case DISTORTION_MODEL_NUKE: {
+ const NukeCameraIntrinsics* nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics*>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k2);
+ break;
+ }
+ case DISTORTION_MODEL_BROWN: {
+ const BrownCameraIntrinsics* brown_intrinsics =
+ static_cast<const BrownCameraIntrinsics*>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k2);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k3);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, k4);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, p1);
+ PRINT_NONZERO_COEFFICIENT(brown_intrinsics, p2);
+ break;
+ }
+ default: LOG(FATAL) << "Unknown distortion model.";
}
#undef PRINT_NONZERO_COEFFICIENT
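
For context, the polynomial model manipulated throughout this file is the usual radial (k1..k3) plus tangential (p1, p2) form applied to normalized coordinates. The body of ApplyPolynomialDistortionModel is not part of this diff, so the sketch below states the standard formulation rather than quoting libmv's implementation.

    // Standard polynomial radial + tangential distortion on normalized
    // coordinates, followed by the pinhole mapping to pixels.
    void ApplyPolynomialDistortion(double k1, double k2, double k3,
                                   double p1, double p2,
                                   double fx, double fy, double cx, double cy,
                                   double x, double y,
                                   double* image_x, double* image_y) {
      const double r2 = x * x + y * y;
      const double radial = 1.0 + r2 * (k1 + r2 * (k2 + r2 * k3));
      const double xd = x * radial + 2.0 * p1 * x * y + p2 * (r2 + 2.0 * x * x);
      const double yd = y * radial + p1 * (r2 + 2.0 * y * y) + 2.0 * p2 * x * y;
      *image_x = fx * xd + cx;
      *image_y = fy * yd + cy;
    }
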
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
index ba67ec468dc..efe0735bd93 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -43,11 +43,11 @@ namespace internal {
class LookupWarpGrid {
public:
LookupWarpGrid();
- LookupWarpGrid(const LookupWarpGrid &from);
+ LookupWarpGrid(const LookupWarpGrid& from);
~LookupWarpGrid();
   // Width and height of the image, measured in pixels.
- int width() const { return width_; }
+ int width() const { return width_; }
int height() const { return height_; }
// Overscan factor of the image, so that
@@ -61,8 +61,8 @@ class LookupWarpGrid {
//
// See comment for CameraIntrinsics::DistortBuffer to get more
// details about what overscan is.
- template<typename WarpFunction>
- void Update(const CameraIntrinsics &intrinsics,
+ template <typename WarpFunction>
+ void Update(const CameraIntrinsics& intrinsics,
int width,
int height,
double overscan);
@@ -71,12 +71,12 @@ class LookupWarpGrid {
//
// See comment for CameraIntrinsics::DistortBuffer to get more
// details about template type.
- template<typename PixelType>
- void Apply(const PixelType *input_buffer,
+ template <typename PixelType>
+ void Apply(const PixelType* input_buffer,
int width,
int height,
int channels,
- PixelType *output_buffer);
+ PixelType* output_buffer);
// Reset lookup grids.
// This will tag the grid for update without re-computing it.
@@ -105,15 +105,15 @@ class LookupWarpGrid {
//
   // width and height correspond to the size of the buffer which will
// be warped later.
- template<typename WarpFunction>
- void Compute(const CameraIntrinsics &intrinsics,
+ template <typename WarpFunction>
+ void Compute(const CameraIntrinsics& intrinsics,
int width,
int height,
double overscan);
   // This is a buffer which contains the per-pixel offsets that map
   // pixels of the input buffer through the warping function.
- Offset *offset_;
+ Offset* offset_;
// Dimensions of the image this lookup grid processes.
int width_, height_;
@@ -130,19 +130,19 @@ class LookupWarpGrid {
class CameraIntrinsics {
public:
CameraIntrinsics();
- CameraIntrinsics(const CameraIntrinsics &from);
+ CameraIntrinsics(const CameraIntrinsics& from);
virtual ~CameraIntrinsics() {}
virtual DistortionModelType GetDistortionModelType() const = 0;
- int image_width() const { return image_width_; }
+ int image_width() const { return image_width_; }
int image_height() const { return image_height_; }
- const Mat3 &K() const { return K_; }
+ const Mat3& K() const { return K_; }
- double focal_length() const { return K_(0, 0); }
- double focal_length_x() const { return K_(0, 0); }
- double focal_length_y() const { return K_(1, 1); }
+ double focal_length() const { return K_(0, 0); }
+ double focal_length_x() const { return K_(0, 0); }
+ double focal_length_y() const { return K_(1, 1); }
double principal_point_x() const { return K_(0, 2); }
double principal_point_y() const { return K_(1, 2); }
@@ -166,14 +166,14 @@ class CameraIntrinsics {
// Convert image space coordinates to normalized.
void ImageSpaceToNormalized(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const;
+ double* normalized_x,
+ double* normalized_y) const;
// Convert normalized coordinates to image space.
void NormalizedToImageSpace(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const;
+ double* image_x,
+ double* image_y) const;
// Apply camera intrinsics to the normalized point to get image coordinates.
//
@@ -182,8 +182,8 @@ class CameraIntrinsics {
// coordinates in pixels.
virtual void ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const = 0;
+ double* image_x,
+ double* image_y) const = 0;
// Invert camera intrinsics on the image point to get normalized coordinates.
//
@@ -191,8 +191,8 @@ class CameraIntrinsics {
// coordinates to get normalized camera coordinates.
virtual void InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const = 0;
+ double* normalized_x,
+ double* normalized_y) const = 0;
virtual void Pack(PackedIntrinsics* packed_intrinsics) const;
virtual void Unpack(const PackedIntrinsics& packed_intrinsics);
@@ -218,13 +218,13 @@ class CameraIntrinsics {
// But in fact PixelType might be any type for which multiplication by
// a scalar and addition are implemented. For example PixelType might be
// Vec3 as well.
- template<typename PixelType>
- void DistortBuffer(const PixelType *input_buffer,
+ template <typename PixelType>
+ void DistortBuffer(const PixelType* input_buffer,
int width,
int height,
double overscan,
int channels,
- PixelType *output_buffer);
+ PixelType* output_buffer);
   // Undistort an image using the current camera intrinsics
//
@@ -247,13 +247,13 @@ class CameraIntrinsics {
// But in fact PixelType might be any type for which multiplication by
// a scalar and addition are implemented. For example PixelType might be
// Vec3 as well.
- template<typename PixelType>
- void UndistortBuffer(const PixelType *input_buffer,
+ template <typename PixelType>
+ void UndistortBuffer(const PixelType* input_buffer,
int width,
int height,
double overscan,
int channels,
- PixelType *output_buffer);
+ PixelType* output_buffer);
private:
// This is the size of the image. This is necessary to, for example, handle
@@ -290,7 +290,7 @@ class PolynomialCameraIntrinsics : public CameraIntrinsics {
};
PolynomialCameraIntrinsics();
- PolynomialCameraIntrinsics(const PolynomialCameraIntrinsics &from);
+ PolynomialCameraIntrinsics(const PolynomialCameraIntrinsics& from);
DistortionModelType GetDistortionModelType() const override {
return DISTORTION_MODEL_POLYNOMIAL;
@@ -315,8 +315,8 @@ class PolynomialCameraIntrinsics : public CameraIntrinsics {
// coordinates in pixels.
void ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const override;
+ double* image_x,
+ double* image_y) const override;
// Invert camera intrinsics on the image point to get normalized coordinates.
//
@@ -324,8 +324,8 @@ class PolynomialCameraIntrinsics : public CameraIntrinsics {
// coordinates to get normalized camera coordinates.
void InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const override;
+ double* normalized_x,
+ double* normalized_y) const override;
virtual void Pack(PackedIntrinsics* packed_intrinsics) const override;
virtual void Unpack(const PackedIntrinsics& packed_intrinsics) override;
@@ -352,7 +352,7 @@ class DivisionCameraIntrinsics : public CameraIntrinsics {
};
DivisionCameraIntrinsics();
- DivisionCameraIntrinsics(const DivisionCameraIntrinsics &from);
+ DivisionCameraIntrinsics(const DivisionCameraIntrinsics& from);
DistortionModelType GetDistortionModelType() const override {
return DISTORTION_MODEL_DIVISION;
@@ -371,8 +371,8 @@ class DivisionCameraIntrinsics : public CameraIntrinsics {
// coordinates in pixels.
void ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const override;
+ double* image_x,
+ double* image_y) const override;
// Invert camera intrinsics on the image point to get normalized coordinates.
//
@@ -380,8 +380,8 @@ class DivisionCameraIntrinsics : public CameraIntrinsics {
// coordinates to get normalized camera coordinates.
void InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const override;
+ double* normalized_x,
+ double* normalized_y) const override;
virtual void Pack(PackedIntrinsics* packed_intrinsics) const override;
virtual void Unpack(const PackedIntrinsics& packed_intrinsics) override;
@@ -405,7 +405,7 @@ class NukeCameraIntrinsics : public CameraIntrinsics {
};
NukeCameraIntrinsics();
- NukeCameraIntrinsics(const NukeCameraIntrinsics &from);
+ NukeCameraIntrinsics(const NukeCameraIntrinsics& from);
DistortionModelType GetDistortionModelType() const override {
return DISTORTION_MODEL_NUKE;
@@ -424,8 +424,8 @@ class NukeCameraIntrinsics : public CameraIntrinsics {
// coordinates in pixels.
void ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const override;
+ double* image_x,
+ double* image_y) const override;
// Invert camera intrinsics on the image point to get normalized coordinates.
//
@@ -433,8 +433,8 @@ class NukeCameraIntrinsics : public CameraIntrinsics {
// coordinates to get normalized camera coordinates.
void InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const override;
+ double* normalized_x,
+ double* normalized_y) const override;
virtual void Pack(PackedIntrinsics* packed_intrinsics) const override;
virtual void Unpack(const PackedIntrinsics& packed_intrinsics) override;
@@ -462,7 +462,7 @@ class BrownCameraIntrinsics : public CameraIntrinsics {
};
BrownCameraIntrinsics();
- BrownCameraIntrinsics(const BrownCameraIntrinsics &from);
+ BrownCameraIntrinsics(const BrownCameraIntrinsics& from);
DistortionModelType GetDistortionModelType() const override {
return DISTORTION_MODEL_BROWN;
@@ -488,8 +488,8 @@ class BrownCameraIntrinsics : public CameraIntrinsics {
// coordinates in pixels.
void ApplyIntrinsics(double normalized_x,
double normalized_y,
- double *image_x,
- double *image_y) const override;
+ double* image_x,
+ double* image_y) const override;
// Invert camera intrinsics on the image point to get normalized coordinates.
//
@@ -497,8 +497,8 @@ class BrownCameraIntrinsics : public CameraIntrinsics {
// coordinates to get normalized camera coordinates.
void InvertIntrinsics(double image_x,
double image_y,
- double *normalized_x,
- double *normalized_y) const override;
+ double* normalized_x,
+ double* normalized_y) const override;
virtual void Pack(PackedIntrinsics* packed_intrinsics) const override;
virtual void Unpack(const PackedIntrinsics& packed_intrinsics) override;
@@ -507,10 +507,8 @@ class BrownCameraIntrinsics : public CameraIntrinsics {
double parameters_[NUM_PARAMETERS];
};
-
/// A human-readable representation of the camera intrinsic parameters.
-std::ostream& operator <<(std::ostream &os,
- const CameraIntrinsics &intrinsics);
+std::ostream& operator<<(std::ostream& os, const CameraIntrinsics& intrinsics);
} // namespace libmv
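
A short usage sketch of the interface declared above: configure a concrete model, use it through the CameraIntrinsics base class, and print it with the operator<< declared at the end of the header. The parameter values are illustrative only.

    #include <iostream>
    #include "libmv/simple_pipeline/camera_intrinsics.h"

    void PrintIntrinsicsExample() {
      libmv::PolynomialCameraIntrinsics polynomial;
      polynomial.SetImageSize(1920, 1080);
      polynomial.SetFocalLength(1300.0, 1300.0);
      polynomial.SetPrincipalPoint(960.0, 540.0);
      polynomial.SetRadialDistortion(-0.1, 0.0, 0.0);

      const libmv::CameraIntrinsics& intrinsics = polynomial;
      double image_x, image_y;
      intrinsics.ApplyIntrinsics(0.1, -0.05, &image_x, &image_y);
      std::cout << intrinsics << std::endl;
    }
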
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
index e1b53992dfd..c8c4700f5c6 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
@@ -25,11 +25,11 @@ namespace {
 // FIXME: C++ template limitations make things complicated,
// but maybe there is a simpler method.
struct ApplyIntrinsicsFunction {
- ApplyIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ ApplyIntrinsicsFunction(const CameraIntrinsics& intrinsics,
double x,
double y,
- double *warp_x,
- double *warp_y) {
+ double* warp_x,
+ double* warp_y) {
double normalized_x, normalized_y;
intrinsics.ImageSpaceToNormalized(x, y, &normalized_x, &normalized_y);
intrinsics.ApplyIntrinsics(normalized_x, normalized_y, warp_x, warp_y);
@@ -37,14 +37,15 @@ struct ApplyIntrinsicsFunction {
};
struct InvertIntrinsicsFunction {
- InvertIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ InvertIntrinsicsFunction(const CameraIntrinsics& intrinsics,
double x,
double y,
- double *warp_x,
- double *warp_y) {
+ double* warp_x,
+ double* warp_y) {
double normalized_x, normalized_y;
intrinsics.InvertIntrinsics(x, y, &normalized_x, &normalized_y);
- intrinsics.NormalizedToImageSpace(normalized_x, normalized_y, warp_x, warp_y);
+ intrinsics.NormalizedToImageSpace(
+ normalized_x, normalized_y, warp_x, warp_y);
}
};
@@ -53,18 +54,18 @@ struct InvertIntrinsicsFunction {
namespace internal {
// TODO(MatthiasF): downsample lookup
-template<typename WarpFunction>
-void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
+template <typename WarpFunction>
+void LookupWarpGrid::Compute(const CameraIntrinsics& intrinsics,
int width,
int height,
double overscan) {
- double w = (double) width / (1.0 + overscan);
- double h = (double) height / (1.0 + overscan);
- double aspx = (double) w / intrinsics.image_width();
- double aspy = (double) h / intrinsics.image_height();
+ double w = (double)width / (1.0 + overscan);
+ double h = (double)height / (1.0 + overscan);
+ double aspx = (double)w / intrinsics.image_width();
+ double aspy = (double)h / intrinsics.image_height();
#if defined(_OPENMP)
-# pragma omp parallel for schedule(static) num_threads(threads_) \
- if (threads_ > 1 && height > 100)
+# pragma omp parallel for schedule(static) \
+ num_threads(threads_) if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
@@ -76,40 +77,47 @@ void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
warp_y = warp_y * aspy + 0.5 * overscan * h;
int ix = int(warp_x), iy = int(warp_y);
int fx = round((warp_x - ix) * 256), fy = round((warp_y - iy) * 256);
- if (fx == 256) { fx = 0; ix++; } // NOLINT
- if (fy == 256) { fy = 0; iy++; } // NOLINT
+ if (fx == 256) {
+ fx = 0;
+ ix++;
+ } // NOLINT
+ if (fy == 256) {
+ fy = 0;
+ iy++;
+ } // NOLINT
// Use nearest border pixel
- if (ix < 0) { ix = 0, fx = 0; } // NOLINT
- if (iy < 0) { iy = 0, fy = 0; } // NOLINT
- if (ix >= width - 2) ix = width - 2;
- if (iy >= height - 2) iy = height - 2;
-
- Offset offset = { (short) (ix - x),
- (short) (iy - y),
- (unsigned char) fx,
- (unsigned char) fy };
+ if (ix < 0) {
+ ix = 0, fx = 0;
+ } // NOLINT
+ if (iy < 0) {
+ iy = 0, fy = 0;
+ } // NOLINT
+ if (ix >= width - 2)
+ ix = width - 2;
+ if (iy >= height - 2)
+ iy = height - 2;
+
+ Offset offset = {(short)(ix - x),
+ (short)(iy - y),
+ (unsigned char)fx,
+ (unsigned char)fy};
offset_[y * width + x] = offset;
}
}
}
-template<typename WarpFunction>
-void LookupWarpGrid::Update(const CameraIntrinsics &intrinsics,
+template <typename WarpFunction>
+void LookupWarpGrid::Update(const CameraIntrinsics& intrinsics,
int width,
int height,
double overscan) {
- if (width_ != width ||
- height_ != height ||
- overscan_ != overscan) {
+ if (width_ != width || height_ != height || overscan_ != overscan) {
Reset();
}
if (offset_ == NULL) {
offset_ = new Offset[width * height];
- Compute<WarpFunction>(intrinsics,
- width,
- height,
- overscan);
+ Compute<WarpFunction>(intrinsics, width, height, overscan);
}
width_ = width;
@@ -118,29 +126,30 @@ void LookupWarpGrid::Update(const CameraIntrinsics &intrinsics,
}
// TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
-template<typename PixelType>
-void LookupWarpGrid::Apply(const PixelType *input_buffer,
+template <typename PixelType>
+void LookupWarpGrid::Apply(const PixelType* input_buffer,
int width,
int height,
int channels,
- PixelType *output_buffer) {
+ PixelType* output_buffer) {
#if defined(_OPENMP)
-# pragma omp parallel for schedule(static) num_threads(threads_) \
- if (threads_ > 1 && height > 100)
+# pragma omp parallel for schedule(static) \
+ num_threads(threads_) if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
Offset offset = offset_[y * width + x];
- const int pixel_index = ((y + offset.iy) * width +
- (x + offset.ix)) * channels;
- const PixelType *s = &input_buffer[pixel_index];
+ const int pixel_index =
+ ((y + offset.iy) * width + (x + offset.ix)) * channels;
+ const PixelType* s = &input_buffer[pixel_index];
for (int i = 0; i < channels; i++) {
output_buffer[(y * width + x) * channels + i] =
- ((s[i] * (256 - offset.fx) +
- s[channels + i] * offset.fx) * (256 - offset.fy) +
- (s[width * channels + i] * (256 - offset.fx) +
- s[width * channels + channels + i] * offset.fx) * offset.fy)
- / (256 * 256);
+ ((s[i] * (256 - offset.fx) + s[channels + i] * offset.fx) *
+ (256 - offset.fy) +
+ (s[width * channels + i] * (256 - offset.fx) +
+ s[width * channels + channels + i] * offset.fx) *
+ offset.fy) /
+ (256 * 256);
}
}
}
@@ -148,45 +157,33 @@ void LookupWarpGrid::Apply(const PixelType *input_buffer,
} // namespace internal
-template<typename PixelType>
-void CameraIntrinsics::DistortBuffer(const PixelType *input_buffer,
+template <typename PixelType>
+void CameraIntrinsics::DistortBuffer(const PixelType* input_buffer,
int width,
int height,
double overscan,
int channels,
- PixelType *output_buffer) {
+ PixelType* output_buffer) {
assert(channels >= 1);
assert(channels <= 4);
- distort_.Update<InvertIntrinsicsFunction>(*this,
- width,
- height,
- overscan);
- distort_.Apply<PixelType>(input_buffer,
- width,
- height,
- channels,
- output_buffer);
+ distort_.Update<InvertIntrinsicsFunction>(*this, width, height, overscan);
+ distort_.Apply<PixelType>(
+ input_buffer, width, height, channels, output_buffer);
}
-template<typename PixelType>
-void CameraIntrinsics::UndistortBuffer(const PixelType *input_buffer,
+template <typename PixelType>
+void CameraIntrinsics::UndistortBuffer(const PixelType* input_buffer,
int width,
int height,
double overscan,
int channels,
- PixelType *output_buffer) {
+ PixelType* output_buffer) {
assert(channels >= 1);
assert(channels <= 4);
- undistort_.Update<ApplyIntrinsicsFunction>(*this,
- width,
- height,
- overscan);
-
- undistort_.Apply<PixelType>(input_buffer,
- width,
- height,
- channels,
- output_buffer);
+ undistort_.Update<ApplyIntrinsicsFunction>(*this, width, height, overscan);
+
+ undistort_.Apply<PixelType>(
+ input_buffer, width, height, channels, output_buffer);
}
} // namespace libmv
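
LookupWarpGrid::Apply above blends the four neighbouring pixels with 8-bit fixed-point weights: fx and fy are the fractional parts of the warped coordinate scaled to 0..255 (with 256 rolled over to the next pixel in Compute). A standalone single-channel version of the same weighting:

    // Fixed-point bilinear sample of a single-channel float buffer; fx and fy
    // are fractional offsets in the 0..255 range, as produced by Compute().
    inline float SampleBilinearFixedPoint(const float* buffer, int width,
                                          int ix, int iy, int fx, int fy) {
      const float* s = &buffer[iy * width + ix];
      return ((s[0] * (256 - fx) + s[1] * fx) * (256 - fy) +
              (s[width] * (256 - fx) + s[width + 1] * fx) * fy) /
             (256.0f * 256.0f);
    }
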
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc
index 96d35a29ef8..cfcc2d16682 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc
@@ -22,10 +22,10 @@
#include <iostream>
-#include "testing/testing.h"
#include "libmv/image/image.h"
#include "libmv/image/image_drawing.h"
#include "libmv/logging/logging.h"
+#include "testing/testing.h"
namespace libmv {
@@ -59,26 +59,36 @@ TEST(PolynomialCameraIntrinsics, ApplyIntrinsics) {
const int N = 5;
double expected[N][N][2] = {
- { {75.312500, -24.687500}, {338.982239, -62.035522},
- {640.000000, -72.929688}, {941.017761, -62.035522},
- {1204.687500, -24.687500}},
-
- { {37.964478, 238.982239}, {323.664551, 223.664551},
- {640.000000, 219.193420}, {956.335449, 223.664551},
- {1242.035522, 238.982239}},
-
- { {27.070312, 540.000000}, {319.193420, 540.000000},
- {640.000000, 540.000000}, {960.806580, 540.000000},
- {1252.929688, 540.000000}},
-
- { {37.964478, 841.017761}, {323.664551, 856.335449},
- {640.000000, 860.806580}, {956.335449, 856.335449},
- {1242.035522, 841.017761}},
-
- { {75.312500, 1104.687500}, {338.982239, 1142.035522},
- {640.000000, 1152.929688}, {941.017761, 1142.035522},
- {1204.687500, 1104.687500}}
- };
+ {{75.312500, -24.687500},
+ {338.982239, -62.035522},
+ {640.000000, -72.929688},
+ {941.017761, -62.035522},
+ {1204.687500, -24.687500}},
+
+ {{37.964478, 238.982239},
+ {323.664551, 223.664551},
+ {640.000000, 219.193420},
+ {956.335449, 223.664551},
+ {1242.035522, 238.982239}},
+
+ {{27.070312, 540.000000},
+ {319.193420, 540.000000},
+ {640.000000, 540.000000},
+ {960.806580, 540.000000},
+ {1252.929688, 540.000000}},
+
+ {{37.964478, 841.017761},
+ {323.664551, 856.335449},
+ {640.000000, 860.806580},
+ {956.335449, 856.335449},
+ {1242.035522, 841.017761}},
+
+ {{75.312500, 1104.687500},
+ {338.982239, 1142.035522},
+ {640.000000, 1152.929688},
+ {941.017761, 1142.035522},
+ {1204.687500, 1104.687500}},
+ };
PolynomialCameraIntrinsics intrinsics;
intrinsics.SetFocalLength(1300.0, 1300.0);
@@ -89,12 +99,11 @@ TEST(PolynomialCameraIntrinsics, ApplyIntrinsics) {
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
- double normalized_x = j * step - 0.5,
- normalized_y = i * step - 0.5;
+ double normalized_x = j * step - 0.5, normalized_y = i * step - 0.5;
double distorted_x, distorted_y;
- intrinsics.ApplyIntrinsics(normalized_x, normalized_y,
- &distorted_x, &distorted_y);
+ intrinsics.ApplyIntrinsics(
+ normalized_x, normalized_y, &distorted_x, &distorted_y);
EXPECT_NEAR(expected[i][j][0], distorted_x, 1e-6);
EXPECT_NEAR(expected[i][j][1], distorted_y, 1e-6);
@@ -106,43 +115,51 @@ TEST(PolynomialCameraIntrinsics, InvertIntrinsics) {
const int N = 5;
double expected[N][N][2] = {
- { {-0.524482, -0.437069}, {-0.226237, -0.403994},
- { 0.031876, -0.398446}, { 0.293917, -0.408218},
- { 0.632438, -0.465028}},
-
- { {-0.493496, -0.189173}, {-0.219052, -0.179936},
- { 0.030975, -0.178107}, { 0.283742, -0.181280},
- { 0.574557, -0.194335}},
-
- { {-0.488013, 0.032534}, {-0.217537, 0.031077},
- { 0.030781, 0.030781}, { 0.281635, 0.031293},
- { 0.566344, 0.033314}},
-
- { {-0.498696, 0.257660}, {-0.220424, 0.244041},
- { 0.031150, 0.241409}, { 0.285660, 0.245985},
- { 0.582670, 0.265629}},
-
- { {-0.550617, 0.532263}, {-0.230399, 0.477255},
- { 0.032380, 0.469510}, { 0.299986, 0.483311},
- { 0.684740, 0.584043}}
- };
+ {{-0.524482, -0.437069},
+ {-0.226237, -0.403994},
+ {0.031876, -0.398446},
+ {0.293917, -0.408218},
+ {0.632438, -0.465028}},
+
+ {{-0.493496, -0.189173},
+ {-0.219052, -0.179936},
+ {0.030975, -0.178107},
+ {0.283742, -0.181280},
+ {0.574557, -0.194335}},
+
+ {{-0.488013, 0.032534},
+ {-0.217537, 0.031077},
+ {0.030781, 0.030781},
+ {0.281635, 0.031293},
+ {0.566344, 0.033314}},
+
+ {{-0.498696, 0.257660},
+ {-0.220424, 0.244041},
+ {0.031150, 0.241409},
+ {0.285660, 0.245985},
+ {0.582670, 0.265629}},
+
+ {{-0.550617, 0.532263},
+ {-0.230399, 0.477255},
+ {0.032380, 0.469510},
+ {0.299986, 0.483311},
+ {0.684740, 0.584043}},
+ };
PolynomialCameraIntrinsics intrinsics;
intrinsics.SetFocalLength(1300.0, 1300.0);
intrinsics.SetPrincipalPoint(600.0, 500.0);
intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
- double step_x = 1280.0 / (N - 1),
- step_y = 1080.0 / (N - 1);
+ double step_x = 1280.0 / (N - 1), step_y = 1080.0 / (N - 1);
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
- double distorted_x = j * step_x,
- distorted_y = i * step_y;
+ double distorted_x = j * step_x, distorted_y = i * step_y;
double normalized_x, normalized_y;
- intrinsics.InvertIntrinsics(distorted_x, distorted_y,
- &normalized_x, &normalized_y);
+ intrinsics.InvertIntrinsics(
+ distorted_x, distorted_y, &normalized_x, &normalized_y);
EXPECT_NEAR(expected[i][j][0], normalized_x, 1e-6);
EXPECT_NEAR(expected[i][j][1], normalized_y, 1e-6);
@@ -190,10 +207,11 @@ TEST(PolynomialCameraIntrinsics, IdentityDistortBuffer) {
FloatImage distorted_image(h, w);
intrinsics.SetImageSize(w, h);
intrinsics.SetFocalLength(10.0, 10.0);
- intrinsics.SetPrincipalPoint((double) w / 2.0, (double) h / 2.0);
+ intrinsics.SetPrincipalPoint((double)w / 2.0, (double)h / 2.0);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
intrinsics.DistortBuffer(image.Data(),
- image.Width(), image.Height(),
+ image.Width(),
+ image.Height(),
0.0,
image.Depth(),
distorted_image.Data());
@@ -221,10 +239,11 @@ TEST(PolynomialCameraIntrinsics, IdentityUndistortBuffer) {
FloatImage distorted_image(h, w);
intrinsics.SetImageSize(w, h);
intrinsics.SetFocalLength(10.0, 10.0);
- intrinsics.SetPrincipalPoint((double) w / 2.0, (double) h / 2.0);
+ intrinsics.SetPrincipalPoint((double)w / 2.0, (double)h / 2.0);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
intrinsics.UndistortBuffer(image.Data(),
- image.Width(), image.Height(),
+ image.Width(),
+ image.Height(),
0.0,
image.Depth(),
distorted_image.Data());
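
The buffer tests above check that a zero-distortion warp is an identity; another check in the same gtest style (not part of this diff) is an apply/invert round trip, sketched below with a deliberately loose tolerance since the polynomial inversion is iterative.

    #include "libmv/simple_pipeline/camera_intrinsics.h"
    #include "testing/testing.h"

    TEST(PolynomialCameraIntrinsics, ApplyInvertRoundTrip) {
      libmv::PolynomialCameraIntrinsics intrinsics;
      intrinsics.SetFocalLength(1300.0, 1300.0);
      intrinsics.SetPrincipalPoint(600.0, 500.0);
      intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);

      double nx, ny, px, py;
      intrinsics.InvertIntrinsics(640.0, 540.0, &nx, &ny);
      intrinsics.ApplyIntrinsics(nx, ny, &px, &py);

      EXPECT_NEAR(640.0, px, 1e-3);
      EXPECT_NEAR(540.0, py, 1e-3);
    }
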
diff --git a/intern/libmv/libmv/simple_pipeline/detect.cc b/intern/libmv/libmv/simple_pipeline/detect.cc
index 46599a4c49e..0897c5598c6 100644
--- a/intern/libmv/libmv/simple_pipeline/detect.cc
+++ b/intern/libmv/libmv/simple_pipeline/detect.cc
@@ -22,14 +22,14 @@
**
****************************************************************************/
-#include <stdlib.h>
#include <memory.h>
+#include <stdlib.h>
#include <queue>
#include "libmv/base/scoped_ptr.h"
#include "libmv/image/array_nd.h"
-#include "libmv/image/image_converter.h"
#include "libmv/image/convolve.h"
+#include "libmv/image/image_converter.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/detect.h"
@@ -55,7 +55,7 @@ double kDefaultHarrisThreshold = 1e-5;
class FeatureComparison {
public:
- bool operator() (const Feature &left, const Feature &right) const {
+ bool operator()(const Feature& left, const Feature& right) const {
return right.score > left.score;
}
};
@@ -63,18 +63,17 @@ class FeatureComparison {
// Filter the features so there are no features closer than
// minimal distance to each other.
// This is a naive implementation with O(n^2) asymptotic.
-void FilterFeaturesByDistance(const vector<Feature> &all_features,
+void FilterFeaturesByDistance(const vector<Feature>& all_features,
int min_distance,
- vector<Feature> *detected_features) {
+ vector<Feature>* detected_features) {
const int min_distance_squared = min_distance * min_distance;
// Use priority queue to sort the features by their score.
//
// Do this on copy of the input features to prevent possible
// distortion in callee function behavior.
- std::priority_queue<Feature,
- std::vector<Feature>,
- FeatureComparison> priority_features;
+ std::priority_queue<Feature, std::vector<Feature>, FeatureComparison>
+ priority_features;
for (int i = 0; i < all_features.size(); i++) {
priority_features.push(all_features.at(i));
@@ -85,7 +84,7 @@ void FilterFeaturesByDistance(const vector<Feature> &all_features,
Feature a = priority_features.top();
for (int i = 0; i < detected_features->size(); i++) {
- Feature &b = detected_features->at(i);
+ Feature& b = detected_features->at(i);
if (Square(a.x - b.x) + Square(a.y - b.y) < min_distance_squared) {
ok = false;
break;
@@ -100,9 +99,9 @@ void FilterFeaturesByDistance(const vector<Feature> &all_features,
}
}
-void DetectFAST(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectFAST(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
#ifndef LIBMV_NO_FAST_DETECTOR
const int min_distance = options.min_distance;
const int min_trackness = options.fast_min_trackness;
@@ -111,12 +110,14 @@ void DetectFAST(const FloatImage &grayscale_image,
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
- scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+ scoped_array<unsigned char> byte_image(
+ FloatImageToUCharArray(grayscale_image));
const int byte_image_offset = margin * stride + margin;
- // TODO(MatthiasF): Support targetting a feature count (binary search trackness)
+  // TODO(MatthiasF): Support targeting a feature count (binary search
+ // trackness)
int num_features;
- xy *all = fast9_detect(byte_image.get() + byte_image_offset,
+ xy* all = fast9_detect(byte_image.get() + byte_image_offset,
width,
height,
stride,
@@ -126,13 +127,13 @@ void DetectFAST(const FloatImage &grayscale_image,
free(all);
return;
}
- int *scores = fast9_score(byte_image.get() + byte_image_offset,
+ int* scores = fast9_score(byte_image.get() + byte_image_offset,
stride,
all,
num_features,
min_trackness);
// TODO(MatthiasF): merge with close feature suppression
- xy *nonmax = nonmax_suppression(all, scores, num_features, &num_features);
+ xy* nonmax = nonmax_suppression(all, scores, num_features, &num_features);
free(all);
// Remove too close features
// TODO(MatthiasF): A resolution independent parameter would be better than
@@ -152,89 +153,104 @@ void DetectFAST(const FloatImage &grayscale_image,
free(scores);
free(nonmax);
#else
- (void) grayscale_image; // Ignored.
- (void) options; // Ignored.
- (void) detected_features; // Ignored.
+ (void)grayscale_image; // Ignored.
+ (void)options; // Ignored.
+ (void)detected_features; // Ignored.
LOG(FATAL) << "FAST detector is disabled in this build.";
#endif
}
#ifdef __SSE2__
-static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
- int strideA, int strideB) {
+static unsigned int SAD(const ubyte* imageA,
+ const ubyte* imageB,
+ int strideA,
+ int strideB) {
__m128i a = _mm_setzero_si128();
for (int i = 0; i < 16; i++) {
- a = _mm_adds_epu16(a,
- _mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA+i*strideA)),
- _mm_loadu_si128((__m128i*)(imageB+i*strideB))));
+ a = _mm_adds_epu16(
+ a,
+ _mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA + i * strideA)),
+ _mm_loadu_si128((__m128i*)(imageB + i * strideB))));
}
return _mm_extract_epi16(a, 0) + _mm_extract_epi16(a, 4);
}
#else
-static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
- int strideA, int strideB) {
+static unsigned int SAD(const ubyte* imageA,
+ const ubyte* imageB,
+ int strideA,
+ int strideB) {
unsigned int sad = 0;
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
- sad += abs((int)imageA[i*strideA+j] - imageB[i*strideB+j]);
+ sad += abs((int)imageA[i * strideA + j] - imageB[i * strideB + j]);
}
}
return sad;
}
#endif
-void DetectMORAVEC(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectMORAVEC(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
const int distance = options.min_distance;
const int margin = options.margin;
- const unsigned char *pattern = options.moravec_pattern;
+ const unsigned char* pattern = options.moravec_pattern;
const int count = options.moravec_max_count;
const int width = grayscale_image.Width() - 2 * margin;
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
- scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+ scoped_array<unsigned char> byte_image(
+ FloatImageToUCharArray(grayscale_image));
unsigned short histogram[256];
memset(histogram, 0, sizeof(histogram));
- scoped_array<ubyte> scores(new ubyte[width*height]);
- memset(scores.get(), 0, width*height);
+ scoped_array<ubyte> scores(new ubyte[width * height]);
+ memset(scores.get(), 0, width * height);
const int r = 1; // radius for self similarity comparison
- for (int y = distance; y < height-distance; y++) {
- for (int x = distance; x < width-distance; x++) {
- const ubyte* s = &byte_image[y*stride+x];
- int score = // low self-similarity with overlapping patterns
- // OPTI: load pattern once
+ for (int y = distance; y < height - distance; y++) {
+ for (int x = distance; x < width - distance; x++) {
+ const ubyte* s = &byte_image[y * stride + x];
+ // low self-similarity with overlapping patterns
+ // OPTI: load pattern once
+ // clang-format off
+ int score =
SAD(s, s-r*stride-r, stride, stride)+SAD(s, s-r*stride, stride, stride)+SAD(s, s-r*stride+r, stride, stride)+
SAD(s, s -r, stride, stride)+ SAD(s, s +r, stride, stride)+
SAD(s, s+r*stride-r, stride, stride)+SAD(s, s+r*stride, stride, stride)+SAD(s, s+r*stride+r, stride, stride);
+ // clang-format on
+
score /= 256; // normalize
- if (pattern) // find only features similar to pattern
+ if (pattern) // find only features similar to pattern
score -= SAD(s, pattern, stride, 16);
- if (score <= 16) continue; // filter very self-similar features
+ if (score <= 16)
+ continue; // filter very self-similar features
score -= 16; // translate to score/histogram values
- if (score>255) score=255; // clip
- ubyte* c = &scores[y*width+x];
+ if (score > 255)
+ score = 255; // clip
+ ubyte* c = &scores[y * width + x];
for (int i = -distance; i < 0; i++) {
for (int j = -distance; j < distance; j++) {
- int s = c[i*width+j];
- if (s == 0) continue;
- if (s >= score) goto nonmax;
- c[i*width+j] = 0;
+ int s = c[i * width + j];
+ if (s == 0)
+ continue;
+ if (s >= score)
+ goto nonmax;
+ c[i * width + j] = 0;
histogram[s]--;
}
}
for (int i = 0, j = -distance; j < 0; j++) {
- int s = c[i*width+j];
- if (s == 0) continue;
- if (s >= score) goto nonmax;
- c[i*width+j] = 0;
+ int s = c[i * width + j];
+ if (s == 0)
+ continue;
+ if (s >= score)
+ goto nonmax;
+ c[i * width + j] = 0;
histogram[s]--;
}
c[0] = score, histogram[score]++;
- nonmax:
- { } // Do nothing.
+ nonmax : {} // Do nothing.
}
}
int min = 255, total = 0;
@@ -254,18 +270,16 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
      // Score calculation above uses the top left corner of the
      // patch as the origin; here we need to convert this value
      // to a pattern center by adding 8 pixels.
- detected_features->push_back(Feature((float) x + 8.0f,
- (float) y + 8.0f,
- (float) s,
- 16.0f));
+ detected_features->push_back(
+ Feature((float)x + 8.0f, (float)y + 8.0f, (float)s, 16.0f));
}
}
}
}
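For reference, the score assembled in DetectMORAVEC above amounts to the following (a sketch of the math implied by the code; SAD is the 16x16 sum of absolute differences shown earlier, r = 1, and the pattern term only applies when options.moravec_pattern is set):

    \mathrm{score}(p) \;=\; \frac{1}{256}\sum_{(dx,\,dy)\,\in\,N_8} \mathrm{SAD}\!\left(p,\; p + r\,(dy\cdot \mathrm{stride} + dx)\right) \;-\; \mathrm{SAD}(p,\ \mathrm{pattern})

Candidates with a score of 16 or less are discarded as too self-similar, the remainder is shifted by 16 and clamped to 255, and the histogram is then used (in the elided lines) to pick a threshold that keeps roughly options.moravec_max_count features after the distance-based non-maximum suppression.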
-void DetectHarris(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectHarris(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
const double alpha = 0.06;
const double sigma = 0.9;
@@ -281,9 +295,7 @@ void DetectHarris(const FloatImage &grayscale_image,
MultiplyElements(gradient_y, gradient_y, &gradient_yy);
MultiplyElements(gradient_x, gradient_y, &gradient_xy);
- FloatImage gradient_xx_blurred,
- gradient_yy_blurred,
- gradient_xy_blurred;
+ FloatImage gradient_xx_blurred, gradient_yy_blurred, gradient_xy_blurred;
ConvolveGaussian(gradient_xx, sigma, &gradient_xx_blurred);
ConvolveGaussian(gradient_yy, sigma, &gradient_yy_blurred);
ConvolveGaussian(gradient_xy, sigma, &gradient_xy_blurred);
@@ -304,10 +316,8 @@ void DetectHarris(const FloatImage &grayscale_image,
double traceA = A.trace();
double harris_function = detA - alpha * traceA * traceA;
if (harris_function > threshold) {
- all_features.push_back(Feature((float) x,
- (float) y,
- (float) harris_function,
- 5.0f));
+ all_features.push_back(
+ Feature((float)x, (float)y, (float)harris_function, 5.0f));
}
}
}
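The detector above is the standard Harris corner measure; with G_sigma denoting Gaussian smoothing and I_x, I_y the image gradients, the code evaluates

    A \;=\; G_{\sigma} \ast \begin{pmatrix} I_x^2 & I_x I_y \\ I_x I_y & I_y^2 \end{pmatrix}, \qquad R \;=\; \det(A) \;-\; \alpha\,\operatorname{tr}(A)^2, \qquad \alpha = 0.06,\ \sigma = 0.9

and keeps pixels whose response R exceeds the configured threshold.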
@@ -318,17 +328,18 @@ void DetectHarris(const FloatImage &grayscale_image,
} // namespace
DetectOptions::DetectOptions()
- : type(DetectOptions::HARRIS),
- margin(0),
- min_distance(120),
- fast_min_trackness(kDefaultFastMinTrackness),
- moravec_max_count(0),
- moravec_pattern(NULL),
- harris_threshold(kDefaultHarrisThreshold) {}
-
-void Detect(const FloatImage &image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+ : type(DetectOptions::HARRIS),
+ margin(0),
+ min_distance(120),
+ fast_min_trackness(kDefaultFastMinTrackness),
+ moravec_max_count(0),
+ moravec_pattern(NULL),
+ harris_threshold(kDefaultHarrisThreshold) {
+}
+
+void Detect(const FloatImage& image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
  // Currently all the detectors require the image to be grayscale.
// Do it here to avoid code duplication.
FloatImage grayscale_image;
@@ -350,8 +361,7 @@ void Detect(const FloatImage &image,
}
}
-std::ostream& operator <<(std::ostream &os,
- const Feature &feature) {
+std::ostream& operator<<(std::ostream& os, const Feature& feature) {
os << "x: " << feature.x << ", y: " << feature.y;
os << ", score: " << feature.score;
os << ", size: " << feature.size;
diff --git a/intern/libmv/libmv/simple_pipeline/detect.h b/intern/libmv/libmv/simple_pipeline/detect.h
index 1035287bcf2..8ddf0025e4b 100644
--- a/intern/libmv/libmv/simple_pipeline/detect.h
+++ b/intern/libmv/libmv/simple_pipeline/detect.h
@@ -39,7 +39,7 @@ typedef unsigned char ubyte;
struct Feature {
Feature(float x, float y) : x(x), y(y) {}
Feature(float x, float y, float score, float size)
- : x(x), y(y), score(score), size(size) {}
+ : x(x), y(y), score(score), size(size) {}
// Position of the feature in pixels from top-left corner.
// Note: Libmv detector might eventually support subpixel precision.
@@ -88,9 +88,9 @@ struct DetectOptions {
// Find only features similar to this pattern. Only used by MORAVEC detector.
//
- // This is an image patch denoted in byte array with dimensions of 16px by 16px
- // used to filter features by similarity to this patch.
- unsigned char *moravec_pattern;
+  // This is an image patch, stored as a byte array with dimensions of 16px
+  // by 16px, used to filter features by similarity to this patch.
+ unsigned char* moravec_pattern;
  // Threshold value of the Harris function to add a new feature
// to the result.
@@ -101,12 +101,11 @@ struct DetectOptions {
//
// The image could have 1-4 channels; it'll be converted to grayscale
// by the detector function if needed.
-void Detect(const FloatImage &image,
- const DetectOptions &options,
- vector<Feature> *detected_features);
+void Detect(const FloatImage& image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features);
-std::ostream& operator <<(std::ostream &os,
- const Feature &feature);
+std::ostream& operator<<(std::ostream& os, const Feature& feature);
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/detect_test.cc b/intern/libmv/libmv/simple_pipeline/detect_test.cc
index b226ad96595..718598d04e1 100644
--- a/intern/libmv/libmv/simple_pipeline/detect_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/detect_test.cc
@@ -20,14 +20,14 @@
#include "libmv/simple_pipeline/detect.h"
-#include "testing/testing.h"
#include "libmv/logging/logging.h"
+#include "testing/testing.h"
namespace libmv {
namespace {
-void PreformSinglePointTest(const DetectOptions &options) {
+void PreformSinglePointTest(const DetectOptions& options) {
// Prepare the image.
FloatImage image(15, 15);
image.fill(1.0);
@@ -40,7 +40,7 @@ void PreformSinglePointTest(const DetectOptions &options) {
// Check detected features matches our expectations.
EXPECT_EQ(1, detected_features.size());
if (detected_features.size() == 1) {
- Feature &feature = detected_features[0];
+ Feature& feature = detected_features[0];
EXPECT_EQ(7, feature.x);
EXPECT_EQ(7, feature.y);
}
@@ -83,8 +83,8 @@ void PreformCheckerBoardTest(const DetectOptions &options) {
}
#endif
-void CheckExpectedFeatures(const vector<Feature> &detected_features,
- const vector<Feature> &expected_features) {
+void CheckExpectedFeatures(const vector<Feature>& detected_features,
+ const vector<Feature>& expected_features) {
EXPECT_EQ(expected_features.size(), detected_features.size());
// That's unsafe to iterate over vectors when their lengths
@@ -95,10 +95,10 @@ void CheckExpectedFeatures(const vector<Feature> &detected_features,
}
for (int i = 0; i < expected_features.size(); ++i) {
- const Feature &extected_feature = expected_features[i];
+ const Feature& extected_feature = expected_features[i];
bool found = false;
for (int j = 0; j < detected_features.size(); ++j) {
- const Feature &detected_feature = detected_features[j];
+ const Feature& detected_feature = detected_features[j];
if (extected_feature.x == detected_feature.x &&
extected_feature.y == detected_feature.y) {
found = true;
@@ -109,15 +109,14 @@ void CheckExpectedFeatures(const vector<Feature> &detected_features,
}
}
-void PreformSingleTriangleTest(const DetectOptions &options) {
+void PreformSingleTriangleTest(const DetectOptions& options) {
// Prepare the image.
FloatImage image(15, 21);
image.fill(1.0);
int vertex_x = 10, vertex_y = 5;
for (int i = 0; i < 6; ++i) {
- int current_x = vertex_x - i,
- current_y = vertex_y + i;
+ int current_x = vertex_x - i, current_y = vertex_y + i;
for (int j = 0; j < i * 2 + 1; ++j, ++current_x) {
image(current_y, current_x) = 0.0;
}
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.cc b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
index f602234b630..4556e3ceaf9 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.cc
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
@@ -41,25 +41,34 @@ struct InvertPolynomialIntrinsicsCostFunction {
const double p2,
const double image_x,
const double image_y)
- : focal_length_x_(focal_length_x),
- focal_length_y_(focal_length_y),
- principal_point_x_(principal_point_x),
- principal_point_y_(principal_point_y),
- k1_(k1), k2_(k2), k3_(k3),
- p1_(p1), p2_(p2),
- x_(image_x), y_(image_y) {}
-
- Vec2 operator()(const Vec2 &u) const {
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1),
+ k2_(k2),
+ k3_(k3),
+ p1_(p1),
+ p2_(p2),
+ x_(image_x),
+ y_(image_y) {}
+
+ Vec2 operator()(const Vec2& u) const {
double xx, yy;
ApplyPolynomialDistortionModel(focal_length_x_,
focal_length_y_,
principal_point_x_,
principal_point_y_,
- k1_, k2_, k3_,
- p1_, p2_,
- u(0), u(1),
- &xx, &yy);
+ k1_,
+ k2_,
+ k3_,
+ p1_,
+ p2_,
+ u(0),
+ u(1),
+ &xx,
+ &yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@@ -87,23 +96,28 @@ struct InvertDivisionIntrinsicsCostFunction {
const double k2,
const double image_x,
const double image_y)
- : focal_length_x_(focal_length_x),
- focal_length_y_(focal_length_y),
- principal_point_x_(principal_point_x),
- principal_point_y_(principal_point_y),
- k1_(k1), k2_(k2),
- x_(image_x), y_(image_y) {}
-
- Vec2 operator()(const Vec2 &u) const {
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1),
+ k2_(k2),
+ x_(image_x),
+ y_(image_y) {}
+
+ Vec2 operator()(const Vec2& u) const {
double xx, yy;
ApplyDivisionDistortionModel(focal_length_x_,
focal_length_y_,
principal_point_x_,
principal_point_y_,
- k1_, k2_,
- u(0), u(1),
- &xx, &yy);
+ k1_,
+ k2_,
+ u(0),
+ u(1),
+ &xx,
+ &yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@@ -134,25 +148,36 @@ struct InvertBrownIntrinsicsCostFunction {
const double p2,
const double image_x,
const double image_y)
- : focal_length_x_(focal_length_x),
- focal_length_y_(focal_length_y),
- principal_point_x_(principal_point_x),
- principal_point_y_(principal_point_y),
- k1_(k1), k2_(k2), k3_(k3), k4_(k4),
- p1_(p1), p2_(p2),
- x_(image_x), y_(image_y) {}
-
- Vec2 operator()(const Vec2 &u) const {
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1),
+ k2_(k2),
+ k3_(k3),
+ k4_(k4),
+ p1_(p1),
+ p2_(p2),
+ x_(image_x),
+ y_(image_y) {}
+
+ Vec2 operator()(const Vec2& u) const {
double xx, yy;
ApplyBrownDistortionModel(focal_length_x_,
focal_length_y_,
principal_point_x_,
principal_point_y_,
- k1_, k2_, k3_, k4_,
- p1_, p2_,
- u(0), u(1),
- &xx, &yy);
+ k1_,
+ k2_,
+ k3_,
+ k4_,
+ p1_,
+ p2_,
+ u(0),
+ u(1),
+ &xx,
+ &yy);
Vec2 fx;
fx << (xx - x_), (yy - y_);
@@ -180,8 +205,8 @@ void InvertPolynomialDistortionModel(const double focal_length_x,
const double p2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y) {
+ double* normalized_x,
+ double* normalized_y) {
// Compute the initial guess. For a camera with no distortion, this will also
// be the final answer; the LM iteration will terminate immediately.
Vec2 normalized;
@@ -194,13 +219,17 @@ void InvertPolynomialDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
- k1, k2, k3,
- p1, p2,
- image_x, image_y);
+ k1,
+ k2,
+ k3,
+ p1,
+ p2,
+ image_x,
+ image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
- /*Solver::Results results =*/ solver.minimize(params, &normalized);
+ /*Solver::Results results =*/solver.minimize(params, &normalized);
// TODO(keir): Better error handling.
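The inversion pattern used throughout this file is a tiny nonlinear least-squares solve: the forward Apply*DistortionModel routine is wrapped in a cost functor whose residual is the difference between the re-distorted point and the observed image point, and Levenberg-Marquardt refines the normalized coordinates. In math (with u the normalized point being solved for, and the initial guess presumably the no-distortion pinhole inverse, as the comment above indicates):

    u_0 = \left(\frac{x_{\mathrm{img}} - c_x}{f_x},\ \frac{y_{\mathrm{img}} - c_y}{f_y}\right), \qquad u^{\star} = \arg\min_{u}\,\bigl\|\mathrm{Apply}(u) - (x_{\mathrm{img}},\, y_{\mathrm{img}})\bigr\|^2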
@@ -216,8 +245,8 @@ void InvertDivisionDistortionModel(const double focal_length_x,
const double k2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y) {
+ double* normalized_x,
+ double* normalized_y) {
// Compute the initial guess. For a camera with no distortion, this will also
// be the final answer; the LM iteration will terminate immediately.
Vec2 normalized;
@@ -231,12 +260,14 @@ void InvertDivisionDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
- k1, k2,
- image_x, image_y);
+ k1,
+ k2,
+ image_x,
+ image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
- /*Solver::Results results =*/ solver.minimize(params, &normalized);
+ /*Solver::Results results =*/solver.minimize(params, &normalized);
// TODO(keir): Better error handling.
@@ -256,8 +287,8 @@ void InvertBrownDistortionModel(const double focal_length_x,
const double p2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y) {
+ double* normalized_x,
+ double* normalized_y) {
// Compute the initial guess. For a camera with no distortion, this will also
// be the final answer; the LM iteration will terminate immediately.
Vec2 normalized;
@@ -270,13 +301,18 @@ void InvertBrownDistortionModel(const double focal_length_x,
focal_length_y,
principal_point_x,
principal_point_y,
- k1, k2, k3, k4,
- p1, p2,
- image_x, image_y);
+ k1,
+ k2,
+ k3,
+ k4,
+ p1,
+ p2,
+ image_x,
+ image_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
- /*Solver::Results results =*/ solver.minimize(params, &normalized);
+ /*Solver::Results results =*/solver.minimize(params, &normalized);
// TODO(keir): Better error handling.
@@ -299,31 +335,36 @@ struct ApplyNukeIntrinsicsCostFunction {
const double k2,
const double expected_normalized_x,
const double expected_normalized_y)
- : focal_length_x_(focal_length_x),
- focal_length_y_(focal_length_y),
- principal_point_x_(principal_point_x),
- principal_point_y_(principal_point_y),
- image_width_(image_width),
- image_height_(image_height),
- k1_(k1), k2_(k2),
- expected_normalized_x_(expected_normalized_x),
- expected_normalized_y_(expected_normalized_y) {}
-
- Vec2 operator()(const Vec2 &image_coordinate) const {
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ image_width_(image_width),
+ image_height_(image_height),
+ k1_(k1),
+ k2_(k2),
+ expected_normalized_x_(expected_normalized_x),
+ expected_normalized_y_(expected_normalized_y) {}
+
+ Vec2 operator()(const Vec2& image_coordinate) const {
double actual_normalized_x, actual_normalized_y;
InvertNukeDistortionModel(focal_length_x_,
focal_length_y_,
principal_point_x_,
principal_point_y_,
- image_width_, image_height_,
- k1_, k2_,
- image_coordinate(0), image_coordinate(1),
- &actual_normalized_x, &actual_normalized_y);
+ image_width_,
+ image_height_,
+ k1_,
+ k2_,
+ image_coordinate(0),
+ image_coordinate(1),
+ &actual_normalized_x,
+ &actual_normalized_y);
Vec2 fx;
fx << (actual_normalized_x - expected_normalized_x_),
- (actual_normalized_y - expected_normalized_y_);
+ (actual_normalized_y - expected_normalized_y_);
return fx;
}
double focal_length_x_;
@@ -346,8 +387,8 @@ void ApplyNukeDistortionModel(const double focal_length_x,
const double k2,
const double normalized_x,
const double normalized_y,
- double *image_x,
- double *image_y) {
+ double* image_x,
+ double* image_y) {
// Compute the initial guess. For a camera with no distortion, this will also
// be the final answer; the LM iteration will terminate immediately.
Vec2 image;
@@ -363,12 +404,14 @@ void ApplyNukeDistortionModel(const double focal_length_x,
principal_point_y,
image_width,
image_height,
- k1, k2,
- normalized_x, normalized_y);
+ k1,
+ k2,
+ normalized_x,
+ normalized_y);
Solver::SolverParameters params;
Solver solver(intrinsics_cost);
- /*Solver::Results results =*/ solver.minimize(params, &image);
+ /*Solver::Results results =*/solver.minimize(params, &image);
// TODO(keir): Better error handling.
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.h b/intern/libmv/libmv/simple_pipeline/distortion_models.h
index 51300477956..5fe9fee8d54 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.h
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.h
@@ -46,37 +46,37 @@ void InvertPolynomialDistortionModel(const double focal_length_x,
const double p2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y);
+ double* normalized_x,
+ double* normalized_y);
// Apply camera intrinsics to the normalized point to get image coordinates.
// This applies the radial lens distortion to a point which is in normalized
// camera coordinates (i.e. the principal point is at (0, 0)) to get image
// coordinates in pixels. Templated for use with autodifferentiation.
template <typename T>
-inline void ApplyPolynomialDistortionModel(const T &focal_length_x,
- const T &focal_length_y,
- const T &principal_point_x,
- const T &principal_point_y,
- const T &k1,
- const T &k2,
- const T &k3,
- const T &p1,
- const T &p2,
- const T &normalized_x,
- const T &normalized_y,
- T *image_x,
- T *image_y) {
+inline void ApplyPolynomialDistortionModel(const T& focal_length_x,
+ const T& focal_length_y,
+ const T& principal_point_x,
+ const T& principal_point_y,
+ const T& k1,
+ const T& k2,
+ const T& k3,
+ const T& p1,
+ const T& p2,
+ const T& normalized_x,
+ const T& normalized_y,
+ T* image_x,
+ T* image_y) {
T x = normalized_x;
T y = normalized_y;
// Apply distortion to the normalized points to get (xd, yd).
- T r2 = x*x + y*y;
+ T r2 = x * x + y * y;
T r4 = r2 * r2;
T r6 = r4 * r2;
- T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6);
- T xd = x * r_coeff + T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
- T yd = y * r_coeff + T(2)*p2*x*y + p1*(r2 + T(2)*y*y);
+ T r_coeff = (T(1) + k1 * r2 + k2 * r4 + k3 * r6);
+ T xd = x * r_coeff + T(2) * p1 * x * y + p2 * (r2 + T(2) * x * x);
+ T yd = y * r_coeff + T(2) * p2 * x * y + p1 * (r2 + T(2) * y * y);
// Apply focal length and principal point to get the final image coordinates.
*image_x = focal_length_x * xd + principal_point_x;
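Written out, the polynomial model applied above is (x, y are normalized coordinates, f and c the focal lengths and principal point):

    r^2 = x^2 + y^2
    x_d = x\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2 p_1 x y + p_2\,(r^2 + 2x^2)
    y_d = y\,(1 + k_1 r^2 + k_2 r^4 + k_3 r^6) + 2 p_2 x y + p_1\,(r^2 + 2y^2)
    x_{\mathrm{img}} = f_x\, x_d + c_x, \qquad y_{\mathrm{img}} = f_y\, y_d + c_y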
@@ -96,8 +96,8 @@ void InvertDivisionDistortionModel(const double focal_length_x,
const double k2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y);
+ double* normalized_x,
+ double* normalized_y);
// Apply camera intrinsics to the normalized point to get image coordinates.
// This applies the radial lens distortion to a point which is in normalized
@@ -106,20 +106,19 @@ void InvertDivisionDistortionModel(const double focal_length_x,
//
// Uses division distortion model.
template <typename T>
-inline void ApplyDivisionDistortionModel(const T &focal_length_x,
- const T &focal_length_y,
- const T &principal_point_x,
- const T &principal_point_y,
- const T &k1,
- const T &k2,
- const T &normalized_x,
- const T &normalized_y,
- T *image_x,
- T *image_y) {
-
+inline void ApplyDivisionDistortionModel(const T& focal_length_x,
+ const T& focal_length_y,
+ const T& principal_point_x,
+ const T& principal_point_y,
+ const T& k1,
+ const T& k2,
+ const T& normalized_x,
+ const T& normalized_y,
+ T* image_x,
+ T* image_y) {
T x = normalized_x;
T y = normalized_y;
- T r2 = x*x + y*y;
+ T r2 = x * x + y * y;
T r4 = r2 * r2;
T xd = x / (T(1) + k1 * r2 + k2 * r4);
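The division model, by contrast, divides rather than multiplies by the radial term (the y coordinate is handled analogously in the elided lines):

    x_d = \frac{x}{1 + k_1 r^2 + k_2 r^4}, \qquad y_d = \frac{y}{1 + k_1 r^2 + k_2 r^4}, \qquad r^2 = x^2 + y^2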
@@ -136,18 +135,18 @@ inline void ApplyDivisionDistortionModel(const T &focal_length_x,
//
// Uses Nuke distortion model.
template <typename T>
-void InvertNukeDistortionModel(const T &focal_length_x,
- const T &focal_length_y,
- const T &principal_point_x,
- const T &principal_point_y,
+void InvertNukeDistortionModel(const T& focal_length_x,
+ const T& focal_length_y,
+ const T& principal_point_x,
+ const T& principal_point_y,
const int image_width,
const int image_height,
- const T &k1,
- const T &k2,
- const T &image_x,
- const T &image_y,
- T *normalized_x,
- T *normalized_y) {
+ const T& k1,
+ const T& k2,
+ const T& image_x,
+ const T& image_y,
+ T* normalized_x,
+ T* normalized_y) {
// According to the documentation:
//
// xu = xd / (1 + k0 * rd^2 + k1 * rd^4)
@@ -174,9 +173,9 @@ void InvertNukeDistortionModel(const T &focal_length_x,
const T xd = (image_x - principal_point_x) / max_half_image_size;
const T yd = (image_y - principal_point_y) / max_half_image_size;
- T rd2 = xd*xd + yd*yd;
+ T rd2 = xd * xd + yd * yd;
T rd4 = rd2 * rd2;
- T r_coeff = T(1) / (T(1) + k1*rd2 + k2*rd4);
+ T r_coeff = T(1) / (T(1) + k1 * rd2 + k2 * rd4);
T xu = xd * r_coeff;
T yu = yd * r_coeff;
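So, under the Nuke model the code first rescales image coordinates by max_half_image_size (defined in the elided lines, presumably half the larger image dimension) and then applies the documented division:

    x_d = \frac{x_{\mathrm{img}} - c_x}{s}, \quad y_d = \frac{y_{\mathrm{img}} - c_y}{s}, \quad r_d^2 = x_d^2 + y_d^2, \quad x_u = \frac{x_d}{1 + k_1 r_d^2 + k_2 r_d^4}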
@@ -200,8 +199,8 @@ void ApplyNukeDistortionModel(const double focal_length_x,
const double k2,
const double normalized_x,
const double normalized_y,
- double *image_x,
- double *image_y);
+ double* image_x,
+ double* image_y);
// Invert camera intrinsics on the image point to get normalized coordinates.
// This inverts the radial lens distortion to a point which is in image pixel
@@ -218,24 +217,24 @@ void InvertBrownDistortionModel(const double focal_length_x,
const double p2,
const double image_x,
const double image_y,
- double *normalized_x,
- double *normalized_y);
+ double* normalized_x,
+ double* normalized_y);
template <typename T>
-inline void ApplyBrownDistortionModel(const T &focal_length_x,
- const T &focal_length_y,
- const T &principal_point_x,
- const T &principal_point_y,
- const T &k1,
- const T &k2,
- const T &k3,
- const T &k4,
- const T &p1,
- const T &p2,
- const T &normalized_x,
- const T &normalized_y,
- T *image_x,
- T *image_y) {
+inline void ApplyBrownDistortionModel(const T& focal_length_x,
+ const T& focal_length_y,
+ const T& principal_point_x,
+ const T& principal_point_y,
+ const T& k1,
+ const T& k2,
+ const T& k3,
+ const T& k4,
+ const T& p1,
+ const T& p2,
+ const T& normalized_x,
+ const T& normalized_y,
+ T* image_x,
+ T* image_y) {
T x = normalized_x;
T y = normalized_y;
@@ -253,8 +252,8 @@ inline void ApplyBrownDistortionModel(const T &focal_length_x,
// Apply focal length and principal point to get the final image coordinates.
*image_x = focal_length_x * xd + principal_point_x;
*image_y = focal_length_y * yd + principal_point_y;
-} // namespace libmv
+} // namespace libmv
-}
+} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
diff --git a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
index 7a086c375d5..10ad0929007 100644
--- a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
+++ b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
@@ -32,8 +32,9 @@
namespace libmv {
namespace {
-void GetImagesInMarkers(const vector<Marker> &markers,
- int *image1, int *image2) {
+void GetImagesInMarkers(const vector<Marker>& markers,
+ int* image1,
+ int* image2) {
if (markers.size() < 2) {
return;
}
@@ -50,10 +51,11 @@ void GetImagesInMarkers(const vector<Marker> &markers,
} // namespace
-bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction) {
+bool EuclideanReconstructTwoFrames(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction) {
if (markers.size() < 16) {
- LG << "Not enough markers to initialize from two frames: " << markers.size();
+ LG << "Not enough markers to initialize from two frames: "
+ << markers.size();
return false;
}
@@ -76,10 +78,8 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
Mat3 R;
Vec3 t;
Mat3 K = Mat3::Identity();
- if (!MotionFromEssentialAndCorrespondence(E,
- K, x1.col(0),
- K, x2.col(0),
- &R, &t)) {
+ if (!MotionFromEssentialAndCorrespondence(
+ E, K, x1.col(0), K, x2.col(0), &R, &t)) {
LG << "Failed to compute R and t from E and K.";
return false;
}
@@ -88,14 +88,14 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
reconstruction->InsertCamera(image1, Mat3::Identity(), Vec3::Zero());
reconstruction->InsertCamera(image2, R, t);
- LG << "From two frame reconstruction got:\nR:\n" << R
- << "\nt:" << t.transpose();
+ LG << "From two frame reconstruction got:\nR:\n"
+ << R << "\nt:" << t.transpose();
return true;
}
namespace {
-Mat3 DecodeF(const Vec9 &encoded_F) {
+Mat3 DecodeF(const Vec9& encoded_F) {
// Decode F and force it to be rank 2.
Map<const Mat3> full_rank_F(encoded_F.data(), 3, 3);
Eigen::JacobiSVD<Mat3> svd(full_rank_F,
@@ -110,22 +110,22 @@ Mat3 DecodeF(const Vec9 &encoded_F) {
// doing a full SVD of F at each iteration. This uses sampson error.
struct FundamentalSampsonCostFunction {
public:
- typedef Vec FMatrixType;
+ typedef Vec FMatrixType;
typedef Vec9 XMatrixType;
// Assumes markers are ordered by track.
- explicit FundamentalSampsonCostFunction(const vector<Marker> &markers)
- : markers(markers) {}
+ explicit FundamentalSampsonCostFunction(const vector<Marker>& markers)
+ : markers(markers) {}
- Vec operator()(const Vec9 &encoded_F) const {
+ Vec operator()(const Vec9& encoded_F) const {
// Decode F and force it to be rank 2.
Mat3 F = DecodeF(encoded_F);
Vec residuals(markers.size() / 2);
residuals.setZero();
for (int i = 0; i < markers.size() / 2; ++i) {
- const Marker &marker1 = markers[2*i + 0];
- const Marker &marker2 = markers[2*i + 1];
+ const Marker& marker1 = markers[2 * i + 0];
+ const Marker& marker2 = markers[2 * i + 1];
CHECK_EQ(marker1.track, marker2.track);
Vec2 x1(marker1.x, marker1.y);
Vec2 x2(marker2.x, marker2.y);
@@ -134,13 +134,13 @@ struct FundamentalSampsonCostFunction {
}
return residuals;
}
- const vector<Marker> &markers;
+ const vector<Marker>& markers;
};
} // namespace
-bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction) {
+bool ProjectiveReconstructTwoFrames(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction) {
if (markers.size() < 16) {
return false;
}
diff --git a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
index 32cd4285190..354db14971f 100644
--- a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
+++ b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
@@ -37,18 +37,19 @@ class ProjectiveReconstruction;
tracks visible in both frames. The pose estimation of the camera for
these frames will be inserted into \a *reconstruction.
- \note The two frames need to have both enough parallax and enough common tracks
- for accurate reconstruction. At least 8 tracks are suggested.
- \note The origin of the coordinate system is defined to be the camera of
- the first keyframe.
- \note This assumes a calibrated reconstruction, e.g. the markers are
- already corrected for camera intrinsics and radial distortion.
+ \note The two frames need to have both enough parallax and enough common
+ tracks for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of the
+ first keyframe.
+ \note This assumes a calibrated reconstruction, e.g. the
+ markers are already corrected for camera intrinsics and radial
+ distortion.
\note This assumes an outlier-free set of markers.
\sa EuclideanResect, EuclideanIntersect, EuclideanBundle
*/
-bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction);
+bool EuclideanReconstructTwoFrames(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction);
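A minimal usage sketch for the declaration above (the helper name InitializeFromKeyframes is hypothetical; it assumes the marker coordinates in the tracks are already corrected for camera intrinsics and radial distortion, as the notes require):

    #include "libmv/base/vector.h"
    #include "libmv/simple_pipeline/initialize_reconstruction.h"
    #include "libmv/simple_pipeline/reconstruction.h"
    #include "libmv/simple_pipeline/tracks.h"

    // Hypothetical helper: initialize a reconstruction from two keyframes whose
    // markers are already in normalized (calibrated) coordinates.
    bool InitializeFromKeyframes(libmv::Tracks& normalized_tracks,
                                 int keyframe1,
                                 int keyframe2,
                                 libmv::EuclideanReconstruction* reconstruction) {
      // Markers of tracks visible in both keyframes, ordered by track.
      libmv::vector<libmv::Marker> markers =
          normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
      // Needs enough parallax and at least 8 common tracks (16 markers); the
      // first keyframe becomes the origin of the coordinate system.
      return libmv::EuclideanReconstructTwoFrames(markers, reconstruction);
    }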
/*!
Initialize the \link ProjectiveReconstruction reconstruction \endlink using
@@ -58,17 +59,17 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
tracks visible in both frames. An estimate of the projection matrices for
the two frames will get added to the reconstruction.
- \note The two frames need to have both enough parallax and enough common tracks
- for accurate reconstruction. At least 8 tracks are suggested.
- \note The origin of the coordinate system is defined to be the camera of
- the first keyframe.
+ \note The two frames need to have both enough parallax and enough common
+ tracks for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of the
+ first keyframe.
\note This assumes the markers are already corrected for radial distortion.
\note This assumes an outlier-free set of markers.
\sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
*/
-bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction);
+bool ProjectiveReconstructTwoFrames(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction);
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
diff --git a/intern/libmv/libmv/simple_pipeline/intersect.cc b/intern/libmv/libmv/simple_pipeline/intersect.cc
index ddb713684a4..86efd26f778 100644
--- a/intern/libmv/libmv/simple_pipeline/intersect.cc
+++ b/intern/libmv/libmv/simple_pipeline/intersect.cc
@@ -22,11 +22,11 @@
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
+#include "libmv/multiview/nviewtriangulation.h"
#include "libmv/multiview/projection.h"
#include "libmv/multiview/triangulation.h"
-#include "libmv/multiview/nviewtriangulation.h"
-#include "libmv/numeric/numeric.h"
#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
@@ -38,12 +38,12 @@ namespace {
class EuclideanIntersectCostFunctor {
public:
- EuclideanIntersectCostFunctor(const Marker &marker,
- const EuclideanCamera &camera)
+ EuclideanIntersectCostFunctor(const Marker& marker,
+ const EuclideanCamera& camera)
: marker_(marker), camera_(camera) {}
- template<typename T>
- bool operator()(const T *X, T *residuals) const {
+ template <typename T>
+ bool operator()(const T* X, T* residuals) const {
typedef Eigen::Matrix<T, 3, 3> Mat3;
typedef Eigen::Matrix<T, 3, 1> Vec3;
@@ -60,14 +60,14 @@ class EuclideanIntersectCostFunctor {
return true;
}
- const Marker &marker_;
- const EuclideanCamera &camera_;
+ const Marker& marker_;
+ const EuclideanCamera& camera_;
};
} // namespace
-bool EuclideanIntersect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction) {
+bool EuclideanIntersect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction) {
if (markers.size() < 2) {
return false;
}
@@ -78,7 +78,7 @@ bool EuclideanIntersect(const vector<Marker> &markers,
vector<Mat34> cameras;
Mat34 P;
for (int i = 0; i < markers.size(); ++i) {
- EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ EuclideanCamera* camera = reconstruction->CameraForImage(markers[i].image);
P_From_KRt(K, camera->R, camera->t, &P);
cameras.push_back(P);
}
@@ -103,19 +103,19 @@ bool EuclideanIntersect(const vector<Marker> &markers,
// Add residual blocks to the problem.
int num_residuals = 0;
for (int i = 0; i < markers.size(); ++i) {
- const Marker &marker = markers[i];
+ const Marker& marker = markers[i];
if (marker.weight != 0.0) {
- const EuclideanCamera &camera =
+ const EuclideanCamera& camera =
*reconstruction->CameraForImage(marker.image);
problem.AddResidualBlock(
- new ceres::AutoDiffCostFunction<
- EuclideanIntersectCostFunctor,
- 2, /* num_residuals */
- 3>(new EuclideanIntersectCostFunctor(marker, camera)),
+ new ceres::AutoDiffCostFunction<EuclideanIntersectCostFunctor,
+ 2, /* num_residuals */
+ 3>(
+ new EuclideanIntersectCostFunctor(marker, camera)),
NULL,
&X(0));
- num_residuals++;
+ num_residuals++;
}
}
@@ -126,9 +126,9 @@ bool EuclideanIntersect(const vector<Marker> &markers,
if (!num_residuals) {
LG << "Skipping running minimizer with zero residuals";
- // We still add 3D point for the track regardless it was
- // optimized or not. If track is a constant zero it'll use
- // algebraic intersection result as a 3D coordinate.
+    // We still add a 3D point for the track regardless of whether it was
+    // optimized or not. If the track's weight is a constant zero, the
+    // algebraic intersection result is used as the 3D coordinate.
Vec3 point = X.head<3>();
reconstruction->InsertPoint(markers[0].track, point);
@@ -152,12 +152,12 @@ bool EuclideanIntersect(const vector<Marker> &markers,
// Try projecting the point; make sure it's in front of everyone.
for (int i = 0; i < cameras.size(); ++i) {
- const EuclideanCamera &camera =
+ const EuclideanCamera& camera =
*reconstruction->CameraForImage(markers[i].image);
Vec3 x = camera.R * X + camera.t;
if (x(2) < 0) {
- LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
- << ": " << x.transpose();
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image << ": "
+ << x.transpose();
return false;
}
}
@@ -173,35 +173,35 @@ namespace {
struct ProjectiveIntersectCostFunction {
public:
- typedef Vec FMatrixType;
+ typedef Vec FMatrixType;
typedef Vec4 XMatrixType;
ProjectiveIntersectCostFunction(
- const vector<Marker> &markers,
- const ProjectiveReconstruction &reconstruction)
- : markers(markers), reconstruction(reconstruction) {}
+ const vector<Marker>& markers,
+ const ProjectiveReconstruction& reconstruction)
+ : markers(markers), reconstruction(reconstruction) {}
- Vec operator()(const Vec4 &X) const {
+ Vec operator()(const Vec4& X) const {
Vec residuals(2 * markers.size());
residuals.setZero();
for (int i = 0; i < markers.size(); ++i) {
- const ProjectiveCamera &camera =
+ const ProjectiveCamera& camera =
*reconstruction.CameraForImage(markers[i].image);
Vec3 projected = camera.P * X;
projected /= projected(2);
- residuals[2*i + 0] = projected(0) - markers[i].x;
- residuals[2*i + 1] = projected(1) - markers[i].y;
+ residuals[2 * i + 0] = projected(0) - markers[i].x;
+ residuals[2 * i + 1] = projected(1) - markers[i].y;
}
return residuals;
}
- const vector<Marker> &markers;
- const ProjectiveReconstruction &reconstruction;
+ const vector<Marker>& markers;
+ const ProjectiveReconstruction& reconstruction;
};
} // namespace
-bool ProjectiveIntersect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction) {
+bool ProjectiveIntersect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction) {
if (markers.size() < 2) {
return false;
}
@@ -209,7 +209,7 @@ bool ProjectiveIntersect(const vector<Marker> &markers,
// Get the cameras to use for the intersection.
vector<Mat34> cameras;
for (int i = 0; i < markers.size(); ++i) {
- ProjectiveCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ ProjectiveCamera* camera = reconstruction->CameraForImage(markers[i].image);
cameras.push_back(camera->P);
}
@@ -232,16 +232,16 @@ bool ProjectiveIntersect(const vector<Marker> &markers,
Solver solver(triangulate_cost);
Solver::Results results = solver.minimize(params, &X);
- (void) results; // TODO(keir): Ensure results are good.
+ (void)results; // TODO(keir): Ensure results are good.
// Try projecting the point; make sure it's in front of everyone.
for (int i = 0; i < cameras.size(); ++i) {
- const ProjectiveCamera &camera =
+ const ProjectiveCamera& camera =
*reconstruction->CameraForImage(markers[i].image);
Vec3 x = camera.P * X;
if (x(2) < 0) {
- LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
- << ": " << x.transpose();
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image << ": "
+ << x.transpose();
}
}
diff --git a/intern/libmv/libmv/simple_pipeline/intersect.h b/intern/libmv/libmv/simple_pipeline/intersect.h
index 15d6f998557..aff3ffe66e2 100644
--- a/intern/libmv/libmv/simple_pipeline/intersect.h
+++ b/intern/libmv/libmv/simple_pipeline/intersect.h
@@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_INTERSECT_H
#include "libmv/base/vector.h"
-#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@@ -38,7 +38,8 @@ namespace libmv {
\a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
- The new \link Point points \endlink will be inserted in \a reconstruction.
+ The new \link Point points \endlink will be inserted in \a
+ reconstruction.
\note This assumes a calibrated reconstruction, e.g. the markers are
already corrected for camera intrinsics and radial distortion.
@@ -46,8 +47,8 @@ namespace libmv {
\sa EuclideanResect
*/
-bool EuclideanIntersect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction);
+bool EuclideanIntersect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction);
/*!
Estimate the homogeneous coordinates of a track by intersecting rays.
@@ -60,7 +61,8 @@ bool EuclideanIntersect(const vector<Marker> &markers,
\a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
- The new \link Point points \endlink will be inserted in \a reconstruction.
+ The new \link Point points \endlink will be inserted in \a
+ reconstruction.
\note This assumes that radial distortion is already corrected for, but
does not assume that e.g. focal length and principal point are
@@ -69,8 +71,8 @@ bool EuclideanIntersect(const vector<Marker> &markers,
\sa Resect
*/
-bool ProjectiveIntersect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction);
+bool ProjectiveIntersect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction);
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/intersect_test.cc b/intern/libmv/libmv/simple_pipeline/intersect_test.cc
index dd4fdc789af..447cc095cb0 100644
--- a/intern/libmv/libmv/simple_pipeline/intersect_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/intersect_test.cc
@@ -22,10 +22,10 @@
#include <iostream>
-#include "testing/testing.h"
+#include "libmv/logging/logging.h"
#include "libmv/multiview/projection.h"
#include "libmv/numeric/numeric.h"
-#include "libmv/logging/logging.h"
+#include "testing/testing.h"
namespace libmv {
@@ -40,13 +40,15 @@ TEST(Intersect, EuclideanIntersect) {
// 0, 0, 1;
Mat3 R1 = RotationAroundZ(-0.1);
Mat3 R2 = RotationAroundX(-0.1);
- Vec3 t1; t1 << 1, 1, 10;
- Vec3 t2; t2 << -2, -1, 10;
+ Vec3 t1;
+ t1 << 1, 1, 10;
+ Vec3 t2;
+ t2 << -2, -1, 10;
Mat34 P1, P2;
P_From_KRt(K1, R1, t1, &P1);
P_From_KRt(K2, R2, t2, &P2);
- //Mat3 F; FundamentalFromProjections(P1, P2, &F);
+ // Mat3 F; FundamentalFromProjections(P1, P2, &F);
Mat3X X;
X.resize(3, 30);
@@ -68,9 +70,9 @@ TEST(Intersect, EuclideanIntersect) {
reconstruction.InsertCamera(2, R2, t2);
vector<Marker> markers;
- Marker a = { 1, 0, x1.x(), x1.y(), 1.0 };
+ Marker a = {1, 0, x1.x(), x1.y(), 1.0};
markers.push_back(a);
- Marker b = { 2, 0, x2.x(), x2.y(), 1.0 };
+ Marker b = {2, 0, x2.x(), x2.y(), 1.0};
markers.push_back(b);
EuclideanIntersect(markers, &reconstruction);
@@ -78,4 +80,4 @@ TEST(Intersect, EuclideanIntersect) {
EXPECT_NEAR(0, DistanceLInfinity(estimated, expected), 1e-8);
}
}
-} // namespace
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc b/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc
index 241b5600505..5526d730651 100644
--- a/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc
@@ -20,20 +20,20 @@
#include "libmv/simple_pipeline/keyframe_selection.h"
-#include "libmv/numeric/numeric.h"
#include "ceres/ceres.h"
#include "libmv/logging/logging.h"
-#include "libmv/multiview/homography.h"
#include "libmv/multiview/fundamental.h"
-#include "libmv/simple_pipeline/intersect.h"
+#include "libmv/multiview/homography.h"
+#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/intersect.h"
#include <Eigen/Eigenvalues>
namespace libmv {
namespace {
-Mat3 IntrinsicsNormalizationMatrix(const CameraIntrinsics &intrinsics) {
+Mat3 IntrinsicsNormalizationMatrix(const CameraIntrinsics& intrinsics) {
Mat3 T = Mat3::Identity(), S = Mat3::Identity();
T(0, 2) = -intrinsics.principal_point_x();
@@ -56,7 +56,7 @@ Mat3 IntrinsicsNormalizationMatrix(const CameraIntrinsics &intrinsics) {
// (k = 7 for a fundamental matrix or 8 for a homography)
// r is the dimension of the data
// (r = 4 for 2D correspondences between two frames)
-double GRIC(const Vec &e, int d, int k, int r) {
+double GRIC(const Vec& e, int d, int k, int r) {
int n = e.rows();
double lambda1 = log(static_cast<double>(r));
double lambda2 = log(static_cast<double>(r * n));
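The elided body presumably follows Torr's GRIC, which the two logarithms above are consistent with: each residual is robustly clamped and the model is penalized by its dimensionality and parameter count,

    \mathrm{GRIC} = \sum_{i} \rho(e_i^2) \;+\; \lambda_1\, d\, n \;+\; \lambda_2\, k, \qquad \rho(e^2) = \min\!\left(\frac{e^2}{\sigma^2},\ 2\,(r - d)\right), \qquad \lambda_1 = \ln r,\ \lambda_2 = \ln(r\,n)

where n is the number of correspondences and sigma an assumed measurement-noise level; a lower GRIC means the model explains the matches better.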
@@ -89,7 +89,7 @@ double GRIC(const Vec &e, int d, int k, int r) {
//
// TODO(keir): Consider moving this into the numeric code, since this is not
// related to keyframe selection.
-Mat PseudoInverseWithClampedEigenvalues(const Mat &matrix,
+Mat PseudoInverseWithClampedEigenvalues(const Mat& matrix,
int num_eigenvalues_to_clamp) {
Eigen::EigenSolver<Mat> eigen_solver(matrix);
Mat D = eigen_solver.pseudoEigenvalueMatrix();
@@ -112,27 +112,24 @@ Mat PseudoInverseWithClampedEigenvalues(const Mat &matrix,
return V * D * V.inverse();
}
-void FilterZeroWeightMarkersFromTracks(const Tracks &tracks,
- Tracks *filtered_tracks) {
+void FilterZeroWeightMarkersFromTracks(const Tracks& tracks,
+ Tracks* filtered_tracks) {
vector<Marker> all_markers = tracks.AllMarkers();
for (int i = 0; i < all_markers.size(); ++i) {
- Marker &marker = all_markers[i];
+ Marker& marker = all_markers[i];
if (marker.weight != 0.0) {
- filtered_tracks->Insert(marker.image,
- marker.track,
- marker.x,
- marker.y,
- marker.weight);
+ filtered_tracks->Insert(
+ marker.image, marker.track, marker.x, marker.y, marker.weight);
}
}
}
} // namespace
-void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
- const CameraIntrinsics &intrinsics,
- vector<int> &keyframes) {
+void SelectKeyframesBasedOnGRICAndVariance(const Tracks& _tracks,
+ const CameraIntrinsics& intrinsics,
+ vector<int>& keyframes) {
// Mirza Tahir Ahmed, Matthew N. Dailey
// Robust key frame extraction for 3D reconstruction from video streams
//
@@ -172,23 +169,21 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
candidate_image <= max_image;
candidate_image++) {
      // Combined set of all markers from both keyframes
- vector<Marker> all_markers =
- filtered_tracks.MarkersInBothImages(current_keyframe,
- candidate_image);
+ vector<Marker> all_markers = filtered_tracks.MarkersInBothImages(
+ current_keyframe, candidate_image);
// Match keypoints between frames current_keyframe and candidate_image
vector<Marker> tracked_markers =
- filtered_tracks.MarkersForTracksInBothImages(current_keyframe,
- candidate_image);
+ filtered_tracks.MarkersForTracksInBothImages(current_keyframe,
+ candidate_image);
// Correspondences in normalized space
Mat x1, x2;
CoordinatesForMarkersInImage(tracked_markers, current_keyframe, &x1);
CoordinatesForMarkersInImage(tracked_markers, candidate_image, &x2);
- LG << "Found " << x1.cols()
- << " correspondences between " << current_keyframe
- << " and " << candidate_image;
+ LG << "Found " << x1.cols() << " correspondences between "
+ << current_keyframe << " and " << candidate_image;
// Not enough points to construct fundamental matrix
if (x1.cols() < 8 || x2.cols() < 8)
@@ -199,9 +194,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
int Tf = all_markers.size();
double Rc = static_cast<double>(Tc) / Tf;
- LG << "Correspondence between " << current_keyframe
- << " and " << candidate_image
- << ": " << Rc;
+ LG << "Correspondence between " << current_keyframe << " and "
+ << candidate_image << ": " << Rc;
if (Rc < Tmin || Rc > Tmax)
continue;
@@ -210,19 +204,15 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
// Estimate homography using default options.
EstimateHomographyOptions estimate_homography_options;
- EstimateHomography2DFromCorrespondences(x1,
- x2,
- estimate_homography_options,
- &H);
+ EstimateHomography2DFromCorrespondences(
+ x1, x2, estimate_homography_options, &H);
// Convert homography to original pixel space.
H = N_inverse * H * N;
EstimateFundamentalOptions estimate_fundamental_options;
- EstimateFundamentalFromCorrespondences(x1,
- x2,
- estimate_fundamental_options,
- &F);
+ EstimateFundamentalFromCorrespondences(
+ x1, x2, estimate_fundamental_options, &F);
// Convert fundamental to original pixel space.
F = N_inverse * F * N;
@@ -238,11 +228,11 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
for (int i = 0; i < x1.cols(); i++) {
Vec2 current_x1, current_x2;
- intrinsics.NormalizedToImageSpace(x1(0, i), x1(1, i),
- &current_x1(0), &current_x1(1));
+ intrinsics.NormalizedToImageSpace(
+ x1(0, i), x1(1, i), &current_x1(0), &current_x1(1));
- intrinsics.NormalizedToImageSpace(x2(0, i), x2(1, i),
- &current_x2(0), &current_x2(1));
+ intrinsics.NormalizedToImageSpace(
+ x2(0, i), x2(1, i), &current_x2(0), &current_x2(1));
H_e(i) = SymmetricGeometricDistance(H, current_x1, current_x2);
F_e(i) = SymmetricEpipolarDistance(F, current_x1, current_x2);
@@ -255,10 +245,8 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
double GRIC_H = GRIC(H_e, 2, 8, 4);
double GRIC_F = GRIC(F_e, 3, 7, 4);
- LG << "GRIC values for frames " << current_keyframe
- << " and " << candidate_image
- << ", H-GRIC: " << GRIC_H
- << ", F-GRIC: " << GRIC_F;
+ LG << "GRIC values for frames " << current_keyframe << " and "
+ << candidate_image << ", H-GRIC: " << GRIC_H << ", F-GRIC: " << GRIC_F;
if (GRIC_H <= GRIC_F)
continue;
@@ -295,23 +283,19 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
Vec3 t;
Mat3 K = Mat3::Identity();
- if (!MotionFromEssentialAndCorrespondence(E,
- K, x1.col(0),
- K, x2.col(0),
- &R, &t)) {
+ if (!MotionFromEssentialAndCorrespondence(
+ E, K, x1.col(0), K, x2.col(0), &R, &t)) {
LG << "Failed to compute R and t from E and K";
continue;
}
- LG << "Camera transform between frames " << current_keyframe
- << " and " << candidate_image
- << ":\nR:\n" << R
- << "\nt:" << t.transpose();
+ LG << "Camera transform between frames " << current_keyframe << " and "
+ << candidate_image << ":\nR:\n"
+ << R << "\nt:" << t.transpose();
// First camera is identity, second one is relative to it
- reconstruction.InsertCamera(current_keyframe,
- Mat3::Identity(),
- Vec3::Zero());
+ reconstruction.InsertCamera(
+ current_keyframe, Mat3::Identity(), Vec3::Zero());
reconstruction.InsertCamera(candidate_image, R, t);
// Reconstruct 3D points
@@ -349,7 +333,7 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
}
double success_intersects_factor =
- (double) intersects_success / intersects_total;
+ (double)intersects_success / intersects_total;
if (success_intersects_factor < success_intersects_factor_best) {
LG << "Skip keyframe candidate because of "
@@ -372,7 +356,7 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
&empty_intrinsics,
&evaluation);
- Mat &jacobian = evaluation.jacobian;
+ Mat& jacobian = evaluation.jacobian;
Mat JT_J = jacobian.transpose() * jacobian;
// There are 7 degrees of freedom, so clamp them out.
@@ -380,10 +364,10 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
Mat temp_derived = JT_J * JT_J_inv * JT_J;
bool is_inversed = (temp_derived - JT_J).cwiseAbs2().sum() <
- 1e-4 * std::min(temp_derived.cwiseAbs2().sum(),
- JT_J.cwiseAbs2().sum());
+ 1e-4 * std::min(temp_derived.cwiseAbs2().sum(),
+ JT_J.cwiseAbs2().sum());
- LG << "Check on inversed: " << (is_inversed ? "true" : "false" )
+ LG << "Check on inversed: " << (is_inversed ? "true" : "false")
<< ", det(JT_J): " << JT_J.determinant();
if (!is_inversed) {
@@ -400,8 +384,7 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
double Sc = static_cast<double>(I + A) / Square(3 * I) * Sigma_P.trace();
- LG << "Expected estimation error between "
- << current_keyframe << " and "
+ LG << "Expected estimation error between " << current_keyframe << " and "
<< candidate_image << ": " << Sc;
// Pairing with a lower Sc indicates a better choice
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection.h b/intern/libmv/libmv/simple_pipeline/keyframe_selection.h
index 25253af32fe..a1b3910abd4 100644
--- a/intern/libmv/libmv/simple_pipeline/keyframe_selection.h
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection.h
@@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_KEYFRAME_SELECTION_H_
#include "libmv/base/vector.h"
-#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@@ -43,10 +43,9 @@ namespace libmv {
// \param intrinsics: the camera intrinsics.
// \param keyframes: will contain the numbers of all images which are
//                   considered good to be used for reconstruction.
-void SelectKeyframesBasedOnGRICAndVariance(
- const Tracks &tracks,
- const CameraIntrinsics &intrinsics,
- vector<int> &keyframes);
+void SelectKeyframesBasedOnGRICAndVariance(const Tracks& tracks,
+ const CameraIntrinsics& intrinsics,
+ vector<int>& keyframes);
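A minimal usage sketch for this declaration, mirroring the setup used by the unit tests below (the wrapper name SelectKeyframesExample and the intrinsics values are illustrative only):

    #include "libmv/base/vector.h"
    #include "libmv/simple_pipeline/camera_intrinsics.h"
    #include "libmv/simple_pipeline/keyframe_selection.h"
    #include "libmv/simple_pipeline/tracks.h"

    void SelectKeyframesExample() {
      libmv::PolynomialCameraIntrinsics intrinsics;
      intrinsics.SetFocalLength(900.0, 900.0);
      intrinsics.SetPrincipalPoint(640.0, 540.0);
      intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);

      libmv::Tracks tracks;
      // tracks.Insert(image, track, x, y, weight) for every marker...

      libmv::vector<int> keyframes;
      libmv::SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
      // keyframes now holds the image numbers considered good for
      // reconstruction.
    }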
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc b/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc
index 9d88362cc88..983349c0c5a 100644
--- a/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc
@@ -1,15 +1,15 @@
// Copyright (c) 2011 libmv authors.
-//
+//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to
// deal in the Software without restriction, including without limitation the
// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
// sell copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
-//
+//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
-//
+//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -20,9 +20,9 @@
#include "libmv/simple_pipeline/keyframe_selection.h"
-#include "testing/testing.h"
-#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "testing/testing.h"
namespace libmv {
@@ -30,7 +30,7 @@ namespace libmv {
// Should not be keyframe
TEST(KeyframeSelection, SyntheticNeighborFrame) {
PolynomialCameraIntrinsics intrinsics;
- intrinsics.SetFocalLength(900.0,900.0);
+ intrinsics.SetFocalLength(900.0, 900.0);
intrinsics.SetPrincipalPoint(640.0, 540.0);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
@@ -66,25 +66,56 @@ TEST(KeyframeSelection, FabrikEingangNeighborFrames) {
intrinsics.SetPrincipalPoint(960.000, 544.000);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
- Marker markers[] = {
- {1, 0, 737.599983, 646.397594, 1.0}, {2, 0, 737.906628, 648.113327, 1.0}, {1, 1, 863.045425, 646.081905, 1.0},
- {2, 1, 863.339767, 647.650040, 1.0}, {1, 2, 736.959972, 574.080151, 1.0}, {2, 2, 737.217350, 575.604900, 1.0},
- {1, 3, 864.097424, 573.374908, 1.0}, {2, 3, 864.383469, 574.900307, 1.0}, {1, 4, 789.429073, 631.677521, 1.0},
- {2, 4, 789.893131, 633.124451, 1.0}, {1, 5, 791.051960, 573.442028, 1.0}, {2, 5, 791.336575, 575.088890, 1.0},
- {1, 6, 738.973961, 485.130308, 1.0}, {2, 6, 739.435501, 486.734207, 1.0}, {1, 7, 862.403240, 514.866074, 1.0},
- {2, 7, 862.660618, 516.413261, 1.0}, {1, 8, 802.240162, 485.759838, 1.0}, {2, 8, 802.602253, 487.432899, 1.0},
- {1, 9, 754.340630, 500.624559, 1.0}, {2, 9, 754.559956, 502.079920, 1.0}, {1, 10, 849.398689, 484.480545, 1.0},
- {2, 10, 849.599934, 486.079937, 1.0}, {1, 11, 788.803768, 515.924391, 1.0}, {2, 11, 789.119911, 517.439932, 1.0},
- {1, 12, 838.733940, 558.212688, 1.0}, {2, 12, 839.039898, 559.679916, 1.0}, {1, 13, 760.014782, 575.194466, 1.0},
- {2, 13, 760.319881, 576.639904, 1.0}, {1, 14, 765.321636, 616.015957, 1.0}, {2, 14, 765.759945, 617.599915, 1.0},
- {1, 15, 800.963230, 660.032082, 1.0}, {2, 15, 801.279945, 661.759876, 1.0}, {1, 16, 846.321087, 602.313053, 1.0},
- {2, 16, 846.719913, 603.839878, 1.0}, {1, 17, 864.288311, 616.790524, 1.0}, {2, 17, 864.639931, 618.239918, 1.0},
- {1, 18, 800.006790, 602.573425, 1.0}, {2, 18, 800.319958, 604.159912, 1.0}, {1, 19, 739.026890, 617.944138, 1.0},
- {2, 19, 739.199924, 619.519924, 1.0}, {1, 20, 801.987419, 544.134888, 1.0}, {2, 20, 802.239933, 545.599911, 1.0},
- {1, 21, 753.619823, 542.961300, 1.0}, {2, 21, 753.919945, 544.639874, 1.0}, {1, 22, 787.921257, 499.910206, 1.0},
- {2, 22, 788.159924, 501.439917, 1.0}, {1, 23, 839.095459, 529.287903, 1.0}, {2, 23, 839.359932, 530.879934, 1.0},
- {1, 24, 811.760330, 630.732269, 1.0}, {2, 24, 812.159901, 632.319859, 1.0}
- };
+ Marker markers[] = {{1, 0, 737.599983, 646.397594, 1.0},
+ {2, 0, 737.906628, 648.113327, 1.0},
+ {1, 1, 863.045425, 646.081905, 1.0},
+ {2, 1, 863.339767, 647.650040, 1.0},
+ {1, 2, 736.959972, 574.080151, 1.0},
+ {2, 2, 737.217350, 575.604900, 1.0},
+ {1, 3, 864.097424, 573.374908, 1.0},
+ {2, 3, 864.383469, 574.900307, 1.0},
+ {1, 4, 789.429073, 631.677521, 1.0},
+ {2, 4, 789.893131, 633.124451, 1.0},
+ {1, 5, 791.051960, 573.442028, 1.0},
+ {2, 5, 791.336575, 575.088890, 1.0},
+ {1, 6, 738.973961, 485.130308, 1.0},
+ {2, 6, 739.435501, 486.734207, 1.0},
+ {1, 7, 862.403240, 514.866074, 1.0},
+ {2, 7, 862.660618, 516.413261, 1.0},
+ {1, 8, 802.240162, 485.759838, 1.0},
+ {2, 8, 802.602253, 487.432899, 1.0},
+ {1, 9, 754.340630, 500.624559, 1.0},
+ {2, 9, 754.559956, 502.079920, 1.0},
+ {1, 10, 849.398689, 484.480545, 1.0},
+ {2, 10, 849.599934, 486.079937, 1.0},
+ {1, 11, 788.803768, 515.924391, 1.0},
+ {2, 11, 789.119911, 517.439932, 1.0},
+ {1, 12, 838.733940, 558.212688, 1.0},
+ {2, 12, 839.039898, 559.679916, 1.0},
+ {1, 13, 760.014782, 575.194466, 1.0},
+ {2, 13, 760.319881, 576.639904, 1.0},
+ {1, 14, 765.321636, 616.015957, 1.0},
+ {2, 14, 765.759945, 617.599915, 1.0},
+ {1, 15, 800.963230, 660.032082, 1.0},
+ {2, 15, 801.279945, 661.759876, 1.0},
+ {1, 16, 846.321087, 602.313053, 1.0},
+ {2, 16, 846.719913, 603.839878, 1.0},
+ {1, 17, 864.288311, 616.790524, 1.0},
+ {2, 17, 864.639931, 618.239918, 1.0},
+ {1, 18, 800.006790, 602.573425, 1.0},
+ {2, 18, 800.319958, 604.159912, 1.0},
+ {1, 19, 739.026890, 617.944138, 1.0},
+ {2, 19, 739.199924, 619.519924, 1.0},
+ {1, 20, 801.987419, 544.134888, 1.0},
+ {2, 20, 802.239933, 545.599911, 1.0},
+ {1, 21, 753.619823, 542.961300, 1.0},
+ {2, 21, 753.919945, 544.639874, 1.0},
+ {1, 22, 787.921257, 499.910206, 1.0},
+ {2, 22, 788.159924, 501.439917, 1.0},
+ {1, 23, 839.095459, 529.287903, 1.0},
+ {2, 23, 839.359932, 530.879934, 1.0},
+ {1, 24, 811.760330, 630.732269, 1.0},
+ {2, 24, 812.159901, 632.319859, 1.0}};
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@@ -108,18 +139,34 @@ TEST(KeyframeSelection, FabrikEingangFarFrames) {
intrinsics.SetPrincipalPoint(960.000, 544.000);
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
- Marker markers[] = {
- {1, 0, 369.459200, 619.315258, 1.0}, {2, 0, 279.677496, 722.086842, 1.0}, {1, 1, 376.831970, 370.278397, 1.0},
- {2, 1, 221.695247, 460.065418, 1.0}, {1, 2, 1209.139023, 567.705605, 1.0}, {2, 2, 1080.760117, 659.230083, 1.0},
- {1, 3, 1643.495750, 903.620453, 1.0}, {2, 3, 1618.405037, 1015.374908, 1.0}, {1, 4, 1494.849815, 425.302460, 1.0},
- {2, 4, 1457.467575, 514.727587, 1.0}, {1, 5, 1794.637299, 328.728609, 1.0}, {2, 5, 1742.161446, 408.988636, 1.0},
- {1, 6, 1672.822723, 102.240358, 1.0}, {2, 6, 1539.287224, 153.536892, 1.0}, {1, 7, 1550.843925, 53.424943, 1.0},
- {2, 7, 1385.579109, 96.450085, 1.0}, {1, 8, 852.953281, 465.399578, 1.0}, {2, 8, 779.404564, 560.091843, 1.0},
- {1, 9, 906.853752, 299.827040, 1.0}, {2, 9, 786.923218, 385.570770, 1.0}, {1, 10, 406.322966, 87.556041, 1.0},
- {2, 10, 140.339413, 150.877481, 1.0}, {1, 11, 254.811573, 851.296478, 1.0}, {2, 11, 94.478302, 969.350189, 1.0},
- {1, 12, 729.087868, 806.092758, 1.0}, {2, 12, 606.212139, 919.876560, 1.0}, {1, 13, 1525.719452, 920.398083, 1.0},
- {2, 13, 1495.579720, 1031.971218, 1.0}
- };
+ Marker markers[] = {{1, 0, 369.459200, 619.315258, 1.0},
+ {2, 0, 279.677496, 722.086842, 1.0},
+ {1, 1, 376.831970, 370.278397, 1.0},
+ {2, 1, 221.695247, 460.065418, 1.0},
+ {1, 2, 1209.139023, 567.705605, 1.0},
+ {2, 2, 1080.760117, 659.230083, 1.0},
+ {1, 3, 1643.495750, 903.620453, 1.0},
+ {2, 3, 1618.405037, 1015.374908, 1.0},
+ {1, 4, 1494.849815, 425.302460, 1.0},
+ {2, 4, 1457.467575, 514.727587, 1.0},
+ {1, 5, 1794.637299, 328.728609, 1.0},
+ {2, 5, 1742.161446, 408.988636, 1.0},
+ {1, 6, 1672.822723, 102.240358, 1.0},
+ {2, 6, 1539.287224, 153.536892, 1.0},
+ {1, 7, 1550.843925, 53.424943, 1.0},
+ {2, 7, 1385.579109, 96.450085, 1.0},
+ {1, 8, 852.953281, 465.399578, 1.0},
+ {2, 8, 779.404564, 560.091843, 1.0},
+ {1, 9, 906.853752, 299.827040, 1.0},
+ {2, 9, 786.923218, 385.570770, 1.0},
+ {1, 10, 406.322966, 87.556041, 1.0},
+ {2, 10, 140.339413, 150.877481, 1.0},
+ {1, 11, 254.811573, 851.296478, 1.0},
+ {2, 11, 94.478302, 969.350189, 1.0},
+ {1, 12, 729.087868, 806.092758, 1.0},
+ {2, 12, 606.212139, 919.876560, 1.0},
+ {1, 13, 1525.719452, 920.398083, 1.0},
+ {2, 13, 1495.579720, 1031.971218, 1.0}};
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@@ -144,17 +191,35 @@ TEST(KeyframeSelection, CopterManualKeyFrames) {
intrinsics.SetRadialDistortion(-0.08590, 0.0, 0.0);
Marker markers[] = {
- {1, 0, 645.792694, 403.115931, 1.0}, {2, 0, 630.641174, 307.996409, 1.0}, {1, 1, 783.469086, 403.904328, 1.0},
- {2, 1, 766.001129, 308.998225, 1.0}, {1, 2, 650.000000, 160.000001, 1.0}, {1, 3, 785.225906, 158.619039, 1.0},
- {2, 3, 767.526474, 70.449695, 1.0}, {1, 4, 290.640526, 382.335634, 1.0}, {2, 4, 273.001728, 86.993319, 1.0},
- {1, 5, 291.162739, 410.602684, 1.0}, {2, 5, 273.287849, 111.937487, 1.0}, {1, 6, 136.919317, 349.895797, 1.0},
- {1, 7, 490.844345, 47.572222, 1.0}, {1, 8, 454.406433, 488.935761, 1.0}, {1, 9, 378.655815, 618.522248, 1.0},
- {2, 9, 357.061806, 372.265077, 1.0}, {1, 10, 496.011391, 372.668824, 1.0}, {2, 10, 477.979164, 222.986112, 1.0},
- {1, 11, 680.060272, 256.103625, 1.0}, {2, 11, 670.587540, 204.830453, 1.0}, {1, 12, 1070.817108, 218.775322, 1.0},
- {2, 12, 1046.129913, 128.969783, 1.0}, {1, 14, 242.516403, 596.048512, 1.0}, {2, 14, 224.182606, 248.272154, 1.0},
- {1, 15, 613.936272, 287.519073, 1.0}, {2, 15, 600.467644, 196.085722, 1.0}, {1, 31, 844.637451, 256.354315, 1.0},
+ {1, 0, 645.792694, 403.115931, 1.0},
+ {2, 0, 630.641174, 307.996409, 1.0},
+ {1, 1, 783.469086, 403.904328, 1.0},
+ {2, 1, 766.001129, 308.998225, 1.0},
+ {1, 2, 650.000000, 160.000001, 1.0},
+ {1, 3, 785.225906, 158.619039, 1.0},
+ {2, 3, 767.526474, 70.449695, 1.0},
+ {1, 4, 290.640526, 382.335634, 1.0},
+ {2, 4, 273.001728, 86.993319, 1.0},
+ {1, 5, 291.162739, 410.602684, 1.0},
+ {2, 5, 273.287849, 111.937487, 1.0},
+ {1, 6, 136.919317, 349.895797, 1.0},
+ {1, 7, 490.844345, 47.572222, 1.0},
+ {1, 8, 454.406433, 488.935761, 1.0},
+ {1, 9, 378.655815, 618.522248, 1.0},
+ {2, 9, 357.061806, 372.265077, 1.0},
+ {1, 10, 496.011391, 372.668824, 1.0},
+ {2, 10, 477.979164, 222.986112, 1.0},
+ {1, 11, 680.060272, 256.103625, 1.0},
+ {2, 11, 670.587540, 204.830453, 1.0},
+ {1, 12, 1070.817108, 218.775322, 1.0},
+ {2, 12, 1046.129913, 128.969783, 1.0},
+ {1, 14, 242.516403, 596.048512, 1.0},
+ {2, 14, 224.182606, 248.272154, 1.0},
+ {1, 15, 613.936272, 287.519073, 1.0},
+ {2, 15, 600.467644, 196.085722, 1.0},
+ {1, 31, 844.637451, 256.354315, 1.0},
{2, 31, 823.200150, 165.714952, 1.0},
- };
+ };
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@@ -180,52 +245,140 @@ TEST(KeyframeSelection, ElevatorManualKeyframesFrames) {
intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
Marker markers[] = {
- {1, 2, 1139.861412, 1034.634984, 1.0}, {2, 2, 1143.512192, 1065.355718, 1.0}, {1, 3, 1760.821953, 644.658036, 1.0},
- {2, 3, 1770.901108, 697.899928, 1.0}, {1, 4, 858.071823, 1068.520746, 1.0}, {1, 6, 1633.952408, 797.050145, 1.0},
- {2, 6, 1642.508469, 849.157140, 1.0}, {1, 8, 1716.695824, 451.805491, 1.0}, {2, 8, 1726.513939, 502.095687, 1.0},
- {1, 9, 269.577627, 724.986935, 1.0}, {2, 9, 269.424820, 764.154246, 1.0}, {1, 10, 1891.321907, 706.948843, 1.0},
- {2, 10, 1903.338547, 766.068377, 1.0}, {1, 12, 1806.227074, 956.089604, 1.0}, {2, 12, 1816.619568, 1013.767376, 1.0},
- {1, 14, 269.544153, 1002.333570, 1.0}, {2, 14, 269.367542, 1043.509254, 1.0}, {1, 15, 1402.772141, 281.392962, 1.0},
- {2, 15, 1409.089165, 318.731629, 1.0}, {1, 16, 195.877233, 919.454341, 1.0}, {2, 16, 192.531109, 997.367899, 1.0},
- {1, 17, 1789.584045, 120.036661, 1.0}, {2, 17, 1800.391846, 167.822964, 1.0}, {1, 18, 999.363213, 765.004807, 1.0},
- {2, 18, 1002.345772, 790.560122, 1.0}, {1, 19, 647.342491, 1044.805727, 1.0}, {2, 19, 649.328041, 1058.682940, 1.0},
- {1, 20, 1365.486832, 440.901829, 1.0}, {2, 20, 1371.413040, 477.888730, 1.0}, {1, 21, 1787.125282, 301.431606, 1.0},
- {2, 21, 1798.527260, 355.224531, 1.0}, {1, 22, 1257.805824, 932.797258, 1.0}, {2, 22, 1263.017578, 969.376774, 1.0},
- {1, 23, 961.969528, 843.148112, 1.0}, {2, 23, 964.869461, 868.587620, 1.0}, {1, 24, 158.076110, 1052.643592, 1.0},
- {1, 25, 1072.884521, 1005.296981, 1.0}, {2, 25, 1076.091156, 1032.776856, 1.0}, {1, 26, 1107.656937, 526.577228, 1.0},
- {2, 26, 1111.618423, 555.524454, 1.0}, {1, 27, 1416.410751, 529.857581, 1.0}, {2, 27, 1422.663574, 570.025957, 1.0},
- {1, 28, 1498.673630, 1005.453086, 1.0}, {2, 28, 1505.381813, 1051.827149, 1.0}, {1, 29, 1428.647804, 652.473629, 1.0},
- {2, 29, 1434.898224, 692.715390, 1.0}, {1, 30, 1332.318764, 503.673599, 1.0}, {2, 30, 1338.000069, 540.507967, 1.0},
- {1, 32, 1358.642693, 709.837904, 1.0}, {2, 32, 1364.231529, 748.678265, 1.0}, {1, 33, 1850.911560, 460.475668, 1.0},
- {2, 33, 1862.221413, 512.797347, 1.0}, {1, 34, 1226.117821, 607.053959, 1.0}, {2, 34, 1230.736084, 641.091449, 1.0},
- {1, 35, 619.598236, 523.341744, 1.0}, {2, 35, 621.601124, 554.453287, 1.0}, {1, 36, 956.591492, 958.223183, 1.0},
- {2, 36, 959.289265, 983.289263, 1.0}, {1, 37, 1249.922218, 419.095856, 1.0}, {2, 37, 1255.005913, 452.556177, 1.0},
- {1, 39, 1300.528450, 386.251166, 1.0}, {2, 39, 1305.957413, 420.185595, 1.0}, {1, 40, 1128.689919, 972.558346, 1.0},
- {2, 40, 1132.413712, 1003.984737, 1.0}, {1, 41, 503.304749, 1053.504388, 1.0}, {2, 41, 505.019703, 1069.175613, 1.0},
- {1, 42, 1197.352982, 472.681564, 1.0}, {2, 42, 1201.910706, 503.459880, 1.0}, {1, 43, 1794.391022, 383.911400, 1.0},
- {2, 43, 1805.324135, 436.116468, 1.0}, {1, 44, 789.641418, 1058.045647, 1.0}, {1, 45, 1376.575241, 928.714979, 1.0},
- {2, 45, 1381.995850, 969.511957, 1.0}, {1, 46, 1598.023567, 93.975592, 1.0}, {2, 46, 1606.937141, 136.827035, 1.0},
- {1, 47, 1455.550232, 762.128685, 1.0}, {2, 47, 1462.014313, 805.164878, 1.0}, {1, 48, 1357.123489, 354.460326, 1.0},
- {2, 48, 1363.071899, 390.363121, 1.0}, {1, 49, 939.792652, 781.549895, 1.0}, {2, 49, 942.802620, 806.164012, 1.0},
- {1, 50, 1380.251083, 805.948620, 1.0}, {2, 50, 1385.637932, 845.592098, 1.0}, {1, 51, 1021.769943, 1049.802361, 1.0},
- {1, 52, 1065.634918, 608.099055, 1.0}, {2, 52, 1069.142189, 635.361736, 1.0}, {1, 53, 624.324188, 463.202863, 1.0},
- {2, 53, 626.395454, 494.994088, 1.0}, {1, 54, 1451.459885, 881.557624, 1.0}, {2, 54, 1457.679634, 924.345531, 1.0},
- {1, 55, 1201.885986, 1057.079022, 1.0}, {1, 56, 581.157532, 947.661438, 1.0}, {2, 56, 583.242359, 960.831449, 1.0},
- {1, 58, 513.593102, 954.175858, 1.0}, {2, 58, 515.470047, 971.309574, 1.0}, {1, 59, 928.069038, 901.774421, 1.0},
- {2, 59, 930.847950, 925.613744, 1.0}, {1, 60, 1065.860023, 740.395389, 1.0}, {2, 60, 1069.484253, 768.971086, 1.0},
- {1, 61, 990.479393, 906.264632, 1.0}, {2, 61, 993.217506, 933.088803, 1.0}, {1, 62, 1776.196747, 776.278453, 1.0},
- {2, 62, 1786.292496, 831.136880, 1.0}, {1, 63, 834.454365, 1012.449725, 1.0}, {2, 63, 836.868324, 1033.451807, 1.0},
- {1, 64, 1355.190697, 869.184809, 1.0}, {2, 64, 1360.736618, 909.773347, 1.0}, {1, 65, 702.072487, 897.519686, 1.0},
- {2, 65, 704.203377, 911.931131, 1.0}, {1, 66, 1214.022903, 856.199934, 1.0}, {2, 66, 1218.109016, 890.753052, 1.0},
- {1, 67, 327.676048, 236.814036, 1.0}, {2, 67, 328.335285, 277.251878, 1.0}, {1, 68, 289.064083, 454.793912, 1.0},
- {2, 68, 288.651924, 498.882444, 1.0}, {1, 69, 1626.240692, 278.374350, 1.0}, {2, 69, 1634.131508, 315.853672, 1.0},
- {1, 70, 1245.375710, 734.862142, 1.0}, {2, 70, 1250.047417, 769.670885, 1.0}, {1, 71, 497.015305, 510.718904, 1.0},
- {2, 71, 498.682308, 541.070201, 1.0}, {1, 72, 1280.542030, 153.939185, 1.0}, {2, 72, 1286.993637, 198.436196, 1.0},
- {1, 73, 1534.748840, 138.601043, 1.0}, {2, 73, 1542.961349, 180.170819, 1.0}, {1, 74, 1477.412682, 200.608061, 1.0},
- {2, 74, 1484.683914, 240.413260, 1.0}, {1, 76, 450.637321, 407.279642, 1.0}, {2, 76, 451.695642, 441.666291, 1.0},
- {1, 78, 246.981239, 220.786298, 1.0}, {2, 78, 244.524879, 290.016564, 1.0}, {1, 79, 36.696489, 420.023407, 1.0},
+ {1, 2, 1139.861412, 1034.634984, 1.0},
+ {2, 2, 1143.512192, 1065.355718, 1.0},
+ {1, 3, 1760.821953, 644.658036, 1.0},
+ {2, 3, 1770.901108, 697.899928, 1.0},
+ {1, 4, 858.071823, 1068.520746, 1.0},
+ {1, 6, 1633.952408, 797.050145, 1.0},
+ {2, 6, 1642.508469, 849.157140, 1.0},
+ {1, 8, 1716.695824, 451.805491, 1.0},
+ {2, 8, 1726.513939, 502.095687, 1.0},
+ {1, 9, 269.577627, 724.986935, 1.0},
+ {2, 9, 269.424820, 764.154246, 1.0},
+ {1, 10, 1891.321907, 706.948843, 1.0},
+ {2, 10, 1903.338547, 766.068377, 1.0},
+ {1, 12, 1806.227074, 956.089604, 1.0},
+ {2, 12, 1816.619568, 1013.767376, 1.0},
+ {1, 14, 269.544153, 1002.333570, 1.0},
+ {2, 14, 269.367542, 1043.509254, 1.0},
+ {1, 15, 1402.772141, 281.392962, 1.0},
+ {2, 15, 1409.089165, 318.731629, 1.0},
+ {1, 16, 195.877233, 919.454341, 1.0},
+ {2, 16, 192.531109, 997.367899, 1.0},
+ {1, 17, 1789.584045, 120.036661, 1.0},
+ {2, 17, 1800.391846, 167.822964, 1.0},
+ {1, 18, 999.363213, 765.004807, 1.0},
+ {2, 18, 1002.345772, 790.560122, 1.0},
+ {1, 19, 647.342491, 1044.805727, 1.0},
+ {2, 19, 649.328041, 1058.682940, 1.0},
+ {1, 20, 1365.486832, 440.901829, 1.0},
+ {2, 20, 1371.413040, 477.888730, 1.0},
+ {1, 21, 1787.125282, 301.431606, 1.0},
+ {2, 21, 1798.527260, 355.224531, 1.0},
+ {1, 22, 1257.805824, 932.797258, 1.0},
+ {2, 22, 1263.017578, 969.376774, 1.0},
+ {1, 23, 961.969528, 843.148112, 1.0},
+ {2, 23, 964.869461, 868.587620, 1.0},
+ {1, 24, 158.076110, 1052.643592, 1.0},
+ {1, 25, 1072.884521, 1005.296981, 1.0},
+ {2, 25, 1076.091156, 1032.776856, 1.0},
+ {1, 26, 1107.656937, 526.577228, 1.0},
+ {2, 26, 1111.618423, 555.524454, 1.0},
+ {1, 27, 1416.410751, 529.857581, 1.0},
+ {2, 27, 1422.663574, 570.025957, 1.0},
+ {1, 28, 1498.673630, 1005.453086, 1.0},
+ {2, 28, 1505.381813, 1051.827149, 1.0},
+ {1, 29, 1428.647804, 652.473629, 1.0},
+ {2, 29, 1434.898224, 692.715390, 1.0},
+ {1, 30, 1332.318764, 503.673599, 1.0},
+ {2, 30, 1338.000069, 540.507967, 1.0},
+ {1, 32, 1358.642693, 709.837904, 1.0},
+ {2, 32, 1364.231529, 748.678265, 1.0},
+ {1, 33, 1850.911560, 460.475668, 1.0},
+ {2, 33, 1862.221413, 512.797347, 1.0},
+ {1, 34, 1226.117821, 607.053959, 1.0},
+ {2, 34, 1230.736084, 641.091449, 1.0},
+ {1, 35, 619.598236, 523.341744, 1.0},
+ {2, 35, 621.601124, 554.453287, 1.0},
+ {1, 36, 956.591492, 958.223183, 1.0},
+ {2, 36, 959.289265, 983.289263, 1.0},
+ {1, 37, 1249.922218, 419.095856, 1.0},
+ {2, 37, 1255.005913, 452.556177, 1.0},
+ {1, 39, 1300.528450, 386.251166, 1.0},
+ {2, 39, 1305.957413, 420.185595, 1.0},
+ {1, 40, 1128.689919, 972.558346, 1.0},
+ {2, 40, 1132.413712, 1003.984737, 1.0},
+ {1, 41, 503.304749, 1053.504388, 1.0},
+ {2, 41, 505.019703, 1069.175613, 1.0},
+ {1, 42, 1197.352982, 472.681564, 1.0},
+ {2, 42, 1201.910706, 503.459880, 1.0},
+ {1, 43, 1794.391022, 383.911400, 1.0},
+ {2, 43, 1805.324135, 436.116468, 1.0},
+ {1, 44, 789.641418, 1058.045647, 1.0},
+ {1, 45, 1376.575241, 928.714979, 1.0},
+ {2, 45, 1381.995850, 969.511957, 1.0},
+ {1, 46, 1598.023567, 93.975592, 1.0},
+ {2, 46, 1606.937141, 136.827035, 1.0},
+ {1, 47, 1455.550232, 762.128685, 1.0},
+ {2, 47, 1462.014313, 805.164878, 1.0},
+ {1, 48, 1357.123489, 354.460326, 1.0},
+ {2, 48, 1363.071899, 390.363121, 1.0},
+ {1, 49, 939.792652, 781.549895, 1.0},
+ {2, 49, 942.802620, 806.164012, 1.0},
+ {1, 50, 1380.251083, 805.948620, 1.0},
+ {2, 50, 1385.637932, 845.592098, 1.0},
+ {1, 51, 1021.769943, 1049.802361, 1.0},
+ {1, 52, 1065.634918, 608.099055, 1.0},
+ {2, 52, 1069.142189, 635.361736, 1.0},
+ {1, 53, 624.324188, 463.202863, 1.0},
+ {2, 53, 626.395454, 494.994088, 1.0},
+ {1, 54, 1451.459885, 881.557624, 1.0},
+ {2, 54, 1457.679634, 924.345531, 1.0},
+ {1, 55, 1201.885986, 1057.079022, 1.0},
+ {1, 56, 581.157532, 947.661438, 1.0},
+ {2, 56, 583.242359, 960.831449, 1.0},
+ {1, 58, 513.593102, 954.175858, 1.0},
+ {2, 58, 515.470047, 971.309574, 1.0},
+ {1, 59, 928.069038, 901.774421, 1.0},
+ {2, 59, 930.847950, 925.613744, 1.0},
+ {1, 60, 1065.860023, 740.395389, 1.0},
+ {2, 60, 1069.484253, 768.971086, 1.0},
+ {1, 61, 990.479393, 906.264632, 1.0},
+ {2, 61, 993.217506, 933.088803, 1.0},
+ {1, 62, 1776.196747, 776.278453, 1.0},
+ {2, 62, 1786.292496, 831.136880, 1.0},
+ {1, 63, 834.454365, 1012.449725, 1.0},
+ {2, 63, 836.868324, 1033.451807, 1.0},
+ {1, 64, 1355.190697, 869.184809, 1.0},
+ {2, 64, 1360.736618, 909.773347, 1.0},
+ {1, 65, 702.072487, 897.519686, 1.0},
+ {2, 65, 704.203377, 911.931131, 1.0},
+ {1, 66, 1214.022903, 856.199934, 1.0},
+ {2, 66, 1218.109016, 890.753052, 1.0},
+ {1, 67, 327.676048, 236.814036, 1.0},
+ {2, 67, 328.335285, 277.251878, 1.0},
+ {1, 68, 289.064083, 454.793912, 1.0},
+ {2, 68, 288.651924, 498.882444, 1.0},
+ {1, 69, 1626.240692, 278.374350, 1.0},
+ {2, 69, 1634.131508, 315.853672, 1.0},
+ {1, 70, 1245.375710, 734.862142, 1.0},
+ {2, 70, 1250.047417, 769.670885, 1.0},
+ {1, 71, 497.015305, 510.718904, 1.0},
+ {2, 71, 498.682308, 541.070201, 1.0},
+ {1, 72, 1280.542030, 153.939185, 1.0},
+ {2, 72, 1286.993637, 198.436196, 1.0},
+ {1, 73, 1534.748840, 138.601043, 1.0},
+ {2, 73, 1542.961349, 180.170819, 1.0},
+ {1, 74, 1477.412682, 200.608061, 1.0},
+ {2, 74, 1484.683914, 240.413260, 1.0},
+ {1, 76, 450.637321, 407.279642, 1.0},
+ {2, 76, 451.695642, 441.666291, 1.0},
+ {1, 78, 246.981239, 220.786298, 1.0},
+ {2, 78, 244.524879, 290.016564, 1.0},
+ {1, 79, 36.696489, 420.023407, 1.0},
{2, 79, 21.364746, 591.245492, 1.0},
- };
+ };
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@@ -249,41 +402,110 @@ TEST(KeyframeSelection, ElevatorReconstructionVarianceTest) {
intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
Marker markers[] = {
- {1, 0, 182.999997, 1047.000010, 1.0}, {2, 0, 181.475730, 1052.091079, 1.0}, {3, 0, 181.741562, 1057.893341, 1.0},
- {4, 0, 183.190498, 1068.310440, 1.0}, {1, 1, 271.000013, 666.000009, 1.0}, {2, 1, 270.596180, 668.665760, 1.0},
- {3, 1, 270.523510, 671.559069, 1.0}, {4, 1, 271.856518, 676.818151, 1.0}, {5, 1, 268.989000, 727.051570, 1.0},
- {1, 2, 264.999990, 1018.000031, 1.0}, {2, 2, 264.020061, 1021.157591, 1.0}, {3, 2, 264.606056, 1024.823506, 1.0},
- {4, 2, 266.200933, 1031.168690, 1.0}, {1, 3, 270.000000, 938.000014, 1.0}, {2, 3, 269.022617, 941.153390, 1.0},
- {3, 3, 269.605579, 944.454954, 1.0}, {4, 3, 271.281366, 949.452167, 1.0}, {5, 3, 268.963480, 1004.417453, 1.0},
- {1, 4, 200.999994, 799.000003, 1.0}, {2, 4, 199.841366, 803.891838, 1.0}, {3, 4, 200.262208, 809.323246, 1.0},
- {4, 4, 201.456513, 819.271195, 1.0}, {5, 4, 195.026493, 924.363234, 1.0}, {1, 5, 1775.000038, 49.999998, 1.0},
- {2, 5, 1775.255127, 53.718264, 1.0}, {3, 5, 1776.449890, 55.951670, 1.0}, {4, 5, 1778.815727, 61.923309, 1.0},
- {5, 5, 1790.274124, 123.074923, 1.0}, {1, 6, 164.000001, 927.999988, 1.0}, {2, 6, 162.665462, 933.169527, 1.0},
- {3, 6, 163.067923, 938.577182, 1.0}, {4, 6, 164.370360, 948.840945, 1.0}, {5, 6, 157.199407, 1057.762341, 1.0},
- {1, 7, 618.000011, 477.999998, 1.0}, {2, 7, 617.583504, 480.124243, 1.0}, {3, 7, 618.356495, 482.441897, 1.0},
- {4, 7, 619.792500, 486.428132, 1.0}, {5, 7, 619.546051, 525.222627, 1.0}, {1, 8, 499.999981, 1036.999984, 1.0},
- {2, 8, 499.080162, 1038.720160, 1.0}, {3, 8, 499.949398, 1039.014344, 1.0}, {4, 8, 501.828003, 1041.286647, 1.0},
- {5, 8, 502.777576, 1055.196369, 1.0}, {1, 9, 1587.000046, 31.999999, 1.0}, {2, 9, 1586.988373, 34.635853, 1.0},
- {3, 9, 1588.155899, 37.444186, 1.0}, {4, 9, 1589.973106, 42.492081, 1.0}, {5, 9, 1598.683205, 96.526332, 1.0},
- {1, 10, 622.999992, 416.999999, 1.0}, {2, 10, 622.449017, 419.233485, 1.0}, {3, 10, 623.283234, 421.500703, 1.0},
- {4, 10, 624.620132, 425.537406, 1.0}, {5, 10, 624.290829, 465.078338, 1.0}, {1, 11, 577.999992, 931.999998, 1.0},
- {2, 11, 577.042294, 932.872703, 1.0}, {3, 11, 577.832451, 934.045451, 1.0}, {4, 11, 579.729137, 935.735435, 1.0},
- {5, 11, 580.691242, 948.396256, 1.0}, {1, 12, 510.999985, 931.999998, 1.0}, {2, 12, 510.111237, 933.152146, 1.0},
- {3, 12, 510.797081, 934.454219, 1.0}, {4, 12, 512.647362, 936.595910, 1.0}, {5, 12, 513.247204, 955.144157, 1.0},
- {1, 13, 330.459995, 177.059993, 1.0}, {2, 13, 329.876347, 179.615586, 1.0}, {3, 13, 330.681696, 182.757810, 1.0},
- {4, 13, 331.345053, 187.903853, 1.0}, {5, 13, 327.824135, 239.611639, 1.0}, {1, 14, 291.813097, 388.516195, 1.0},
- {2, 14, 290.984058, 391.382725, 1.0}, {3, 14, 291.526737, 394.778595, 1.0}, {4, 14, 292.763815, 400.310973, 1.0},
- {5, 14, 288.714552, 457.548015, 1.0}, {1, 15, 496.491680, 466.534005, 1.0}, {2, 15, 495.909519, 468.518561, 1.0},
- {3, 15, 496.588383, 470.853596, 1.0}, {4, 15, 497.976780, 474.731458, 1.0}, {5, 15, 496.998882, 512.568694, 1.0},
- {1, 16, 1273.000031, 89.000000, 1.0}, {2, 16, 1272.951965, 92.003637, 1.0}, {3, 16, 1273.934784, 94.972191, 1.0},
- {4, 16, 1275.493584, 100.139952, 1.0}, {5, 16, 1281.003571, 156.880163, 1.0}, {1, 17, 1524.713173, 78.852922, 1.0},
- {2, 17, 1524.782066, 81.427142, 1.0}, {3, 17, 1525.759048, 84.057939, 1.0}, {4, 17, 1527.579689, 88.966550, 1.0},
- {5, 17, 1535.262451, 141.186054, 1.0}, {1, 18, 1509.425011, 94.371824, 1.0}, {1, 19, 451.000013, 357.000003, 1.0},
- {2, 19, 450.354881, 359.312410, 1.0}, {3, 19, 451.107473, 361.837088, 1.0}, {4, 19, 452.186537, 366.318061, 1.0},
- {5, 19, 450.507660, 409.257599, 1.0}, {1, 20, 254.004936, 114.784185, 1.0}, {2, 20, 253.291512, 119.288486, 1.0},
- {3, 20, 253.745584, 124.114957, 1.0}, {4, 20, 254.453287, 132.795120, 1.0}, {5, 20, 246.772242, 225.165337, 1.0},
- {1, 21, 65.262880, 147.889409, 1.0}, {2, 21, 63.634465, 157.656807, 1.0}, {3, 21, 63.306799, 169.067053, 1.0},
- {4, 21, 62.462311, 189.724241, 1.0}, {5, 21, 35.396615, 430.308380, 1.0},
+ {1, 0, 182.999997, 1047.000010, 1.0},
+ {2, 0, 181.475730, 1052.091079, 1.0},
+ {3, 0, 181.741562, 1057.893341, 1.0},
+ {4, 0, 183.190498, 1068.310440, 1.0},
+ {1, 1, 271.000013, 666.000009, 1.0},
+ {2, 1, 270.596180, 668.665760, 1.0},
+ {3, 1, 270.523510, 671.559069, 1.0},
+ {4, 1, 271.856518, 676.818151, 1.0},
+ {5, 1, 268.989000, 727.051570, 1.0},
+ {1, 2, 264.999990, 1018.000031, 1.0},
+ {2, 2, 264.020061, 1021.157591, 1.0},
+ {3, 2, 264.606056, 1024.823506, 1.0},
+ {4, 2, 266.200933, 1031.168690, 1.0},
+ {1, 3, 270.000000, 938.000014, 1.0},
+ {2, 3, 269.022617, 941.153390, 1.0},
+ {3, 3, 269.605579, 944.454954, 1.0},
+ {4, 3, 271.281366, 949.452167, 1.0},
+ {5, 3, 268.963480, 1004.417453, 1.0},
+ {1, 4, 200.999994, 799.000003, 1.0},
+ {2, 4, 199.841366, 803.891838, 1.0},
+ {3, 4, 200.262208, 809.323246, 1.0},
+ {4, 4, 201.456513, 819.271195, 1.0},
+ {5, 4, 195.026493, 924.363234, 1.0},
+ {1, 5, 1775.000038, 49.999998, 1.0},
+ {2, 5, 1775.255127, 53.718264, 1.0},
+ {3, 5, 1776.449890, 55.951670, 1.0},
+ {4, 5, 1778.815727, 61.923309, 1.0},
+ {5, 5, 1790.274124, 123.074923, 1.0},
+ {1, 6, 164.000001, 927.999988, 1.0},
+ {2, 6, 162.665462, 933.169527, 1.0},
+ {3, 6, 163.067923, 938.577182, 1.0},
+ {4, 6, 164.370360, 948.840945, 1.0},
+ {5, 6, 157.199407, 1057.762341, 1.0},
+ {1, 7, 618.000011, 477.999998, 1.0},
+ {2, 7, 617.583504, 480.124243, 1.0},
+ {3, 7, 618.356495, 482.441897, 1.0},
+ {4, 7, 619.792500, 486.428132, 1.0},
+ {5, 7, 619.546051, 525.222627, 1.0},
+ {1, 8, 499.999981, 1036.999984, 1.0},
+ {2, 8, 499.080162, 1038.720160, 1.0},
+ {3, 8, 499.949398, 1039.014344, 1.0},
+ {4, 8, 501.828003, 1041.286647, 1.0},
+ {5, 8, 502.777576, 1055.196369, 1.0},
+ {1, 9, 1587.000046, 31.999999, 1.0},
+ {2, 9, 1586.988373, 34.635853, 1.0},
+ {3, 9, 1588.155899, 37.444186, 1.0},
+ {4, 9, 1589.973106, 42.492081, 1.0},
+ {5, 9, 1598.683205, 96.526332, 1.0},
+ {1, 10, 622.999992, 416.999999, 1.0},
+ {2, 10, 622.449017, 419.233485, 1.0},
+ {3, 10, 623.283234, 421.500703, 1.0},
+ {4, 10, 624.620132, 425.537406, 1.0},
+ {5, 10, 624.290829, 465.078338, 1.0},
+ {1, 11, 577.999992, 931.999998, 1.0},
+ {2, 11, 577.042294, 932.872703, 1.0},
+ {3, 11, 577.832451, 934.045451, 1.0},
+ {4, 11, 579.729137, 935.735435, 1.0},
+ {5, 11, 580.691242, 948.396256, 1.0},
+ {1, 12, 510.999985, 931.999998, 1.0},
+ {2, 12, 510.111237, 933.152146, 1.0},
+ {3, 12, 510.797081, 934.454219, 1.0},
+ {4, 12, 512.647362, 936.595910, 1.0},
+ {5, 12, 513.247204, 955.144157, 1.0},
+ {1, 13, 330.459995, 177.059993, 1.0},
+ {2, 13, 329.876347, 179.615586, 1.0},
+ {3, 13, 330.681696, 182.757810, 1.0},
+ {4, 13, 331.345053, 187.903853, 1.0},
+ {5, 13, 327.824135, 239.611639, 1.0},
+ {1, 14, 291.813097, 388.516195, 1.0},
+ {2, 14, 290.984058, 391.382725, 1.0},
+ {3, 14, 291.526737, 394.778595, 1.0},
+ {4, 14, 292.763815, 400.310973, 1.0},
+ {5, 14, 288.714552, 457.548015, 1.0},
+ {1, 15, 496.491680, 466.534005, 1.0},
+ {2, 15, 495.909519, 468.518561, 1.0},
+ {3, 15, 496.588383, 470.853596, 1.0},
+ {4, 15, 497.976780, 474.731458, 1.0},
+ {5, 15, 496.998882, 512.568694, 1.0},
+ {1, 16, 1273.000031, 89.000000, 1.0},
+ {2, 16, 1272.951965, 92.003637, 1.0},
+ {3, 16, 1273.934784, 94.972191, 1.0},
+ {4, 16, 1275.493584, 100.139952, 1.0},
+ {5, 16, 1281.003571, 156.880163, 1.0},
+ {1, 17, 1524.713173, 78.852922, 1.0},
+ {2, 17, 1524.782066, 81.427142, 1.0},
+ {3, 17, 1525.759048, 84.057939, 1.0},
+ {4, 17, 1527.579689, 88.966550, 1.0},
+ {5, 17, 1535.262451, 141.186054, 1.0},
+ {1, 18, 1509.425011, 94.371824, 1.0},
+ {1, 19, 451.000013, 357.000003, 1.0},
+ {2, 19, 450.354881, 359.312410, 1.0},
+ {3, 19, 451.107473, 361.837088, 1.0},
+ {4, 19, 452.186537, 366.318061, 1.0},
+ {5, 19, 450.507660, 409.257599, 1.0},
+ {1, 20, 254.004936, 114.784185, 1.0},
+ {2, 20, 253.291512, 119.288486, 1.0},
+ {3, 20, 253.745584, 124.114957, 1.0},
+ {4, 20, 254.453287, 132.795120, 1.0},
+ {5, 20, 246.772242, 225.165337, 1.0},
+ {1, 21, 65.262880, 147.889409, 1.0},
+ {2, 21, 63.634465, 157.656807, 1.0},
+ {3, 21, 63.306799, 169.067053, 1.0},
+ {4, 21, 62.462311, 189.724241, 1.0},
+ {5, 21, 35.396615, 430.308380, 1.0},
};
int num_markers = sizeof(markers) / sizeof(Marker);
@@ -304,4 +526,4 @@ TEST(KeyframeSelection, ElevatorReconstructionVarianceTest) {
}
}
-} // namespace libmv
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver.cc b/intern/libmv/libmv/simple_pipeline/modal_solver.cc
index 687c328b004..845b299e31e 100644
--- a/intern/libmv/libmv/simple_pipeline/modal_solver.cc
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver.cc
@@ -34,7 +34,7 @@
namespace libmv {
namespace {
-void ProjectMarkerOnSphere(const Marker &marker, Vec3 &X) {
+void ProjectMarkerOnSphere(const Marker& marker, Vec3& X) {
X(0) = marker.x;
X(1) = marker.y;
X(2) = 1.0;
@@ -42,12 +42,14 @@ void ProjectMarkerOnSphere(const Marker &marker, Vec3 &X) {
X *= 5.0 / X.norm();
}
-void ModalSolverLogProgress(ProgressUpdateCallback *update_callback,
- double progress) {
+void ModalSolverLogProgress(ProgressUpdateCallback* update_callback,
+ double progress) {
if (update_callback) {
char message[256];
- snprintf(message, sizeof(message), "Solving progress %d%%",
+ snprintf(message,
+ sizeof(message),
+ "Solving progress %d%%",
(int)(progress * 100));
update_callback->invoke(progress, message);
@@ -58,25 +60,27 @@ struct ModalReprojectionError {
ModalReprojectionError(double observed_x,
double observed_y,
const double weight,
- const Vec3 &bundle)
- : observed_x_(observed_x), observed_y_(observed_y),
- weight_(weight), bundle_(bundle) { }
+ const Vec3& bundle)
+ : observed_x_(observed_x),
+ observed_y_(observed_y),
+ weight_(weight),
+ bundle_(bundle) {}
// TODO(keir): This should support bundling focal length as well.
template <typename T>
bool operator()(const T* quaternion, T* residuals) const {
// Convert bundle position from double to T.
- T X[3] = { T(bundle_(0)), T(bundle_(1)), T(bundle_(2)) };
+ T X[3] = {T(bundle_(0)), T(bundle_(1)), T(bundle_(2))};
// Compute the point position in camera coordinates: x = RX.
T x[3];
// This flips the sense of the quaternion, to adhere to Blender conventions.
T quaternion_inverse[4] = {
- quaternion[0],
- -quaternion[1],
- -quaternion[2],
- -quaternion[3],
+ quaternion[0],
+ -quaternion[1],
+ -quaternion[2],
+ -quaternion[3],
};
ceres::QuaternionRotatePoint(quaternion_inverse, X, x);
@@ -99,9 +103,9 @@ struct ModalReprojectionError {
};
} // namespace
-void ModalSolver(const Tracks &tracks,
- EuclideanReconstruction *reconstruction,
- ProgressUpdateCallback *update_callback) {
+void ModalSolver(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction,
+ ProgressUpdateCallback* update_callback) {
int max_image = tracks.MaxImage();
int max_track = tracks.MaxTrack();
@@ -116,7 +120,7 @@ void ModalSolver(const Tracks &tracks,
for (int image = 0; image <= max_image; ++image) {
vector<Marker> all_markers = tracks.MarkersInImage(image);
- ModalSolverLogProgress(update_callback, (float) image / max_image);
+ ModalSolverLogProgress(update_callback, (float)image / max_image);
// Skip empty images without doing anything.
if (all_markers.size() == 0) {
@@ -133,8 +137,8 @@ void ModalSolver(const Tracks &tracks,
// 3D positions.
Mat x1, x2;
for (int i = 0; i < all_markers.size(); ++i) {
- Marker &marker = all_markers[i];
- EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ Marker& marker = all_markers[i];
+ EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (point) {
Vec3 X;
ProjectMarkerOnSphere(marker, X);
@@ -168,8 +172,7 @@ void ModalSolver(const Tracks &tracks,
ceres::AngleAxisToQuaternion(&angle_axis(0), &quaternion(0));
- LG << "Analytically computed quaternion "
- << quaternion.transpose();
+ LG << "Analytically computed quaternion " << quaternion.transpose();
}
// STEP 2: Refine rotation with Ceres.
@@ -181,17 +184,15 @@ void ModalSolver(const Tracks &tracks,
int num_residuals = 0;
for (int i = 0; i < all_markers.size(); ++i) {
- Marker &marker = all_markers[i];
- EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ Marker& marker = all_markers[i];
+ EuclideanPoint* point = reconstruction->PointForTrack(marker.track);
if (point && marker.weight != 0.0) {
- problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
- ModalReprojectionError,
- 2, /* num_residuals */
- 4>(new ModalReprojectionError(marker.x,
- marker.y,
- marker.weight,
- point->X)),
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<ModalReprojectionError,
+ 2, /* num_residuals */
+ 4>(new ModalReprojectionError(
+ marker.x, marker.y, marker.weight, point->X)),
NULL,
&quaternion(0));
num_residuals++;
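The reflowed AddResidualBlock call above follows the usual Ceres registration pattern, where the AutoDiffCostFunction template arguments are <Functor, num_residuals, parameter_block_size> (2 and 4 here: a 2D reprojection residual over a quaternion block). A minimal self-contained sketch of that same pattern, using a hypothetical one-parameter functor rather than the real ModalReprojectionError:

  #include <ceres/ceres.h>

  // Hypothetical functor: the residual is x^2 - target, so the solve
  // drives x toward sqrt(target). It mirrors the
  // <Functor, num_residuals, block_size> shape used above (<..., 2, 4>).
  struct SquareRootError {
    explicit SquareRootError(double target) : target_(target) {}

    template <typename T>
    bool operator()(const T* const x, T* residual) const {
      residual[0] = x[0] * x[0] - T(target_);
      return true;
    }

    double target_;
  };

  int main() {
    double x = 1.0;  // Initial guess.
    ceres::Problem problem;
    problem.AddResidualBlock(
        new ceres::AutoDiffCostFunction<SquareRootError, 1, 1>(
            new SquareRootError(2.0)),
        NULL,  // No robust loss, matching the modal solver call above.
        &x);

    ceres::Solver::Options options;
    ceres::Solver::Summary summary;
    ceres::Solve(options, &problem, &summary);
    return 0;  // x now approximates sqrt(2).
  }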
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver.h b/intern/libmv/libmv/simple_pipeline/modal_solver.h
index 9801fd21d81..f7ce394b4b2 100644
--- a/intern/libmv/libmv/simple_pipeline/modal_solver.h
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver.h
@@ -21,9 +21,9 @@
#ifndef LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
#define LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
-#include "libmv/simple_pipeline/tracks.h"
-#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/callbacks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@@ -39,9 +39,9 @@ namespace libmv {
Reconstructed cameras and projected bundles would be added to reconstruction
object.
*/
-void ModalSolver(const Tracks &tracks,
- EuclideanReconstruction *reconstruction,
- ProgressUpdateCallback *update_callback = NULL);
+void ModalSolver(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction,
+ ProgressUpdateCallback* update_callback = NULL);
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc b/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc
index b4cae8defb2..0acf978e6f5 100644
--- a/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc
@@ -20,10 +20,10 @@
#include "libmv/simple_pipeline/modal_solver.h"
-#include "testing/testing.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "testing/testing.h"
#include <stdio.h>
@@ -38,14 +38,21 @@ TEST(ModalSolver, SyntheticCubeSceneMotion) {
intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
Marker markers[] = {
- {1, 0, 212.172775, 354.713538, 1.0}, {2, 0, 773.468399, 358.735306, 1.0},
- {1, 1, 62.415197, 287.905354, 1.0}, {2, 1, 619.103336, 324.402537, 1.0},
- {1, 2, 206.847939, 237.567925, 1.0}, {2, 2, 737.496986, 247.881383, 1.0},
- {1, 3, 351.743889, 316.415906, 1.0}, {2, 3, 908.779621, 290.703617, 1.0},
- {1, 4, 232.941413, 54.265443, 1.0}, {2, 4, 719.444847, 63.062531, 1.0},
- {1, 5, 96.391611, 119.283537, 1.0}, {2, 5, 611.413136, 160.890715, 1.0},
- {1, 6, 363.444958, 150.838144, 1.0}, {2, 6, 876.374531, 114.916206, 1.0},
- };
+ {1, 0, 212.172775, 354.713538, 1.0},
+ {2, 0, 773.468399, 358.735306, 1.0},
+ {1, 1, 62.415197, 287.905354, 1.0},
+ {2, 1, 619.103336, 324.402537, 1.0},
+ {1, 2, 206.847939, 237.567925, 1.0},
+ {2, 2, 737.496986, 247.881383, 1.0},
+ {1, 3, 351.743889, 316.415906, 1.0},
+ {2, 3, 908.779621, 290.703617, 1.0},
+ {1, 4, 232.941413, 54.265443, 1.0},
+ {2, 4, 719.444847, 63.062531, 1.0},
+ {1, 5, 96.391611, 119.283537, 1.0},
+ {2, 5, 611.413136, 160.890715, 1.0},
+ {1, 6, 363.444958, 150.838144, 1.0},
+ {2, 6, 876.374531, 114.916206, 1.0},
+ };
int num_markers = sizeof(markers) / sizeof(Marker);
Tracks tracks;
@@ -65,12 +72,14 @@ TEST(ModalSolver, SyntheticCubeSceneMotion) {
NULL);
Mat3 expected_rotation;
+ // clang-format off
expected_rotation << 0.98215101743472, 0.17798354937546, 0.06083777694542,
-0.16875283983360, 0.97665300495333, -0.13293376908719,
-0.08307742172243, 0.12029448893171, 0.98925597189636;
+ // clang-format on
- Mat3 &first_camera_R = reconstruction.CameraForImage(1)->R;
- Mat3 &second_camera_R = reconstruction.CameraForImage(2)->R;
+ Mat3& first_camera_R = reconstruction.CameraForImage(1)->R;
+ Mat3& second_camera_R = reconstruction.CameraForImage(2)->R;
EXPECT_TRUE(Mat3::Identity().isApprox(first_camera_R, kTolerance));
EXPECT_TRUE(expected_rotation.isApprox(second_camera_R, kTolerance));
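The test hunk above adds // clang-format off / on guards so the formatter keeps the hand-aligned rows of the Eigen comma-initializer intact. A standalone sketch of that guard pattern, assuming plain Eigen::Matrix3d in place of libmv's Mat3 typedef; the values are rounded from the expected rotation in the test and are purely illustrative:

  #include <Eigen/Dense>
  #include <iostream>

  int main() {
    Eigen::Matrix3d expected_rotation;
    // clang-format off
    expected_rotation <<  0.982,  0.178,  0.061,
                         -0.169,  0.977, -0.133,
                         -0.083,  0.120,  0.989;
    // clang-format on
    std::cout << expected_rotation << "\n";
    return 0;
  }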
diff --git a/intern/libmv/libmv/simple_pipeline/packed_intrinsics.h b/intern/libmv/libmv/simple_pipeline/packed_intrinsics.h
index cbea599fccd..79fa3ab8379 100644
--- a/intern/libmv/libmv/simple_pipeline/packed_intrinsics.h
+++ b/intern/libmv/libmv/simple_pipeline/packed_intrinsics.h
@@ -40,7 +40,7 @@ class PackedIntrinsics {
OFFSET_FOCAL_LENGTH,
OFFSET_PRINCIPAL_POINT_X,
OFFSET_PRINCIPAL_POINT_Y,
-
+
// Distortion model coefficients.
OFFSET_K1,
OFFSET_K2,
@@ -48,7 +48,7 @@ class PackedIntrinsics {
OFFSET_K4,
OFFSET_P1,
OFFSET_P2,
-
+
// Number of parameters which are to be stored in the block.
NUM_PARAMETERS,
};
@@ -66,12 +66,12 @@ class PackedIntrinsics {
// point.
#define DEFINE_PARAMETER(parameter_name) \
- void Set ## parameter_name(double value) { \
- SetParameter(OFFSET_ ## parameter_name, value); \
- } \
- double Get ## parameter_name() const { \
- return GetParameter(OFFSET_ ## parameter_name); \
+ void Set##parameter_name(double value) { \
+ SetParameter(OFFSET_##parameter_name, value); \
} \
+ double Get##parameter_name() const { \
+ return GetParameter(OFFSET_##parameter_name); \
+ }
DEFINE_PARAMETER(K1)
DEFINE_PARAMETER(K2)
@@ -94,11 +94,11 @@ class PackedIntrinsics {
// All intrinsics parameters packed into a single block.
// Use OFFSET_FOO indexes to access corresponding values.
- array<double, NUM_PARAMETERS> parameters_;
+ array<double, NUM_PARAMETERS> parameters_;
// Indexed by parameter offset, set to truth if the value of the parameter is
// explicitly specified.
- array<bool, NUM_PARAMETERS> known_parameters_;
+ array<bool, NUM_PARAMETERS> known_parameters_;
};
} // namespace libmv
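The DEFINE_PARAMETER hunk above only tightens the spacing around the ## token-pasting operator; the macro itself generates a SetFoo/GetFoo pair per intrinsic. A minimal sketch of the same idiom using hypothetical members (not the real PackedIntrinsics storage, which indexes a parameter array by the OFFSET_* values shown above):

  #include <cassert>

  class Params {
   public:
    // Token pasting: DEFINE_PARAMETER(K1) expands to SetK1()/GetK1()
    // reading and writing the hypothetical member K1_.
  #define DEFINE_PARAMETER(name)                       \
    void Set##name(double value) { name##_ = value; }  \
    double Get##name() const { return name##_; }

    DEFINE_PARAMETER(K1)
    DEFINE_PARAMETER(K2)
  #undef DEFINE_PARAMETER

   private:
    double K1_ = 0.0;
    double K2_ = 0.0;
  };

  int main() {
    Params p;
    p.SetK1(-0.034);
    assert(p.GetK1() == -0.034);
    return 0;
  }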
diff --git a/intern/libmv/libmv/simple_pipeline/pipeline.cc b/intern/libmv/libmv/simple_pipeline/pipeline.cc
index 728601f3732..5d52aeb7406 100644
--- a/intern/libmv/libmv/simple_pipeline/pipeline.cc
+++ b/intern/libmv/libmv/simple_pipeline/pipeline.cc
@@ -24,11 +24,11 @@
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/intersect.h"
-#include "libmv/simple_pipeline/resect.h"
#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/resect.h"
#include "libmv/simple_pipeline/tracks.h"
-#include "libmv/simple_pipeline/camera_intrinsics.h"
#ifdef _MSC_VER
# define snprintf _snprintf
@@ -46,24 +46,25 @@ struct EuclideanPipelineRoutines {
typedef EuclideanCamera Camera;
typedef EuclideanPoint Point;
- static void Bundle(const Tracks &tracks,
- EuclideanReconstruction *reconstruction) {
+ static void Bundle(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction) {
EuclideanBundle(tracks, reconstruction);
}
- static bool Resect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction, bool final_pass) {
+ static bool Resect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction,
+ bool final_pass) {
return EuclideanResect(markers, reconstruction, final_pass);
}
- static bool Intersect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction) {
+ static bool Intersect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction) {
return EuclideanIntersect(markers, reconstruction);
}
- static Marker ProjectMarker(const EuclideanPoint &point,
- const EuclideanCamera &camera,
- const CameraIntrinsics &intrinsics) {
+ static Marker ProjectMarker(const EuclideanPoint& point,
+ const EuclideanCamera& camera,
+ const CameraIntrinsics& intrinsics) {
Vec3 projected = camera.R * point.X + camera.t;
projected /= projected(2);
@@ -84,26 +85,27 @@ struct ProjectivePipelineRoutines {
typedef ProjectiveCamera Camera;
typedef ProjectivePoint Point;
- static void Bundle(const Tracks &tracks,
- ProjectiveReconstruction *reconstruction) {
+ static void Bundle(const Tracks& tracks,
+ ProjectiveReconstruction* reconstruction) {
ProjectiveBundle(tracks, reconstruction);
}
- static bool Resect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction, bool final_pass) {
- (void) final_pass; // Ignored.
+ static bool Resect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction,
+ bool final_pass) {
+ (void)final_pass; // Ignored.
return ProjectiveResect(markers, reconstruction);
}
- static bool Intersect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction) {
+ static bool Intersect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction) {
return ProjectiveIntersect(markers, reconstruction);
}
- static Marker ProjectMarker(const ProjectivePoint &point,
- const ProjectiveCamera &camera,
- const CameraIntrinsics &intrinsics) {
+ static Marker ProjectMarker(const ProjectivePoint& point,
+ const ProjectiveCamera& camera,
+ const CameraIntrinsics& intrinsics) {
Vec3 projected = camera.P * point.X;
projected /= projected(2);
@@ -122,28 +124,33 @@ struct ProjectivePipelineRoutines {
} // namespace
static void CompleteReconstructionLogProgress(
- ProgressUpdateCallback *update_callback,
+ ProgressUpdateCallback* update_callback,
double progress,
- const char *step = NULL) {
+ const char* step = NULL) {
if (update_callback) {
char message[256];
if (step)
- snprintf(message, sizeof(message), "Completing solution %d%% | %s",
- (int)(progress*100), step);
+ snprintf(message,
+ sizeof(message),
+ "Completing solution %d%% | %s",
+ (int)(progress * 100),
+ step);
else
- snprintf(message, sizeof(message), "Completing solution %d%%",
- (int)(progress*100));
+ snprintf(message,
+ sizeof(message),
+ "Completing solution %d%%",
+ (int)(progress * 100));
update_callback->invoke(progress, message);
}
}
-template<typename PipelineRoutines>
+template <typename PipelineRoutines>
void InternalCompleteReconstruction(
- const Tracks &tracks,
- typename PipelineRoutines::Reconstruction *reconstruction,
- ProgressUpdateCallback *update_callback = NULL) {
+ const Tracks& tracks,
+ typename PipelineRoutines::Reconstruction* reconstruction,
+ ProgressUpdateCallback* update_callback = NULL) {
int max_track = tracks.MaxTrack();
int max_image = tracks.MaxImage();
int num_resects = -1;
@@ -173,7 +180,7 @@ void InternalCompleteReconstruction(
<< " reconstructed markers for track " << track;
if (reconstructed_markers.size() >= 2) {
CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image));
+ (double)tot_resects / (max_image));
if (PipelineRoutines::Intersect(reconstructed_markers,
reconstruction)) {
num_intersects++;
@@ -184,9 +191,8 @@ void InternalCompleteReconstruction(
}
}
if (num_intersects) {
- CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image),
- "Bundling...");
+ CompleteReconstructionLogProgress(
+ update_callback, (double)tot_resects / (max_image), "Bundling...");
PipelineRoutines::Bundle(tracks, reconstruction);
LG << "Ran Bundle() after intersections.";
}
@@ -212,9 +218,9 @@ void InternalCompleteReconstruction(
<< " reconstructed markers for image " << image;
if (reconstructed_markers.size() >= 5) {
CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image));
- if (PipelineRoutines::Resect(reconstructed_markers,
- reconstruction, false)) {
+ (double)tot_resects / (max_image));
+ if (PipelineRoutines::Resect(
+ reconstructed_markers, reconstruction, false)) {
num_resects++;
tot_resects++;
LG << "Ran Resect() for image " << image;
@@ -224,9 +230,8 @@ void InternalCompleteReconstruction(
}
}
if (num_resects) {
- CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image),
- "Bundling...");
+ CompleteReconstructionLogProgress(
+ update_callback, (double)tot_resects / (max_image), "Bundling...");
PipelineRoutines::Bundle(tracks, reconstruction);
}
LG << "Did " << num_resects << " resects.";
@@ -249,9 +254,9 @@ void InternalCompleteReconstruction(
}
if (reconstructed_markers.size() >= 5) {
CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image));
- if (PipelineRoutines::Resect(reconstructed_markers,
- reconstruction, true)) {
+ (double)tot_resects / (max_image));
+ if (PipelineRoutines::Resect(
+ reconstructed_markers, reconstruction, true)) {
num_resects++;
LG << "Ran final Resect() for image " << image;
} else {
@@ -260,27 +265,26 @@ void InternalCompleteReconstruction(
}
}
if (num_resects) {
- CompleteReconstructionLogProgress(update_callback,
- (double)tot_resects/(max_image),
- "Bundling...");
+ CompleteReconstructionLogProgress(
+ update_callback, (double)tot_resects / (max_image), "Bundling...");
PipelineRoutines::Bundle(tracks, reconstruction);
}
}
-template<typename PipelineRoutines>
+template <typename PipelineRoutines>
double InternalReprojectionError(
- const Tracks &image_tracks,
- const typename PipelineRoutines::Reconstruction &reconstruction,
- const CameraIntrinsics &intrinsics) {
+ const Tracks& image_tracks,
+ const typename PipelineRoutines::Reconstruction& reconstruction,
+ const CameraIntrinsics& intrinsics) {
int num_skipped = 0;
int num_reprojected = 0;
double total_error = 0.0;
vector<Marker> markers = image_tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
- const typename PipelineRoutines::Camera *camera =
+ const typename PipelineRoutines::Camera* camera =
reconstruction.CameraForImage(markers[i].image);
- const typename PipelineRoutines::Point *point =
+ const typename PipelineRoutines::Point* point =
reconstruction.PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
num_skipped++;
@@ -295,24 +299,25 @@ double InternalReprojectionError(
const int N = 100;
char line[N];
- snprintf(line, N,
- "image %-3d track %-3d "
- "x %7.1f y %7.1f "
- "rx %7.1f ry %7.1f "
- "ex %7.1f ey %7.1f"
- " e %7.1f",
- markers[i].image,
- markers[i].track,
- markers[i].x,
- markers[i].y,
- reprojected_marker.x,
- reprojected_marker.y,
- ex,
- ey,
- sqrt(ex*ex + ey*ey));
+ snprintf(line,
+ N,
+ "image %-3d track %-3d "
+ "x %7.1f y %7.1f "
+ "rx %7.1f ry %7.1f "
+ "ex %7.1f ey %7.1f"
+ " e %7.1f",
+ markers[i].image,
+ markers[i].track,
+ markers[i].x,
+ markers[i].y,
+ reprojected_marker.x,
+ reprojected_marker.y,
+ ex,
+ ey,
+ sqrt(ex * ex + ey * ey));
VLOG(1) << line;
- total_error += sqrt(ex*ex + ey*ey);
+ total_error += sqrt(ex * ex + ey * ey);
}
LG << "Skipped " << num_skipped << " markers.";
LG << "Reprojected " << num_reprojected << " markers.";
@@ -321,46 +326,41 @@ double InternalReprojectionError(
return total_error / num_reprojected;
}
-double EuclideanReprojectionError(const Tracks &image_tracks,
- const EuclideanReconstruction &reconstruction,
- const CameraIntrinsics &intrinsics) {
- return InternalReprojectionError<EuclideanPipelineRoutines>(image_tracks,
- reconstruction,
- intrinsics);
+double EuclideanReprojectionError(const Tracks& image_tracks,
+ const EuclideanReconstruction& reconstruction,
+ const CameraIntrinsics& intrinsics) {
+ return InternalReprojectionError<EuclideanPipelineRoutines>(
+ image_tracks, reconstruction, intrinsics);
}
double ProjectiveReprojectionError(
- const Tracks &image_tracks,
- const ProjectiveReconstruction &reconstruction,
- const CameraIntrinsics &intrinsics) {
- return InternalReprojectionError<ProjectivePipelineRoutines>(image_tracks,
- reconstruction,
- intrinsics);
+ const Tracks& image_tracks,
+ const ProjectiveReconstruction& reconstruction,
+ const CameraIntrinsics& intrinsics) {
+ return InternalReprojectionError<ProjectivePipelineRoutines>(
+ image_tracks, reconstruction, intrinsics);
}
-void EuclideanCompleteReconstruction(const Tracks &tracks,
- EuclideanReconstruction *reconstruction,
- ProgressUpdateCallback *update_callback) {
- InternalCompleteReconstruction<EuclideanPipelineRoutines>(tracks,
- reconstruction,
- update_callback);
+void EuclideanCompleteReconstruction(const Tracks& tracks,
+ EuclideanReconstruction* reconstruction,
+ ProgressUpdateCallback* update_callback) {
+ InternalCompleteReconstruction<EuclideanPipelineRoutines>(
+ tracks, reconstruction, update_callback);
}
-void ProjectiveCompleteReconstruction(const Tracks &tracks,
- ProjectiveReconstruction *reconstruction) {
+void ProjectiveCompleteReconstruction(
+ const Tracks& tracks, ProjectiveReconstruction* reconstruction) {
InternalCompleteReconstruction<ProjectivePipelineRoutines>(tracks,
reconstruction);
}
-void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
- const CameraIntrinsics &camera_intrinsics,
- Tracks *calibrated_tracks) {
+void InvertIntrinsicsForTracks(const Tracks& raw_tracks,
+ const CameraIntrinsics& camera_intrinsics,
+ Tracks* calibrated_tracks) {
vector<Marker> markers = raw_tracks.AllMarkers();
for (int i = 0; i < markers.size(); ++i) {
- camera_intrinsics.InvertIntrinsics(markers[i].x,
- markers[i].y,
- &(markers[i].x),
- &(markers[i].y));
+ camera_intrinsics.InvertIntrinsics(
+ markers[i].x, markers[i].y, &(markers[i].x), &(markers[i].y));
}
*calibrated_tracks = Tracks(markers);
}
diff --git a/intern/libmv/libmv/simple_pipeline/pipeline.h b/intern/libmv/libmv/simple_pipeline/pipeline.h
index 4d1bd00c51f..d6b43536d46 100644
--- a/intern/libmv/libmv/simple_pipeline/pipeline.h
+++ b/intern/libmv/libmv/simple_pipeline/pipeline.h
@@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
#include "libmv/simple_pipeline/callbacks.h"
-#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@@ -47,9 +47,9 @@ namespace libmv {
\sa EuclideanResect, EuclideanIntersect, EuclideanBundle
*/
void EuclideanCompleteReconstruction(
- const Tracks &tracks,
- EuclideanReconstruction *reconstruction,
- ProgressUpdateCallback *update_callback = NULL);
+ const Tracks& tracks,
+ EuclideanReconstruction* reconstruction,
+ ProgressUpdateCallback* update_callback = NULL);
/*!
Estimate camera matrices and homogeneous 3D coordinates for all frames and
@@ -71,27 +71,26 @@ void EuclideanCompleteReconstruction(
\sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
*/
-void ProjectiveCompleteReconstruction(const Tracks &tracks,
- ProjectiveReconstruction *reconstruction);
-
+void ProjectiveCompleteReconstruction(const Tracks& tracks,
+ ProjectiveReconstruction* reconstruction);
class CameraIntrinsics;
// TODO(keir): Decide if we want these in the public API, and if so, what the
// appropriate include file is.
-double EuclideanReprojectionError(const Tracks &image_tracks,
- const EuclideanReconstruction &reconstruction,
- const CameraIntrinsics &intrinsics);
+double EuclideanReprojectionError(const Tracks& image_tracks,
+ const EuclideanReconstruction& reconstruction,
+ const CameraIntrinsics& intrinsics);
double ProjectiveReprojectionError(
- const Tracks &image_tracks,
- const ProjectiveReconstruction &reconstruction,
- const CameraIntrinsics &intrinsics);
+ const Tracks& image_tracks,
+ const ProjectiveReconstruction& reconstruction,
+ const CameraIntrinsics& intrinsics);
-void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
- const CameraIntrinsics &camera_intrinsics,
- Tracks *calibrated_tracks);
+void InvertIntrinsicsForTracks(const Tracks& raw_tracks,
+ const CameraIntrinsics& camera_intrinsics,
+ Tracks* calibrated_tracks);
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.cc b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
index 851eedb5bb1..584f7440caf 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction.cc
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
@@ -19,20 +19,21 @@
// IN THE SOFTWARE.
#include "libmv/simple_pipeline/reconstruction.h"
-#include "libmv/numeric/numeric.h"
#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
-EuclideanReconstruction::EuclideanReconstruction() {}
+EuclideanReconstruction::EuclideanReconstruction() {
+}
EuclideanReconstruction::EuclideanReconstruction(
- const EuclideanReconstruction &other) {
+ const EuclideanReconstruction& other) {
image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
}
-EuclideanReconstruction &EuclideanReconstruction::operator=(
- const EuclideanReconstruction &other) {
+EuclideanReconstruction& EuclideanReconstruction::operator=(
+ const EuclideanReconstruction& other) {
if (&other != this) {
image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
@@ -41,9 +42,9 @@ EuclideanReconstruction &EuclideanReconstruction::operator=(
}
void EuclideanReconstruction::InsertCamera(int image,
- const Mat3 &R,
- const Vec3 &t) {
- LG << "InsertCamera " << image << ":\nR:\n"<< R << "\nt:\n" << t;
+ const Mat3& R,
+ const Vec3& t) {
+ LG << "InsertCamera " << image << ":\nR:\n" << R << "\nt:\n" << t;
EuclideanCamera camera;
camera.image = image;
@@ -53,7 +54,7 @@ void EuclideanReconstruction::InsertCamera(int image,
image_to_cameras_map_.insert(make_pair(image, camera));
}
-void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
+void EuclideanReconstruction::InsertPoint(int track, const Vec3& X) {
LG << "InsertPoint " << track << ":\n" << X;
if (track >= points_.size()) {
points_.resize(track + 1);
@@ -62,13 +63,12 @@ void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
points_[track].X = X;
}
-EuclideanCamera *EuclideanReconstruction::CameraForImage(int image) {
- return const_cast<EuclideanCamera *>(
- static_cast<const EuclideanReconstruction *>(
- this)->CameraForImage(image));
+EuclideanCamera* EuclideanReconstruction::CameraForImage(int image) {
+ return const_cast<EuclideanCamera*>(
+ static_cast<const EuclideanReconstruction*>(this)->CameraForImage(image));
}
-const EuclideanCamera *EuclideanReconstruction::CameraForImage(
+const EuclideanCamera* EuclideanReconstruction::CameraForImage(
int image) const {
ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
if (it == image_to_cameras_map_.end()) {
@@ -86,16 +86,16 @@ vector<EuclideanCamera> EuclideanReconstruction::AllCameras() const {
return cameras;
}
-EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) {
- return const_cast<EuclideanPoint *>(
- static_cast<const EuclideanReconstruction *>(this)->PointForTrack(track));
+EuclideanPoint* EuclideanReconstruction::PointForTrack(int track) {
+ return const_cast<EuclideanPoint*>(
+ static_cast<const EuclideanReconstruction*>(this)->PointForTrack(track));
}
-const EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) const {
+const EuclideanPoint* EuclideanReconstruction::PointForTrack(int track) const {
if (track < 0 || track >= points_.size()) {
return NULL;
}
- const EuclideanPoint *point = &points_[track];
+ const EuclideanPoint* point = &points_[track];
if (point->track == -1) {
return NULL;
}
@@ -112,8 +112,8 @@ vector<EuclideanPoint> EuclideanReconstruction::AllPoints() const {
return points;
}
-void ProjectiveReconstruction::InsertCamera(int image, const Mat34 &P) {
- LG << "InsertCamera " << image << ":\nP:\n"<< P;
+void ProjectiveReconstruction::InsertCamera(int image, const Mat34& P) {
+ LG << "InsertCamera " << image << ":\nP:\n" << P;
ProjectiveCamera camera;
camera.image = image;
@@ -122,7 +122,7 @@ void ProjectiveReconstruction::InsertCamera(int image, const Mat34 &P) {
image_to_cameras_map_.insert(make_pair(image, camera));
}
-void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
+void ProjectiveReconstruction::InsertPoint(int track, const Vec4& X) {
LG << "InsertPoint " << track << ":\n" << X;
if (track >= points_.size()) {
points_.resize(track + 1);
@@ -131,17 +131,17 @@ void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
points_[track].X = X;
}
-ProjectiveCamera *ProjectiveReconstruction::CameraForImage(int image) {
- return const_cast<ProjectiveCamera *>(
- static_cast<const ProjectiveReconstruction *>(
- this)->CameraForImage(image));
+ProjectiveCamera* ProjectiveReconstruction::CameraForImage(int image) {
+ return const_cast<ProjectiveCamera*>(
+ static_cast<const ProjectiveReconstruction*>(this)->CameraForImage(
+ image));
}
-const ProjectiveCamera *ProjectiveReconstruction::CameraForImage(
+const ProjectiveCamera* ProjectiveReconstruction::CameraForImage(
int image) const {
ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
if (it == image_to_cameras_map_.end()) {
- return NULL;
+ return NULL;
}
return &it->second;
}
@@ -155,16 +155,17 @@ vector<ProjectiveCamera> ProjectiveReconstruction::AllCameras() const {
return cameras;
}
-ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) {
- return const_cast<ProjectivePoint *>(
- static_cast<const ProjectiveReconstruction *>(this)->PointForTrack(track));
+ProjectivePoint* ProjectiveReconstruction::PointForTrack(int track) {
+ return const_cast<ProjectivePoint*>(
+ static_cast<const ProjectiveReconstruction*>(this)->PointForTrack(track));
}
-const ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) const {
+const ProjectivePoint* ProjectiveReconstruction::PointForTrack(
+ int track) const {
if (track < 0 || track >= points_.size()) {
return NULL;
}
- const ProjectivePoint *point = &points_[track];
+ const ProjectivePoint* point = &points_[track];
if (point->track == -1) {
return NULL;
}
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.h b/intern/libmv/libmv/simple_pipeline/reconstruction.h
index 544aeac042e..56b2ba34c91 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction.h
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.h
@@ -21,14 +21,15 @@
#ifndef LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
-#include "libmv/base/vector.h"
#include "libmv/base/map.h"
+#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
/*!
- A EuclideanCamera is the location and rotation of the camera viewing \a image.
+ A EuclideanCamera is the location and rotation of the camera viewing \a
+ image.
\a image identify which image from \link Tracks this camera represents.
\a R is a 3x3 matrix representing the rotation of the camera.
@@ -38,7 +39,7 @@ namespace libmv {
*/
struct EuclideanCamera {
EuclideanCamera() : image(-1) {}
- EuclideanCamera(const EuclideanCamera &c) : image(c.image), R(c.R), t(c.t) {}
+ EuclideanCamera(const EuclideanCamera& c) : image(c.image), R(c.R), t(c.t) {}
int image;
Mat3 R;
@@ -55,7 +56,7 @@ struct EuclideanCamera {
*/
struct EuclideanPoint {
EuclideanPoint() : track(-1) {}
- EuclideanPoint(const EuclideanPoint &p) : track(p.track), X(p.X) {}
+ EuclideanPoint(const EuclideanPoint& p) : track(p.track), X(p.X) {}
int track;
Vec3 X;
};
@@ -78,9 +79,9 @@ class EuclideanReconstruction {
EuclideanReconstruction();
/// Copy constructor.
- EuclideanReconstruction(const EuclideanReconstruction &other);
+ EuclideanReconstruction(const EuclideanReconstruction& other);
- EuclideanReconstruction &operator=(const EuclideanReconstruction &other);
+ EuclideanReconstruction& operator=(const EuclideanReconstruction& other);
/*!
Insert a camera into the set. If there is already a camera for the given
@@ -92,7 +93,7 @@ class EuclideanReconstruction {
\note You should use the same \a image identifier as in \link Tracks.
*/
- void InsertCamera(int image, const Mat3 &R, const Vec3 &t);
+ void InsertCamera(int image, const Mat3& R, const Vec3& t);
/*!
Insert a point into the reconstruction. If there is already a point for
@@ -104,18 +105,18 @@ class EuclideanReconstruction {
\note You should use the same \a track identifier as in \link Tracks.
*/
- void InsertPoint(int track, const Vec3 &X);
+ void InsertPoint(int track, const Vec3& X);
/// Returns a pointer to the camera corresponding to \a image.
- EuclideanCamera *CameraForImage(int image);
- const EuclideanCamera *CameraForImage(int image) const;
+ EuclideanCamera* CameraForImage(int image);
+ const EuclideanCamera* CameraForImage(int image) const;
/// Returns all cameras.
vector<EuclideanCamera> AllCameras() const;
/// Returns a pointer to the point corresponding to \a track.
- EuclideanPoint *PointForTrack(int track);
- const EuclideanPoint *PointForTrack(int track) const;
+ EuclideanPoint* PointForTrack(int track);
+ const EuclideanPoint* PointForTrack(int track) const;
/// Returns all points.
vector<EuclideanPoint> AllPoints() const;
@@ -139,7 +140,7 @@ class EuclideanReconstruction {
*/
struct ProjectiveCamera {
ProjectiveCamera() : image(-1) {}
- ProjectiveCamera(const ProjectiveCamera &c) : image(c.image), P(c.P) {}
+ ProjectiveCamera(const ProjectiveCamera& c) : image(c.image), P(c.P) {}
int image;
Mat34 P;
@@ -155,7 +156,7 @@ struct ProjectiveCamera {
*/
struct ProjectivePoint {
ProjectivePoint() : track(-1) {}
- ProjectivePoint(const ProjectivePoint &p) : track(p.track), X(p.X) {}
+ ProjectivePoint(const ProjectivePoint& p) : track(p.track), X(p.X) {}
int track;
Vec4 X;
};
@@ -184,7 +185,7 @@ class ProjectiveReconstruction {
\note You should use the same \a image identifier as in \link Tracks.
*/
- void InsertCamera(int image, const Mat34 &P);
+ void InsertCamera(int image, const Mat34& P);
/*!
Insert a point into the reconstruction. If there is already a point for
@@ -196,18 +197,18 @@ class ProjectiveReconstruction {
\note You should use the same \a track identifier as in \link Tracks.
*/
- void InsertPoint(int track, const Vec4 &X);
+ void InsertPoint(int track, const Vec4& X);
/// Returns a pointer to the camera corresponding to \a image.
- ProjectiveCamera *CameraForImage(int image);
- const ProjectiveCamera *CameraForImage(int image) const;
+ ProjectiveCamera* CameraForImage(int image);
+ const ProjectiveCamera* CameraForImage(int image) const;
/// Returns all cameras.
vector<ProjectiveCamera> AllCameras() const;
/// Returns a pointer to the point corresponding to \a track.
- ProjectivePoint *PointForTrack(int track);
- const ProjectivePoint *PointForTrack(int track) const;
+ ProjectivePoint* PointForTrack(int track);
+ const ProjectivePoint* PointForTrack(int track) const;
/// Returns all points.
vector<ProjectivePoint> AllPoints() const;
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
index 40ac23be7a2..04fbb536d31 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
@@ -23,7 +23,7 @@
namespace libmv {
-void EuclideanScaleToUnity(EuclideanReconstruction *reconstruction) {
+void EuclideanScaleToUnity(EuclideanReconstruction* reconstruction) {
vector<EuclideanCamera> all_cameras = reconstruction->AllCameras();
vector<EuclideanPoint> all_points = reconstruction->AllPoints();
@@ -53,14 +53,14 @@ void EuclideanScaleToUnity(EuclideanReconstruction *reconstruction) {
// Rescale cameras positions.
for (int i = 0; i < all_cameras.size(); ++i) {
int image = all_cameras[i].image;
- EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ EuclideanCamera* camera = reconstruction->CameraForImage(image);
camera->t = camera->t * scale_factor;
}
// Rescale points positions.
for (int i = 0; i < all_points.size(); ++i) {
int track = all_points[i].track;
- EuclideanPoint *point = reconstruction->PointForTrack(track);
+ EuclideanPoint* point = reconstruction->PointForTrack(track);
point->X = point->X * scale_factor;
}
}
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h
index f2349ff5146..c164878ee25 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h
@@ -29,7 +29,7 @@ namespace libmv {
Scale euclidean reconstruction in a way variance of
camera centers equals to one.
*/
-void EuclideanScaleToUnity(EuclideanReconstruction *reconstruction);
+void EuclideanScaleToUnity(EuclideanReconstruction* reconstruction);
} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/resect.cc b/intern/libmv/libmv/simple_pipeline/resect.cc
index e73fc44df2a..b5736767edd 100644
--- a/intern/libmv/libmv/simple_pipeline/resect.cc
+++ b/intern/libmv/libmv/simple_pipeline/resect.cc
@@ -25,17 +25,17 @@
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/euclidean_resection.h"
-#include "libmv/multiview/resection.h"
#include "libmv/multiview/projection.h"
-#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/resection.h"
#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
namespace {
-Mat2X PointMatrixFromMarkers(const vector<Marker> &markers) {
+Mat2X PointMatrixFromMarkers(const vector<Marker>& markers) {
Mat2X points(2, markers.size());
for (int i = 0; i < markers.size(); ++i) {
points(0, i) = markers[i].x;
@@ -53,19 +53,19 @@ Mat2X PointMatrixFromMarkers(const vector<Marker> &markers) {
// axis to rotate around and the magnitude is the amount of the rotation.
struct EuclideanResectCostFunction {
public:
- typedef Vec FMatrixType;
+ typedef Vec FMatrixType;
typedef Vec6 XMatrixType;
- EuclideanResectCostFunction(const vector<Marker> &markers,
- const EuclideanReconstruction &reconstruction,
- const Mat3 &initial_R)
- : markers(markers),
- reconstruction(reconstruction),
- initial_R(initial_R) {}
+ EuclideanResectCostFunction(const vector<Marker>& markers,
+ const EuclideanReconstruction& reconstruction,
+ const Mat3& initial_R)
+ : markers(markers),
+ reconstruction(reconstruction),
+ initial_R(initial_R) {}
// dRt has dR (delta R) encoded as a euler vector in the first 3 parameters,
// followed by t in the next 3 parameters.
- Vec operator()(const Vec6 &dRt) const {
+ Vec operator()(const Vec6& dRt) const {
// Unpack R, t from dRt.
Mat3 R = RotationFromEulerVector(dRt.head<3>()) * initial_R;
Vec3 t = dRt.tail<3>();
@@ -74,25 +74,26 @@ struct EuclideanResectCostFunction {
Vec residuals(2 * markers.size());
residuals.setZero();
for (int i = 0; i < markers.size(); ++i) {
- const EuclideanPoint &point =
+ const EuclideanPoint& point =
*reconstruction.PointForTrack(markers[i].track);
Vec3 projected = R * point.X + t;
projected /= projected(2);
- residuals[2*i + 0] = projected(0) - markers[i].x;
- residuals[2*i + 1] = projected(1) - markers[i].y;
+ residuals[2 * i + 0] = projected(0) - markers[i].x;
+ residuals[2 * i + 1] = projected(1) - markers[i].y;
}
return residuals;
}
- const vector<Marker> &markers;
- const EuclideanReconstruction &reconstruction;
- const Mat3 &initial_R;
+ const vector<Marker>& markers;
+ const EuclideanReconstruction& reconstruction;
+ const Mat3& initial_R;
};
} // namespace
-bool EuclideanResect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction, bool final_pass) {
+bool EuclideanResect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction,
+ bool final_pass) {
if (markers.size() < 5) {
return false;
}
@@ -106,9 +107,9 @@ bool EuclideanResect(const vector<Marker> &markers,
Mat3 R;
Vec3 t;
- if (0 || !euclidean_resection::EuclideanResection(
- points_2d, points_3d, &R, &t,
- euclidean_resection::RESECTION_EPNP)) {
+ if (0 ||
+ !euclidean_resection::EuclideanResection(
+ points_2d, points_3d, &R, &t, euclidean_resection::RESECTION_EPNP)) {
// printf("Resection for image %d failed\n", markers[0].image);
LG << "Resection for image " << markers[0].image << " failed;"
<< " trying fallback projective resection.";
@@ -116,7 +117,8 @@ bool EuclideanResect(const vector<Marker> &markers,
LG << "No fallback; failing resection for " << markers[0].image;
return false;
- if (!final_pass) return false;
+ if (!final_pass)
+ return false;
// Euclidean resection failed. Fall back to projective resection, which is
// less reliable but better conditioned when there are many points.
Mat34 P;
@@ -173,7 +175,9 @@ bool EuclideanResect(const vector<Marker> &markers,
t = dRt.tail<3>();
LG << "Resection for image " << markers[0].image << " got:\n"
- << "R:\n" << R << "\nt:\n" << t;
+ << "R:\n"
+ << R << "\nt:\n"
+ << t;
reconstruction->InsertCamera(markers[0].image, R, t);
return true;
}
@@ -186,15 +190,14 @@ namespace {
// freedom drift.
struct ProjectiveResectCostFunction {
public:
- typedef Vec FMatrixType;
+ typedef Vec FMatrixType;
typedef Vec12 XMatrixType;
- ProjectiveResectCostFunction(const vector<Marker> &markers,
- const ProjectiveReconstruction &reconstruction)
- : markers(markers),
- reconstruction(reconstruction) {}
+ ProjectiveResectCostFunction(const vector<Marker>& markers,
+ const ProjectiveReconstruction& reconstruction)
+ : markers(markers), reconstruction(reconstruction) {}
- Vec operator()(const Vec12 &vector_P) const {
+ Vec operator()(const Vec12& vector_P) const {
// Unpack P from vector_P.
Map<const Mat34> P(vector_P.data(), 3, 4);
@@ -202,24 +205,24 @@ struct ProjectiveResectCostFunction {
Vec residuals(2 * markers.size());
residuals.setZero();
for (int i = 0; i < markers.size(); ++i) {
- const ProjectivePoint &point =
+ const ProjectivePoint& point =
*reconstruction.PointForTrack(markers[i].track);
Vec3 projected = P * point.X;
projected /= projected(2);
- residuals[2*i + 0] = projected(0) - markers[i].x;
- residuals[2*i + 1] = projected(1) - markers[i].y;
+ residuals[2 * i + 0] = projected(0) - markers[i].x;
+ residuals[2 * i + 1] = projected(1) - markers[i].y;
}
return residuals;
}
- const vector<Marker> &markers;
- const ProjectiveReconstruction &reconstruction;
+ const vector<Marker>& markers;
+ const ProjectiveReconstruction& reconstruction;
};
} // namespace
-bool ProjectiveResect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction) {
+bool ProjectiveResect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction) {
if (markers.size() < 5) {
return false;
}
@@ -263,7 +266,8 @@ bool ProjectiveResect(const vector<Marker> &markers,
P = Map<Mat34>(vector_P.data(), 3, 4);
LG << "Resection for image " << markers[0].image << " got:\n"
- << "P:\n" << P;
+ << "P:\n"
+ << P;
reconstruction->InsertCamera(markers[0].image, P);
return true;
}
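EuclideanResectCostFunction above packs the camera update into a single six-vector dRt: the first three entries encode a delta rotation as an Euler vector applied on top of initial_R, the last three are the translation, and the residual is the reprojection error of every marker against its reconstructed 3D point. A minimal standalone sketch of that residual layout in plain Eigen, with an illustrative Correspondence struct standing in for libmv's Marker/EuclideanPoint pairing:

  #include <Eigen/Core>
  #include <Eigen/Geometry>
  #include <vector>

  // One observed 2D marker paired with its reconstructed 3D point
  // (illustrative stand-in for libmv's Marker / EuclideanPoint).
  struct Correspondence {
    Eigen::Vector2d xy;  // normalized image coordinates
    Eigen::Vector3d X;   // 3D point in world space
  };

  // Rotation from an Euler (axis-angle) vector, matching the role of
  // RotationFromEulerVector in the code above.
  static Eigen::Matrix3d RotationFromEuler(const Eigen::Vector3d& w) {
    double angle = w.norm();
    if (angle < 1e-12) return Eigen::Matrix3d::Identity();
    return Eigen::AngleAxisd(angle, w / angle).toRotationMatrix();
  }

  // Residuals for a parameter vector dRt = [delta rotation | translation].
  static Eigen::VectorXd Residuals(const std::vector<Correspondence>& corrs,
                                   const Eigen::Matrix3d& initial_R,
                                   const Eigen::Matrix<double, 6, 1>& dRt) {
    Eigen::Matrix3d R = RotationFromEuler(dRt.head<3>()) * initial_R;
    Eigen::Vector3d t = dRt.tail<3>();
    Eigen::VectorXd residuals(2 * corrs.size());
    for (int i = 0; i < static_cast<int>(corrs.size()); ++i) {
      Eigen::Vector3d projected = R * corrs[i].X + t;
      projected /= projected(2);  // perspective divide
      residuals[2 * i + 0] = projected(0) - corrs[i].xy(0);
      residuals[2 * i + 1] = projected(1) - corrs[i].xy(1);
    }
    return residuals;
  }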
diff --git a/intern/libmv/libmv/simple_pipeline/resect.h b/intern/libmv/libmv/simple_pipeline/resect.h
index f13d2e2d425..13c3d66bd37 100644
--- a/intern/libmv/libmv/simple_pipeline/resect.h
+++ b/intern/libmv/libmv/simple_pipeline/resect.h
@@ -22,8 +22,8 @@
#define LIBMV_SIMPLE_PIPELINE_RESECT_H
#include "libmv/base/vector.h"
-#include "libmv/simple_pipeline/tracks.h"
#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
namespace libmv {
@@ -51,8 +51,9 @@ namespace libmv {
\sa EuclideanIntersect, EuclideanReconstructTwoFrames
*/
-bool EuclideanResect(const vector<Marker> &markers,
- EuclideanReconstruction *reconstruction, bool final_pass);
+bool EuclideanResect(const vector<Marker>& markers,
+ EuclideanReconstruction* reconstruction,
+ bool final_pass);
/*!
Estimate the projective pose of a camera from 2D to 3D correspondences.
@@ -78,8 +79,8 @@ bool EuclideanResect(const vector<Marker> &markers,
\sa ProjectiveIntersect, ProjectiveReconstructTwoFrames
*/
-bool ProjectiveResect(const vector<Marker> &markers,
- ProjectiveReconstruction *reconstruction);
+bool ProjectiveResect(const vector<Marker>& markers,
+ ProjectiveReconstruction* reconstruction);
} // namespace libmv
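From the caller's side, EuclideanResect() needs only markers whose tracks already have reconstructed 3D points, and at least five of them. A hedged usage sketch; the filtering wrapper below is illustrative and not part of libmv:

  #include "libmv/simple_pipeline/resect.h"

  // Given all markers of one image, keep only those whose track already has a
  // reconstructed 3D point, then resect the camera from the survivors.
  bool ResectFromMarkers(const libmv::vector<libmv::Marker>& image_markers,
                         bool final_pass,
                         libmv::EuclideanReconstruction* reconstruction) {
    libmv::vector<libmv::Marker> usable;
    for (int i = 0; i < image_markers.size(); ++i) {
      if (reconstruction->PointForTrack(image_markers[i].track) != NULL) {
        usable.push_back(image_markers[i]);
      }
    }
    // EuclideanResect() needs at least five 2D-3D correspondences.
    return libmv::EuclideanResect(usable, reconstruction, final_pass);
  }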
diff --git a/intern/libmv/libmv/simple_pipeline/resect_test.cc b/intern/libmv/libmv/simple_pipeline/resect_test.cc
index 811edd282d8..ecf3f9b673d 100644
--- a/intern/libmv/libmv/simple_pipeline/resect_test.cc
+++ b/intern/libmv/libmv/simple_pipeline/resect_test.cc
@@ -153,7 +153,7 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
// not precise enough with only 4 points.
//
// TODO(jmichot): Reenable this test when there is nonlinear refinement.
-#if 0
+# if 0
R_output.setIdentity();
T_output.setZero();
@@ -163,7 +163,7 @@ TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);*/
-#endif
+# endif
}
// TODO(jmichot): Reduce the code duplication here with the code above.
diff --git a/intern/libmv/libmv/simple_pipeline/tracks.cc b/intern/libmv/libmv/simple_pipeline/tracks.cc
index d5d009708ba..66243807a4b 100644
--- a/intern/libmv/libmv/simple_pipeline/tracks.cc
+++ b/intern/libmv/libmv/simple_pipeline/tracks.cc
@@ -21,31 +21,31 @@
#include "libmv/simple_pipeline/tracks.h"
#include <algorithm>
-#include <vector>
#include <iterator>
+#include <vector>
#include "libmv/numeric/numeric.h"
namespace libmv {
-Tracks::Tracks(const Tracks &other) {
+Tracks::Tracks(const Tracks& other) {
markers_ = other.markers_;
}
-Tracks::Tracks(const vector<Marker> &markers) : markers_(markers) {}
+Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {
+}
void Tracks::Insert(int image, int track, double x, double y, double weight) {
// TODO(keir): Wow, this is quadratic for repeated insertions. Fix this by
// adding a smarter data structure like a set<>.
for (int i = 0; i < markers_.size(); ++i) {
- if (markers_[i].image == image &&
- markers_[i].track == track) {
+ if (markers_[i].image == image && markers_[i].track == track) {
markers_[i].x = x;
markers_[i].y = y;
return;
}
}
- Marker marker = { image, track, x, y, weight };
+ Marker marker = {image, track, x, y, weight};
markers_.push_back(marker);
}
@@ -101,15 +101,17 @@ vector<Marker> Tracks::MarkersForTracksInBothImages(int image1,
std::sort(image2_tracks.begin(), image2_tracks.end());
std::vector<int> intersection;
- std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
- image2_tracks.begin(), image2_tracks.end(),
+ std::set_intersection(image1_tracks.begin(),
+ image1_tracks.end(),
+ image2_tracks.begin(),
+ image2_tracks.end(),
std::back_inserter(intersection));
vector<Marker> markers;
for (int i = 0; i < markers_.size(); ++i) {
if ((markers_[i].image == image1 || markers_[i].image == image2) &&
- std::binary_search(intersection.begin(), intersection.end(),
- markers_[i].track)) {
+ std::binary_search(
+ intersection.begin(), intersection.end(), markers_[i].track)) {
markers.push_back(markers_[i]);
}
}
@@ -122,7 +124,7 @@ Marker Tracks::MarkerInImageForTrack(int image, int track) const {
return markers_[i];
}
}
- Marker null = { -1, -1, -1, -1, 0.0 };
+ Marker null = {-1, -1, -1, -1, 0.0};
return null;
}
@@ -168,12 +170,12 @@ int Tracks::NumMarkers() const {
return markers_.size();
}
-void CoordinatesForMarkersInImage(const vector<Marker> &markers,
+void CoordinatesForMarkersInImage(const vector<Marker>& markers,
int image,
- Mat *coordinates) {
+ Mat* coordinates) {
vector<Vec2> coords;
for (int i = 0; i < markers.size(); ++i) {
- const Marker &marker = markers[i];
+ const Marker& marker = markers[i];
if (markers[i].image == image) {
coords.push_back(Vec2(marker.x, marker.y));
}
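The TODO in Tracks::Insert() points out that the linear scan makes repeated insertions quadratic overall. A sketch of the keyed container it suggests, using an (image, track) pair as the key; std::map is chosen for illustration, while libmv itself keeps a flat vector<Marker>:

  #include <map>
  #include <utility>

  struct MarkerData {
    double x, y, weight;
  };

  class TrackMap {
   public:
    // O(log n) insert-or-update instead of the O(n) scan in Tracks::Insert().
    void Insert(int image, int track, double x, double y, double weight = 1.0) {
      markers_[std::make_pair(image, track)] = MarkerData{x, y, weight};
    }
    // Returns NULL when no marker exists for this (image, track) pair.
    const MarkerData* MarkerInImageForTrack(int image, int track) const {
      std::map<std::pair<int, int>, MarkerData>::const_iterator it =
          markers_.find(std::make_pair(image, track));
      return it == markers_.end() ? NULL : &it->second;
    }
   private:
    std::map<std::pair<int, int>, MarkerData> markers_;
  };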
diff --git a/intern/libmv/libmv/simple_pipeline/tracks.h b/intern/libmv/libmv/simple_pipeline/tracks.h
index 752d2790a1c..c3df3a223d8 100644
--- a/intern/libmv/libmv/simple_pipeline/tracks.h
+++ b/intern/libmv/libmv/simple_pipeline/tracks.h
@@ -36,7 +36,8 @@ namespace libmv {
\a weight is used by bundle adjustment and weight means how much the
track affects the final solution.
- \note Markers are typically aggregated with the help of the \link Tracks class.
+ \note Markers are typically aggregated with the help of the \link Tracks
+ class.
\sa Tracks
*/
@@ -56,19 +57,20 @@ struct Marker {
images, which must get created before any 3D reconstruction can take place.
The container has several fast lookups for queries typically needed for
- structure from motion algorithms, such as \link MarkersForTracksInBothImages().
+ structure from motion algorithms, such as \link
+ MarkersForTracksInBothImages().
\sa Marker
*/
class Tracks {
public:
- Tracks() { }
+ Tracks() {}
// Copy constructor for a tracks object.
- Tracks(const Tracks &other);
+ Tracks(const Tracks& other);
/// Construct a new tracks object using the given markers to start.
- explicit Tracks(const vector<Marker> &markers);
+ explicit Tracks(const vector<Marker>& markers);
/*!
Inserts a marker into the set. If there is already a marker for the given
@@ -129,9 +131,9 @@ class Tracks {
vector<Marker> markers_;
};
-void CoordinatesForMarkersInImage(const vector<Marker> &markers,
+void CoordinatesForMarkersInImage(const vector<Marker>& markers,
int image,
- Mat *coordinates);
+ Mat* coordinates);
} // namespace libmv
diff --git a/intern/libmv/libmv/threading/threading.h b/intern/libmv/libmv/threading/threading.h
index f23bf6f172c..da625e02424 100644
--- a/intern/libmv/libmv/threading/threading.h
+++ b/intern/libmv/libmv/threading/threading.h
@@ -31,7 +31,7 @@
namespace libmv {
#if COMPILER_SUPPORTS_CXX11
-using mutex = std::mutex;
+using mutex = std::mutex;
using scoped_lock = std::unique_lock<std::mutex>;
using condition_variable = std::condition_variable;
#else
diff --git a/intern/libmv/libmv/tracking/brute_region_tracker.cc b/intern/libmv/libmv/tracking/brute_region_tracker.cc
index 85aecb7f633..7007fb9440b 100644
--- a/intern/libmv/libmv/tracking/brute_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/brute_region_tracker.cc
@@ -25,31 +25,30 @@
#endif
#include "libmv/base/aligned_malloc.h"
-#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
#include "libmv/image/correlation.h"
+#include "libmv/image/image.h"
#include "libmv/image/sample.h"
#include "libmv/logging/logging.h"
namespace libmv {
namespace {
-bool RegionIsInBounds(const FloatImage &image1,
- double x, double y,
+bool RegionIsInBounds(const FloatImage& image1,
+ double x,
+ double y,
int half_window_size) {
// Check the minimum coordinates.
int min_x = floor(x) - half_window_size - 1;
int min_y = floor(y) - half_window_size - 1;
- if (min_x < 0.0 ||
- min_y < 0.0) {
+ if (min_x < 0.0 || min_y < 0.0) {
return false;
}
// Check the maximum coordinates.
int max_x = ceil(x) + half_window_size + 1;
int max_y = ceil(y) + half_window_size + 1;
- if (max_x > image1.cols() ||
- max_y > image1.rows()) {
+ if (max_x > image1.cols() || max_y > image1.rows()) {
return false;
}
@@ -69,14 +68,15 @@ bool RegionIsInBounds(const FloatImage &image1,
// and "b", since the SSE load instructionst will pull in memory past the end
// of the arrays if their size is not a multiple of 16.
inline static __m128i SumOfAbsoluteDifferencesContiguousSSE(
- const unsigned char *a, // aligned
- const unsigned char *b, // not aligned
+ const unsigned char* a, // aligned
+ const unsigned char* b, // not aligned
unsigned int size,
__m128i sad) {
// Do the bulk of the work as 16-way integer operations.
for (unsigned int j = 0; j < size / 16; j++) {
- sad = _mm_add_epi32(sad, _mm_sad_epu8( _mm_load_si128 ((__m128i*)(a + 16 * j)),
- _mm_loadu_si128((__m128i*)(b + 16 * j))));
+ sad = _mm_add_epi32(sad,
+ _mm_sad_epu8(_mm_load_si128((__m128i*)(a + 16 * j)),
+ _mm_loadu_si128((__m128i*)(b + 16 * j))));
}
// Handle the trailing end.
// TODO(keir): Benchmark to verify that the below SSE is a win compared to a
@@ -90,32 +90,63 @@ inline static __m128i SumOfAbsoluteDifferencesContiguousSSE(
unsigned int remainder = size % 16u;
if (remainder) {
unsigned int j = size / 16;
- __m128i a_trail = _mm_load_si128 ((__m128i*)(a + 16 * j));
+ __m128i a_trail = _mm_load_si128((__m128i*)(a + 16 * j));
__m128i b_trail = _mm_loadu_si128((__m128i*)(b + 16 * j));
__m128i mask;
switch (remainder) {
-#define X 0xff
- case 1: mask = _mm_setr_epi8(X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 2: mask = _mm_setr_epi8(X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 3: mask = _mm_setr_epi8(X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 4: mask = _mm_setr_epi8(X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 5: mask = _mm_setr_epi8(X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 6: mask = _mm_setr_epi8(X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 7: mask = _mm_setr_epi8(X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 8: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0); break;
- case 9: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0); break;
- case 10: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0); break;
- case 11: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0); break;
- case 12: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0); break;
- case 13: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0); break;
- case 14: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0); break;
- case 15: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0); break;
+# define X 0xff
+ case 1:
+ mask = _mm_setr_epi8(X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 2:
+ mask = _mm_setr_epi8(X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 3:
+ mask = _mm_setr_epi8(X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 4:
+ mask = _mm_setr_epi8(X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 5:
+ mask = _mm_setr_epi8(X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 6:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 7:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 8:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 9:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0);
+ break;
+ case 10:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0);
+ break;
+ case 11:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0);
+ break;
+ case 12:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0);
+ break;
+ case 13:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0);
+ break;
+ case 14:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0);
+ break;
+ case 15:
+ mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0);
+ break;
// To silence compiler warning.
default: mask = _mm_setzero_si128(); break;
-#undef X
+# undef X
}
- sad = _mm_add_epi32(sad, _mm_sad_epu8(_mm_and_si128(mask, a_trail),
- _mm_and_si128(mask, b_trail)));
+ sad = _mm_add_epi32(sad,
+ _mm_sad_epu8(_mm_and_si128(mask, a_trail),
+ _mm_and_si128(mask, b_trail)));
}
return sad;
}
@@ -124,13 +155,12 @@ inline static __m128i SumOfAbsoluteDifferencesContiguousSSE(
// Computes the sum of absolute differences between pattern and image. Pattern
// must be 16-byte aligned, and the stride must be a multiple of 16. The image
// pointer does not have to be aligned.
-int SumOfAbsoluteDifferencesContiguousImage(
- const unsigned char *pattern,
- unsigned int pattern_width,
- unsigned int pattern_height,
- unsigned int pattern_stride,
- const unsigned char *image,
- unsigned int image_stride) {
+int SumOfAbsoluteDifferencesContiguousImage(const unsigned char* pattern,
+ unsigned int pattern_width,
+ unsigned int pattern_height,
+ unsigned int pattern_stride,
+ const unsigned char* image,
+ unsigned int image_stride) {
#ifdef __SSE2__
// TODO(keir): Add interleaved accumulation, where accumulation is done into
// two or more SSE registers that then get combined at the end. This reduces
@@ -145,8 +175,7 @@ int SumOfAbsoluteDifferencesContiguousImage(
sad);
}
return _mm_cvtsi128_si32(
- _mm_add_epi32(sad,
- _mm_shuffle_epi32(sad, _MM_SHUFFLE(3, 0, 1, 2))));
+ _mm_add_epi32(sad, _mm_shuffle_epi32(sad, _MM_SHUFFLE(3, 0, 1, 2))));
#else
int sad = 0;
for (int r = 0; r < pattern_height; ++r) {
@@ -161,12 +190,13 @@ int SumOfAbsoluteDifferencesContiguousImage(
// Sample a region of size width, height centered at x,y in image, converting
// from float to byte in the process. Samples from the first channel. Puts
// result into *pattern.
-void SampleRectangularPattern(const FloatImage &image,
- double x, double y,
+void SampleRectangularPattern(const FloatImage& image,
+ double x,
+ double y,
int width,
int height,
int pattern_stride,
- unsigned char *pattern) {
+ unsigned char* pattern) {
// There are two cases for width and height: even or odd. If it's odd, then
// the bounds [-width / 2, width / 2] works as expected. However, for even,
// this results in one extra access past the end. So use < instead of <= in
@@ -175,7 +205,7 @@ void SampleRectangularPattern(const FloatImage &image,
int end_height = (height / 2) + (height % 2);
for (int r = -height / 2; r < end_height; ++r) {
for (int c = -width / 2; c < end_width; ++c) {
- pattern[pattern_stride * (r + height / 2) + c + width / 2] =
+ pattern[pattern_stride * (r + height / 2) + c + width / 2] =
SampleLinear(image, y + r, x + c, 0) * 255.0;
}
}
@@ -195,30 +225,30 @@ inline int PadToAlignment(int x, int alignment) {
// is returned in *pattern_stride.
//
// NOTE: Caller must free *pattern with aligned_malloc() from above.
-void SampleSquarePattern(const FloatImage &image,
- double x, double y,
+void SampleSquarePattern(const FloatImage& image,
+ double x,
+ double y,
int half_width,
- unsigned char **pattern,
- int *pattern_stride) {
+ unsigned char** pattern,
+ int* pattern_stride) {
int width = 2 * half_width + 1;
// Allocate an aligned block with padding on the end so each row of the
// pattern starts on a 16-byte boundary.
*pattern_stride = PadToAlignment(width, 16);
int pattern_size_bytes = *pattern_stride * width;
- *pattern = static_cast<unsigned char *>(
- aligned_malloc(pattern_size_bytes, 16));
- SampleRectangularPattern(image, x, y, width, width,
- *pattern_stride,
- *pattern);
+ *pattern =
+ static_cast<unsigned char*>(aligned_malloc(pattern_size_bytes, 16));
+ SampleRectangularPattern(
+ image, x, y, width, width, *pattern_stride, *pattern);
}
// NOTE: Caller must free *image with aligned_malloc() from above.
-void FloatArrayToByteArrayWithPadding(const FloatImage &float_image,
- unsigned char **image,
- int *image_stride) {
+void FloatArrayToByteArrayWithPadding(const FloatImage& float_image,
+ unsigned char** image,
+ int* image_stride) {
// Allocate enough so that accessing 16 elements past the end is fine.
*image_stride = float_image.Width() + 16;
- *image = static_cast<unsigned char *>(
+ *image = static_cast<unsigned char*>(
aligned_malloc(*image_stride * float_image.Height(), 16));
for (int i = 0; i < float_image.Height(); ++i) {
for (int j = 0; j < float_image.Width(); ++j) {
@@ -235,10 +265,12 @@ void FloatArrayToByteArrayWithPadding(const FloatImage &float_image,
// values for every hypothesis looks like.
//
// TODO(keir): Priority queue for multiple hypothesis.
-bool BruteRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool BruteRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
<< ", hw=" << half_window_size << ".";
@@ -252,28 +284,28 @@ bool BruteRegionTracker::Track(const FloatImage &image1,
BlurredImageAndDerivativesChannels(image2, 0.9, &image_and_gradient2);
// Sample the pattern to get it aligned to an image grid.
- unsigned char *pattern;
+ unsigned char* pattern;
int pattern_stride;
- SampleSquarePattern(image_and_gradient1, x1, y1, half_window_size,
- &pattern,
- &pattern_stride);
+ SampleSquarePattern(
+ image_and_gradient1, x1, y1, half_window_size, &pattern, &pattern_stride);
// Convert the search area directly to bytes without sampling.
- unsigned char *search_area;
+ unsigned char* search_area;
int search_area_stride;
- FloatArrayToByteArrayWithPadding(image_and_gradient2, &search_area,
- &search_area_stride);
+ FloatArrayToByteArrayWithPadding(
+ image_and_gradient2, &search_area, &search_area_stride);
// Try all possible locations inside the search area. Yes, everywhere.
int best_i = -1, best_j = -1, best_sad = INT_MAX;
for (int i = 0; i < image2.Height() - pattern_width; ++i) {
for (int j = 0; j < image2.Width() - pattern_width; ++j) {
- int sad = SumOfAbsoluteDifferencesContiguousImage(pattern,
- pattern_width,
- pattern_width,
- pattern_stride,
- search_area + search_area_stride * i + j,
- search_area_stride);
+ int sad = SumOfAbsoluteDifferencesContiguousImage(
+ pattern,
+ pattern_width,
+ pattern_width,
+ pattern_stride,
+ search_area + search_area_stride * i + j,
+ search_area_stride);
if (sad < best_sad) {
best_i = i;
best_j = j;
@@ -309,16 +341,23 @@ bool BruteRegionTracker::Track(const FloatImage &image1,
}
Array3Df image_and_gradient1_sampled, image_and_gradient2_sampled;
- SamplePattern(image_and_gradient1, x1, y1, half_window_size, 3,
+ SamplePattern(image_and_gradient1,
+ x1,
+ y1,
+ half_window_size,
+ 3,
&image_and_gradient1_sampled);
- SamplePattern(image_and_gradient2, *x2, *y2, half_window_size, 3,
+ SamplePattern(image_and_gradient2,
+ *x2,
+ *y2,
+ half_window_size,
+ 3,
&image_and_gradient2_sampled);
// Compute the Pearson product-moment correlation coefficient to check
// for sanity.
double correlation = PearsonProductMomentCorrelation(
- image_and_gradient1_sampled,
- image_and_gradient2_sampled);
+ image_and_gradient1_sampled, image_and_gradient2_sampled);
LG << "Final correlation: " << correlation;
diff --git a/intern/libmv/libmv/tracking/brute_region_tracker.h b/intern/libmv/libmv/tracking/brute_region_tracker.h
index a699c42ee92..183dc6df07b 100644
--- a/intern/libmv/libmv/tracking/brute_region_tracker.h
+++ b/intern/libmv/libmv/tracking/brute_region_tracker.h
@@ -27,17 +27,17 @@
namespace libmv {
struct BruteRegionTracker : public RegionTracker {
- BruteRegionTracker()
- : half_window_size(4),
- minimum_correlation(0.78) {}
+ BruteRegionTracker() : half_window_size(4), minimum_correlation(0.78) {}
virtual ~BruteRegionTracker() {}
// Tracker interface.
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
// No point in creating getters or setters.
int half_window_size;
diff --git a/intern/libmv/libmv/tracking/hybrid_region_tracker.cc b/intern/libmv/libmv/tracking/hybrid_region_tracker.cc
index ea3b0f5bfc0..f0392643a6c 100644
--- a/intern/libmv/libmv/tracking/hybrid_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/hybrid_region_tracker.cc
@@ -20,17 +20,19 @@
#include "libmv/tracking/hybrid_region_tracker.h"
-#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
#include "libmv/image/sample.h"
#include "libmv/logging/logging.h"
namespace libmv {
-bool HybridRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool HybridRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
double x2_coarse = *x2;
double y2_coarse = *y2;
if (!coarse_tracker_->Track(image1, image2, x1, y1, &x2_coarse, &y2_coarse)) {
diff --git a/intern/libmv/libmv/tracking/hybrid_region_tracker.h b/intern/libmv/libmv/tracking/hybrid_region_tracker.h
index 967d2afd1e5..2730d2dbbf2 100644
--- a/intern/libmv/libmv/tracking/hybrid_region_tracker.h
+++ b/intern/libmv/libmv/tracking/hybrid_region_tracker.h
@@ -21,8 +21,8 @@
#ifndef LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
#define LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
-#include "libmv/image/image.h"
#include "libmv/base/scoped_ptr.h"
+#include "libmv/image/image.h"
#include "libmv/tracking/region_tracker.h"
namespace libmv {
@@ -30,18 +30,19 @@ namespace libmv {
// TODO(keir): Documentation!
class HybridRegionTracker : public RegionTracker {
public:
- HybridRegionTracker(RegionTracker *coarse_tracker,
- RegionTracker *fine_tracker)
- : coarse_tracker_(coarse_tracker),
- fine_tracker_(fine_tracker) {}
+ HybridRegionTracker(RegionTracker* coarse_tracker,
+ RegionTracker* fine_tracker)
+ : coarse_tracker_(coarse_tracker), fine_tracker_(fine_tracker) {}
virtual ~HybridRegionTracker() {}
// Tracker interface.
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
scoped_ptr<RegionTracker> coarse_tracker_;
scoped_ptr<RegionTracker> fine_tracker_;
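The hybrid tracker is a straight chain: run the coarse tracker to get a rough (x2, y2), then hand that result to the fine tracker as the starting guess for refinement. A condensed sketch of the same delegation against a minimal stand-in interface (the types below are illustrative, not libmv's):

  // Minimal stand-ins for the tracker interface above (illustrative only).
  struct Image {};
  struct Tracker {
    virtual ~Tracker() {}
    virtual bool Track(const Image& a, const Image& b,
                       double x1, double y1, double* x2, double* y2) const = 0;
  };

  // Coarse-to-fine chaining, mirroring HybridRegionTracker::Track().
  struct HybridTracker : public Tracker {
    HybridTracker(const Tracker* coarse, const Tracker* fine)
        : coarse_(coarse), fine_(fine) {}
    virtual bool Track(const Image& a, const Image& b,
                       double x1, double y1, double* x2, double* y2) const {
      double cx = *x2, cy = *y2;
      if (!coarse_->Track(a, b, x1, y1, &cx, &cy)) {
        return false;  // coarse stage failed; nothing to refine
      }
      *x2 = cx;
      *y2 = cy;
      return fine_->Track(a, b, x1, y1, x2, y2);  // refine the coarse guess
    }
    const Tracker* coarse_;
    const Tracker* fine_;
  };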
diff --git a/intern/libmv/libmv/tracking/kalman_filter.h b/intern/libmv/libmv/tracking/kalman_filter.h
index 9841f0e912c..b1312d0e027 100644
--- a/intern/libmv/libmv/tracking/kalman_filter.h
+++ b/intern/libmv/libmv/tracking/kalman_filter.h
@@ -19,13 +19,14 @@
// IN THE SOFTWARE.
#ifndef LIBMV_TRACKING_KALMAN_FILTER_H_
+#define LIBMV_TRACKING_KALMAN_FILTER_H_
#include "libmv/numeric/numeric.h"
namespace mv {
// A Kalman filter with order N and observation size K.
-template<typename T, int N, int K>
+template <typename T, int N, int K>
class KalmanFilter {
public:
struct State {
@@ -38,54 +39,47 @@ class KalmanFilter {
const T* observation_data,
const T* process_covariance_data,
const T* default_measurement_covariance_data)
- : state_transition_matrix_(
- Eigen::Matrix<T, N, N, Eigen::RowMajor>(state_transition_data)),
- observation_matrix_(
- Eigen::Matrix<T, K, N, Eigen::RowMajor>(observation_data)),
- process_covariance_(
- Eigen::Matrix<T, N, N, Eigen::RowMajor>(process_covariance_data)),
- default_measurement_covariance_(
- Eigen::Matrix<T, K, K, Eigen::RowMajor>(
- default_measurement_covariance_data)) {
- }
+ : state_transition_matrix_(
+ Eigen::Matrix<T, N, N, Eigen::RowMajor>(state_transition_data)),
+ observation_matrix_(
+ Eigen::Matrix<T, K, N, Eigen::RowMajor>(observation_data)),
+ process_covariance_(
+ Eigen::Matrix<T, N, N, Eigen::RowMajor>(process_covariance_data)),
+ default_measurement_covariance_(Eigen::Matrix<T, K, K, Eigen::RowMajor>(
+ default_measurement_covariance_data)) {}
- KalmanFilter(
- const Eigen::Matrix<T, N, N> &state_transition_matrix,
- const Eigen::Matrix<T, K, N> &observation_matrix,
- const Eigen::Matrix<T, N, N> &process_covariance,
- const Eigen::Matrix<T, K, K> &default_measurement_covariance)
- : state_transition_matrix_(state_transition_matrix),
- observation_matrix_(observation_matrix),
- process_covariance_(process_covariance),
- default_measurement_covariance_(default_measurement_covariance) {
- }
+ KalmanFilter(const Eigen::Matrix<T, N, N>& state_transition_matrix,
+ const Eigen::Matrix<T, K, N>& observation_matrix,
+ const Eigen::Matrix<T, N, N>& process_covariance,
+ const Eigen::Matrix<T, K, K>& default_measurement_covariance)
+ : state_transition_matrix_(state_transition_matrix),
+ observation_matrix_(observation_matrix),
+ process_covariance_(process_covariance),
+ default_measurement_covariance_(default_measurement_covariance) {}
// Advances the system according to the current state estimate.
- void Step(State *state) const {
+ void Step(State* state) const {
state->mean = state_transition_matrix_ * state->mean;
- state->covariance = state_transition_matrix_ *
- state->covariance *
- state_transition_matrix_.transpose() +
+ state->covariance = state_transition_matrix_ * state->covariance *
+ state_transition_matrix_.transpose() +
process_covariance_;
}
// Updates a state with a new measurement.
- void Update(const Eigen::Matrix<T, K, 1> &measurement_mean,
- const Eigen::Matrix<T, K, K> &measurement_covariance,
- State *state) const {
+ void Update(const Eigen::Matrix<T, K, 1>& measurement_mean,
+ const Eigen::Matrix<T, K, K>& measurement_covariance,
+ State* state) const {
// Calculate the innovation, which is a distribution over prediction error.
- Eigen::Matrix<T, K, 1> innovation_mean = measurement_mean -
- observation_matrix_ *
- state->mean;
+ Eigen::Matrix<T, K, 1> innovation_mean =
+ measurement_mean - observation_matrix_ * state->mean;
Eigen::Matrix<T, K, K> innovation_covariance =
- observation_matrix_ *
- state->covariance *
- observation_matrix_.transpose() +
+ observation_matrix_ * state->covariance *
+ observation_matrix_.transpose() +
measurement_covariance;
// Calculate the Kalman gain.
Eigen::Matrix<T, 6, 2> kalman_gain = state->covariance *
- observation_matrix_.transpose() *
+ observation_matrix_.transpose() *
innovation_covariance.inverse();
// Update the state mean and covariance.
@@ -95,8 +89,8 @@ class KalmanFilter {
state->covariance;
}
- void Update(State *state,
- const Eigen::Matrix<T, K, 1> &measurement_mean) const {
+ void Update(State* state,
+ const Eigen::Matrix<T, K, 1>& measurement_mean) const {
Update(state, measurement_mean, default_measurement_covariance_);
}
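Step() and Update() above are the textbook Kalman predict and correct stages: with state transition F, observation H, process covariance Q and measurement covariance R, predict does x <- F x and P <- F P F^T + Q, and correct forms the innovation, its covariance S = H P H^T + R, and the gain P H^T S^-1. A free-standing Eigen sketch of both stages; the fixed-size templates mirror the N/K parameters, and the names are illustrative:

  #include <Eigen/Dense>

  // Predict: x <- F x,  P <- F P F^T + Q.
  template <int N>
  void Predict(const Eigen::Matrix<double, N, N>& F,
               const Eigen::Matrix<double, N, N>& Q,
               Eigen::Matrix<double, N, 1>* x,
               Eigen::Matrix<double, N, N>* P) {
    *x = F * (*x);
    *P = F * (*P) * F.transpose() + Q;
  }

  // Correct: y = z - H x,  S = H P H^T + R,  gain = P H^T S^-1,
  //          x <- x + gain y,  P <- (I - gain H) P.
  template <int N, int K>
  void Correct(const Eigen::Matrix<double, K, N>& H,
               const Eigen::Matrix<double, K, K>& R,
               const Eigen::Matrix<double, K, 1>& z,
               Eigen::Matrix<double, N, 1>* x,
               Eigen::Matrix<double, N, N>* P) {
    Eigen::Matrix<double, K, 1> innovation = z - H * (*x);
    Eigen::Matrix<double, K, K> S = H * (*P) * H.transpose() + R;
    Eigen::Matrix<double, N, K> gain = (*P) * H.transpose() * S.inverse();
    *x += gain * innovation;
    *P = (Eigen::Matrix<double, N, N>::Identity() - gain * H) * (*P);
  }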
diff --git a/intern/libmv/libmv/tracking/klt_region_tracker.cc b/intern/libmv/libmv/tracking/klt_region_tracker.cc
index dbbf9f0b996..df1ded65489 100644
--- a/intern/libmv/libmv/tracking/klt_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/klt_region_tracker.cc
@@ -20,10 +20,10 @@
#include "libmv/tracking/klt_region_tracker.h"
-#include "libmv/logging/logging.h"
-#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
#include "libmv/image/sample.h"
+#include "libmv/logging/logging.h"
namespace libmv {
@@ -33,16 +33,18 @@ namespace libmv {
// TODO(keir): The calls to SampleLinear() do boundary checking that should
// instead happen outside the loop. Since this is the innermost loop, the extra
// bounds checking hurts performance.
-static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
- const Array3Df &image_and_gradient2,
- double x1, double y1,
- double x2, double y2,
+static void ComputeTrackingEquation(const Array3Df& image_and_gradient1,
+ const Array3Df& image_and_gradient2,
+ double x1,
+ double y1,
+ double x2,
+ double y2,
int half_width,
- float *gxx,
- float *gxy,
- float *gyy,
- float *ex,
- float *ey) {
+ float* gxx,
+ float* gxy,
+ float* gyy,
+ float* ex,
+ float* ey) {
*gxx = *gxy = *gyy = 0;
*ex = *ey = 0;
for (int r = -half_width; r <= half_width; ++r) {
@@ -51,8 +53,8 @@ static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
float yy1 = y1 + r;
float xx2 = x2 + c;
float yy2 = y2 + r;
- float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
- float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
+ float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
+ float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
float gx = SampleLinear(image_and_gradient2, yy2, xx2, 1);
float gy = SampleLinear(image_and_gradient2, yy2, xx2, 2);
*gxx += gx * gx;
@@ -64,22 +66,21 @@ static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
}
}
-static bool RegionIsInBounds(const FloatImage &image1,
- double x, double y,
- int half_window_size) {
+static bool RegionIsInBounds(const FloatImage& image1,
+ double x,
+ double y,
+ int half_window_size) {
// Check the minimum coordinates.
int min_x = floor(x) - half_window_size - 1;
int min_y = floor(y) - half_window_size - 1;
- if (min_x < 0.0 ||
- min_y < 0.0) {
+ if (min_x < 0.0 || min_y < 0.0) {
return false;
}
// Check the maximum coordinates.
int max_x = ceil(x) + half_window_size + 1;
int max_y = ceil(y) + half_window_size + 1;
- if (max_x > image1.cols() ||
- max_y > image1.rows()) {
+ if (max_x > image1.cols() || max_y > image1.rows()) {
return false;
}
@@ -87,10 +88,12 @@ static bool RegionIsInBounds(const FloatImage &image1,
return true;
}
-bool KltRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool KltRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
<< ", hw=" << half_window_size << ".";
@@ -116,10 +119,16 @@ bool KltRegionTracker::Track(const FloatImage &image1,
float gxx, gxy, gyy, ex, ey;
ComputeTrackingEquation(image_and_gradient1,
image_and_gradient2,
- x1, y1,
- *x2, *y2,
+ x1,
+ y1,
+ *x2,
+ *y2,
half_window_size,
- &gxx, &gxy, &gyy, &ex, &ey);
+ &gxx,
+ &gxy,
+ &gyy,
+ &ex,
+ &ey);
// Solve the tracking equation
//
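ComputeTrackingEquation() accumulates the 2x2 gradient matrix G = [gxx gxy; gxy gyy] and the error vector e = (ex, ey); the tracking equation solved right after this point is G d = e, and the solution d is added onto the current (x2, y2) estimate each iteration. A standalone sketch of that solve; the determinant guard mirrors the usual minimum-determinant check, with illustrative threshold handling:

  #include <cmath>

  // Solve [gxx gxy; gxy gyy] * [dx; dy] = [ex; ey] for the KLT update.
  // Returns false when the system is near singular (flat, untrackable patch).
  static bool SolveTrackingEquation(float gxx, float gxy, float gyy,
                                    float ex, float ey,
                                    float min_determinant,
                                    double* dx, double* dy) {
    float det = gxx * gyy - gxy * gxy;
    if (std::fabs(det) < min_determinant) {
      return false;
    }
    *dx = (gyy * ex - gxy * ey) / det;
    *dy = (gxx * ey - gxy * ex) / det;
    return true;
  }

  // Inside the iteration loop one would then apply:  *x2 += dx;  *y2 += dy;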
diff --git a/intern/libmv/libmv/tracking/klt_region_tracker.h b/intern/libmv/libmv/tracking/klt_region_tracker.h
index 43977757084..07ed1b7155c 100644
--- a/intern/libmv/libmv/tracking/klt_region_tracker.h
+++ b/intern/libmv/libmv/tracking/klt_region_tracker.h
@@ -37,10 +37,12 @@ struct KltRegionTracker : public RegionTracker {
virtual ~KltRegionTracker() {}
// Tracker interface.
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
// No point in creating getters or setters.
int half_window_size;
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker.cc b/intern/libmv/libmv/tracking/pyramid_region_tracker.cc
index 4db501050f3..52764a535e0 100644
--- a/intern/libmv/libmv/tracking/pyramid_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker.cc
@@ -29,8 +29,9 @@
namespace libmv {
-static void MakePyramid(const FloatImage &image, int num_levels,
- std::vector<FloatImage> *pyramid) {
+static void MakePyramid(const FloatImage& image,
+ int num_levels,
+ std::vector<FloatImage>* pyramid) {
pyramid->resize(num_levels);
(*pyramid)[0] = image;
for (int i = 1; i < num_levels; ++i) {
@@ -38,10 +39,12 @@ static void MakePyramid(const FloatImage &image, int num_levels,
}
}
-bool PyramidRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool PyramidRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
// Shrink the guessed x and y location to match the coarsest level + 1 (which
// then gets corrected in the loop).
*x2 /= pow(2., num_levels_);
@@ -71,8 +74,8 @@ bool PyramidRegionTracker::Track(const FloatImage &image1,
// Track the point on this level with the base tracker.
LG << "Tracking on level " << i;
- bool succeeded = tracker_->Track(pyramid1[i], pyramid2[i], xx, yy,
- &x2_new, &y2_new);
+ bool succeeded =
+ tracker_->Track(pyramid1[i], pyramid2[i], xx, yy, &x2_new, &y2_new);
if (!succeeded) {
if (i == 0) {
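The coordinate bookkeeping here is the core of the pyramid tracker: divide the initial guess by 2^num_levels, then walk from the coarsest level down, doubling the current estimate before refining it at each level with the base tracker. A sketch of just that bookkeeping, with TrackAtLevel standing in for the per-level tracker_->Track() call:

  #include <cmath>

  // Hypothetical per-level refinement: returns true and updates (*x2, *y2).
  bool TrackAtLevel(int level, double x1, double y1, double* x2, double* y2);

  // Coarse-to-fine propagation of the guess, as in PyramidRegionTracker::Track().
  bool TrackPyramid(int num_levels, double x1, double y1,
                    double* x2, double* y2) {
    // Start the guess one level above the coarsest; the first doubling corrects it.
    *x2 /= std::pow(2.0, num_levels);
    *y2 /= std::pow(2.0, num_levels);
    for (int i = num_levels - 1; i >= 0; --i) {
      // Position in the first image at pyramid level i.
      double xx = x1 / std::pow(2.0, i);
      double yy = y1 / std::pow(2.0, i);
      // Guess that the new position is where the previous level tracked to.
      *x2 *= 2.0;
      *y2 *= 2.0;
      double x2_new = *x2, y2_new = *y2;
      if (TrackAtLevel(i, xx, yy, &x2_new, &y2_new)) {
        *x2 = x2_new;  // keep the refined position for the next finer level
        *y2 = y2_new;
      } else if (i == 0) {
        return false;  // only a failure at the finest level is fatal
      }
    }
    return true;
  }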
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker.h b/intern/libmv/libmv/tracking/pyramid_region_tracker.h
index 1f9675469f4..5fe21c95904 100644
--- a/intern/libmv/libmv/tracking/pyramid_region_tracker.h
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker.h
@@ -21,21 +21,24 @@
#ifndef LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
#define LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
-#include "libmv/image/image.h"
#include "libmv/base/scoped_ptr.h"
+#include "libmv/image/image.h"
#include "libmv/tracking/region_tracker.h"
namespace libmv {
class PyramidRegionTracker : public RegionTracker {
public:
- PyramidRegionTracker(RegionTracker *tracker, int num_levels)
+ PyramidRegionTracker(RegionTracker* tracker, int num_levels)
: tracker_(tracker), num_levels_(num_levels) {}
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
+
private:
scoped_ptr<RegionTracker> tracker_;
int num_levels_;
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc b/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc
index d90a1012237..2fcf292e404 100644
--- a/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc
@@ -19,8 +19,8 @@
// IN THE SOFTWARE.
#include "libmv/tracking/pyramid_region_tracker.h"
-#include "libmv/tracking/klt_region_tracker.h"
#include "libmv/image/image.h"
+#include "libmv/tracking/klt_region_tracker.h"
#include "testing/testing.h"
namespace libmv {
@@ -55,8 +55,7 @@ TEST(PyramidKltRegionTracker, Track) {
KltRegionTracker tracker;
tracker.half_window_size = half_window_size;
- EXPECT_FALSE(tracker.Track(image1, image2, x1, y1,
- &x2_actual, &y2_actual));
+ EXPECT_FALSE(tracker.Track(image1, image2, x1, y1, &x2_actual, &y2_actual));
}
// Verify that it works with the pyramid tracker.
@@ -64,12 +63,11 @@ TEST(PyramidKltRegionTracker, Track) {
double x2_actual = x1;
double y2_actual = y1;
- KltRegionTracker *klt_tracker = new KltRegionTracker;
+ KltRegionTracker* klt_tracker = new KltRegionTracker;
klt_tracker->half_window_size = half_window_size;
PyramidRegionTracker tracker(klt_tracker, 3);
- EXPECT_TRUE(tracker.Track(image1, image2, x1, y1,
- &x2_actual, &y2_actual));
+ EXPECT_TRUE(tracker.Track(image1, image2, x1, y1, &x2_actual, &y2_actual));
EXPECT_NEAR(x2_actual, x2, 0.001);
EXPECT_NEAR(y2_actual, y2, 0.001);
diff --git a/intern/libmv/libmv/tracking/region_tracker.h b/intern/libmv/libmv/tracking/region_tracker.h
index 4f7574df1a3..e753ac8be6c 100644
--- a/intern/libmv/libmv/tracking/region_tracker.h
+++ b/intern/libmv/libmv/tracking/region_tracker.h
@@ -37,10 +37,12 @@ class RegionTracker {
image2. If no guess is available, (\a x1, \a y1) is a good start. Returns
true on success, false otherwise
*/
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const = 0;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const = 0;
};
} // namespace libmv
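The Track() contract is that (*x2, *y2) carries the caller's best guess on entry and the refined position on exit, so callers typically seed it with the previous known position, or with (x1, y1) when nothing better is available, and branch on the boolean result. A minimal usage sketch against the interface above:

  // 'tracker' is any RegionTracker; image1/image2 are consecutive FloatImages.
  bool TrackMarker(const RegionTracker& tracker,
                   const FloatImage& image1, const FloatImage& image2,
                   double x1, double y1, double* x2, double* y2) {
    *x2 = x1;  // seed the guess with the position in the first image
    *y2 = y1;
    return tracker.Track(image1, image2, x1, y1, x2, y2);
  }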
diff --git a/intern/libmv/libmv/tracking/retrack_region_tracker.cc b/intern/libmv/libmv/tracking/retrack_region_tracker.cc
index 4d230086d28..9152078053c 100644
--- a/intern/libmv/libmv/tracking/retrack_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/retrack_region_tracker.cc
@@ -25,10 +25,12 @@
namespace libmv {
-bool RetrackRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool RetrackRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
// Track forward, getting x2 and y2.
if (!tracker_->Track(image1, image2, x1, y1, x2, y2)) {
return false;
diff --git a/intern/libmv/libmv/tracking/retrack_region_tracker.h b/intern/libmv/libmv/tracking/retrack_region_tracker.h
index ab05f320834..504cf697349 100644
--- a/intern/libmv/libmv/tracking/retrack_region_tracker.h
+++ b/intern/libmv/libmv/tracking/retrack_region_tracker.h
@@ -21,8 +21,8 @@
#ifndef LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
#define LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
-#include "libmv/image/image.h"
#include "libmv/base/scoped_ptr.h"
+#include "libmv/image/image.h"
#include "libmv/tracking/region_tracker.h"
namespace libmv {
@@ -31,13 +31,16 @@ namespace libmv {
// track that doesn't track backwards to the starting point.
class RetrackRegionTracker : public RegionTracker {
public:
- RetrackRegionTracker(RegionTracker *tracker, double tolerance)
+ RetrackRegionTracker(RegionTracker* tracker, double tolerance)
: tracker_(tracker), tolerance_(tolerance) {}
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
+
private:
scoped_ptr<RegionTracker> tracker_;
double tolerance_;
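RetrackRegionTracker's rejection rule is a forward-backward consistency check: track from image1 into image2, track the result back into image1, and accept only when the round trip lands within tolerance of the original position. A sketch of that check against the same interface; the distance criterion here is illustrative, while the exact comparison lives in retrack_region_tracker.cc:

  #include <cmath>

  // Forward-backward validation of a single track, per RetrackRegionTracker.
  bool TrackWithRetrack(const RegionTracker& tracker,
                        const FloatImage& image1, const FloatImage& image2,
                        double x1, double y1, double* x2, double* y2,
                        double tolerance) {
    // Forward: image1 -> image2.
    if (!tracker.Track(image1, image2, x1, y1, x2, y2)) {
      return false;
    }
    // Backward: image2 -> image1, starting from the forward result.
    double x1_back = *x2, y1_back = *y2;
    if (!tracker.Track(image2, image1, *x2, *y2, &x1_back, &y1_back)) {
      return false;
    }
    // Accept only if the round trip returns close to where it started.
    double dx = x1_back - x1, dy = y1_back - y1;
    return std::sqrt(dx * dx + dy * dy) < tolerance;
  }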
diff --git a/intern/libmv/libmv/tracking/track_region.cc b/intern/libmv/libmv/tracking/track_region.cc
index 895c9a1e23d..403b4088174 100644
--- a/intern/libmv/libmv/tracking/track_region.cc
+++ b/intern/libmv/libmv/tracking/track_region.cc
@@ -27,14 +27,14 @@
#include "libmv/tracking/track_region.h"
-#include <Eigen/SVD>
#include <Eigen/QR>
+#include <Eigen/SVD>
#include <iostream>
#include "ceres/ceres.h"
-#include "libmv/logging/logging.h"
+#include "libmv/image/convolve.h"
#include "libmv/image/image.h"
#include "libmv/image/sample.h"
-#include "libmv/image/convolve.h"
+#include "libmv/logging/logging.h"
#include "libmv/multiview/homography.h"
#include "libmv/numeric/numeric.h"
@@ -44,57 +44,45 @@
namespace ceres {
// A jet traits class to make it easier to work with mixed auto / numeric diff.
-template<typename T>
+template <typename T>
struct JetOps {
- static bool IsScalar() {
- return true;
- }
- static T GetScalar(const T& t) {
- return t;
- }
- static void SetScalar(const T& scalar, T* t) {
- *t = scalar;
- }
- static void ScaleDerivative(double scale_by, T *value) {
+ static bool IsScalar() { return true; }
+ static T GetScalar(const T& t) { return t; }
+ static void SetScalar(const T& scalar, T* t) { *t = scalar; }
+ static void ScaleDerivative(double scale_by, T* value) {
// For double, there is no derivative to scale.
- (void) scale_by; // Ignored.
- (void) value; // Ignored.
+ (void)scale_by; // Ignored.
+ (void)value; // Ignored.
}
};
-template<typename T, int N>
-struct JetOps<Jet<T, N> > {
- static bool IsScalar() {
- return false;
- }
- static T GetScalar(const Jet<T, N>& t) {
- return t.a;
- }
- static void SetScalar(const T& scalar, Jet<T, N>* t) {
- t->a = scalar;
- }
- static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
+template <typename T, int N>
+struct JetOps<Jet<T, N>> {
+ static bool IsScalar() { return false; }
+ static T GetScalar(const Jet<T, N>& t) { return t.a; }
+ static void SetScalar(const T& scalar, Jet<T, N>* t) { t->a = scalar; }
+ static void ScaleDerivative(double scale_by, Jet<T, N>* value) {
value->v *= scale_by;
}
};
-template<typename FunctionType, int kNumArgs, typename ArgumentType>
+template <typename FunctionType, int kNumArgs, typename ArgumentType>
struct Chain {
- static ArgumentType Rule(const FunctionType &f,
+ static ArgumentType Rule(const FunctionType& f,
const FunctionType dfdx[kNumArgs],
const ArgumentType x[kNumArgs]) {
// In the default case of scalars, there's nothing to do since there are no
// derivatives to propagate.
- (void) dfdx; // Ignored.
- (void) x; // Ignored.
+ (void)dfdx; // Ignored.
+ (void)x; // Ignored.
return f;
}
};
// XXX Add documentation here!
-template<typename FunctionType, int kNumArgs, typename T, int N>
-struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
- static Jet<T, N> Rule(const FunctionType &f,
+template <typename FunctionType, int kNumArgs, typename T, int N>
+struct Chain<FunctionType, kNumArgs, Jet<T, N>> {
+ static Jet<T, N> Rule(const FunctionType& f,
const FunctionType dfdx[kNumArgs],
const Jet<T, N> x[kNumArgs]) {
// x is itself a function of another variable ("z"); what this function
@@ -107,8 +95,8 @@ struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
}
// Map the input gradient dfdx into an Eigen row vector.
- Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
- vector_dfdx(dfdx, 1, kNumArgs);
+ Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs>> vector_dfdx(
+ dfdx, 1, kNumArgs);
// Now apply the chain rule to obtain df/dz. Combine the derivative with
// the scalar part to obtain f with full derivative information.
@@ -123,9 +111,9 @@ struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
namespace libmv {
+using ceres::Chain;
using ceres::Jet;
using ceres::JetOps;
-using ceres::Chain;
TrackRegionOptions::TrackRegionOptions()
: mode(TRANSLATION),
@@ -144,17 +132,12 @@ TrackRegionOptions::TrackRegionOptions()
namespace {
// TODO(keir): Consider adding padding.
-template<typename T>
-bool InBounds(const FloatImage &image,
- const T &x,
- const T &y) {
- return 0.0 <= x && x < image.Width() &&
- 0.0 <= y && y < image.Height();
+template <typename T>
+bool InBounds(const FloatImage& image, const T& x, const T& y) {
+ return 0.0 <= x && x < image.Width() && 0.0 <= y && y < image.Height();
}
-bool AllInBounds(const FloatImage &image,
- const double *x,
- const double *y) {
+bool AllInBounds(const FloatImage& image, const double* x, const double* y) {
for (int i = 0; i < 4; ++i) {
if (!InBounds(image, x[i], y[i])) {
return false;
@@ -166,10 +149,10 @@ bool AllInBounds(const FloatImage &image,
// Sample the image at position (x, y) but use the gradient, if present, to
// propagate derivatives from x and y. This is needed to integrate the numeric
// image gradients with Ceres's autodiff framework.
-template<typename T>
-static T SampleWithDerivative(const FloatImage &image_and_gradient,
- const T &x,
- const T &y) {
+template <typename T>
+static T SampleWithDerivative(const FloatImage& image_and_gradient,
+ const T& x,
+ const T& y) {
float scalar_x = JetOps<T>::GetScalar(x);
float scalar_y = JetOps<T>::GetScalar(y);
@@ -184,18 +167,23 @@ static T SampleWithDerivative(const FloatImage &image_and_gradient,
// For the derivative case, sample the gradient as well.
SampleLinear(image_and_gradient, scalar_y, scalar_x, sample);
}
- T xy[2] = { x, y };
+ T xy[2] = {x, y};
return Chain<float, 2, T>::Rule(sample[0], sample + 1, xy);
}
-template<typename Warp>
+template <typename Warp>
class TerminationCheckingCallback : public ceres::IterationCallback {
public:
- TerminationCheckingCallback(const TrackRegionOptions &options,
+ TerminationCheckingCallback(const TrackRegionOptions& options,
const FloatImage& image2,
- const Warp &warp,
- const double *x1, const double *y1)
- : options_(options), image2_(image2), warp_(warp), x1_(x1), y1_(y1),
+ const Warp& warp,
+ const double* x1,
+ const double* y1)
+ : options_(options),
+ image2_(image2),
+ warp_(warp),
+ x1_(x1),
+ y1_(y1),
have_last_successful_step_(false) {}
virtual ceres::CallbackReturnType operator()(
@@ -229,7 +217,7 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
for (int i = 0; i < 4; ++i) {
double dx = x2[i] - x2_last_successful_[i];
double dy = y2[i] - y2_last_successful_[i];
- double change_pixels = dx*dx + dy*dy;
+ double change_pixels = dx * dx + dy * dy;
if (change_pixels > max_change_pixels) {
max_change_pixels = change_pixels;
}
@@ -255,27 +243,27 @@ class TerminationCheckingCallback : public ceres::IterationCallback {
}
private:
- const TrackRegionOptions &options_;
- const FloatImage &image2_;
- const Warp &warp_;
- const double *x1_;
- const double *y1_;
+ const TrackRegionOptions& options_;
+ const FloatImage& image2_;
+ const Warp& warp_;
+ const double* x1_;
+ const double* y1_;
bool have_last_successful_step_;
double x2_last_successful_[4];
double y2_last_successful_[4];
};
-template<typename Warp>
+template <typename Warp>
class PixelDifferenceCostFunctor {
public:
- PixelDifferenceCostFunctor(const TrackRegionOptions &options,
- const FloatImage &image_and_gradient1,
- const FloatImage &image_and_gradient2,
- const Mat3 &canonical_to_image1,
+ PixelDifferenceCostFunctor(const TrackRegionOptions& options,
+ const FloatImage& image_and_gradient1,
+ const FloatImage& image_and_gradient2,
+ const Mat3& canonical_to_image1,
int num_samples_x,
int num_samples_y,
- const Warp &warp)
+ const Warp& warp)
: options_(options),
image_and_gradient1_(image_and_gradient1),
image_and_gradient2_(image_and_gradient2),
@@ -322,8 +310,8 @@ class PixelDifferenceCostFunctor {
src_mean_ /= num_samples;
}
- template<typename T>
- bool operator()(const T *warp_parameters, T *residuals) const {
+ template <typename T>
+ bool operator()(const T* warp_parameters, T* residuals) const {
if (options_.image1_mask != NULL) {
VLOG(2) << "Using a mask.";
}
@@ -333,8 +321,7 @@ class PixelDifferenceCostFunctor {
T dst_mean = T(1.0);
if (options_.use_normalized_intensities) {
- ComputeNormalizingCoefficient(warp_parameters,
- &dst_mean);
+ ComputeNormalizingCoefficient(warp_parameters, &dst_mean);
}
int cursor = 0;
@@ -374,9 +361,8 @@ class PixelDifferenceCostFunctor {
&image2_position[1]);
// Sample the destination, propagating derivatives.
- T dst_sample = SampleWithDerivative(image_and_gradient2_,
- image2_position[0],
- image2_position[1]);
+ T dst_sample = SampleWithDerivative(
+ image_and_gradient2_, image2_position[0], image2_position[1]);
// Sample the source. This is made complicated by ESM mode.
T src_sample;
@@ -386,8 +372,8 @@ class PixelDifferenceCostFunctor {
// better convergence. Copy the derivative of the warp parameters
// onto the jets for the image1 position. This is the ESM hack.
T image1_position_jet[2] = {
- image2_position[0], // Order is x, y. This matches the
- image2_position[1] // derivative order in the patch.
+ image2_position[0], // Order is x, y. This matches the
+ image2_position[1] // derivative order in the patch.
};
JetOps<T>::SetScalar(image1_position[0], image1_position_jet + 0);
JetOps<T>::SetScalar(image1_position[1], image1_position_jet + 1);
@@ -433,9 +419,9 @@ class PixelDifferenceCostFunctor {
}
// For normalized matching, the average and
- template<typename T>
- void ComputeNormalizingCoefficient(const T *warp_parameters,
- T *dst_mean) const {
+ template <typename T>
+ void ComputeNormalizingCoefficient(const T* warp_parameters,
+ T* dst_mean) const {
*dst_mean = T(0.0);
double num_samples = 0.0;
for (int r = 0; r < num_samples_y_; ++r) {
@@ -462,14 +448,12 @@ class PixelDifferenceCostFunctor {
&image2_position[0],
&image2_position[1]);
-
// Sample the destination, propagating derivatives.
// TODO(keir): This accumulation can, surprisingly, be done as a
// pre-pass by using integral images. This is complicated by the need
// to store the jets in the integral image, but it is possible.
- T dst_sample = SampleWithDerivative(image_and_gradient2_,
- image2_position[0],
- image2_position[1]);
+ T dst_sample = SampleWithDerivative(
+ image_and_gradient2_, image2_position[0], image2_position[1]);
// Weight the sample by the mask, if one is present.
if (options_.image1_mask != NULL) {
@@ -486,10 +470,10 @@ class PixelDifferenceCostFunctor {
// TODO(keir): Consider also computing the cost here.
double PearsonProductMomentCorrelationCoefficient(
- const double *warp_parameters) const {
+ const double* warp_parameters) const {
for (int i = 0; i < Warp::NUM_PARAMETERS; ++i) {
- VLOG(2) << "Correlation warp_parameters[" << i << "]: "
- << warp_parameters[i];
+ VLOG(2) << "Correlation warp_parameters[" << i
+ << "]: " << warp_parameters[i];
}
// The single-pass PMCC computation is somewhat numerically unstable, but
@@ -537,9 +521,9 @@ class PixelDifferenceCostFunctor {
}
sX += x;
sY += y;
- sXX += x*x;
- sYY += y*y;
- sXY += x*y;
+ sXX += x * x;
+ sYY += y * y;
+ sXY += x * y;
}
}
// Normalize.
@@ -549,25 +533,24 @@ class PixelDifferenceCostFunctor {
sYY /= num_samples;
sXY /= num_samples;
- double var_x = sXX - sX*sX;
- double var_y = sYY - sY*sY;
- double covariance_xy = sXY - sX*sY;
+ double var_x = sXX - sX * sX;
+ double var_y = sYY - sY * sY;
+ double covariance_xy = sXY - sX * sY;
double correlation = covariance_xy / sqrt(var_x * var_y);
- LG << "Covariance xy: " << covariance_xy
- << ", var 1: " << var_x << ", var 2: " << var_y
- << ", correlation: " << correlation;
+ LG << "Covariance xy: " << covariance_xy << ", var 1: " << var_x
+ << ", var 2: " << var_y << ", correlation: " << correlation;
return correlation;
}
private:
- const TrackRegionOptions &options_;
- const FloatImage &image_and_gradient1_;
- const FloatImage &image_and_gradient2_;
- const Mat3 &canonical_to_image1_;
+ const TrackRegionOptions& options_;
+ const FloatImage& image_and_gradient1_;
+ const FloatImage& image_and_gradient2_;
+ const Mat3& canonical_to_image1_;
int num_samples_x_;
int num_samples_y_;
- const Warp &warp_;
+ const Warp& warp_;
double src_mean_;
FloatImage pattern_and_gradient_;
@@ -579,15 +562,15 @@ class PixelDifferenceCostFunctor {
FloatImage pattern_mask_;
};
-template<typename Warp>
+template <typename Warp>
class WarpRegularizingCostFunctor {
public:
- WarpRegularizingCostFunctor(const TrackRegionOptions &options,
- const double *x1,
- const double *y1,
- const double *x2_original,
- const double *y2_original,
- const Warp &warp)
+ WarpRegularizingCostFunctor(const TrackRegionOptions& options,
+ const double* x1,
+ const double* y1,
+ const double* x2_original,
+ const double* y2_original,
+ const Warp& warp)
: options_(options),
x1_(x1),
y1_(y1),
@@ -606,11 +589,11 @@ class WarpRegularizingCostFunctor {
original_centroid_[1] /= 4;
}
- template<typename T>
- bool operator()(const T *warp_parameters, T *residuals) const {
- T dst_centroid[2] = { T(0.0), T(0.0) };
+ template <typename T>
+ bool operator()(const T* warp_parameters, T* residuals) const {
+ T dst_centroid[2] = {T(0.0), T(0.0)};
for (int i = 0; i < 4; ++i) {
- T image1_position[2] = { T(x1_[i]), T(y1_[i]) };
+ T image1_position[2] = {T(x1_[i]), T(y1_[i])};
T image2_position[2];
warp_.Forward(warp_parameters,
T(x1_[i]),
@@ -643,28 +626,30 @@ class WarpRegularizingCostFunctor {
return true;
}
- const TrackRegionOptions &options_;
- const double *x1_;
- const double *y1_;
- const double *x2_original_;
- const double *y2_original_;
+ const TrackRegionOptions& options_;
+ const double* x1_;
+ const double* y1_;
+ const double* x2_original_;
+ const double* y2_original_;
double original_centroid_[2];
- const Warp &warp_;
+ const Warp& warp_;
};
// Compute the warp from rectangular coordinates, where one corner is the
// origin, and the opposite corner is at (num_samples_x, num_samples_y).
-Mat3 ComputeCanonicalHomography(const double *x1,
- const double *y1,
+Mat3 ComputeCanonicalHomography(const double* x1,
+ const double* y1,
int num_samples_x,
int num_samples_y) {
Mat canonical(2, 4);
- canonical << 0, num_samples_x, num_samples_x, 0,
- 0, 0, num_samples_y, num_samples_y;
+ canonical << 0, num_samples_x, num_samples_x, 0, 0, 0, num_samples_y,
+ num_samples_y;
Mat xy1(2, 4);
+ // clang-format off
xy1 << x1[0], x1[1], x1[2], x1[3],
y1[0], y1[1], y1[2], y1[3];
+ // clang-format on
Mat3 H;
if (!Homography2DFromCorrespondencesLinear(canonical, xy1, &H, 1e-12)) {
@@ -675,7 +660,7 @@ Mat3 ComputeCanonicalHomography(const double *x1,
class Quad {
public:
- Quad(const double *x, const double *y) : x_(x), y_(y) {
+ Quad(const double* x, const double* y) : x_(x), y_(y) {
// Compute the centroid and store it.
centroid_ = Vec2(0.0, 0.0);
for (int i = 0; i < 4; ++i) {
@@ -685,9 +670,7 @@ class Quad {
}
// The centroid of the four points representing the quad.
- const Vec2& Centroid() const {
- return centroid_;
- }
+ const Vec2& Centroid() const { return centroid_; }
// The average magnitude of the four points relative to the centroid.
double Scale() const {
@@ -703,22 +686,24 @@ class Quad {
}
private:
- const double *x_;
- const double *y_;
+ const double* x_;
+ const double* y_;
Vec2 centroid_;
};
struct TranslationWarp {
- TranslationWarp(const double *x1, const double *y1,
- const double *x2, const double *y2) {
+ TranslationWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2) {
Vec2 t = Quad(x2, y2).Centroid() - Quad(x1, y1).Centroid();
parameters[0] = t[0];
parameters[1] = t[1];
}
- template<typename T>
- void Forward(const T *warp_parameters,
- const T &x1, const T& y1, T *x2, T* y2) const {
+ template <typename T>
+ void Forward(
+ const T* warp_parameters, const T& x1, const T& y1, T* x2, T* y2) const {
*x2 = x1 + warp_parameters[0];
*y2 = y1 + warp_parameters[1];
}
@@ -729,8 +714,10 @@ struct TranslationWarp {
};
struct TranslationScaleWarp {
- TranslationScaleWarp(const double *x1, const double *y1,
- const double *x2, const double *y2)
+ TranslationScaleWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2)
: q1(x1, y1) {
Quad q2(x2, y2);
@@ -746,9 +733,9 @@ struct TranslationScaleWarp {
// The strange way of parameterizing the translation and scaling is to make
// the knobs that the optimizer sees easy to adjust. This is less important
// for the scaling case than the rotation case.
- template<typename T>
- void Forward(const T *warp_parameters,
- const T &x1, const T& y1, T *x2, T* y2) const {
+ template <typename T>
+ void Forward(
+ const T* warp_parameters, const T& x1, const T& y1, T* x2, T* y2) const {
// Make the centroid of Q1 the origin.
const T x1_origin = x1 - q1.Centroid()(0);
const T y1_origin = y1 - q1.Centroid()(1);
@@ -775,15 +762,17 @@ struct TranslationScaleWarp {
};
// Assumes the given points are already zero-centroid and the same size.
-Mat2 OrthogonalProcrustes(const Mat2 &correlation_matrix) {
+Mat2 OrthogonalProcrustes(const Mat2& correlation_matrix) {
Eigen::JacobiSVD<Mat2> svd(correlation_matrix,
Eigen::ComputeFullU | Eigen::ComputeFullV);
return svd.matrixV() * svd.matrixU().transpose();
}
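The warps below feed OrthogonalProcrustes with a 2x2 correlation matrix built from corresponding zero-centroid corners; that assembly happens in constructor bodies outside this hunk, so the following is a hedged sketch under the standard Kabsch convention (Eigen assumed, helper name made up):

  #include <Eigen/Dense>

  // Kabsch/Procrustes: for zero-centroid point sets p1 and p2, build
  // M = sum_i p1_i * p2_i^T. With the SVD M = U * S * V^T, the rotation that
  // best maps p1 onto p2 in least squares is R = V * U^T, which is exactly
  // what OrthogonalProcrustes() returns. The determinant-sign correction for
  // reflections is omitted here for brevity.
  Eigen::Matrix2d RotationBetween(const Eigen::Matrix2Xd& p1,
                                  const Eigen::Matrix2Xd& p2) {
    Eigen::Matrix2d correlation = Eigen::Matrix2d::Zero();
    for (int i = 0; i < p1.cols(); ++i) {
      correlation += p1.col(i) * p2.col(i).transpose();
    }
    Eigen::JacobiSVD<Eigen::Matrix2d> svd(
        correlation, Eigen::ComputeFullU | Eigen::ComputeFullV);
    return svd.matrixV() * svd.matrixU().transpose();
  }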
struct TranslationRotationWarp {
- TranslationRotationWarp(const double *x1, const double *y1,
- const double *x2, const double *y2)
+ TranslationRotationWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2)
: q1(x1, y1) {
Quad q2(x2, y2);
@@ -816,9 +805,9 @@ struct TranslationRotationWarp {
//
// Instead, use the parameterization below that offers a parameterization
// that exposes the degrees of freedom in a way amenable to optimization.
- template<typename T>
- void Forward(const T *warp_parameters,
- const T &x1, const T& y1, T *x2, T* y2) const {
+ template <typename T>
+ void Forward(
+ const T* warp_parameters, const T& x1, const T& y1, T* x2, T* y2) const {
// Make the centroid of Q1 the origin.
const T x1_origin = x1 - q1.Centroid()(0);
const T y1_origin = y1 - q1.Centroid()(1);
@@ -847,8 +836,10 @@ struct TranslationRotationWarp {
};
struct TranslationRotationScaleWarp {
- TranslationRotationScaleWarp(const double *x1, const double *y1,
- const double *x2, const double *y2)
+ TranslationRotationScaleWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2)
: q1(x1, y1) {
Quad q2(x2, y2);
@@ -884,9 +875,9 @@ struct TranslationRotationScaleWarp {
//
// Instead, use the parameterization below that offers a parameterization
// that exposes the degrees of freedom in a way amenable to optimization.
- template<typename T>
- void Forward(const T *warp_parameters,
- const T &x1, const T& y1, T *x2, T* y2) const {
+ template <typename T>
+ void Forward(
+ const T* warp_parameters, const T& x1, const T& y1, T* x2, T* y2) const {
// Make the centroid of Q1 the origin.
const T x1_origin = x1 - q1.Centroid()(0);
const T y1_origin = y1 - q1.Centroid()(1);
@@ -921,8 +912,10 @@ struct TranslationRotationScaleWarp {
};
struct AffineWarp {
- AffineWarp(const double *x1, const double *y1,
- const double *x2, const double *y2)
+ AffineWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2)
: q1(x1, y1) {
Quad q2(x2, y2);
@@ -938,8 +931,8 @@ struct AffineWarp {
Vec2 v1 = q1.CornerRelativeToCentroid(i);
Vec2 v2 = q2.CornerRelativeToCentroid(i);
- Q1.row(2 * i + 0) << v1[0], v1[1], 0, 0;
- Q1.row(2 * i + 1) << 0, 0, v1[0], v1[1];
+ Q1.row(2 * i + 0) << v1[0], v1[1], 0, 0;
+ Q1.row(2 * i + 1) << 0, 0, v1[0], v1[1];
Q2(2 * i + 0) = v2[0];
Q2(2 * i + 1) = v2[1];
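Written out, the rows stacked above give two equations per corner for the unknown 2x2 affine block. A hedged sketch of the system and one way to solve it with Eigen follows; the actual solve in AffineWarp is outside this hunk, so the helper name and solver choice are illustrative:

  #include <Eigen/Dense>

  // With v1 = (v1x, v1y) a corner of quad 1 and v2 = (v2x, v2y) the matching
  // corner of quad 2 (both relative to their centroids), each corner adds:
  //   a * v1x + b * v1y = v2x
  //   c * v1x + d * v1y = v2y
  // so solving Q1 * [a b c d]^T = Q2 in least squares yields the affine block
  // [a b; c d] that maps corner offsets of quad 1 onto quad 2.
  Eigen::Vector4d SolveAffineBlock(const Eigen::MatrixXd& Q1,   // 8 x 4
                                   const Eigen::VectorXd& Q2) { // 8 x 1
    return Q1.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Q2);
  }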
@@ -957,8 +950,8 @@ struct AffineWarp {
}
// See comments in other parameterizations about why the centroid is used.
- template<typename T>
- void Forward(const T *p, const T &x1, const T& y1, T *x2, T* y2) const {
+ template <typename T>
+ void Forward(const T* p, const T& x1, const T& y1, T* x2, T* y2) const {
// Make the centroid of Q1 the origin.
const T x1_origin = x1 - q1.Centroid()(0);
const T y1_origin = y1 - q1.Centroid()(1);
@@ -985,15 +978,21 @@ struct AffineWarp {
};
struct HomographyWarp {
- HomographyWarp(const double *x1, const double *y1,
- const double *x2, const double *y2) {
+ HomographyWarp(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2) {
Mat quad1(2, 4);
+ // clang-format off
quad1 << x1[0], x1[1], x1[2], x1[3],
y1[0], y1[1], y1[2], y1[3];
+ // clang-format on
Mat quad2(2, 4);
+ // clang-format off
quad2 << x2[0], x2[1], x2[2], x2[3],
y2[0], y2[1], y2[2], y2[3];
+ // clang-format on
Mat3 H;
if (!Homography2DFromCorrespondencesLinear(quad1, quad2, &H, 1e-12)) {
@@ -1014,13 +1013,12 @@ struct HomographyWarp {
}
}
- template<typename T>
- static void Forward(const T *p,
- const T &x1, const T& y1, T *x2, T* y2) {
+ template <typename T>
+ static void Forward(const T* p, const T& x1, const T& y1, T* x2, T* y2) {
// Homography warp with manual 3x3 matrix multiply.
- const T xx2 = (1.0 + p[0]) * x1 + p[1] * y1 + p[2];
- const T yy2 = p[3] * x1 + (1.0 + p[4]) * y1 + p[5];
- const T zz2 = p[6] * x1 + p[7] * y1 + 1.0;
+ const T xx2 = (1.0 + p[0]) * x1 + p[1] * y1 + p[2];
+ const T yy2 = p[3] * x1 + (1.0 + p[4]) * y1 + p[5];
+ const T zz2 = p[6] * x1 + p[7] * y1 + 1.0;
*x2 = xx2 / zz2;
*y2 = yy2 / zz2;
}
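The manual multiply above is just a 3x3 homography parameterized as an offset from the identity with the bottom-right entry pinned to 1. A small equivalence sketch, assuming Eigen (the helper name is illustrative):

  #include <Eigen/Dense>

  // H is stored as eight offsets from the identity, with H(2,2) fixed to 1 to
  // remove the overall scale ambiguity of a homography:
  //   H = [ 1 + p0    p1      p2 ]
  //       [   p3    1 + p4    p5 ]
  //       [   p6      p7      1  ]
  // Forward() then returns (x2, y2) = (Hx / Hz, Hy / Hz) for (x1, y1, 1).
  Eigen::Vector2d ApplyHomography(const double* p, double x1, double y1) {
    Eigen::Matrix3d H;
    H << 1.0 + p[0], p[1], p[2],
         p[3], 1.0 + p[4], p[5],
         p[6], p[7], 1.0;
    return (H * Eigen::Vector3d(x1, y1, 1.0)).hnormalized();
  }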
@@ -1036,11 +1034,14 @@ struct HomographyWarp {
//
// The idea is to take the maximum x or y distance. This may be oversampling.
// TODO(keir): Investigate the various choices; perhaps average is better?
-void PickSampling(const double *x1, const double *y1,
- const double *x2, const double *y2,
- int *num_samples_x, int *num_samples_y) {
- (void) x2; // Ignored.
- (void) y2; // Ignored.
+void PickSampling(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2,
+ int* num_samples_x,
+ int* num_samples_y) {
+ (void)x2; // Ignored.
+ (void)y2; // Ignored.
Vec2 a0(x1[0], y1[0]);
Vec2 a1(x1[1], y1[1]);
@@ -1053,18 +1054,10 @@ void PickSampling(const double *x1, const double *y1,
Vec2 b3(x1[3], y1[3]);
double x_dimensions[4] = {
- (a1 - a0).norm(),
- (a3 - a2).norm(),
- (b1 - b0).norm(),
- (b3 - b2).norm()
- };
+ (a1 - a0).norm(), (a3 - a2).norm(), (b1 - b0).norm(), (b3 - b2).norm()};
double y_dimensions[4] = {
- (a3 - a0).norm(),
- (a1 - a2).norm(),
- (b3 - b0).norm(),
- (b1 - b2).norm()
- };
+ (a3 - a0).norm(), (a1 - a2).norm(), (b3 - b0).norm(), (b1 - b2).norm()};
const double kScaleFactor = 1.0;
*num_samples_x = static_cast<int>(
kScaleFactor * *std::max_element(x_dimensions, x_dimensions + 4));
@@ -1074,17 +1067,18 @@ void PickSampling(const double *x1, const double *y1,
<< ", num_samples_y: " << *num_samples_y;
}
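As a quick sanity check of the heuristic above, consider an axis-aligned quad. This is a hedged usage sketch: PickSampling lives in an anonymous namespace, so it would only make sense inside this translation unit, and the helper name is made up.

  void CheckSamplingForAxisAlignedQuad() {
    // A 20x10 quad: every x edge has length 20 and every y edge length 10,
    // so with kScaleFactor = 1.0 the maxima give num_samples_x == 20 and
    // num_samples_y == 10.
    double x1[4] = {0, 20, 20, 0};
    double y1[4] = {0, 0, 10, 10};
    int num_samples_x = 0, num_samples_y = 0;
    PickSampling(x1, y1, x1, y1, &num_samples_x, &num_samples_y);
  }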
-bool SearchAreaTooBigForDescent(const FloatImage &image2,
- const double *x2, const double *y2) {
+bool SearchAreaTooBigForDescent(const FloatImage& image2,
+ const double* x2,
+ const double* y2) {
// TODO(keir): Check the bounds and enable only when it makes sense.
- (void) image2; // Ignored.
- (void) x2; // Ignored.
- (void) y2; // Ignored.
+ (void)image2; // Ignored.
+ (void)x2; // Ignored.
+ (void)y2; // Ignored.
return true;
}
-bool PointOnRightHalfPlane(const Vec2 &a, const Vec2 &b, double x, double y) {
+bool PointOnRightHalfPlane(const Vec2& a, const Vec2& b, double x, double y) {
Vec2 ba = b - a;
return ((Vec2(x, y) - b).transpose() * Vec2(-ba.y(), ba.x())) > 0;
}
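Only the start of PointInQuad appears in the next hunk; the idea it implements is that, for a convex quad with consistent winding, a point is inside exactly when it falls on the same side of every directed edge. A hedged sketch of that combination, reusing the Vec2 typedef and the predicate above (an illustration of the idea, not the exact upstream body):

  bool InsideConvexQuad(const Vec2* corners, double x, double y) {
    // Accept the point if it lies on the same side of all four directed
    // edges; tracking either winding keeps the sketch independent of the
    // corner ordering.
    bool all_right = true;
    bool all_left = true;
    for (int i = 0; i < 4; ++i) {
      const bool right =
          PointOnRightHalfPlane(corners[i], corners[(i + 1) % 4], x, y);
      all_right = all_right && right;
      all_left = all_left && !right;
    }
    return all_right || all_left;
  }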
@@ -1102,7 +1096,7 @@ bool PointOnRightHalfPlane(const Vec2 &a, const Vec2 &b, double x, double y) {
// y
//
// The implementation does up to four half-plane comparisons.
-bool PointInQuad(const double *xs, const double *ys, double x, double y) {
+bool PointInQuad(const double* xs, const double* ys, double x, double y) {
Vec2 a0(xs[0], ys[0]);
Vec2 a1(xs[1], ys[1]);
Vec2 a2(xs[2], ys[2]);
@@ -1116,24 +1110,27 @@ bool PointInQuad(const double *xs, const double *ys, double x, double y) {
// This makes it possible to map between Eigen float arrays and FloatImage
// without using conversions.
-typedef Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> FloatArray;
+typedef Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
+ FloatArray;
// This creates a pattern in the frame of image2, from the pixels in image1,
// based on the initial guess represented by the two quads x1, y1, and x2, y2.
-template<typename Warp>
-void CreateBrutePattern(const double *x1, const double *y1,
- const double *x2, const double *y2,
- const FloatImage &image1,
- const FloatImage *image1_mask,
- FloatArray *pattern,
- FloatArray *mask,
- int *origin_x,
- int *origin_y) {
+template <typename Warp>
+void CreateBrutePattern(const double* x1,
+ const double* y1,
+ const double* x2,
+ const double* y2,
+ const FloatImage& image1,
+ const FloatImage* image1_mask,
+ FloatArray* pattern,
+ FloatArray* mask,
+ int* origin_x,
+ int* origin_y) {
// Get integer bounding box of quad2 in image2.
int min_x = static_cast<int>(floor(*std::min_element(x2, x2 + 4)));
int min_y = static_cast<int>(floor(*std::min_element(y2, y2 + 4)));
- int max_x = static_cast<int>(ceil (*std::max_element(x2, x2 + 4)));
- int max_y = static_cast<int>(ceil (*std::max_element(y2, y2 + 4)));
+ int max_x = static_cast<int>(ceil(*std::max_element(x2, x2 + 4)));
+ int max_y = static_cast<int>(ceil(*std::max_element(y2, y2 + 4)));
int w = max_x - min_x;
int h = max_y - min_y;
@@ -1154,9 +1151,8 @@ void CreateBrutePattern(const double *x1, const double *y1,
double dst_y = r;
double src_x;
double src_y;
- inverse_warp.Forward(inverse_warp.parameters,
- dst_x, dst_y,
- &src_x, &src_y);
+ inverse_warp.Forward(
+ inverse_warp.parameters, dst_x, dst_y, &src_x, &src_y);
if (PointInQuad(x1, y1, src_x, src_y)) {
(*pattern)(i, j) = SampleLinear(image1, src_y, src_x);
@@ -1191,24 +1187,32 @@ void CreateBrutePattern(const double *x1, const double *y1,
// pattern, when doing brute initialization. Unfortunately that implies a
// totally different warping interface, since access to more than the source
// and current destination frame is necessary.
-template<typename Warp>
-bool BruteTranslationOnlyInitialize(const FloatImage &image1,
- const FloatImage *image1_mask,
- const FloatImage &image2,
+template <typename Warp>
+bool BruteTranslationOnlyInitialize(const FloatImage& image1,
+ const FloatImage* image1_mask,
+ const FloatImage& image2,
const int num_extra_points,
const bool use_normalized_intensities,
- const double *x1, const double *y1,
- double *x2, double *y2) {
+ const double* x1,
+ const double* y1,
+ double* x2,
+ double* y2) {
// Create the pattern to match in the space of image2, assuming our initial
// guess isn't too far from the template in image1. If there is no image1
// mask, then the resulting mask is binary.
FloatArray pattern;
FloatArray mask;
int origin_x = -1, origin_y = -1;
- CreateBrutePattern<Warp>(x1, y1, x2, y2,
- image1, image1_mask,
- &pattern, &mask,
- &origin_x, &origin_y);
+ CreateBrutePattern<Warp>(x1,
+ y1,
+ x2,
+ y2,
+ image1,
+ image1_mask,
+ &pattern,
+ &mask,
+ &origin_x,
+ &origin_y);
// For normalization, premultiply the pattern by the inverse pattern mean.
double mask_sum = 1.0;
@@ -1251,8 +1255,10 @@ bool BruteTranslationOnlyInitialize(const FloatImage &image1,
// instead, reducing the mean calculation to an O(1) operation.
double inverse_search_mean =
mask_sum / ((mask * search.block(r, c, h, w)).sum());
- sad = (mask * (pattern - (search.block(r, c, h, w) *
- inverse_search_mean))).abs().sum();
+ sad = (mask *
+ (pattern - (search.block(r, c, h, w) * inverse_search_mean)))
+ .abs()
+ .sum();
} else {
sad = (mask * (pattern - search.block(r, c, h, w))).abs().sum();
}
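The normalized branch above divides each candidate block by its masked mean before taking the SAD, which cancels a constant gain change between the two frames (the pattern was premultiplied by its own inverse mean earlier in the function). A small hedged Eigen sketch of the same comparison on equally sized arrays, with illustrative names:

  #include <Eigen/Dense>

  typedef Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
      FloatArray;

  // Masked, mean-normalized sum of absolute differences. The pattern is
  // assumed to be premultiplied by the inverse of its own masked mean, as in
  // BruteTranslationOnlyInitialize() above.
  float NormalizedSad(const FloatArray& pattern,
                      const FloatArray& block,
                      const FloatArray& mask) {
    const float mask_sum = mask.sum();
    const float inverse_block_mean = mask_sum / (mask * block).sum();
    return (mask * (pattern - block * inverse_block_mean)).abs().sum();
  }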
@@ -1274,9 +1280,8 @@ bool BruteTranslationOnlyInitialize(const FloatImage &image1,
<< "best_c: " << best_c << ", best_r: " << best_r << ", "
<< "origin_x: " << origin_x << ", origin_y: " << origin_y << ", "
<< "dc: " << (best_c - origin_x) << ", "
- << "dr: " << (best_r - origin_y)
- << ", tried " << ((image2.Height() - h) * (image2.Width() - w))
- << " shifts.";
+ << "dr: " << (best_r - origin_y) << ", tried "
+ << ((image2.Height() - h) * (image2.Width() - w)) << " shifts.";
// Apply the shift.
for (int i = 0; i < 4 + num_extra_points; ++i) {
@@ -1286,8 +1291,10 @@ bool BruteTranslationOnlyInitialize(const FloatImage &image1,
return true;
}
-void CopyQuad(double *src_x, double *src_y,
- double *dst_x, double *dst_y,
+void CopyQuad(double* src_x,
+ double* src_y,
+ double* dst_x,
+ double* dst_y,
int num_extra_points) {
for (int i = 0; i < 4 + num_extra_points; ++i) {
dst_x[i] = src_x[i];
@@ -1297,16 +1304,18 @@ void CopyQuad(double *src_x, double *src_y,
} // namespace
-template<typename Warp>
-void TemplatedTrackRegion(const FloatImage &image1,
- const FloatImage &image2,
- const double *x1, const double *y1,
- const TrackRegionOptions &options,
- double *x2, double *y2,
- TrackRegionResult *result) {
+template <typename Warp>
+void TemplatedTrackRegion(const FloatImage& image1,
+ const FloatImage& image2,
+ const double* x1,
+ const double* y1,
+ const TrackRegionOptions& options,
+ double* x2,
+ double* y2,
+ TrackRegionResult* result) {
for (int i = 0; i < 4 + options.num_extra_points; ++i) {
- LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); guess ("
- << x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i]) << ", "
+ LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); guess (" << x2[i]
+ << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i]) << ", "
<< (y2[i] - y1[i]) << ").";
}
@@ -1322,9 +1331,14 @@ void TemplatedTrackRegion(const FloatImage &image1,
double y2_first_try[5];
CopyQuad(x2, y2, x2_first_try, y2_first_try, options.num_extra_points);
- TemplatedTrackRegion<Warp>(image1, image2,
- x1, y1, modified_options,
- x2_first_try, y2_first_try, result);
+ TemplatedTrackRegion<Warp>(image1,
+ image2,
+ x1,
+ y1,
+ modified_options,
+ x2_first_try,
+ y2_first_try,
+ result);
// Of the things that can happen in the first pass, don't try the brute
// pass (and second attempt) if the error is one of the terminations below.
@@ -1368,22 +1382,25 @@ void TemplatedTrackRegion(const FloatImage &image1,
// Prepare the image and gradient.
Array3Df image_and_gradient1;
Array3Df image_and_gradient2;
- BlurredImageAndDerivativesChannels(image1, options.sigma,
- &image_and_gradient1);
- BlurredImageAndDerivativesChannels(image2, options.sigma,
- &image_and_gradient2);
+ BlurredImageAndDerivativesChannels(
+ image1, options.sigma, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(
+ image2, options.sigma, &image_and_gradient2);
// Possibly do a brute-force translation-only initialization.
if (SearchAreaTooBigForDescent(image2, x2, y2) &&
options.use_brute_initialization) {
LG << "Running brute initialization...";
- bool found_any_alignment = BruteTranslationOnlyInitialize<Warp>(
- image_and_gradient1,
- options.image1_mask,
- image2,
- options.num_extra_points,
- options.use_normalized_intensities,
- x1, y1, x2, y2);
+ bool found_any_alignment =
+ BruteTranslationOnlyInitialize<Warp>(image_and_gradient1,
+ options.image1_mask,
+ image2,
+ options.num_extra_points,
+ options.use_normalized_intensities,
+ x1,
+ y1,
+ x2,
+ y2);
if (!found_any_alignment) {
LG << "Brute failed to find an alignment; pattern too small. "
<< "Failing entire track operation.";
@@ -1391,9 +1408,9 @@ void TemplatedTrackRegion(const FloatImage &image1,
return;
}
for (int i = 0; i < 4; ++i) {
- LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); brute ("
- << x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i])
- << ", " << (y2[i] - y1[i]) << ").";
+ LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); brute (" << x2[i]
+ << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i]) << ", "
+ << (y2[i] - y1[i]) << ").";
}
}
@@ -1408,14 +1425,13 @@ void TemplatedTrackRegion(const FloatImage &image1,
PickSampling(x1, y1, x2, y2, &num_samples_x, &num_samples_y);
// Compute the warp from rectangular coordinates.
- Mat3 canonical_homography = ComputeCanonicalHomography(x1, y1,
- num_samples_x,
- num_samples_y);
+ Mat3 canonical_homography =
+ ComputeCanonicalHomography(x1, y1, num_samples_x, num_samples_y);
ceres::Problem problem;
// Construct the warp cost function. AutoDiffCostFunction takes ownership.
- PixelDifferenceCostFunctor<Warp> *pixel_difference_cost_function =
+ PixelDifferenceCostFunctor<Warp>* pixel_difference_cost_function =
new PixelDifferenceCostFunctor<Warp>(options,
image_and_gradient1,
image_and_gradient2,
@@ -1424,28 +1440,24 @@ void TemplatedTrackRegion(const FloatImage &image1,
num_samples_y,
warp);
problem.AddResidualBlock(
- new ceres::AutoDiffCostFunction<
- PixelDifferenceCostFunctor<Warp>,
- ceres::DYNAMIC,
- Warp::NUM_PARAMETERS>(pixel_difference_cost_function,
- num_samples_x * num_samples_y),
- NULL,
- warp.parameters);
+ new ceres::AutoDiffCostFunction<PixelDifferenceCostFunctor<Warp>,
+ ceres::DYNAMIC,
+ Warp::NUM_PARAMETERS>(
+ pixel_difference_cost_function, num_samples_x * num_samples_y),
+ NULL,
+ warp.parameters);
// Construct the regularizing cost function
if (options.regularization_coefficient != 0.0) {
- WarpRegularizingCostFunctor<Warp> *regularizing_warp_cost_function =
- new WarpRegularizingCostFunctor<Warp>(options,
- x1, y2,
- x2_original,
- y2_original,
- warp);
+ WarpRegularizingCostFunctor<Warp>* regularizing_warp_cost_function =
+ new WarpRegularizingCostFunctor<Warp>(
+ options, x1, y2, x2_original, y2_original, warp);
problem.AddResidualBlock(
- new ceres::AutoDiffCostFunction<
- WarpRegularizingCostFunctor<Warp>,
- 8 /* num_residuals */,
- Warp::NUM_PARAMETERS>(regularizing_warp_cost_function),
+ new ceres::AutoDiffCostFunction<WarpRegularizingCostFunctor<Warp>,
+ 8 /* num_residuals */,
+ Warp::NUM_PARAMETERS>(
+ regularizing_warp_cost_function),
NULL,
warp.parameters);
}
@@ -1488,10 +1500,10 @@ void TemplatedTrackRegion(const FloatImage &image1,
return;
}
-#define HANDLE_TERMINATION(termination_enum) \
- if (summary.termination_type == ceres::termination_enum) { \
- result->termination = TrackRegionResult::termination_enum; \
- return; \
+#define HANDLE_TERMINATION(termination_enum) \
+ if (summary.termination_type == ceres::termination_enum) { \
+ result->termination = TrackRegionResult::termination_enum; \
+ return; \
}
// Avoid computing correlation for tracking failures.
@@ -1499,8 +1511,9 @@ void TemplatedTrackRegion(const FloatImage &image1,
// Otherwise, run a final correlation check.
if (options.minimum_correlation > 0.0) {
- result->correlation = pixel_difference_cost_function->
- PearsonProductMomentCorrelationCoefficient(warp.parameters);
+ result->correlation =
+ pixel_difference_cost_function
+ ->PearsonProductMomentCorrelationCoefficient(warp.parameters);
if (result->correlation < options.minimum_correlation) {
LG << "Failing with insufficient correlation.";
result->termination = TrackRegionResult::INSUFFICIENT_CORRELATION;
@@ -1523,36 +1536,39 @@ void TemplatedTrackRegion(const FloatImage &image1,
#undef HANDLE_TERMINATION
};
-void TrackRegion(const FloatImage &image1,
- const FloatImage &image2,
- const double *x1, const double *y1,
- const TrackRegionOptions &options,
- double *x2, double *y2,
- TrackRegionResult *result) {
+void TrackRegion(const FloatImage& image1,
+ const FloatImage& image2,
+ const double* x1,
+ const double* y1,
+ const TrackRegionOptions& options,
+ double* x2,
+ double* y2,
+ TrackRegionResult* result) {
// Enum is necessary due to templated nature of autodiff.
-#define HANDLE_MODE(mode_enum, mode_type) \
- if (options.mode == TrackRegionOptions::mode_enum) { \
- TemplatedTrackRegion<mode_type>(image1, image2, \
- x1, y1, \
- options, \
- x2, y2, \
- result); \
- return; \
+#define HANDLE_MODE(mode_enum, mode_type) \
+ if (options.mode == TrackRegionOptions::mode_enum) { \
+ TemplatedTrackRegion<mode_type>( \
+ image1, image2, x1, y1, options, x2, y2, result); \
+ return; \
}
- HANDLE_MODE(TRANSLATION, TranslationWarp);
- HANDLE_MODE(TRANSLATION_SCALE, TranslationScaleWarp);
- HANDLE_MODE(TRANSLATION_ROTATION, TranslationRotationWarp);
+ HANDLE_MODE(TRANSLATION, TranslationWarp);
+ HANDLE_MODE(TRANSLATION_SCALE, TranslationScaleWarp);
+ HANDLE_MODE(TRANSLATION_ROTATION, TranslationRotationWarp);
HANDLE_MODE(TRANSLATION_ROTATION_SCALE, TranslationRotationScaleWarp);
- HANDLE_MODE(AFFINE, AffineWarp);
- HANDLE_MODE(HOMOGRAPHY, HomographyWarp);
+ HANDLE_MODE(AFFINE, AffineWarp);
+ HANDLE_MODE(HOMOGRAPHY, HomographyWarp);
#undef HANDLE_MODE
}
-bool SamplePlanarPatch(const FloatImage &image,
- const double *xs, const double *ys,
- int num_samples_x, int num_samples_y,
- FloatImage *mask, FloatImage *patch,
- double *warped_position_x, double *warped_position_y) {
+bool SamplePlanarPatch(const FloatImage& image,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ FloatImage* mask,
+ FloatImage* patch,
+ double* warped_position_x,
+ double* warped_position_y) {
// Bail early if the points are outside the image.
if (!AllInBounds(image, xs, ys)) {
LG << "Can't sample patch: out of bounds.";
@@ -1563,9 +1579,8 @@ bool SamplePlanarPatch(const FloatImage &image,
patch->Resize(num_samples_y, num_samples_x, image.Depth());
// Compute the warp from rectangular coordinates.
- Mat3 canonical_homography = ComputeCanonicalHomography(xs, ys,
- num_samples_x,
- num_samples_y);
+ Mat3 canonical_homography =
+ ComputeCanonicalHomography(xs, ys, num_samples_x, num_samples_y);
// Walk over the coordinates in the canonical space, sampling from the image
// in the original space and copying the result into the patch.
@@ -1573,12 +1588,11 @@ bool SamplePlanarPatch(const FloatImage &image,
for (int c = 0; c < num_samples_x; ++c) {
Vec3 image_position = canonical_homography * Vec3(c, r, 1);
image_position /= image_position(2);
- SampleLinear(image, image_position(1),
- image_position(0),
- &(*patch)(r, c, 0));
+ SampleLinear(
+ image, image_position(1), image_position(0), &(*patch)(r, c, 0));
if (mask) {
- float mask_value = SampleLinear(*mask, image_position(1),
- image_position(0), 0);
+ float mask_value =
+ SampleLinear(*mask, image_position(1), image_position(0), 0);
for (int d = 0; d < image.Depth(); d++)
(*patch)(r, c, d) *= mask_value;
diff --git a/intern/libmv/libmv/tracking/track_region.h b/intern/libmv/libmv/tracking/track_region.h
index 61dce22bcb8..7868e3b8b77 100644
--- a/intern/libmv/libmv/tracking/track_region.h
+++ b/intern/libmv/libmv/tracking/track_region.h
@@ -19,6 +19,7 @@
// IN THE SOFTWARE.
#ifndef LIBMV_TRACKING_TRACK_REGION_H_
+#define LIBMV_TRACKING_TRACK_REGION_H_
#include "libmv/image/image.h"
#include "libmv/image/sample.h"
@@ -107,7 +108,7 @@ struct TrackRegionOptions {
// If non-null, this is used as the pattern mask. It should match the size of
// image1, even though only values inside the image1 quad are examined. The
// values must be in the range 0.0 to 1.0.
- FloatImage *image1_mask;
+ FloatImage* image1_mask;
};
struct TrackRegionResult {
@@ -128,8 +129,7 @@ struct TrackRegionResult {
Termination termination;
bool is_usable() {
- return termination == CONVERGENCE ||
- termination == NO_CONVERGENCE;
+ return termination == CONVERGENCE || termination == NO_CONVERGENCE;
}
int num_iterations;
@@ -140,12 +140,14 @@ struct TrackRegionResult {
};
// Always needs 4 correspondences.
-void TrackRegion(const FloatImage &image1,
- const FloatImage &image2,
- const double *x1, const double *y1,
- const TrackRegionOptions &options,
- double *x2, double *y2,
- TrackRegionResult *result);
+void TrackRegion(const FloatImage& image1,
+ const FloatImage& image2,
+ const double* x1,
+ const double* y1,
+ const TrackRegionOptions& options,
+ double* x2,
+ double* y2,
+ TrackRegionResult* result);
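For orientation, here is a hedged usage sketch of this entry point: the pattern quad from the previous frame goes in x1/y1, and the initial guess for the new frame goes in x2/y2, which is refined in place. The images, coordinates, and option choices below are placeholders, and default-constructed options are assumed to be reasonable.

  #include "libmv/tracking/track_region.h"

  void TrackOneMarker(const libmv::FloatImage& previous_frame,
                      const libmv::FloatImage& current_frame) {
    using libmv::TrackRegionOptions;
    using libmv::TrackRegionResult;

    // Pattern quad corners in the previous frame (placeholder values).
    double x1[4] = {100.0, 116.0, 116.0, 100.0};
    double y1[4] = {80.0, 80.0, 96.0, 96.0};

    // Initial guess in the current frame; refined in place by TrackRegion().
    double x2[4] = {100.0, 116.0, 116.0, 100.0};
    double y2[4] = {80.0, 80.0, 96.0, 96.0};

    TrackRegionOptions options;
    options.mode = TrackRegionOptions::TRANSLATION;

    TrackRegionResult result;
    libmv::TrackRegion(
        previous_frame, current_frame, x1, y1, options, x2, y2, &result);

    if (result.is_usable()) {
      // x2 / y2 now hold the tracked quad in the current frame.
    }
  }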
// Sample a "canonical" version of the passed planar patch, using bilinear
// sampling. The passed corners must be within the image, and have at least two
@@ -156,11 +158,15 @@ void TrackRegion(const FloatImage &image1,
// the size of image.
// The warped coordinates of the marker's position are returned in
// warped_position_x and warped_position_y.
-bool SamplePlanarPatch(const FloatImage &image,
- const double *xs, const double *ys,
- int num_samples_x, int num_samples_y,
- FloatImage *mask, FloatImage *patch,
- double *warped_position_x, double *warped_position_y);
+bool SamplePlanarPatch(const FloatImage& image,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ FloatImage* mask,
+ FloatImage* patch,
+ double* warped_position_x,
+ double* warped_position_y);
} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/trklt_region_tracker.cc b/intern/libmv/libmv/tracking/trklt_region_tracker.cc
index 05ef3d1d272..7ffa7555467 100644
--- a/intern/libmv/libmv/tracking/trklt_region_tracker.cc
+++ b/intern/libmv/libmv/tracking/trklt_region_tracker.cc
@@ -20,27 +20,29 @@
#include "libmv/tracking/trklt_region_tracker.h"
-#include "libmv/logging/logging.h"
-#include "libmv/numeric/numeric.h"
-#include "libmv/image/image.h"
#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
#include "libmv/image/sample.h"
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
namespace libmv {
// TODO(keir): Switch this to use the smarter LM loop like in ESM.
// Computes U and e from the Ud = e equation (number 14) from the paper.
-static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
- const Array3Df &image_and_gradient2,
- double x1, double y1,
- double x2, double y2,
+static void ComputeTrackingEquation(const Array3Df& image_and_gradient1,
+ const Array3Df& image_and_gradient2,
+ double x1,
+ double y1,
+ double x2,
+ double y2,
int half_width,
double lambda,
- Mat2f *U,
- Vec2f *e) {
+ Mat2f* U,
+ Vec2f* e) {
Mat2f A, B, C, D;
- A = B = C = D = Mat2f::Zero();
+ A = B = C = D = Mat2f::Zero();
Vec2f R, S, V, W;
R = S = V = W = Vec2f::Zero();
@@ -57,9 +59,9 @@ static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
Vec2f gI, gJ;
gI << SampleLinear(image_and_gradient1, yy1, xx1, 1),
- SampleLinear(image_and_gradient1, yy1, xx1, 2);
+ SampleLinear(image_and_gradient1, yy1, xx1, 2);
gJ << SampleLinear(image_and_gradient2, yy2, xx2, 1),
- SampleLinear(image_and_gradient2, yy2, xx2, 2);
+ SampleLinear(image_and_gradient2, yy2, xx2, 2);
// Equation 15 from the paper.
A += gI * gI.transpose();
@@ -77,26 +79,25 @@ static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
Mat2f Di = B.transpose().inverse();
// Equation 14 from the paper.
- *U = A*Di*C + lambda*Di*C - 0.5*B;
- *e = (A + lambda*Mat2f::Identity())*Di*(V - W) + 0.5*(S - R);
+ *U = A * Di * C + lambda * Di * C - 0.5 * B;
+ *e = (A + lambda * Mat2f::Identity()) * Di * (V - W) + 0.5 * (S - R);
}
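The U and e assembled above feed a 2x2 linear solve in each TRKLT iteration; a minimal sketch of that consuming step follows (the real loop lives in Track() further down, and Mat2f/Vec2f in libmv are Eigen::Matrix2f/Vector2f):

  #include <Eigen/Dense>

  // Solve U * d = e for the best update d to (x2, y2); Track() stops once
  // d.squaredNorm() falls below a small threshold.
  Eigen::Vector2f SolveTrackingUpdate(const Eigen::Matrix2f& U,
                                      const Eigen::Vector2f& e) {
    return U.lu().solve(e);
  }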
-static bool RegionIsInBounds(const FloatImage &image1,
- double x, double y,
- int half_window_size) {
+static bool RegionIsInBounds(const FloatImage& image1,
+ double x,
+ double y,
+ int half_window_size) {
// Check the minimum coordinates.
int min_x = floor(x) - half_window_size - 1;
int min_y = floor(y) - half_window_size - 1;
- if (min_x < 0.0 ||
- min_y < 0.0) {
+ if (min_x < 0.0 || min_y < 0.0) {
return false;
}
// Check the maximum coordinates.
int max_x = ceil(x) + half_window_size + 1;
int max_y = ceil(y) + half_window_size + 1;
- if (max_x > image1.cols() ||
- max_y > image1.rows()) {
+ if (max_x > image1.cols() || max_y > image1.rows()) {
return false;
}
@@ -104,10 +105,12 @@ static bool RegionIsInBounds(const FloatImage &image1,
return true;
}
-bool TrkltRegionTracker::Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const {
+bool TrkltRegionTracker::Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const {
if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
<< ", hw=" << half_window_size << ".";
@@ -134,11 +137,14 @@ bool TrkltRegionTracker::Track(const FloatImage &image1,
Vec2f e;
ComputeTrackingEquation(image_and_gradient1,
image_and_gradient2,
- x1, y1,
- *x2, *y2,
+ x1,
+ y1,
+ *x2,
+ *y2,
half_window_size,
lambda,
- &U, &e);
+ &U,
+ &e);
// Solve the linear system for the best update to x2 and y2.
d = U.lu().solve(e);
@@ -161,7 +167,6 @@ bool TrkltRegionTracker::Track(const FloatImage &image1,
LG << "x=" << *x2 << ", y=" << *y2 << ", dx=" << d[0] << ", dy=" << d[1]
<< ", det=" << determinant;
-
// If the update is small, then we probably found the target.
if (d.squaredNorm() < min_update_squared_distance) {
LG << "Successful track in " << i << " iterations.";
diff --git a/intern/libmv/libmv/tracking/trklt_region_tracker.h b/intern/libmv/libmv/tracking/trklt_region_tracker.h
index 26d0621aa02..a9cf5580f61 100644
--- a/intern/libmv/libmv/tracking/trklt_region_tracker.h
+++ b/intern/libmv/libmv/tracking/trklt_region_tracker.h
@@ -46,10 +46,12 @@ struct TrkltRegionTracker : public RegionTracker {
virtual ~TrkltRegionTracker() {}
// Tracker interface.
- virtual bool Track(const FloatImage &image1,
- const FloatImage &image2,
- double x1, double y1,
- double *x2, double *y2) const;
+ virtual bool Track(const FloatImage& image1,
+ const FloatImage& image2,
+ double x1,
+ double y1,
+ double* x2,
+ double* y2) const;
// No point in creating getters or setters.
int half_window_size;