git.blender.org/blender.git
author    Sergey Sharybin <sergey.vfx@gmail.com>  2016-01-04 16:22:27 +0300
committer Sergey Sharybin <sergey.vfx@gmail.com>  2016-01-04 17:39:13 +0300
commit    ba432299cdd4382130103cf5a84c34693d4a3bdc (patch)
tree      8d31ffec1aad8f9216aa1fc7108f494b65f74e7e /intern/libmv
parent    6fb6a08bf84d5d16ebac35527a77bec37112494e (diff)
Move Libmv from extern/ to intern/
Logically it is an intern library, since it is mainly developed by about 1.5 Blender developers.
Diffstat (limited to 'intern/libmv')
-rw-r--r--  intern/libmv/CMakeLists.txt | 238
-rw-r--r--  intern/libmv/ChangeLog | 609
-rwxr-xr-x  intern/libmv/bundle.sh | 187
-rw-r--r--  intern/libmv/files.txt | 138
-rw-r--r--  intern/libmv/intern/autotrack.cc | 99
-rw-r--r--  intern/libmv/intern/autotrack.h | 71
-rw-r--r--  intern/libmv/intern/camera_intrinsics.cc | 355
-rw-r--r--  intern/libmv/intern/camera_intrinsics.h | 138
-rw-r--r--  intern/libmv/intern/detector.cc | 148
-rw-r--r--  intern/libmv/intern/detector.h | 77
-rw-r--r--  intern/libmv/intern/frame_accessor.cc | 164
-rw-r--r--  intern/libmv/intern/frame_accessor.h | 79
-rw-r--r--  intern/libmv/intern/homography.cc | 59
-rw-r--r--  intern/libmv/intern/homography.h | 43
-rw-r--r--  intern/libmv/intern/image.cc | 272
-rw-r--r--  intern/libmv/intern/image.h | 99
-rw-r--r--  intern/libmv/intern/logging.cc | 55
-rw-r--r--  intern/libmv/intern/logging.h | 47
-rw-r--r--  intern/libmv/intern/reconstruction.cc | 542
-rw-r--r--  intern/libmv/intern/reconstruction.h | 102
-rw-r--r--  intern/libmv/intern/region.h | 43
-rw-r--r--  intern/libmv/intern/stub.cc | 403
-rw-r--r--  intern/libmv/intern/track_region.cc | 177
-rw-r--r--  intern/libmv/intern/track_region.h | 81
-rw-r--r--  intern/libmv/intern/tracks.cc | 52
-rw-r--r--  intern/libmv/intern/tracks.h | 51
-rw-r--r--  intern/libmv/intern/tracksN.cc | 138
-rw-r--r--  intern/libmv/intern/tracksN.h | 129
-rw-r--r--  intern/libmv/intern/utildefines.h | 62
-rw-r--r--  intern/libmv/libmv-capi.h | 42
-rw-r--r--  intern/libmv/libmv/autotrack/autotrack.cc | 291
-rw-r--r--  intern/libmv/libmv/autotrack/autotrack.h | 226
-rw-r--r--  intern/libmv/libmv/autotrack/callbacks.h | 38
-rw-r--r--  intern/libmv/libmv/autotrack/frame_accessor.h | 86
-rw-r--r--  intern/libmv/libmv/autotrack/marker.h | 144
-rw-r--r--  intern/libmv/libmv/autotrack/model.h | 44
-rw-r--r--  intern/libmv/libmv/autotrack/predict_tracks.cc | 316
-rw-r--r--  intern/libmv/libmv/autotrack/predict_tracks.h | 37
-rw-r--r--  intern/libmv/libmv/autotrack/predict_tracks_test.cc | 201
-rw-r--r--  intern/libmv/libmv/autotrack/quad.h | 57
-rw-r--r--  intern/libmv/libmv/autotrack/reconstruction.h | 89
-rw-r--r--  intern/libmv/libmv/autotrack/region.h | 67
-rw-r--r--  intern/libmv/libmv/autotrack/tracks.cc | 193
-rw-r--r--  intern/libmv/libmv/autotrack/tracks.h | 82
-rw-r--r--  intern/libmv/libmv/autotrack/tracks_test.cc | 52
-rw-r--r--  intern/libmv/libmv/base/aligned_malloc.cc | 74
-rw-r--r--  intern/libmv/libmv/base/aligned_malloc.h | 34
-rw-r--r--  intern/libmv/libmv/base/id_generator.h | 37
-rw-r--r--  intern/libmv/libmv/base/scoped_ptr.h | 105
-rw-r--r--  intern/libmv/libmv/base/scoped_ptr_test.cc | 79
-rw-r--r--  intern/libmv/libmv/base/vector.h | 176
-rw-r--r--  intern/libmv/libmv/base/vector_test.cc | 223
-rw-r--r--  intern/libmv/libmv/base/vector_utils.h | 34
-rw-r--r--  intern/libmv/libmv/image/array_nd.cc | 108
-rw-r--r--  intern/libmv/libmv/image/array_nd.h | 497
-rw-r--r--  intern/libmv/libmv/image/array_nd_test.cc | 324
-rw-r--r--  intern/libmv/libmv/image/convolve.cc | 344
-rw-r--r--  intern/libmv/libmv/image/convolve.h | 107
-rw-r--r--  intern/libmv/libmv/image/convolve_test.cc | 110
-rw-r--r--  intern/libmv/libmv/image/correlation.h | 74
-rw-r--r--  intern/libmv/libmv/image/image.h | 155
-rw-r--r--  intern/libmv/libmv/image/image_converter.h | 81
-rw-r--r--  intern/libmv/libmv/image/image_drawing.h | 285
-rw-r--r--  intern/libmv/libmv/image/image_test.cc | 45
-rw-r--r--  intern/libmv/libmv/image/sample.h | 138
-rw-r--r--  intern/libmv/libmv/image/sample_test.cc | 89
-rw-r--r--  intern/libmv/libmv/image/tuple.h | 90
-rw-r--r--  intern/libmv/libmv/image/tuple_test.cc | 83
-rw-r--r--  intern/libmv/libmv/logging/logging.h | 31
-rw-r--r--  intern/libmv/libmv/multiview/conditioning.cc | 99
-rw-r--r--  intern/libmv/libmv/multiview/conditioning.h | 59
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection.cc | 774
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection.h | 148
-rw-r--r--  intern/libmv/libmv/multiview/euclidean_resection_test.cc | 237
-rw-r--r--  intern/libmv/libmv/multiview/fundamental.cc | 544
-rw-r--r--  intern/libmv/libmv/multiview/fundamental.h | 187
-rw-r--r--  intern/libmv/libmv/multiview/fundamental_test.cc | 162
-rw-r--r--  intern/libmv/libmv/multiview/homography.cc | 465
-rw-r--r--  intern/libmv/libmv/multiview/homography.h | 145
-rw-r--r--  intern/libmv/libmv/multiview/homography_error.h | 248
-rw-r--r--  intern/libmv/libmv/multiview/homography_parameterization.h | 91
-rw-r--r--  intern/libmv/libmv/multiview/homography_test.cc | 261
-rw-r--r--  intern/libmv/libmv/multiview/nviewtriangulation.h | 80
-rw-r--r--  intern/libmv/libmv/multiview/nviewtriangulation_test.cc | 94
-rw-r--r--  intern/libmv/libmv/multiview/panography.cc | 125
-rw-r--r--  intern/libmv/libmv/multiview/panography.h | 99
-rw-r--r--  intern/libmv/libmv/multiview/panography_kernel.cc | 51
-rw-r--r--  intern/libmv/libmv/multiview/panography_kernel.h | 54
-rw-r--r--  intern/libmv/libmv/multiview/panography_test.cc | 144
-rw-r--r--  intern/libmv/libmv/multiview/projection.cc | 224
-rw-r--r--  intern/libmv/libmv/multiview/projection.h | 231
-rw-r--r--  intern/libmv/libmv/multiview/projection_test.cc | 115
-rw-r--r--  intern/libmv/libmv/multiview/resection.h | 62
-rw-r--r--  intern/libmv/libmv/multiview/resection_test.cc | 61
-rw-r--r--  intern/libmv/libmv/multiview/test_data_sets.cc | 196
-rw-r--r--  intern/libmv/libmv/multiview/test_data_sets.h | 105
-rw-r--r--  intern/libmv/libmv/multiview/triangulation.cc | 50
-rw-r--r--  intern/libmv/libmv/multiview/triangulation.h | 38
-rw-r--r--  intern/libmv/libmv/multiview/triangulation_test.cc | 47
-rw-r--r--  intern/libmv/libmv/multiview/two_view_kernel.h | 137
-rw-r--r--  intern/libmv/libmv/numeric/dogleg.h | 261
-rw-r--r--  intern/libmv/libmv/numeric/dogleg_test.cc | 95
-rw-r--r--  intern/libmv/libmv/numeric/function_derivative.h | 107
-rw-r--r--  intern/libmv/libmv/numeric/function_derivative_test.cc | 57
-rw-r--r--  intern/libmv/libmv/numeric/levenberg_marquardt.h | 183
-rw-r--r--  intern/libmv/libmv/numeric/levenberg_marquardt_test.cc | 56
-rw-r--r--  intern/libmv/libmv/numeric/numeric.cc | 136
-rw-r--r--  intern/libmv/libmv/numeric/numeric.h | 502
-rw-r--r--  intern/libmv/libmv/numeric/numeric_test.cc | 439
-rw-r--r--  intern/libmv/libmv/numeric/poly.cc | 23
-rw-r--r--  intern/libmv/libmv/numeric/poly.h | 123
-rw-r--r--  intern/libmv/libmv/numeric/poly_test.cc | 98
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.cc | 658
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.h | 148
-rw-r--r--  intern/libmv/libmv/simple_pipeline/callbacks.h | 34
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc | 293
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.h | 406
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h | 192
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc | 239
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect.cc | 361
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect.h | 113
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect_test.cc | 230
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.cc | 197
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.h | 131
-rw-r--r--  intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc | 195
-rw-r--r--  intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h | 74
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect.cc | 254
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect.h | 77
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect_test.cc | 81
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection.cc | 450
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection.h | 53
-rw-r--r--  intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc | 307
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver.cc | 251
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver.h | 48
-rw-r--r--  intern/libmv/libmv/simple_pipeline/modal_solver_test.cc | 79
-rw-r--r--  intern/libmv/libmv/simple_pipeline/pipeline.cc | 368
-rw-r--r--  intern/libmv/libmv/simple_pipeline/pipeline.h | 98
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.cc | 191
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.h | 217
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc | 68
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction_scale.h | 36
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect.cc | 270
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect.h | 86
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect_test.cc | 234
-rw-r--r--  intern/libmv/libmv/simple_pipeline/tracks.cc | 187
-rw-r--r--  intern/libmv/libmv/simple_pipeline/tracks.h | 138
-rw-r--r--  intern/libmv/libmv/tracking/brute_region_tracker.cc | 331
-rw-r--r--  intern/libmv/libmv/tracking/brute_region_tracker.h | 49
-rw-r--r--  intern/libmv/libmv/tracking/brute_region_tracker_test.cc | 51
-rw-r--r--  intern/libmv/libmv/tracking/hybrid_region_tracker.cc | 67
-rw-r--r--  intern/libmv/libmv/tracking/hybrid_region_tracker.h | 52
-rw-r--r--  intern/libmv/libmv/tracking/kalman_filter.h | 112
-rw-r--r--  intern/libmv/libmv/tracking/klt_region_tracker.cc | 162
-rw-r--r--  intern/libmv/libmv/tracking/klt_region_tracker.h | 55
-rw-r--r--  intern/libmv/libmv/tracking/klt_region_tracker_test.cc | 51
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker.cc | 98
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker.h | 46
-rw-r--r--  intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc | 80
-rw-r--r--  intern/libmv/libmv/tracking/region_tracker.h | 48
-rw-r--r--  intern/libmv/libmv/tracking/retrack_region_tracker.cc | 47
-rw-r--r--  intern/libmv/libmv/tracking/retrack_region_tracker.h | 48
-rw-r--r--  intern/libmv/libmv/tracking/track_region.cc | 1601
-rw-r--r--  intern/libmv/libmv/tracking/track_region.h | 170
-rw-r--r--  intern/libmv/libmv/tracking/trklt_region_tracker.cc | 176
-rw-r--r--  intern/libmv/libmv/tracking/trklt_region_tracker.h | 65
-rwxr-xr-x  intern/libmv/mkfiles.sh | 6
-rw-r--r--  intern/libmv/third_party/msinttypes/README.libmv | 5
-rw-r--r--  intern/libmv/third_party/msinttypes/inttypes.h | 305
-rw-r--r--  intern/libmv/third_party/msinttypes/stdint.h | 247
169 files changed, 27995 insertions, 0 deletions
diff --git a/intern/libmv/CMakeLists.txt b/intern/libmv/CMakeLists.txt
new file mode 100644
index 00000000000..65f8af5694e
--- /dev/null
+++ b/intern/libmv/CMakeLists.txt
@@ -0,0 +1,238 @@
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# The Original Code is Copyright (C) 2011, Blender Foundation
+# All rights reserved.
+#
+# Contributor(s): Blender Foundation,
+# Sergey Sharybin
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# NOTE: This file is automatically generated by the bundle.sh script.
+# If you make changes to this file, please update the template
+# in that script too.
+
+set(INC
+ .
+)
+
+set(INC_SYS
+)
+
+set(SRC
+ libmv-capi.h
+)
+
+if(WITH_LIBMV)
+ add_definitions(${GFLAGS_DEFINES})
+ add_definitions(${GLOG_DEFINES})
+ add_definitions(${CERES_DEFINES})
+
+ list(APPEND INC
+ ../../extern/gflags/src
+ ../../extern/glog/src
+ ../../extern/ceres/include
+ ../../extern/ceres/config
+ ../guardedalloc
+ )
+
+ list(APPEND INC_SYS
+ ${EIGEN3_INCLUDE_DIRS}
+ ${PNG_INCLUDE_DIRS}
+ ${ZLIB_INCLUDE_DIRS}
+ )
+
+ add_definitions(
+ -DWITH_LIBMV_GUARDED_ALLOC
+ -DLIBMV_NO_FAST_DETECTOR=
+ )
+
+ list(APPEND SRC
+ intern/autotrack.cc
+ intern/camera_intrinsics.cc
+ intern/detector.cc
+ intern/frame_accessor.cc
+ intern/homography.cc
+ intern/image.cc
+ intern/logging.cc
+ intern/reconstruction.cc
+ intern/track_region.cc
+ intern/tracks.cc
+ intern/tracksN.cc
+ libmv/autotrack/autotrack.cc
+ libmv/autotrack/predict_tracks.cc
+ libmv/autotrack/tracks.cc
+ libmv/base/aligned_malloc.cc
+ libmv/image/array_nd.cc
+ libmv/image/convolve.cc
+ libmv/multiview/conditioning.cc
+ libmv/multiview/euclidean_resection.cc
+ libmv/multiview/fundamental.cc
+ libmv/multiview/homography.cc
+ libmv/multiview/panography.cc
+ libmv/multiview/panography_kernel.cc
+ libmv/multiview/projection.cc
+ libmv/multiview/triangulation.cc
+ libmv/numeric/numeric.cc
+ libmv/numeric/poly.cc
+ libmv/simple_pipeline/bundle.cc
+ libmv/simple_pipeline/camera_intrinsics.cc
+ libmv/simple_pipeline/detect.cc
+ libmv/simple_pipeline/distortion_models.cc
+ libmv/simple_pipeline/initialize_reconstruction.cc
+ libmv/simple_pipeline/intersect.cc
+ libmv/simple_pipeline/keyframe_selection.cc
+ libmv/simple_pipeline/modal_solver.cc
+ libmv/simple_pipeline/pipeline.cc
+ libmv/simple_pipeline/reconstruction.cc
+ libmv/simple_pipeline/reconstruction_scale.cc
+ libmv/simple_pipeline/resect.cc
+ libmv/simple_pipeline/tracks.cc
+ libmv/tracking/brute_region_tracker.cc
+ libmv/tracking/hybrid_region_tracker.cc
+ libmv/tracking/klt_region_tracker.cc
+ libmv/tracking/pyramid_region_tracker.cc
+ libmv/tracking/retrack_region_tracker.cc
+ libmv/tracking/track_region.cc
+ libmv/tracking/trklt_region_tracker.cc
+
+
+ intern/autotrack.h
+ intern/camera_intrinsics.h
+ intern/detector.h
+ intern/frame_accessor.h
+ intern/homography.h
+ intern/image.h
+ intern/logging.h
+ intern/reconstruction.h
+ intern/track_region.h
+ intern/tracks.h
+ intern/tracksN.h
+ libmv/autotrack/autotrack.h
+ libmv/autotrack/callbacks.h
+ libmv/autotrack/frame_accessor.h
+ libmv/autotrack/marker.h
+ libmv/autotrack/model.h
+ libmv/autotrack/predict_tracks.h
+ libmv/autotrack/quad.h
+ libmv/autotrack/reconstruction.h
+ libmv/autotrack/region.h
+ libmv/autotrack/tracks.h
+ libmv/base/aligned_malloc.h
+ libmv/base/id_generator.h
+ libmv/base/scoped_ptr.h
+ libmv/base/vector.h
+ libmv/base/vector_utils.h
+ libmv/image/array_nd.h
+ libmv/image/convolve.h
+ libmv/image/correlation.h
+ libmv/image/image_converter.h
+ libmv/image/image_drawing.h
+ libmv/image/image.h
+ libmv/image/sample.h
+ libmv/image/tuple.h
+ libmv/logging/logging.h
+ libmv/multiview/conditioning.h
+ libmv/multiview/euclidean_resection.h
+ libmv/multiview/fundamental.h
+ libmv/multiview/homography_error.h
+ libmv/multiview/homography.h
+ libmv/multiview/homography_parameterization.h
+ libmv/multiview/nviewtriangulation.h
+ libmv/multiview/panography.h
+ libmv/multiview/panography_kernel.h
+ libmv/multiview/projection.h
+ libmv/multiview/resection.h
+ libmv/multiview/triangulation.h
+ libmv/multiview/two_view_kernel.h
+ libmv/numeric/dogleg.h
+ libmv/numeric/function_derivative.h
+ libmv/numeric/levenberg_marquardt.h
+ libmv/numeric/numeric.h
+ libmv/numeric/poly.h
+ libmv/simple_pipeline/bundle.h
+ libmv/simple_pipeline/callbacks.h
+ libmv/simple_pipeline/camera_intrinsics.h
+ libmv/simple_pipeline/camera_intrinsics_impl.h
+ libmv/simple_pipeline/detect.h
+ libmv/simple_pipeline/distortion_models.h
+ libmv/simple_pipeline/initialize_reconstruction.h
+ libmv/simple_pipeline/intersect.h
+ libmv/simple_pipeline/keyframe_selection.h
+ libmv/simple_pipeline/modal_solver.h
+ libmv/simple_pipeline/pipeline.h
+ libmv/simple_pipeline/reconstruction.h
+ libmv/simple_pipeline/reconstruction_scale.h
+ libmv/simple_pipeline/resect.h
+ libmv/simple_pipeline/tracks.h
+ libmv/tracking/brute_region_tracker.h
+ libmv/tracking/hybrid_region_tracker.h
+ libmv/tracking/kalman_filter.h
+ libmv/tracking/klt_region_tracker.h
+ libmv/tracking/pyramid_region_tracker.h
+ libmv/tracking/region_tracker.h
+ libmv/tracking/retrack_region_tracker.h
+ libmv/tracking/track_region.h
+ libmv/tracking/trklt_region_tracker.h
+
+ third_party/msinttypes/inttypes.h
+ third_party/msinttypes/stdint.h
+ )
+
+
+ if(WITH_GTESTS)
+ blender_add_lib(libmv_test_dataset "./libmv/multiview/test_data_sets.cc" "" "")
+
+ BLENDER_SRC_GTEST("libmv_predict_tracks" "./libmv/autotrack/predict_tracks_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_tracks" "./libmv/autotrack/tracks_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_scoped_ptr" "./libmv/base/scoped_ptr_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_vector" "./libmv/base/vector_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_array_nd" "./libmv/image/array_nd_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_convolve" "./libmv/image/convolve_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_image" "./libmv/image/image_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_sample" "./libmv/image/sample_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_tuple" "./libmv/image/tuple_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_euclidean_resection" "./libmv/multiview/euclidean_resection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_fundamental" "./libmv/multiview/fundamental_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_homography" "./libmv/multiview/homography_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_nviewtriangulation" "./libmv/multiview/nviewtriangulation_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_panography" "./libmv/multiview/panography_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_projection" "./libmv/multiview/projection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_resection" "./libmv/multiview/resection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_triangulation" "./libmv/multiview/triangulation_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_dogleg" "./libmv/numeric/dogleg_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_function_derivative" "./libmv/numeric/function_derivative_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_levenberg_marquardt" "./libmv/numeric/levenberg_marquardt_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_numeric" "./libmv/numeric/numeric_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_poly" "./libmv/numeric/poly_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_camera_intrinsics" "./libmv/simple_pipeline/camera_intrinsics_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_detect" "./libmv/simple_pipeline/detect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_intersect" "./libmv/simple_pipeline/intersect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_keyframe_selection" "./libmv/simple_pipeline/keyframe_selection_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_modal_solver" "./libmv/simple_pipeline/modal_solver_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_resect" "./libmv/simple_pipeline/resect_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_brute_region_tracker" "./libmv/tracking/brute_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_klt_region_tracker" "./libmv/tracking/klt_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ BLENDER_SRC_GTEST("libmv_pyramid_region_tracker" "./libmv/tracking/pyramid_region_tracker_test.cc" "libmv_test_dataset;extern_libmv;extern_ceres")
+ endif()
+else()
+ list(APPEND SRC
+ intern/stub.cc
+ )
+endif()
+
+blender_add_lib(bf_intern_libmv "${SRC}" "${INC}" "${INC_SYS}")
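
When WITH_LIBMV is disabled, the else() branch above compiles intern/stub.cc in place of the real sources, so the rest of Blender still links against the same C-API symbols. The stub file itself is not reproduced in this excerpt; the following is only a hypothetical sketch of that pattern, written against two of the AutoTrack functions declared later in intern/autotrack.h, to illustrate the idea of no-op fallbacks.

    /* Hypothetical sketch only -- the real intern/stub.cc (403 lines, not shown
     * in this excerpt) covers the whole libmv-capi.h surface.  The point is that
     * the same symbols still exist in a non-libmv build and simply do nothing. */
    #include <cstddef>

    #include "intern/autotrack.h"

    libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor * /*frame_accessor*/) {
      /* Tracking is unavailable in this build configuration. */
      return NULL;
    }

    void libmv_autoTrackDestroy(libmv_AutoTrack * /*libmv_autotrack*/) {
      /* Nothing was allocated, so there is nothing to free. */
    }
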
diff --git a/intern/libmv/ChangeLog b/intern/libmv/ChangeLog
new file mode 100644
index 00000000000..63c9963a22f
--- /dev/null
+++ b/intern/libmv/ChangeLog
@@ -0,0 +1,609 @@
+commit d3537e3709fe11f42312e82cb1c9837c9e742385
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Sun Jan 3 14:17:10 2016 +0500
+
+ GLog/GFlags: Reduce difference between upstream and bundled versions
+
+ Several things here:
+
+ - Re-bundled sources using own fork with pull-requests applied on the sources.
+
+ - Got rid of changes around include "config.h", it was needed by Blender to
+ make it's include directories configuration to work. This could be addressed
+ differently from Blender side.
+
+ - Moved some customization to defines set by CMakeLists.
+
+commit 1ec37bba2cfbbf0d6568429fa3035ee2164c23e6
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Sat Jan 2 12:42:55 2016 +0500
+
+ GFlags linking errors fix for MSVC
+
+commit df7642b270e8e43685e9ffb404b59d7b226a9f60
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Dec 31 17:56:12 2015 +0500
+
+ Alternative fix for missing prototype for a couple of functions
+
+commit 08f685797b7d776cdaa579136c82b15ddc6ffb30
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Dec 31 17:33:05 2015 +0500
+
+ Update GFlags to the latest upstream version
+
+ Makes it easier to synchronize some compiler/warning fixes.
+
+commit e0ef5b09203e3906a555e6c2010f25cb667da9cd
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Dec 31 16:15:59 2015 +0500
+
+ GLog: Solve some compilation warnings
+
+ Those are actually sent to a pull-request, see
+
+ https://github.com/google/glog/pull/81
+
+commit 2072b213d4d3a55d099a063ed1e7331cc773454e
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Dec 31 16:13:53 2015 +0500
+
+ Add Martijn Berger to the AUTHORS file
+
+commit 4dd0770d98d67896e4f936356e281f63d927410e
+Author: Martijn Berger <martijn.berger@gmail.com>
+Date: Thu Dec 31 16:13:08 2015 +0500
+
+ Fix compilation error of Glog and Gflags with MSVC2015
+
+commit 2712f42be2ad79e7d3a6c9905f6d8d1e3b7133ac
+Author: Brecht Van Lommel <brechtvanlommel@gmail.com>
+Date: Thu Dec 31 14:00:58 2015 +0500
+
+ Fix OS X (with 10.11 SDK) glog build errors due to using deprecated code.
+
+ Some values are now restored to the ones from before the upgrade today.
+
+commit d249280fdf7c937fd6ebbc465508843a70aafd4c
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 30 16:59:28 2015 +0500
+
+ Tweaks to Glog to support building on all platforms
+
+ This makes it possible to compile Libmv on all platforms,
+ amount of hacks is lower, which could bring some warnings
+ up, but those are better be addressed via upstream which
+ is now rather active.
+
+commit 86c57750ddb857643fb5dd2c83b4953da83dd57d
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 30 16:15:47 2015 +0500
+
+ Enable explicit Schur complement matrix by default
+
+ Gives up to 2x speedup of the camera solving process in an average scene.
+ In really huge scenes it might be slower, but that still needs to be investigated.
+
+commit d6c52a70b5a0664b7c74bda68f59a895fe8aa235
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 30 16:13:03 2015 +0500
+
+ Fix one frame memory leak when tracking last frame
+
+commit 6e2ac41d25d5923b2a62c96d27d919a36eff9b48
+Author: Brecht Van Lommel <brechtvanlommel@gmail.com>
+Date: Wed Dec 30 16:11:24 2015 +0500
+
+ Motion tracking not working with Xcode 7 on OS X.
+
+ Caused by use of the uninitialized shape_ variable in Resize().
+
+commit fc72ae06fb4ae559ac37d14d1b34d6669505cc86
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 30 15:56:40 2015 +0500
+
+ Update GLog to latest upstream
+
+ Should fix issues building with MSVC2015.
+
+commit d4b2d15bd3d195074b074331354de96a1b51042f
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 30 16:01:10 2015 +0500
+
+ Fix wrong README file reference
+
+commit 2b4aa0b7720cae9a408284834559bea9960157ee
+Author: Keir Mierle <mierle@gmail.com>
+Date: Mon May 11 02:16:53 2015 -0700
+
+ Make README more informative for GitHub viewers
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D1295
+
+commit 514e4491aea655d20be047ed87f002fb7854d5c9
+Author: Keir Mierle <mierle@gmail.com>
+Date: Mon May 11 01:54:09 2015 -0700
+
+ Simplify the modal solver Ceres cost function
+
+ Fix test by flipping the quaternion.
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Projects: #libmv
+
+ Differential Revision: https://developer.blender.org/D756
+
+commit e55fafd31f7d53d42af7c6b7df2eebe3c2568da9
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 31 19:05:51 2014 +0500
+
+ Synchronize MSVC compilation fixes from Blender
+
+commit 7d6020d2ec42c6cb2749bc891186b4880d26d40b
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Dec 31 15:32:07 2014 +0500
+
+ Update GLog to latest upstream revision 143
+
+ Mainly to solve compilation error with demangle.cc.
+
+commit 5dc746700eaf85cb674f0fb73ff3c1b49a7f6315
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Fri Dec 12 14:59:55 2014 +0500
+
+ Update GFlags to latest release 2.1.1
+
+ Main purpose of this (and some of the upcoming) update is to check if the
+ upstream sources are usable without any modifications for us. And if
+ not, then we'll need to consider moving some changes into upstream.
+
+ This commit contains a one-to-one copy of the upstream GFlags library
+ and also changes namespace usage since it's changed in the upstream.
+
+commit 6fe6d75f7e90e161b44643b953f058a3829a5247
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Sat Nov 1 02:53:36 2014 +0500
+
+ Libmv: Code cleanup, mixed class/struct in declaration/definition
+
+commit d2a5f7953812d2d09765431b59c6c4ac72faf35b
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Oct 30 23:13:53 2014 +0500
+
+ Libmv: Support disabled color channels in tracking settings
+
+ This was never ported to a new tracking pipeline and now it's done using
+ FrameAccessor::Transform routines. Quite straightforward, but I've changed
+ the order of grayscale conversion on the Blender side with a call to the transform callback.
+
+ This way it's much easier to perform rescaling in libmv side.
+
+commit d976e034cdf74b34860e0632d7b29713f47c5756
+Author: Keir Mierle <mierle@gmail.com>
+Date: Sat Aug 23 00:38:01 2014 -0700
+
+ Minor keyframe selection cleanups
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D757
+
+commit bc99ca55dadfca89fde0f93764397c2fe028943d
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Sat Aug 23 01:55:32 2014 +0600
+
+ implement backward prediction
+
+ The title actually says it all, just extend current implementation
+ of PredictMarkerPosition() to cases when tracking happens in the reverse
+ order (from the end frame to start).
+
+ It still doesn't solve all the ambiguity happening in the function
+ in cases when one tracks the feature and then re-tracks it in order
+ to refine the sliding. This is considered a separate TODO for now and
+ will likely be solved by passing tracking direction to the prediction
+ function.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D663
+
+commit 5b87682d98df65ade02638bc6482d824cf0dd0b3
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu Aug 21 22:45:22 2014 -0700
+
+ Make libmv compile on Ubuntu 14.04
+
+ Reviewers: fsiddi
+
+ Reviewed By: fsiddi
+
+ Subscribers: sergey
+
+ Differential Revision: https://developer.blender.org/D755
+
+commit 0a81db623c458e0384b4f7060d1bcff8993fb469
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Jul 23 00:42:00 2014 +0600
+
+ Fix wrong residual blocks counter
+
+ This happened in cases when having zero-weighted tracks
+ and could lead to some assert failures on marking parameter
+ block constant.
+
+commit 2824dbac54cacf74828678be7a5c9fd960ce83e2
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Fri Jul 18 12:52:03 2014 +0600
+
+ Fix search area sliding issue
+
+ The only way to do this is to store search region in floats
+ and round when we need to sample it. Otherwise you'll always
+ have sliding effect caused by rounding the issues, especially
+ when doing incremental offset (thing which happens in the
+ prediction code).
+
+ Pretty much straightforward change apart from stuff to be kept
+ in mind: offset calculation int should happen relative to the
+ rounded search region. This is because tracker works in the space
+ of the search window image which gets rounded on the frame access.
+
+ This makes API a bit creepy because frame accessor uses the same
+ Region struct as the search window in Marker and ideally we would
+ need to have either IntRegion or Region<int> in order to make
+ Libmv fully track on what's getting rounded and when.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D616
+
+commit 04862c479332308be47a0f27361402444ace8880
+Author: Keir Mierle <mierle@gmail.com>
+Date: Fri May 9 23:00:03 2014 +0200
+
+ Start the automatic 2D tracking code
+
+ This starts the 2D automatic tracking code. It is totally unfinished.
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D523
+
+commit be679f67d807a2139c1f7d7e2ca45141940b30d5
+Author: Keir Mierle <mierle@gmail.com>
+Date: Fri May 9 14:36:04 2014 +0200
+
+ Also shift the search window
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D520
+
+commit 66b8f5eef2633ebcde32a388fc14c60171011821
+Author: Keir Mierle <mierle@gmail.com>
+Date: Fri May 9 13:06:28 2014 +0200
+
+ Change the search region to absolute frame coordinates
+
+ Smarter Eigen usage
+
+ Better error logging
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D519
+
+commit a08193319ae409fad8f08887eae1f79f02e91eaa
+Author: Keir Mierle <mierle@gmail.com>
+Date: Fri May 9 12:02:47 2014 +0200
+
+ First cut at predictive tracing
+
+ This adds a Kalman filter-based approach to predict where a marker
+ will go in the next frame to track. Hopefully this will make the
+ tracker work faster by avoiding lengthy searches. This code
+ compiles, but is otherwise untested, and likely does not work.
+
+ Fix else branch
+
+ Add some tests
+
+ Update patch coordinates as well (and test)
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D518
+
+commit 607ffb2f62b56e34a841abbb952d83e19cd1e23c
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu May 8 16:05:28 2014 +0200
+
+ Add constructor to AutoTrack
+
+commit c39e20a0c27da3733804c3848454b5d4c4f0e66b
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu May 8 16:04:20 2014 +0200
+
+ Fix GetMarker compilation issue
+
+commit 8dd93e431b6e44439c803bfd26ec2669b656177e
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu May 8 15:50:26 2014 +0200
+
+ Expose GetMarker() in AutoTrack
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D516
+
+commit 4405dff60ea08d454b64da1a7c0595d9328cf8a3
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu May 8 15:38:14 2014 +0200
+
+ Add public SetMarkers to AutoTrack
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D515
+
+commit c90837f6db276a3b1f610eaad509155f6a43b24f
+Author: Keir Mierle <mierle@gmail.com>
+Date: Thu May 8 15:17:48 2014 +0200
+
+ Make autotrack skeleton compile
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D514
+
+commit be01baa2e82e36f63e548f073157e68d2ff870c0
+Author: Keir Mierle <mierle@gmail.com>
+Date: Wed May 7 18:48:55 2014 +0200
+
+ Add preliminary TrackMarkerToFrame in autotrack
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D509
+
+commit 0cab028d591b3d08672ca86eb6c6e4ac1aacf1d0
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed May 7 17:59:11 2014 +0200
+
+ Remove assert from ArrayND Resize
+
+ That assert broke initialization of arrays which doesn't
+ own the data since constructor uses Resize to set shape
+ and strides.
+
+ Strides are still to be fixed, but that's for later.
+
+commit 64f9c118029a9351e9023e96527c120e1d724d5b
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed May 7 17:42:21 2014 +0200
+
+ Fix ArrayND freeing the data it doesn't own
+
+ Can't really guarantee it works fully correct now,
+ but at least this check is needed anyway and compilation
+ works just fine.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D508
+
+commit 0618f1c8e88dfc738cdde55784da80b889905e7c
+Author: Keir Mierle <mierle@gmail.com>
+Date: Wed May 7 12:03:32 2014 +0200
+
+ Minor changes
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D505
+
+commit 5c34335e1bb90c4ed701ee830c718ed4e20dbffa
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed May 7 11:12:23 2014 +0200
+
+ Fix compilation error in frame accessor
+
+ - int64 is not a standard type, we've got int64_t defined in
+ std int. We also have an msvc port of this header, so should
+ not be an issue.
+
+ - Fixed inconsistency in usage of CacheKey and Key, used Key.
+
+ - Some functions weren't marked as virtual.
+
+ Additional change: added self to authors.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D504
+
+commit 06bc207614e262cd688e2c3ed820ade7c77bdb66
+Author: Keir Mierle <mierle@gmail.com>
+Date: Tue May 6 22:30:59 2014 +0200
+
+ Start new Tracks implementation
+
+ This adds the new Tracks implementation, as well as a
+ trivial test to show it compiles.
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D502
+
+commit 25ce061e6da69881460ba7718bb0d660a2380a02
+Author: Keir Mierle <mierle@gmail.com>
+Date: Tue May 6 19:10:51 2014 +0200
+
+ Add Reconstruction class for new API
+
+ This starts the new Reconstruction class (with support for e.g. planes). This
+ also starts the new namespace "mv" which will eventually have all the symbols
+ we wish to export.
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D501
+
+commit 0a6af3e29016048978aea607673340500e050339
+Author: Keir Mierle <mierle@gmail.com>
+Date: Tue May 6 17:52:53 2014 +0200
+
+ Add a new Tracks implementation
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D500
+
+commit 887b68d29c2b198f4939f9ab5153881aa2c1806e
+Author: Keir Mierle <mierle@gmail.com>
+Date: Tue May 6 17:01:39 2014 +0200
+
+ Initial commit of unfinished AutoTrack API
+
+ This starts the creating the new AutoTrack API. The new API will
+ make it possible for libmv to do full autotracking, including
+ predictive tracking and also support multiple motion models (3D
+ planes etc).
+
+ The first goal (not in this patch) is to convert Blender to use
+ the new API without adding any new functionality.
+
+ Note: This does not add any of the API to the build system!
+ It likely does not compile.
+
+ Reviewers: sergey
+
+ Reviewed By: sergey
+
+ Differential Revision: https://developer.blender.org/D499
+
+commit 08cc227d431d257d27f300fbb8e6991e663302da
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Tue May 6 13:09:22 2014 +0200
+
+ Fix homography test failure
+
+ It was caused by assuming that reconstructed homography matrix
+ should look exactly the same as the matrix used to generate a
+ test case.
+
+ It's not actually valid assumption because different-looking
+ matrices could correspond to the same exact transform.
+
+ In this change we make it so actual "re-projected" vectors
+ are being checked, not the values in the matrix. This makes the
+ verification more predictable.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D488
+
+commit 0b7d83dc9627447dc7df64d7e3a468aefe9ddc13
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Apr 23 19:14:55 2014 +0600
+
+ Fix compilation on OSX after previous commit
+
+ EXPECT_EQ wasn't defined in the scope.
+
+commit d14049e00dabf8fdf49056779f0a3718fbb39e8f
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Wed Apr 23 15:08:16 2014 +0600
+
+ Move aligned malloc implementation into own file
+
+ It was rather stupid having it in brute region tracker,
+ now it is in own file in base library (which was also
+ added in this commit, before this it consist of header
+ files only).
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D479
+
+commit 0ddf3851bfcb8de43660b119a25a77a25674200d
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Mon Apr 21 14:14:03 2014 +0600
+
+ Optimization of PearsonProductMomentCorrelation
+
+ Pass the arrays by reference rather than by value,
+ should give some percent of speedup.
+
+ Also don't pass the dimensions to the function but
+ get them from the images themselves.
+
+ Hopefully this will give some %% of tracker speedup.
+
+commit f68fdbe5896a6c5bd8b500caeec61b876c5e44c6
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Mon Apr 21 14:10:43 2014 +0600
+
+ Fix wrong assert in ResizeImage()
+
+ The assert didn't make any sense because ComputeBoundingBox()
+ is intended to return bounding box in the following way:
+ (xmin, xmax, ymin, ymax).
+
+commit 1d386b6775a71c499e9b8e4a288c0785c4937677
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Apr 17 18:42:43 2014 +0600
+
+ Add unit tests for buffer (un)distortion
+
+ Currently only uses identity camera intrinsics just to
+ see whether lookup grids are properly allocated.
+
+ Should prevent accidents like that one happened recently
+ with crashing Blender after Libmv re-integration.
diff --git a/intern/libmv/bundle.sh b/intern/libmv/bundle.sh
new file mode 100755
index 00000000000..e719e592c02
--- /dev/null
+++ b/intern/libmv/bundle.sh
@@ -0,0 +1,187 @@
+#!/bin/sh
+
+if [ "x$1" = "x--i-really-know-what-im-doing" ] ; then
+ echo Proceeding as requested by command line ...
+else
+ echo "*** Please run again with --i-really-know-what-im-doing ..."
+ exit 1
+fi
+
+BRANCH="master"
+
+repo="git://git.blender.org/libmv.git"
+tmp=`mktemp -d`
+
+git clone -b $BRANCH $repo $tmp/libmv
+
+git --git-dir $tmp/libmv/.git --work-tree $tmp/libmv log -n 50 > ChangeLog
+
+find libmv -type f -exec rm -rf {} \;
+find third_party -type f -exec rm -rf {} \;
+
+cat "files.txt" | while read f; do
+ mkdir -p `dirname $f`
+ cp $tmp/libmv/src/$f $f
+done
+
+rm -rf $tmp
+
+sources=`find ./libmv -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | grep -v _test.cc | grep -v test_data_sets | sed -r 's/^\.\//\t\t/' | sort -d`
+headers=`find ./libmv -type f -iname '*.h' | grep -v test_data_sets | sed -r 's/^\.\//\t\t/' | sort -d`
+
+third_sources=`find ./third_party -type f -iname '*.cc' -or -iname '*.cpp' -or -iname '*.c' | sed -r 's/^\.\//\t\t/' | sort -d`
+third_headers=`find ./third_party -type f -iname '*.h' | sed -r 's/^\.\//\t\t/' | sort -d`
+
+tests=`find ./libmv -type f -iname '*_test.cc' | sort -d | awk ' { name=gensub(".*/([A-Za-z_]+)_test.cc", "\\\\1", $1); printf("\t\tBLENDER_SRC_GTEST(\"libmv_%s\" \"%s\" \"libmv_test_dataset;extern_libmv;extern_ceres\")\n", name, $1) } '`
+
+src_dir=`find ./libmv -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t\t/' | sort -d | uniq`
+src_third_dir=`find ./third_party -type f -iname '*.cc' -exec dirname {} \; -or -iname '*.cpp' -exec dirname {} \; -or -iname '*.c' -exec dirname {} \; | sed -r 's/^\.\//\t\t/' | sort -d | uniq`
+src=""
+win_src=""
+for x in $src_dir $src_third_dir; do
+ t=""
+
+ if stat $x/*.cpp > /dev/null 2>&1; then
+ t=" src += env.Glob('`echo $x'/*.cpp'`')"
+ fi
+
+ if stat $x/*.c > /dev/null 2>&1; then
+ if [ -z "$t" ]; then
+ t=" src += env.Glob('`echo $x'/*.c'`')"
+ else
+ t="$t + env.Glob('`echo $x'/*.c'`')"
+ fi
+ fi
+
+ if stat $x/*.cc > /dev/null 2>&1; then
+ if [ -z "$t" ]; then
+ t=" src += env.Glob('`echo $x'/*.cc'`')"
+ else
+ t="$t + env.Glob('`echo $x'/*.cc'`')"
+ fi
+ fi
+
+ if test `echo $x | grep -c "windows\|gflags" ` -eq 0; then
+ if [ -z "$src" ]; then
+ src=$t
+ else
+ src=`echo "$src\n$t"`
+ fi
+ else
+ if [ -z "$win_src" ]; then
+ win_src=`echo " $t"`
+ else
+ win_src=`echo "$win_src\n $t"`
+ fi
+ fi
+done
+
+cat > CMakeLists.txt << EOF
+# ***** BEGIN GPL LICENSE BLOCK *****
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software Foundation,
+# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# The Original Code is Copyright (C) 2011, Blender Foundation
+# All rights reserved.
+#
+# Contributor(s): Blender Foundation,
+# Sergey Sharybin
+#
+# ***** END GPL LICENSE BLOCK *****
+
+# NOTE: This file is automatically generated by the bundle.sh script.
+# If you make changes to this file, please update the template
+# in that script too.
+
+set(INC
+ .
+)
+
+set(INC_SYS
+)
+
+set(SRC
+ libmv-capi.h
+)
+
+if(WITH_LIBMV)
+ add_definitions(\${GFLAGS_DEFINES})
+ add_definitions(\${GLOG_DEFINES})
+ add_definitions(\${CERES_DEFINES})
+
+ list(APPEND INC
+ ../../extern/gflags/src
+ ../../extern/glog/src
+ ../../extern/ceres/include
+ ../../extern/ceres/config
+ ../guardedalloc
+ )
+
+ list(APPEND INC_SYS
+ \${EIGEN3_INCLUDE_DIRS}
+ \${PNG_INCLUDE_DIRS}
+ \${ZLIB_INCLUDE_DIRS}
+ )
+
+ add_definitions(
+ -DWITH_LIBMV_GUARDED_ALLOC
+ -DLIBMV_NO_FAST_DETECTOR=
+ )
+
+ list(APPEND SRC
+ intern/autotrack.cc
+ intern/camera_intrinsics.cc
+ intern/detector.cc
+ intern/frame_accessor.cc
+ intern/homography.cc
+ intern/image.cc
+ intern/logging.cc
+ intern/reconstruction.cc
+ intern/track_region.cc
+ intern/tracks.cc
+ intern/tracksN.cc
+${sources}
+${third_sources}
+
+ intern/autotrack.h
+ intern/camera_intrinsics.h
+ intern/detector.h
+ intern/frame_accessor.h
+ intern/homography.h
+ intern/image.h
+ intern/logging.h
+ intern/reconstruction.h
+ intern/track_region.h
+ intern/tracks.h
+ intern/tracksN.h
+${headers}
+
+${third_headers}
+ )
+
+
+ if(WITH_GTESTS)
+ blender_add_lib(libmv_test_dataset "./libmv/multiview/test_data_sets.cc" "${INC}" "${INC_SYS}")
+
+${tests}
+ endif()
+else()
+ list(APPEND SRC
+ intern/stub.cc
+ )
+endif()
+
+blender_add_lib(bf_intern_libmv "\${SRC}" "\${INC}" "\${INC_SYS}")
+EOF
diff --git a/intern/libmv/files.txt b/intern/libmv/files.txt
new file mode 100644
index 00000000000..223066bb02f
--- /dev/null
+++ b/intern/libmv/files.txt
@@ -0,0 +1,138 @@
+libmv/autotrack/autotrack.cc
+libmv/autotrack/autotrack.h
+libmv/autotrack/callbacks.h
+libmv/autotrack/frame_accessor.h
+libmv/autotrack/marker.h
+libmv/autotrack/model.h
+libmv/autotrack/predict_tracks.cc
+libmv/autotrack/predict_tracks.h
+libmv/autotrack/predict_tracks_test.cc
+libmv/autotrack/quad.h
+libmv/autotrack/reconstruction.h
+libmv/autotrack/region.h
+libmv/autotrack/tracks.cc
+libmv/autotrack/tracks.h
+libmv/autotrack/tracks_test.cc
+libmv/base/aligned_malloc.cc
+libmv/base/aligned_malloc.h
+libmv/base/id_generator.h
+libmv/base/scoped_ptr.h
+libmv/base/scoped_ptr_test.cc
+libmv/base/vector.h
+libmv/base/vector_test.cc
+libmv/base/vector_utils.h
+libmv/image/array_nd.cc
+libmv/image/array_nd.h
+libmv/image/array_nd_test.cc
+libmv/image/convolve.cc
+libmv/image/convolve.h
+libmv/image/convolve_test.cc
+libmv/image/correlation.h
+libmv/image/image_converter.h
+libmv/image/image_drawing.h
+libmv/image/image.h
+libmv/image/image_test.cc
+libmv/image/sample.h
+libmv/image/sample_test.cc
+libmv/image/tuple.h
+libmv/image/tuple_test.cc
+libmv/logging/logging.h
+libmv/multiview/conditioning.cc
+libmv/multiview/conditioning.h
+libmv/multiview/euclidean_resection.cc
+libmv/multiview/euclidean_resection.h
+libmv/multiview/euclidean_resection_test.cc
+libmv/multiview/fundamental.cc
+libmv/multiview/fundamental.h
+libmv/multiview/fundamental_test.cc
+libmv/multiview/homography.cc
+libmv/multiview/homography_error.h
+libmv/multiview/homography.h
+libmv/multiview/homography_parameterization.h
+libmv/multiview/homography_test.cc
+libmv/multiview/nviewtriangulation.h
+libmv/multiview/nviewtriangulation_test.cc
+libmv/multiview/panography.cc
+libmv/multiview/panography.h
+libmv/multiview/panography_kernel.cc
+libmv/multiview/panography_kernel.h
+libmv/multiview/panography_test.cc
+libmv/multiview/projection.cc
+libmv/multiview/projection.h
+libmv/multiview/projection_test.cc
+libmv/multiview/resection.h
+libmv/multiview/resection_test.cc
+libmv/multiview/test_data_sets.cc
+libmv/multiview/test_data_sets.h
+libmv/multiview/triangulation.cc
+libmv/multiview/triangulation.h
+libmv/multiview/triangulation_test.cc
+libmv/multiview/two_view_kernel.h
+libmv/numeric/dogleg.h
+libmv/numeric/dogleg_test.cc
+libmv/numeric/function_derivative.h
+libmv/numeric/function_derivative_test.cc
+libmv/numeric/levenberg_marquardt.h
+libmv/numeric/levenberg_marquardt_test.cc
+libmv/numeric/numeric.cc
+libmv/numeric/numeric.h
+libmv/numeric/numeric_test.cc
+libmv/numeric/poly.cc
+libmv/numeric/poly.h
+libmv/numeric/poly_test.cc
+libmv/simple_pipeline/bundle.cc
+libmv/simple_pipeline/bundle.h
+libmv/simple_pipeline/callbacks.h
+libmv/simple_pipeline/camera_intrinsics.cc
+libmv/simple_pipeline/camera_intrinsics.h
+libmv/simple_pipeline/camera_intrinsics_impl.h
+libmv/simple_pipeline/camera_intrinsics_test.cc
+libmv/simple_pipeline/detect.cc
+libmv/simple_pipeline/detect.h
+libmv/simple_pipeline/detect_test.cc
+libmv/simple_pipeline/distortion_models.cc
+libmv/simple_pipeline/distortion_models.h
+libmv/simple_pipeline/initialize_reconstruction.cc
+libmv/simple_pipeline/initialize_reconstruction.h
+libmv/simple_pipeline/intersect.cc
+libmv/simple_pipeline/intersect.h
+libmv/simple_pipeline/intersect_test.cc
+libmv/simple_pipeline/keyframe_selection.cc
+libmv/simple_pipeline/keyframe_selection.h
+libmv/simple_pipeline/keyframe_selection_test.cc
+libmv/simple_pipeline/modal_solver.cc
+libmv/simple_pipeline/modal_solver.h
+libmv/simple_pipeline/modal_solver_test.cc
+libmv/simple_pipeline/pipeline.cc
+libmv/simple_pipeline/pipeline.h
+libmv/simple_pipeline/reconstruction.cc
+libmv/simple_pipeline/reconstruction.h
+libmv/simple_pipeline/reconstruction_scale.cc
+libmv/simple_pipeline/reconstruction_scale.h
+libmv/simple_pipeline/resect.cc
+libmv/simple_pipeline/resect.h
+libmv/simple_pipeline/resect_test.cc
+libmv/simple_pipeline/tracks.cc
+libmv/simple_pipeline/tracks.h
+libmv/tracking/brute_region_tracker.cc
+libmv/tracking/brute_region_tracker.h
+libmv/tracking/brute_region_tracker_test.cc
+libmv/tracking/hybrid_region_tracker.cc
+libmv/tracking/hybrid_region_tracker.h
+libmv/tracking/kalman_filter.h
+libmv/tracking/klt_region_tracker.cc
+libmv/tracking/klt_region_tracker.h
+libmv/tracking/klt_region_tracker_test.cc
+libmv/tracking/pyramid_region_tracker.cc
+libmv/tracking/pyramid_region_tracker.h
+libmv/tracking/pyramid_region_tracker_test.cc
+libmv/tracking/region_tracker.h
+libmv/tracking/retrack_region_tracker.cc
+libmv/tracking/retrack_region_tracker.h
+libmv/tracking/track_region.cc
+libmv/tracking/track_region.h
+libmv/tracking/trklt_region_tracker.cc
+libmv/tracking/trklt_region_tracker.h
+third_party/msinttypes/inttypes.h
+third_party/msinttypes/README.libmv
+third_party/msinttypes/stdint.h
diff --git a/intern/libmv/intern/autotrack.cc b/intern/libmv/intern/autotrack.cc
new file mode 100644
index 00000000000..f0cbc68f11e
--- /dev/null
+++ b/intern/libmv/intern/autotrack.cc
@@ -0,0 +1,99 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/autotrack.h"
+#include "intern/tracksN.h"
+#include "intern/utildefines.h"
+#include "libmv/autotrack/autotrack.h"
+
+using mv::AutoTrack;
+using mv::FrameAccessor;
+using mv::Marker;
+using libmv::TrackRegionOptions;
+using libmv::TrackRegionResult;
+
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor) {
+ return (libmv_AutoTrack*) LIBMV_OBJECT_NEW(AutoTrack,
+ (FrameAccessor*) frame_accessor);
+}
+
+void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack) {
+ LIBMV_OBJECT_DELETE(libmv_autotrack, AutoTrack);
+}
+
+void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
+ const libmv_AutoTrackOptions* options) {
+ AutoTrack *autotrack = ((AutoTrack*) libmv_autotrack);
+ libmv_configureTrackRegionOptions(options->track_region,
+ &autotrack->options.track_region);
+
+ autotrack->options.search_region.min(0) = options->search_region.min[0];
+ autotrack->options.search_region.min(1) = options->search_region.min[1];
+ autotrack->options.search_region.max(0) = options->search_region.max[0];
+ autotrack->options.search_region.max(1) = options->search_region.max[1];
+}
+
+int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
+ const libmv_TrackRegionOptions* libmv_options,
+ libmv_Marker *libmv_tracked_marker,
+ libmv_TrackRegionResult* libmv_result) {
+
+ Marker tracked_marker;
+ TrackRegionOptions options;
+ TrackRegionResult result;
+ libmv_apiMarkerToMarker(*libmv_tracked_marker, &tracked_marker);
+ libmv_configureTrackRegionOptions(*libmv_options,
+ &options);
+ (((AutoTrack*) libmv_autotrack)->TrackMarker(&tracked_marker,
+ &result,
+ &options));
+ libmv_markerToApiMarker(tracked_marker, libmv_tracked_marker);
+ libmv_regionTrackergetResult(result, libmv_result);
+ return result.is_usable();
+}
+
+void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
+ const libmv_Marker* libmv_marker) {
+ Marker marker;
+ libmv_apiMarkerToMarker(*libmv_marker, &marker);
+ ((AutoTrack*) libmv_autotrack)->AddMarker(marker);
+}
+
+int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
+ int clip,
+ int frame,
+ int track,
+ libmv_Marker *libmv_marker) {
+ Marker marker;
+ int ok = ((AutoTrack*) libmv_autotrack)->GetMarker(clip,
+ frame,
+ track,
+ &marker);
+ if (ok) {
+ libmv_markerToApiMarker(marker, libmv_marker);
+ }
+ return ok;
+}
diff --git a/intern/libmv/intern/autotrack.h b/intern/libmv/intern/autotrack.h
new file mode 100644
index 00000000000..2a4a8f3c97f
--- /dev/null
+++ b/intern/libmv/intern/autotrack.h
@@ -0,0 +1,71 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_AUTOTRACK_H_
+#define LIBMV_C_API_AUTOTRACK_H_
+
+#include "intern/frame_accessor.h"
+#include "intern/tracksN.h"
+#include "intern/track_region.h"
+#include "intern/region.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_AutoTrack libmv_AutoTrack;
+
+typedef struct libmv_AutoTrackOptions {
+ libmv_TrackRegionOptions track_region;
+ libmv_Region search_region;
+} libmv_AutoTrackOptions;
+
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor *frame_accessor);
+
+void libmv_autoTrackDestroy(libmv_AutoTrack* libmv_autotrack);
+
+void libmv_autoTrackSetOptions(libmv_AutoTrack* libmv_autotrack,
+ const libmv_AutoTrackOptions* options);
+
+int libmv_autoTrackMarker(libmv_AutoTrack* libmv_autotrack,
+ const libmv_TrackRegionOptions* libmv_options,
+ libmv_Marker *libmv_tracker_marker,
+ libmv_TrackRegionResult* libmv_result);
+
+void libmv_autoTrackAddMarker(libmv_AutoTrack* libmv_autotrack,
+ const libmv_Marker* libmv_marker);
+
+int libmv_autoTrackGetMarker(libmv_AutoTrack* libmv_autotrack,
+ int clip,
+ int frame,
+ int track,
+ libmv_Marker *libmv_marker);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_AUTOTRACK_H_
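
To make the call sequence of this C-API easier to follow, here is a minimal, hypothetical usage sketch; it is not part of the patch. It assumes the libmv_FrameAccessor, the seed libmv_Marker and the libmv_TrackRegionOptions have already been prepared by the caller (their layouts live in headers not reproduced in this excerpt), and it simply mirrors the wrapper calls implemented in intern/autotrack.cc above.

    /* Minimal usage sketch, not part of the commit.  Error handling and the
     * construction of the accessor/marker/options are assumed to happen
     * elsewhere in the calling code (e.g. on the Blender side). */
    #include "intern/autotrack.h"

    int example_track_one_marker(libmv_FrameAccessor *accessor,
                                 libmv_Marker *marker,
                                 const libmv_TrackRegionOptions *options) {
      libmv_AutoTrack *autotrack = libmv_autoTrackNew(accessor);
      libmv_TrackRegionResult result;

      /* Register the reference marker, then track it; on success the marker
       * is updated in place with the refined position. */
      libmv_autoTrackAddMarker(autotrack, marker);
      const int ok = libmv_autoTrackMarker(autotrack, options, marker, &result);
      if (ok) {
        /* A real caller would typically store the refined marker back. */
        libmv_autoTrackAddMarker(autotrack, marker);
      }

      libmv_autoTrackDestroy(autotrack);
      return ok;
    }
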
diff --git a/intern/libmv/intern/camera_intrinsics.cc b/intern/libmv/intern/camera_intrinsics.cc
new file mode 100644
index 00000000000..0ce757cc93b
--- /dev/null
+++ b/intern/libmv/intern/camera_intrinsics.cc
@@ -0,0 +1,355 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/camera_intrinsics.h"
+#include "intern/utildefines.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+using libmv::CameraIntrinsics;
+using libmv::DivisionCameraIntrinsics;
+using libmv::PolynomialCameraIntrinsics;
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
+ CameraIntrinsics *camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+ return (libmv_CameraIntrinsics *) camera_intrinsics;
+}
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+ const libmv_CameraIntrinsics* libmv_intrinsics) {
+ const CameraIntrinsics *orig_intrinsics =
+ (const CameraIntrinsics *) libmv_intrinsics;
+
+ CameraIntrinsics *new_intrinsics = NULL;
+ switch (orig_intrinsics->GetDistortionModelType()) {
+ case libmv::DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
+ new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
+ *polynomial_intrinsics);
+ break;
+ }
+ case libmv::DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
+ new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
+ *division_intrinsics);
+ break;
+ }
+ default:
+ assert(!"Unknown distortion model");
+ }
+ return (libmv_CameraIntrinsics *) new_intrinsics;
+}
+
+void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics) {
+ LIBMV_OBJECT_DELETE(libmv_intrinsics, CameraIntrinsics);
+}
+
+void libmv_cameraIntrinsicsUpdate(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ libmv_CameraIntrinsics* libmv_intrinsics) {
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+
+ double focal_length = libmv_camera_intrinsics_options->focal_length;
+ double principal_x = libmv_camera_intrinsics_options->principal_point_x;
+ double principal_y = libmv_camera_intrinsics_options->principal_point_y;
+ int image_width = libmv_camera_intrinsics_options->image_width;
+ int image_height = libmv_camera_intrinsics_options->image_height;
+
+  /* Try to avoid unnecessary updates, so that pre-computed distortion grids
+   * are not freed.
+   */
+
+ if (camera_intrinsics->focal_length() != focal_length) {
+ camera_intrinsics->SetFocalLength(focal_length, focal_length);
+ }
+
+ if (camera_intrinsics->principal_point_x() != principal_x ||
+ camera_intrinsics->principal_point_y() != principal_y) {
+ camera_intrinsics->SetPrincipalPoint(principal_x, principal_y);
+ }
+
+ if (camera_intrinsics->image_width() != image_width ||
+ camera_intrinsics->image_height() != image_height) {
+ camera_intrinsics->SetImageSize(image_width, image_height);
+ }
+
+ switch (libmv_camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_POLYNOMIAL);
+
+ PolynomialCameraIntrinsics *polynomial_intrinsics =
+ (PolynomialCameraIntrinsics *) camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->polynomial_k1;
+ double k2 = libmv_camera_intrinsics_options->polynomial_k2;
+ double k3 = libmv_camera_intrinsics_options->polynomial_k3;
+
+ if (polynomial_intrinsics->k1() != k1 ||
+ polynomial_intrinsics->k2() != k2 ||
+ polynomial_intrinsics->k3() != k3) {
+ polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
+ }
+ break;
+ }
+
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_DIVISION);
+
+ DivisionCameraIntrinsics *division_intrinsics =
+ (DivisionCameraIntrinsics *) camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->division_k1;
+ double k2 = libmv_camera_intrinsics_options->division_k2;
+
+ if (division_intrinsics->k1() != k1 ||
+ division_intrinsics->k2() != k2) {
+ division_intrinsics->SetDistortion(k1, k2);
+ }
+
+ break;
+ }
+
+ default:
+ assert(!"Unknown distortion model");
+ }
+}
+
+void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
+ int threads) {
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ camera_intrinsics->SetThreads(threads);
+}
+
+void libmv_cameraIntrinsicsExtractOptions(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
+ const CameraIntrinsics *camera_intrinsics =
+ (const CameraIntrinsics *) libmv_intrinsics;
+
+  // Fill in options which are common to all distortion models.
+ camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
+ camera_intrinsics_options->principal_point_x =
+ camera_intrinsics->principal_point_x();
+ camera_intrinsics_options->principal_point_y =
+ camera_intrinsics->principal_point_y();
+
+ camera_intrinsics_options->image_width = camera_intrinsics->image_width();
+ camera_intrinsics_options->image_height = camera_intrinsics->image_height();
+
+ switch (camera_intrinsics->GetDistortionModelType()) {
+ case libmv::DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
+ LIBMV_DISTORTION_MODEL_POLYNOMIAL;
+ camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
+ camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
+ camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
+ camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
+      camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
+ break;
+ }
+
+ case libmv::DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
+ LIBMV_DISTORTION_MODEL_DIVISION;
+ camera_intrinsics_options->division_k1 = division_intrinsics->k1();
+ camera_intrinsics_options->division_k2 = division_intrinsics->k2();
+ break;
+ }
+
+ default:
+ assert(!"Uknown distortion model");
+ }
+}
+
+void libmv_cameraIntrinsicsUndistortByte(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ const unsigned char *source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ unsigned char* destination_image) {
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ camera_intrinsics->UndistortBuffer(source_image,
+ width, height,
+ overscan,
+ channels,
+ destination_image);
+}
+
+void libmv_cameraIntrinsicsUndistortFloat(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ const float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image) {
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ intrinsics->UndistortBuffer(source_image,
+ width, height,
+ overscan,
+ channels,
+ destination_image);
+}
+
+void libmv_cameraIntrinsicsDistortByte(
+ const struct libmv_CameraIntrinsics* libmv_intrinsics,
+ const unsigned char *source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ unsigned char *destination_image) {
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ intrinsics->DistortBuffer(source_image,
+ width, height,
+ overscan,
+ channels,
+ destination_image);
+}
+
+void libmv_cameraIntrinsicsDistortFloat(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image) {
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ intrinsics->DistortBuffer(source_image,
+ width, height,
+ overscan,
+ channels,
+ destination_image);
+}
+
+void libmv_cameraIntrinsicsApply(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1) {
+  /* Only apply lens distortion when the focal length is non-zero. */
+ if (libmv_camera_intrinsics_options->focal_length) {
+ CameraIntrinsics* camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+ camera_intrinsics->ApplyIntrinsics(x, y, x1, y1);
+ LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
+ }
+}
+
+void libmv_cameraIntrinsicsInvert(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1) {
+  /* Only apply lens un-distortion when the focal length is non-zero. */
+ if (libmv_camera_intrinsics_options->focal_length) {
+ CameraIntrinsics *camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+ camera_intrinsics->InvertIntrinsics(x, y, x1, y1);
+ LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
+ }
+}
+
+static void libmv_cameraIntrinsicsFillFromOptions(
+ const libmv_CameraIntrinsicsOptions* camera_intrinsics_options,
+ CameraIntrinsics* camera_intrinsics) {
+ camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
+ camera_intrinsics_options->focal_length);
+
+ camera_intrinsics->SetPrincipalPoint(
+ camera_intrinsics_options->principal_point_x,
+ camera_intrinsics_options->principal_point_y);
+
+ camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
+ camera_intrinsics_options->image_height);
+
+ switch (camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ {
+ PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
+
+ polynomial_intrinsics->SetRadialDistortion(
+ camera_intrinsics_options->polynomial_k1,
+ camera_intrinsics_options->polynomial_k2,
+ camera_intrinsics_options->polynomial_k3);
+
+ break;
+ }
+
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ {
+ DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
+
+ division_intrinsics->SetDistortion(
+ camera_intrinsics_options->division_k1,
+ camera_intrinsics_options->division_k2);
+ break;
+ }
+
+ default:
+ assert(!"Unknown distortion model");
+ }
+}
+
+CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
+ const libmv_CameraIntrinsicsOptions* camera_intrinsics_options) {
+ CameraIntrinsics *camera_intrinsics = NULL;
+ switch (camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
+ break;
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ camera_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics);
+ break;
+ default:
+ assert(!"Unknown distortion model");
+ }
+ libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options,
+ camera_intrinsics);
+ return camera_intrinsics;
+}
diff --git a/intern/libmv/intern/camera_intrinsics.h b/intern/libmv/intern/camera_intrinsics.h
new file mode 100644
index 00000000000..9910d16a108
--- /dev/null
+++ b/intern/libmv/intern/camera_intrinsics.h
@@ -0,0 +1,138 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_CAMERA_INTRINSICS_H_
+#define LIBMV_C_API_CAMERA_INTRINSICS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_CameraIntrinsics libmv_CameraIntrinsics;
+
+enum {
+ LIBMV_DISTORTION_MODEL_POLYNOMIAL = 0,
+ LIBMV_DISTORTION_MODEL_DIVISION = 1,
+};
+
+typedef struct libmv_CameraIntrinsicsOptions {
+ // Common settings of all distortion models.
+ int distortion_model;
+ int image_width, image_height;
+ double focal_length;
+ double principal_point_x, principal_point_y;
+
+ // Radial distortion model.
+ double polynomial_k1, polynomial_k2, polynomial_k3;
+ double polynomial_p1, polynomial_p2;
+
+ // Division distortion model.
+ double division_k1, division_k2;
+} libmv_CameraIntrinsicsOptions;
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options);
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+ const libmv_CameraIntrinsics* libmv_intrinsics);
+
+void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics* libmv_intrinsics);
+void libmv_cameraIntrinsicsUpdate(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ libmv_CameraIntrinsics* libmv_intrinsics);
+
+void libmv_cameraIntrinsicsSetThreads(libmv_CameraIntrinsics* libmv_intrinsics,
+ int threads);
+
+void libmv_cameraIntrinsicsExtractOptions(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ libmv_CameraIntrinsicsOptions* camera_intrinsics_options);
+
+void libmv_cameraIntrinsicsUndistortByte(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ const unsigned char *source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ unsigned char* destination_image);
+
+void libmv_cameraIntrinsicsUndistortFloat(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ const float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image);
+
+void libmv_cameraIntrinsicsDistortByte(
+ const struct libmv_CameraIntrinsics* libmv_intrinsics,
+ const unsigned char *source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ unsigned char *destination_image);
+
+void libmv_cameraIntrinsicsDistortFloat(
+ const libmv_CameraIntrinsics* libmv_intrinsics,
+ float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image);
+
+void libmv_cameraIntrinsicsApply(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1);
+
+void libmv_cameraIntrinsicsInvert(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1);
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef __cplusplus
+
+namespace libmv {
+ class CameraIntrinsics;
+}
+
+libmv::CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
+ const libmv_CameraIntrinsicsOptions* camera_intrinsics_options);
+#endif
+
+#endif // LIBMV_C_API_CAMERA_INTRINSICS_H_
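As a usage sketch (the numeric values below are arbitrary placeholders): fill the options struct, push a normalized coordinate through the distortion model and back, then exercise the object-based API.

#include "intern/camera_intrinsics.h"

void example_intrinsics_usage(void) {
  libmv_CameraIntrinsicsOptions options = {0};
  options.distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
  options.image_width = 1920;
  options.image_height = 1080;
  options.focal_length = 1000.0;
  options.principal_point_x = 960.0;
  options.principal_point_y = 540.0;
  options.polynomial_k1 = -0.1;

  /* Apply the intrinsics to a normalized coordinate, then invert again;
   * both calls are skipped when focal_length is zero. */
  double px, py, nx, ny;
  libmv_cameraIntrinsicsApply(&options, 0.25, 0.25, &px, &py);
  libmv_cameraIntrinsicsInvert(&options, px, py, &nx, &ny);

  /* The object-based API carries the same options. */
  libmv_CameraIntrinsics *intrinsics = libmv_cameraIntrinsicsNew(&options);
  libmv_cameraIntrinsicsSetThreads(intrinsics, 4);
  libmv_cameraIntrinsicsDestroy(intrinsics);
}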
diff --git a/intern/libmv/intern/detector.cc b/intern/libmv/intern/detector.cc
new file mode 100644
index 00000000000..8abc9014115
--- /dev/null
+++ b/intern/libmv/intern/detector.cc
@@ -0,0 +1,148 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/detector.h"
+#include "intern/image.h"
+#include "intern/utildefines.h"
+#include "libmv/simple_pipeline/detect.h"
+
+using libmv::Detect;
+using libmv::DetectOptions;
+using libmv::Feature;
+using libmv::FloatImage;
+
+struct libmv_Features {
+ int count;
+ Feature* features;
+};
+
+namespace {
+
+libmv_Features *libmv_featuresFromVector(
+ const libmv::vector<Feature>& features) {
+ libmv_Features* libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
+ int count = features.size();
+ if (count) {
+ libmv_features->features = LIBMV_STRUCT_NEW(Feature, count);
+ for (int i = 0; i < count; i++) {
+ libmv_features->features[i] = features.at(i);
+ }
+ } else {
+ libmv_features->features = NULL;
+ }
+ libmv_features->count = count;
+ return libmv_features;
+}
+
+void libmv_convertDetectorOptions(libmv_DetectOptions *options,
+ DetectOptions *detector_options) {
+ switch (options->detector) {
+#define LIBMV_CONVERT(the_detector) \
+ case LIBMV_DETECTOR_ ## the_detector: \
+ detector_options->type = DetectOptions::the_detector; \
+ break;
+ LIBMV_CONVERT(FAST)
+ LIBMV_CONVERT(MORAVEC)
+ LIBMV_CONVERT(HARRIS)
+#undef LIBMV_CONVERT
+ }
+ detector_options->margin = options->margin;
+ detector_options->min_distance = options->min_distance;
+ detector_options->fast_min_trackness = options->fast_min_trackness;
+ detector_options->moravec_max_count = options->moravec_max_count;
+ detector_options->moravec_pattern = options->moravec_pattern;
+ detector_options->harris_threshold = options->harris_threshold;
+}
+
+} // namespace
+
+libmv_Features *libmv_detectFeaturesByte(const unsigned char* image_buffer,
+ int width,
+ int height,
+ int channels,
+ libmv_DetectOptions* options) {
+ // Prepare the image.
+ FloatImage image;
+ libmv_byteBufferToFloatImage(image_buffer, width, height, channels, &image);
+
+ // Configure detector.
+ DetectOptions detector_options;
+ libmv_convertDetectorOptions(options, &detector_options);
+
+ // Run the detector.
+ libmv::vector<Feature> detected_features;
+ Detect(image, detector_options, &detected_features);
+
+ // Convert result to C-API.
+ libmv_Features* result = libmv_featuresFromVector(detected_features);
+ return result;
+}
+
+libmv_Features* libmv_detectFeaturesFloat(const float* image_buffer,
+ int width,
+ int height,
+ int channels,
+ libmv_DetectOptions* options) {
+ // Prepare the image.
+ FloatImage image;
+ libmv_floatBufferToFloatImage(image_buffer, width, height, channels, &image);
+
+ // Configure detector.
+ DetectOptions detector_options;
+ libmv_convertDetectorOptions(options, &detector_options);
+
+ // Run the detector.
+ libmv::vector<Feature> detected_features;
+ Detect(image, detector_options, &detected_features);
+
+ // Convert result to C-API.
+ libmv_Features* result = libmv_featuresFromVector(detected_features);
+ return result;
+}
+
+void libmv_featuresDestroy(libmv_Features* libmv_features) {
+ if (libmv_features->features) {
+ LIBMV_STRUCT_DELETE(libmv_features->features);
+ }
+ LIBMV_STRUCT_DELETE(libmv_features);
+}
+
+int libmv_countFeatures(const libmv_Features* libmv_features) {
+ return libmv_features->count;
+}
+
+void libmv_getFeature(const libmv_Features* libmv_features,
+ int number,
+ double* x,
+ double* y,
+ double* score,
+ double* size) {
+ Feature &feature = libmv_features->features[number];
+ *x = feature.x;
+ *y = feature.y;
+ *score = feature.score;
+ *size = feature.size;
+}
diff --git a/intern/libmv/intern/detector.h b/intern/libmv/intern/detector.h
new file mode 100644
index 00000000000..f72b0dd8d6e
--- /dev/null
+++ b/intern/libmv/intern/detector.h
@@ -0,0 +1,77 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_DETECTOR_H_
+#define LIBMV_C_API_DETECTOR_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_Features libmv_Features;
+
+enum {
+ LIBMV_DETECTOR_FAST,
+ LIBMV_DETECTOR_MORAVEC,
+ LIBMV_DETECTOR_HARRIS,
+};
+
+typedef struct libmv_DetectOptions {
+ int detector;
+ int margin;
+ int min_distance;
+ int fast_min_trackness;
+ int moravec_max_count;
+ unsigned char *moravec_pattern;
+ double harris_threshold;
+} libmv_DetectOptions;
+
+libmv_Features* libmv_detectFeaturesByte(const unsigned char* image_buffer,
+ int width,
+ int height,
+ int channels,
+ libmv_DetectOptions* options);
+
+libmv_Features* libmv_detectFeaturesFloat(const float* image_buffer,
+ int width,
+ int height,
+ int channels,
+ libmv_DetectOptions* options);
+
+void libmv_featuresDestroy(libmv_Features* libmv_features);
+int libmv_countFeatures(const libmv_Features* libmv_features);
+void libmv_getFeature(const libmv_Features* libmv_features,
+ int number,
+ double* x,
+ double* y,
+ double* score,
+ double* size);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_DETECTOR_H_
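A sketch of a typical detection call on a caller-provided single-channel float buffer; the option values are illustrative, not tuned defaults.

#include <stddef.h>

#include "intern/detector.h"

void example_detect_features(const float *gray_buffer, int width, int height) {
  libmv_DetectOptions options;
  options.detector = LIBMV_DETECTOR_HARRIS;
  options.margin = 16;
  options.min_distance = 20;
  options.fast_min_trackness = 0;
  options.moravec_max_count = 0;
  options.moravec_pattern = NULL;
  options.harris_threshold = 1e-5;

  libmv_Features *features =
      libmv_detectFeaturesFloat(gray_buffer, width, height, 1, &options);

  int count = libmv_countFeatures(features);
  for (int i = 0; i < count; i++) {
    double x, y, score, size;
    libmv_getFeature(features, i, &x, &y, &score, &size);
    /* (x, y) is a candidate track position; score and size help filtering. */
  }

  libmv_featuresDestroy(features);
}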
diff --git a/intern/libmv/intern/frame_accessor.cc b/intern/libmv/intern/frame_accessor.cc
new file mode 100644
index 00000000000..8bf2cab914b
--- /dev/null
+++ b/intern/libmv/intern/frame_accessor.cc
@@ -0,0 +1,164 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/frame_accessor.h"
+#include "intern/image.h"
+#include "intern/utildefines.h"
+#include "libmv/autotrack/frame_accessor.h"
+#include "libmv/autotrack/region.h"
+#include "libmv/image/image.h"
+
+namespace {
+
+using libmv::FloatImage;
+using mv::FrameAccessor;
+using mv::Region;
+
+struct LibmvFrameAccessor : public FrameAccessor {
+ LibmvFrameAccessor(libmv_FrameAccessorUserData* user_data,
+ libmv_GetImageCallback get_image_callback,
+ libmv_ReleaseImageCallback release_image_callback)
+ : user_data_(user_data),
+ get_image_callback_(get_image_callback),
+ release_image_callback_(release_image_callback) { }
+
+ libmv_InputMode get_libmv_input_mode(InputMode input_mode) {
+ switch (input_mode) {
+#define CHECK_INPUT_MODE(mode) \
+ case mode: \
+ return LIBMV_IMAGE_MODE_ ## mode;
+ CHECK_INPUT_MODE(MONO)
+ CHECK_INPUT_MODE(RGBA)
+#undef CHECK_INPUT_MODE
+ }
+ assert(!"unknown input mode passed from Libmv.");
+ // TODO(sergey): Proper error handling here in the future.
+ return LIBMV_IMAGE_MODE_MONO;
+ }
+
+ void get_libmv_region(const Region& region,
+ libmv_Region* libmv_region) {
+ libmv_region->min[0] = region.min(0);
+ libmv_region->min[1] = region.min(1);
+ libmv_region->max[0] = region.max(0);
+ libmv_region->max[1] = region.max(1);
+ }
+
+ Key GetImage(int clip,
+ int frame,
+ InputMode input_mode,
+ int downscale,
+ const Region* region,
+ const Transform* transform,
+ FloatImage* destination) {
+ float *float_buffer;
+ int width, height, channels;
+ libmv_Region libmv_region;
+ if (region) {
+ get_libmv_region(*region, &libmv_region);
+ }
+ Key cache_key = get_image_callback_(user_data_,
+ clip,
+ frame,
+ get_libmv_input_mode(input_mode),
+ downscale,
+ region != NULL ? &libmv_region : NULL,
+ (libmv_FrameTransform*) transform,
+ &float_buffer,
+ &width,
+ &height,
+ &channels);
+
+    // TODO(sergey): Simple buffer copy until we can set the data directly.
+ FloatImage temp_image(float_buffer,
+ height,
+ width,
+ channels);
+ destination->CopyFrom(temp_image);
+
+ return cache_key;
+ }
+
+ void ReleaseImage(Key cache_key) {
+ release_image_callback_(cache_key);
+ }
+
+ bool GetClipDimensions(int clip, int *width, int *height) {
+ return false;
+ }
+
+ int NumClips() {
+ return 1;
+ }
+
+ int NumFrames(int clip) {
+ return 0;
+ }
+
+ libmv_FrameAccessorUserData* user_data_;
+ libmv_GetImageCallback get_image_callback_;
+ libmv_ReleaseImageCallback release_image_callback_;
+};
+
+} // namespace
+
+libmv_FrameAccessor* libmv_FrameAccessorNew(
+ libmv_FrameAccessorUserData* user_data,
+ libmv_GetImageCallback get_image_callback,
+ libmv_ReleaseImageCallback release_image_callback) {
+ return (libmv_FrameAccessor*) LIBMV_OBJECT_NEW(LibmvFrameAccessor,
+ user_data,
+ get_image_callback,
+ release_image_callback);
+}
+
+void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor) {
+ LIBMV_OBJECT_DELETE(frame_accessor, LibmvFrameAccessor);
+}
+
+int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform) {
+ return ((FrameAccessor::Transform*) transform)->key();
+}
+
+void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
+ const libmv_FloatImage *input_image,
+ libmv_FloatImage *output_image) {
+ const FloatImage input(input_image->buffer,
+ input_image->height,
+ input_image->width,
+ input_image->channels);
+
+ FloatImage output;
+ ((FrameAccessor::Transform*) transform)->run(input,
+ &output);
+
+  int num_pixels = output.Width() * output.Height() * output.Depth();
+ output_image->buffer = new float[num_pixels];
+ memcpy(output_image->buffer, output.Data(), num_pixels * sizeof(float));
+ output_image->width = output.Width();
+ output_image->height = output.Height();
+ output_image->channels = output.Depth();
+}
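One ownership detail follows from the implementation above: libmv_frameAccessorgetTransformRun() allocates output_image->buffer with new[], so the caller releases it through libmv_floatImageDestroy() from intern/image.h. A minimal sketch, assuming 'transform' and 'input' are valid handles handed over by Libmv:

#include "intern/frame_accessor.h"
#include "intern/image.h"

void example_run_transform(const libmv_FrameTransform *transform,
                           const libmv_FloatImage *input) {
  libmv_FloatImage output;
  libmv_frameAccessorgetTransformRun(transform, input, &output);

  /* output.buffer now holds width * height * channels floats. */

  libmv_floatImageDestroy(&output);
}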
diff --git a/intern/libmv/intern/frame_accessor.h b/intern/libmv/intern/frame_accessor.h
new file mode 100644
index 00000000000..3e813fe7581
--- /dev/null
+++ b/intern/libmv/intern/frame_accessor.h
@@ -0,0 +1,79 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_FRAME_ACCESSOR_H_
+#define LIBMV_C_API_FRAME_ACCESSOR_H_
+
+#include <stdint.h>
+
+#include "intern/image.h"
+#include "intern/region.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_FrameAccessor libmv_FrameAccessor;
+typedef struct libmv_FrameTransform libmv_FrameTransform;
+typedef struct libmv_FrameAccessorUserData libmv_FrameAccessorUserData;
+typedef void *libmv_CacheKey;
+
+typedef enum {
+ LIBMV_IMAGE_MODE_MONO,
+ LIBMV_IMAGE_MODE_RGBA,
+} libmv_InputMode;
+
+typedef libmv_CacheKey (*libmv_GetImageCallback) (
+ libmv_FrameAccessorUserData* user_data,
+ int clip,
+ int frame,
+ libmv_InputMode input_mode,
+ int downscale,
+ const libmv_Region* region,
+ const libmv_FrameTransform* transform,
+ float** destination,
+ int* width,
+ int* height,
+ int* channels);
+
+typedef void (*libmv_ReleaseImageCallback) (libmv_CacheKey cache_key);
+
+libmv_FrameAccessor* libmv_FrameAccessorNew(
+ libmv_FrameAccessorUserData* user_data,
+ libmv_GetImageCallback get_image_callback,
+ libmv_ReleaseImageCallback release_image_callback);
+void libmv_FrameAccessorDestroy(libmv_FrameAccessor* frame_accessor);
+
+int64_t libmv_frameAccessorgetTransformKey(const libmv_FrameTransform *transform);
+
+void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform *transform,
+ const libmv_FloatImage *input_image,
+ libmv_FloatImage *output_image);
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_FRAME_ACCESSOR_H_
diff --git a/intern/libmv/intern/homography.cc b/intern/libmv/intern/homography.cc
new file mode 100644
index 00000000000..6b61bd9ab42
--- /dev/null
+++ b/intern/libmv/intern/homography.cc
@@ -0,0 +1,59 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/homography.h"
+#include "intern/utildefines.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/homography.h"
+
+void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
+ /* const */ double (*x2)[2],
+ int num_points,
+ double H[3][3]) {
+ libmv::Mat x1_mat, x2_mat;
+ libmv::Mat3 H_mat;
+
+ x1_mat.resize(2, num_points);
+ x2_mat.resize(2, num_points);
+
+ for (int i = 0; i < num_points; i++) {
+ x1_mat.col(i) = libmv::Vec2(x1[i][0], x1[i][1]);
+ x2_mat.col(i) = libmv::Vec2(x2[i][0], x2[i][1]);
+ }
+
+ LG << "x1: " << x1_mat;
+ LG << "x2: " << x2_mat;
+
+ libmv::EstimateHomographyOptions options;
+ libmv::EstimateHomography2DFromCorrespondences(x1_mat,
+ x2_mat,
+ options,
+ &H_mat);
+
+ LG << "H: " << H_mat;
+
+ memcpy(H, H_mat.data(), 9 * sizeof(double));
+}
diff --git a/intern/libmv/intern/homography.h b/intern/libmv/intern/homography.h
new file mode 100644
index 00000000000..175108e8171
--- /dev/null
+++ b/intern/libmv/intern/homography.h
@@ -0,0 +1,43 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_HOMOGRAPHY_H_
+#define LIBMV_C_API_HOMOGRAPHY_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
+ /* const */ double (*x2)[2],
+ int num_points,
+ double H[3][3]);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_HOMOGRAPHY_H_
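A sketch with four made-up correspondences; a translation of the unit square is enough to exercise the estimator.

#include "intern/homography.h"

void example_homography(void) {
  /* Corners of the unit square and the same square shifted by (0.1, 0.1). */
  double x1[4][2] = {{0.0, 0.0}, {1.0, 0.0}, {1.0, 1.0}, {0.0, 1.0}};
  double x2[4][2] = {{0.1, 0.1}, {1.1, 0.1}, {1.1, 1.1}, {0.1, 1.1}};
  double H[3][3];

  libmv_homography2DFromCorrespondencesEuc(x1, x2, 4, H);

  /* H now holds the estimated 3x3 homography mapping x1 points onto x2. */
}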
diff --git a/intern/libmv/intern/image.cc b/intern/libmv/intern/image.cc
new file mode 100644
index 00000000000..5018caef5b1
--- /dev/null
+++ b/intern/libmv/intern/image.cc
@@ -0,0 +1,272 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/image.h"
+#include "intern/utildefines.h"
+#include "libmv/tracking/track_region.h"
+
+#include <cassert>
+#include <png.h>
+
+using libmv::FloatImage;
+using libmv::SamplePlanarPatch;
+
+void libmv_floatImageDestroy(libmv_FloatImage *image) {
+ delete [] image->buffer;
+}
+
+/* Image <-> buffers conversion */
+
+void libmv_byteBufferToFloatImage(const unsigned char* buffer,
+ int width,
+ int height,
+ int channels,
+ FloatImage* image) {
+ image->Resize(height, width, channels);
+ for (int y = 0, a = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ for (int k = 0; k < channels; k++) {
+ (*image)(y, x, k) = (float)buffer[a++] / 255.0f;
+ }
+ }
+ }
+}
+
+void libmv_floatBufferToFloatImage(const float* buffer,
+ int width,
+ int height,
+ int channels,
+ FloatImage* image) {
+ image->Resize(height, width, channels);
+ for (int y = 0, a = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ for (int k = 0; k < channels; k++) {
+ (*image)(y, x, k) = buffer[a++];
+ }
+ }
+ }
+}
+
+void libmv_floatImageToFloatBuffer(const FloatImage &image,
+ float* buffer) {
+ for (int y = 0, a = 0; y < image.Height(); y++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int k = 0; k < image.Depth(); k++) {
+ buffer[a++] = image(y, x, k);
+ }
+ }
+ }
+}
+
+void libmv_floatImageToByteBuffer(const libmv::FloatImage &image,
+ unsigned char* buffer) {
+  for (int y = 0, a = 0; y < image.Height(); y++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int k = 0; k < image.Depth(); k++) {
+ buffer[a++] = image(y, x, k) * 255.0f;
+ }
+ }
+ }
+}
+
+static bool savePNGImage(png_bytep* row_pointers,
+ int width,
+ int height,
+ int depth,
+ int color_type,
+ const char* file_name) {
+ png_infop info_ptr;
+ png_structp png_ptr;
+ FILE *fp = fopen(file_name, "wb");
+
+ if (fp == NULL) {
+ return false;
+ }
+
+  /* Initialize the PNG write and info structures. */
+ png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+ info_ptr = png_create_info_struct(png_ptr);
+
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return false;
+ }
+
+ png_init_io(png_ptr, fp);
+
+ /* Write PNG header. */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return false;
+ }
+
+ png_set_IHDR(png_ptr,
+ info_ptr,
+ width,
+ height,
+ depth,
+ color_type,
+ PNG_INTERLACE_NONE,
+ PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+
+ png_write_info(png_ptr, info_ptr);
+
+  /* Write bytes. */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return false;
+ }
+
+ png_write_image(png_ptr, row_pointers);
+
+  /* End write. */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return false;
+ }
+
+ png_write_end(png_ptr, NULL);
+ fclose(fp);
+
+ return true;
+}
+
+bool libmv_saveImage(const FloatImage& image,
+ const char* prefix,
+ int x0,
+ int y0) {
+ int x, y;
+ png_bytep *row_pointers;
+
+ assert(image.Depth() == 1);
+
+ row_pointers = new png_bytep[image.Height()];
+
+ for (y = 0; y < image.Height(); y++) {
+ row_pointers[y] = new png_byte[4 * image.Width()];
+
+ for (x = 0; x < image.Width(); x++) {
+ if (x0 == x && image.Height() - y0 - 1 == y) {
+ row_pointers[y][x * 4 + 0] = 255;
+ row_pointers[y][x * 4 + 1] = 0;
+ row_pointers[y][x * 4 + 2] = 0;
+ row_pointers[y][x * 4 + 3] = 255;
+ } else {
+ float pixel = image(image.Height() - y - 1, x, 0);
+ row_pointers[y][x * 4 + 0] = pixel * 255;
+ row_pointers[y][x * 4 + 1] = pixel * 255;
+ row_pointers[y][x * 4 + 2] = pixel * 255;
+ row_pointers[y][x * 4 + 3] = 255;
+ }
+ }
+ }
+
+ static int image_counter = 0;
+ char file_name[128];
+ snprintf(file_name, sizeof(file_name),
+ "%s_%02d.png",
+ prefix, ++image_counter);
+ bool result = savePNGImage(row_pointers,
+ image.Width(),
+ image.Height(),
+ 8,
+ PNG_COLOR_TYPE_RGBA,
+ file_name);
+
+ for (y = 0; y < image.Height(); y++) {
+ delete [] row_pointers[y];
+ }
+ delete [] row_pointers;
+
+ return result;
+}
+
+void libmv_samplePlanarPatchFloat(const float* image,
+ int width,
+ int height,
+ int channels,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ const float* mask,
+ float* patch,
+ double* warped_position_x,
+ double* warped_position_y) {
+ FloatImage libmv_image, libmv_patch, libmv_mask;
+ FloatImage *libmv_mask_for_sample = NULL;
+
+ libmv_floatBufferToFloatImage(image, width, height, channels, &libmv_image);
+
+ if (mask) {
+ libmv_floatBufferToFloatImage(mask, width, height, 1, &libmv_mask);
+ libmv_mask_for_sample = &libmv_mask;
+ }
+
+ SamplePlanarPatch(libmv_image,
+ xs, ys,
+ num_samples_x, num_samples_y,
+ libmv_mask_for_sample,
+ &libmv_patch,
+ warped_position_x,
+ warped_position_y);
+
+ libmv_floatImageToFloatBuffer(libmv_patch, patch);
+}
+
+void libmv_samplePlanarPatchByte(const unsigned char* image,
+ int width,
+ int height,
+ int channels,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ const float* mask,
+ unsigned char* patch,
+ double* warped_position_x,
+ double* warped_position_y) {
+ libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
+ libmv::FloatImage *libmv_mask_for_sample = NULL;
+
+ libmv_byteBufferToFloatImage(image, width, height, channels, &libmv_image);
+
+ if (mask) {
+ libmv_floatBufferToFloatImage(mask, width, height, 1, &libmv_mask);
+ libmv_mask_for_sample = &libmv_mask;
+ }
+
+ libmv::SamplePlanarPatch(libmv_image,
+ xs, ys,
+ num_samples_x, num_samples_y,
+ libmv_mask_for_sample,
+ &libmv_patch,
+ warped_position_x,
+ warped_position_y);
+
+ libmv_floatImageToByteBuffer(libmv_patch, patch);
+}
diff --git a/intern/libmv/intern/image.h b/intern/libmv/intern/image.h
new file mode 100644
index 00000000000..1213943aac4
--- /dev/null
+++ b/intern/libmv/intern/image.h
@@ -0,0 +1,99 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_IMAGE_H_
+#define LIBMV_IMAGE_H_
+
+#ifdef __cplusplus
+# include "libmv/image/image.h"
+void libmv_byteBufferToFloatImage(const unsigned char* buffer,
+ int width,
+ int height,
+ int channels,
+ libmv::FloatImage* image);
+
+void libmv_floatBufferToFloatImage(const float* buffer,
+ int width,
+ int height,
+ int channels,
+ libmv::FloatImage* image);
+
+void libmv_floatImageToFloatBuffer(const libmv::FloatImage& image,
+ float *buffer);
+
+void libmv_floatImageToByteBuffer(const libmv::FloatImage& image,
+ unsigned char* buffer);
+
+bool libmv_saveImage(const libmv::FloatImage& image,
+ const char* prefix,
+ int x0,
+ int y0);
+#endif // __cplusplus
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_FloatImage {
+ float *buffer;
+ int width;
+ int height;
+ int channels;
+} libmv_FloatImage;
+
+void libmv_floatImageDestroy(libmv_FloatImage *image);
+
+void libmv_samplePlanarPatchFloat(const float* image,
+ int width,
+ int height,
+ int channels,
+ const double* xs,
+ const double* ys,
+ int num_samples_x,
+ int num_samples_y,
+ const float* mask,
+ float* patch,
+ double* warped_position_x,
+ double* warped_position_y);
+
+void libmv_samplePlanarPatchByte(const unsigned char* image,
+                                 int width,
+                                 int height,
+                                 int channels,
+                                 const double* xs,
+                                 const double* ys,
+                                 int num_samples_x,
+                                 int num_samples_y,
+                                 const float* mask,
+                                 unsigned char* patch,
+                                 double* warped_position_x,
+                                 double* warped_position_y);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_IMAGE_H_
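The C++ half of this header is handy for debugging: a sketch that wraps a caller-provided grayscale byte buffer into a libmv::FloatImage and dumps it through the PNG helper above (the file prefix is arbitrary).

#include "intern/image.h"

void example_dump_grayscale(const unsigned char* buffer, int width, int height) {
  libmv::FloatImage image;
  // Bytes are rescaled to [0, 1] floats by the converter above.
  libmv_byteBufferToFloatImage(buffer, width, height, 1, &image);

  // Writes debug_patch_NN.png into the current working directory and marks
  // the pixel at (x0, y0) in red.
  libmv_saveImage(image, "debug_patch", /*x0=*/0, /*y0=*/0);
}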
diff --git a/intern/libmv/intern/logging.cc b/intern/libmv/intern/logging.cc
new file mode 100644
index 00000000000..77b56ef4df3
--- /dev/null
+++ b/intern/libmv/intern/logging.cc
@@ -0,0 +1,55 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/logging.h"
+#include "intern/utildefines.h"
+#include "libmv/logging/logging.h"
+
+void libmv_initLogging(const char* argv0) {
+  // Make it so FATAL messages are always printed to the console.
+ char severity_fatal[32];
+ snprintf(severity_fatal, sizeof(severity_fatal), "%d",
+ google::GLOG_FATAL);
+
+ google::InitGoogleLogging(argv0);
+ gflags::SetCommandLineOption("logtostderr", "1");
+ gflags::SetCommandLineOption("v", "0");
+ gflags::SetCommandLineOption("stderrthreshold", severity_fatal);
+ gflags::SetCommandLineOption("minloglevel", severity_fatal);
+}
+
+void libmv_startDebugLogging(void) {
+ gflags::SetCommandLineOption("logtostderr", "1");
+ gflags::SetCommandLineOption("v", "2");
+ gflags::SetCommandLineOption("stderrthreshold", "1");
+ gflags::SetCommandLineOption("minloglevel", "0");
+}
+
+void libmv_setLoggingVerbosity(int verbosity) {
+ char val[10];
+ snprintf(val, sizeof(val), "%d", verbosity);
+ gflags::SetCommandLineOption("v", val);
+}
diff --git a/intern/libmv/intern/logging.h b/intern/libmv/intern/logging.h
new file mode 100644
index 00000000000..479ed3d6288
--- /dev/null
+++ b/intern/libmv/intern/logging.h
@@ -0,0 +1,47 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_LOGGING_H_
+#define LIBMV_C_API_LOGGING_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Initialize GLog logging.
+void libmv_initLogging(const char* argv0);
+
+// Switch GLog to debug logging level.
+void libmv_startDebugLogging(void);
+
+// Set GLog logging verbosity level.
+void libmv_setLoggingVerbosity(int verbosity);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_LOGGING_H_
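Typical startup wiring, as a sketch: initialize once with argv[0]; the debug calls are only needed when chasing tracker issues.

#include "intern/logging.h"

int main(int argc, char **argv) {
  (void)argc;
  libmv_initLogging(argv[0]);

  /* Optional: verbose GLog output while debugging.
   * libmv_startDebugLogging();
   * libmv_setLoggingVerbosity(2);
   */

  return 0;
}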
diff --git a/intern/libmv/intern/reconstruction.cc b/intern/libmv/intern/reconstruction.cc
new file mode 100644
index 00000000000..046671e467f
--- /dev/null
+++ b/intern/libmv/intern/reconstruction.cc
@@ -0,0 +1,542 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/reconstruction.h"
+#include "intern/camera_intrinsics.h"
+#include "intern/tracks.h"
+#include "intern/utildefines.h"
+
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/keyframe_selection.h"
+#include "libmv/simple_pipeline/initialize_reconstruction.h"
+#include "libmv/simple_pipeline/modal_solver.h"
+#include "libmv/simple_pipeline/pipeline.h"
+#include "libmv/simple_pipeline/reconstruction_scale.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+using libmv::CameraIntrinsics;
+using libmv::EuclideanCamera;
+using libmv::EuclideanPoint;
+using libmv::EuclideanReconstruction;
+using libmv::EuclideanScaleToUnity;
+using libmv::Marker;
+using libmv::ProgressUpdateCallback;
+
+using libmv::PolynomialCameraIntrinsics;
+using libmv::Tracks;
+using libmv::EuclideanBundle;
+using libmv::EuclideanCompleteReconstruction;
+using libmv::EuclideanReconstructTwoFrames;
+using libmv::EuclideanReprojectionError;
+
+struct libmv_Reconstruction {
+ EuclideanReconstruction reconstruction;
+
+ /* Used for per-track average error calculation after reconstruction */
+ Tracks tracks;
+ CameraIntrinsics *intrinsics;
+
+ double error;
+ bool is_valid;
+};
+
+namespace {
+
+class ReconstructUpdateCallback : public ProgressUpdateCallback {
+ public:
+ ReconstructUpdateCallback(
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata) {
+ progress_update_callback_ = progress_update_callback;
+ callback_customdata_ = callback_customdata;
+ }
+
+ void invoke(double progress, const char* message) {
+ if (progress_update_callback_) {
+ progress_update_callback_(callback_customdata_, progress, message);
+ }
+ }
+ protected:
+ reconstruct_progress_update_cb progress_update_callback_;
+ void* callback_customdata_;
+};
+
+void libmv_solveRefineIntrinsics(
+ const Tracks &tracks,
+ const int refine_intrinsics,
+ const int bundle_constraints,
+ reconstruct_progress_update_cb progress_update_callback,
+ void* callback_customdata,
+ EuclideanReconstruction* reconstruction,
+ CameraIntrinsics* intrinsics) {
+  /* Only a few combinations are supported, but trust the caller. */
+ int bundle_intrinsics = 0;
+
+ if (refine_intrinsics & LIBMV_REFINE_FOCAL_LENGTH) {
+ bundle_intrinsics |= libmv::BUNDLE_FOCAL_LENGTH;
+ }
+ if (refine_intrinsics & LIBMV_REFINE_PRINCIPAL_POINT) {
+ bundle_intrinsics |= libmv::BUNDLE_PRINCIPAL_POINT;
+ }
+ if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K1) {
+ bundle_intrinsics |= libmv::BUNDLE_RADIAL_K1;
+ }
+ if (refine_intrinsics & LIBMV_REFINE_RADIAL_DISTORTION_K2) {
+ bundle_intrinsics |= libmv::BUNDLE_RADIAL_K2;
+ }
+
+ progress_update_callback(callback_customdata, 1.0, "Refining solution");
+
+ EuclideanBundleCommonIntrinsics(tracks,
+ bundle_intrinsics,
+ bundle_constraints,
+ reconstruction,
+ intrinsics);
+}
+
+void finishReconstruction(
+ const Tracks &tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ libmv_Reconstruction *libmv_reconstruction,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata) {
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
+
+ /* Reprojection error calculation. */
+ progress_update_callback(callback_customdata, 1.0, "Finishing solution");
+ libmv_reconstruction->tracks = tracks;
+ libmv_reconstruction->error = EuclideanReprojectionError(tracks,
+ reconstruction,
+ camera_intrinsics);
+}
+
+bool selectTwoKeyframesBasedOnGRICAndVariance(
+ Tracks& tracks,
+ Tracks& normalized_tracks,
+ CameraIntrinsics& camera_intrinsics,
+ int& keyframe1,
+ int& keyframe2) {
+ libmv::vector<int> keyframes;
+
+ /* Get list of all keyframe candidates first. */
+ SelectKeyframesBasedOnGRICAndVariance(normalized_tracks,
+ camera_intrinsics,
+ keyframes);
+
+ if (keyframes.size() < 2) {
+ LG << "Not enough keyframes detected by GRIC";
+ return false;
+ } else if (keyframes.size() == 2) {
+ keyframe1 = keyframes[0];
+ keyframe2 = keyframes[1];
+ return true;
+ }
+
+  /* Now choose the pair of keyframes with the least reprojection error
+   * after solving the initial reconstruction from each candidate pair.
+   *
+   * In fact, currently libmv returns a single pair only, so this code will
+   * not actually run. But in the future this could change, so let's stay
+   * prepared.
+   */
+ int previous_keyframe = keyframes[0];
+ double best_error = std::numeric_limits<double>::max();
+ for (int i = 1; i < keyframes.size(); i++) {
+ EuclideanReconstruction reconstruction;
+ int current_keyframe = keyframes[i];
+ libmv::vector<Marker> keyframe_markers =
+ normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
+ current_keyframe);
+
+ Tracks keyframe_tracks(keyframe_markers);
+
+    /* Get a solution from the two keyframes only. */
+ EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
+ EuclideanBundle(keyframe_tracks, &reconstruction);
+ EuclideanCompleteReconstruction(keyframe_tracks,
+ &reconstruction,
+ NULL);
+
+ double current_error = EuclideanReprojectionError(tracks,
+ reconstruction,
+ camera_intrinsics);
+
+ LG << "Error between " << previous_keyframe
+ << " and " << current_keyframe
+ << ": " << current_error;
+
+ if (current_error < best_error) {
+ best_error = current_error;
+ keyframe1 = previous_keyframe;
+ keyframe2 = current_keyframe;
+ }
+
+ previous_keyframe = current_keyframe;
+ }
+
+ return true;
+}
+
+Marker libmv_projectMarker(const EuclideanPoint& point,
+ const EuclideanCamera& camera,
+ const CameraIntrinsics& intrinsics) {
+ libmv::Vec3 projected = camera.R * point.X + camera.t;
+ projected /= projected(2);
+
+ libmv::Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0), projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+ return reprojected_marker;
+}
+
+void libmv_getNormalizedTracks(const Tracks &tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *normalized_tracks) {
+ libmv::vector<Marker> markers = tracks.AllMarkers();
+ for (int i = 0; i < markers.size(); ++i) {
+ Marker &marker = markers[i];
+ camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
+ &marker.x, &marker.y);
+ normalized_tracks->Insert(marker.image,
+ marker.track,
+ marker.x, marker.y,
+ marker.weight);
+ }
+}
+
+} // namespace
+
+libmv_Reconstruction *libmv_solveReconstruction(
+ const libmv_Tracks* libmv_tracks,
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ libmv_ReconstructionOptions* libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void* callback_customdata) {
+ libmv_Reconstruction *libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
+
+ Tracks &tracks = *((Tracks *) libmv_tracks);
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
+
+ ReconstructUpdateCallback update_callback =
+ ReconstructUpdateCallback(progress_update_callback,
+ callback_customdata);
+
+  /* Convert reconstruction options from the C-API to the libmv API. */
+ CameraIntrinsics *camera_intrinsics;
+ camera_intrinsics = libmv_reconstruction->intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+
+  /* Invert the camera intrinsics. */
+ Tracks normalized_tracks;
+ libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
+
+  /* Keyframe selection. */
+ int keyframe1 = libmv_reconstruction_options->keyframe1,
+ keyframe2 = libmv_reconstruction_options->keyframe2;
+
+ if (libmv_reconstruction_options->select_keyframes) {
+ LG << "Using automatic keyframe selection";
+
+ update_callback.invoke(0, "Selecting keyframes");
+
+ selectTwoKeyframesBasedOnGRICAndVariance(tracks,
+ normalized_tracks,
+ *camera_intrinsics,
+ keyframe1,
+ keyframe2);
+
+    /* So that keyframes in the interface will be updated. */
+ libmv_reconstruction_options->keyframe1 = keyframe1;
+ libmv_reconstruction_options->keyframe2 = keyframe2;
+ }
+
+ /* Actual reconstruction. */
+ LG << "frames to init from: " << keyframe1 << " " << keyframe2;
+
+ libmv::vector<Marker> keyframe_markers =
+ normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
+
+ LG << "number of markers for init: " << keyframe_markers.size();
+
+ if (keyframe_markers.size() < 8) {
+ LG << "No enough markers to initialize from";
+ libmv_reconstruction->is_valid = false;
+ return libmv_reconstruction;
+ }
+
+ update_callback.invoke(0, "Initial reconstruction");
+
+ EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
+ EuclideanBundle(normalized_tracks, &reconstruction);
+ EuclideanCompleteReconstruction(normalized_tracks,
+ &reconstruction,
+ &update_callback);
+
+  /* Refinement. */
+ if (libmv_reconstruction_options->refine_intrinsics) {
+ libmv_solveRefineIntrinsics(
+ tracks,
+ libmv_reconstruction_options->refine_intrinsics,
+ libmv::BUNDLE_NO_CONSTRAINTS,
+ progress_update_callback,
+ callback_customdata,
+ &reconstruction,
+ camera_intrinsics);
+ }
+
+ /* Set reconstruction scale to unity. */
+ EuclideanScaleToUnity(&reconstruction);
+
+ /* Finish reconstruction. */
+ finishReconstruction(tracks,
+ *camera_intrinsics,
+ libmv_reconstruction,
+ progress_update_callback,
+ callback_customdata);
+
+ libmv_reconstruction->is_valid = true;
+ return (libmv_Reconstruction *) libmv_reconstruction;
+}
+
+libmv_Reconstruction *libmv_solveModal(
+ const libmv_Tracks *libmv_tracks,
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ const libmv_ReconstructionOptions *libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata) {
+ libmv_Reconstruction *libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
+
+ Tracks &tracks = *((Tracks *) libmv_tracks);
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
+
+ ReconstructUpdateCallback update_callback =
+ ReconstructUpdateCallback(progress_update_callback,
+ callback_customdata);
+
+  /* Convert reconstruction options from the C-API to the libmv API. */
+ CameraIntrinsics *camera_intrinsics;
+ camera_intrinsics = libmv_reconstruction->intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(
+ libmv_camera_intrinsics_options);
+
+ /* Invert the camera intrinsics. */
+ Tracks normalized_tracks;
+ libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
+
+ /* Actual reconstruction. */
+ ModalSolver(normalized_tracks, &reconstruction, &update_callback);
+
+ PolynomialCameraIntrinsics empty_intrinsics;
+ EuclideanBundleCommonIntrinsics(normalized_tracks,
+ libmv::BUNDLE_NO_INTRINSICS,
+ libmv::BUNDLE_NO_TRANSLATION,
+ &reconstruction,
+ &empty_intrinsics);
+
+ /* Refinement. */
+ if (libmv_reconstruction_options->refine_intrinsics) {
+ libmv_solveRefineIntrinsics(
+ tracks,
+ libmv_reconstruction_options->refine_intrinsics,
+ libmv::BUNDLE_NO_TRANSLATION,
+ progress_update_callback, callback_customdata,
+ &reconstruction,
+ camera_intrinsics);
+ }
+
+ /* Finish reconstruction. */
+ finishReconstruction(tracks,
+ *camera_intrinsics,
+ libmv_reconstruction,
+ progress_update_callback,
+ callback_customdata);
+
+ libmv_reconstruction->is_valid = true;
+ return (libmv_Reconstruction *) libmv_reconstruction;
+}
+
+int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction) {
+ return libmv_reconstruction->is_valid;
+}
+
+void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction) {
+ LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
+ LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
+}
+
+int libmv_reprojectionPointForTrack(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int track,
+ double pos[3]) {
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(track);
+ if (point) {
+ pos[0] = point->X[0];
+ pos[1] = point->X[2];
+ pos[2] = point->X[1];
+ return 1;
+ }
+ return 0;
+}
+
+double libmv_reprojectionErrorForTrack(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int track) {
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ libmv::vector<Marker> markers =
+ libmv_reconstruction->tracks.MarkersForTrack(track);
+
+ int num_reprojected = 0;
+ double total_error = 0.0;
+
+ for (int i = 0; i < markers.size(); ++i) {
+ double weight = markers[i].weight;
+ const EuclideanCamera *camera =
+ reconstruction->CameraForImage(markers[i].image);
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(markers[i].track);
+
+ if (!camera || !point || weight == 0.0) {
+ continue;
+ }
+
+ num_reprojected++;
+
+ Marker reprojected_marker =
+ libmv_projectMarker(*point, *camera, *intrinsics);
+ double ex = (reprojected_marker.x - markers[i].x) * weight;
+ double ey = (reprojected_marker.y - markers[i].y) * weight;
+
+ total_error += sqrt(ex * ex + ey * ey);
+ }
+
+ return total_error / num_reprojected;
+}
+
+double libmv_reprojectionErrorForImage(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int image) {
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ libmv::vector<Marker> markers =
+ libmv_reconstruction->tracks.MarkersInImage(image);
+ const EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ int num_reprojected = 0;
+ double total_error = 0.0;
+
+ if (!camera) {
+ return 0.0;
+ }
+
+ for (int i = 0; i < markers.size(); ++i) {
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(markers[i].track);
+
+ if (!point) {
+ continue;
+ }
+
+ num_reprojected++;
+
+ Marker reprojected_marker =
+ libmv_projectMarker(*point, *camera, *intrinsics);
+ double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
+ double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
+
+ total_error += sqrt(ex * ex + ey * ey);
+ }
+
+ return total_error / num_reprojected;
+}
+
+int libmv_reprojectionCameraForImage(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int image,
+ double mat[4][4]) {
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanCamera *camera =
+ reconstruction->CameraForImage(image);
+
+ if (camera) {
+ for (int j = 0; j < 3; ++j) {
+ for (int k = 0; k < 3; ++k) {
+ int l = k;
+
+ if (k == 1) {
+ l = 2;
+ } else if (k == 2) {
+ l = 1;
+ }
+
+ if (j == 2) {
+ mat[j][l] = -camera->R(j, k);
+ } else {
+ mat[j][l] = camera->R(j, k);
+ }
+ }
+ mat[j][3] = 0.0;
+ }
+
+ libmv::Vec3 optical_center = -camera->R.transpose() * camera->t;
+
+ mat[3][0] = optical_center(0);
+ mat[3][1] = optical_center(2);
+ mat[3][2] = optical_center(1);
+
+ mat[3][3] = 1.0;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+double libmv_reprojectionError(
+ const libmv_Reconstruction *libmv_reconstruction) {
+ return libmv_reconstruction->error;
+}
+
+libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction *libmv_reconstruction) {
+ return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
+}
diff --git a/intern/libmv/intern/reconstruction.h b/intern/libmv/intern/reconstruction.h
new file mode 100644
index 00000000000..8b6b34a6142
--- /dev/null
+++ b/intern/libmv/intern/reconstruction.h
@@ -0,0 +1,102 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_RECONSTRUCTION_H_
+#define LIBMV_C_API_RECONSTRUCTION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct libmv_Tracks;
+struct libmv_CameraIntrinsics;
+struct libmv_CameraIntrinsicsOptions;
+
+typedef struct libmv_Reconstruction libmv_Reconstruction;
+
+enum {
+ LIBMV_REFINE_FOCAL_LENGTH = (1 << 0),
+ LIBMV_REFINE_PRINCIPAL_POINT = (1 << 1),
+ LIBMV_REFINE_RADIAL_DISTORTION_K1 = (1 << 2),
+ LIBMV_REFINE_RADIAL_DISTORTION_K2 = (1 << 4),
+};
+
+typedef struct libmv_ReconstructionOptions {
+ int select_keyframes;
+ int keyframe1, keyframe2;
+ int refine_intrinsics;
+} libmv_ReconstructionOptions;
+
+typedef void (*reconstruct_progress_update_cb) (void* customdata,
+ double progress,
+ const char* message);
+
+libmv_Reconstruction* libmv_solveReconstruction(
+ const struct libmv_Tracks* libmv_tracks,
+ const struct libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ libmv_ReconstructionOptions* libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void* callback_customdata);
+
+libmv_Reconstruction* libmv_solveModal(
+ const struct libmv_Tracks* libmv_tracks,
+ const struct libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ const libmv_ReconstructionOptions* libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void* callback_customdata);
+
+int libmv_reconstructionIsValid(libmv_Reconstruction *libmv_reconstruction);
+
+void libmv_reconstructionDestroy(libmv_Reconstruction* libmv_reconstruction);
+
+int libmv_reprojectionPointForTrack(
+ const libmv_Reconstruction* libmv_reconstruction,
+ int track,
+ double pos[3]);
+
+double libmv_reprojectionErrorForTrack(
+ const libmv_Reconstruction* libmv_reconstruction,
+ int track);
+
+double libmv_reprojectionErrorForImage(
+ const libmv_Reconstruction* libmv_reconstruction,
+ int image);
+
+int libmv_reprojectionCameraForImage(
+ const libmv_Reconstruction* libmv_reconstruction,
+ int image,
+ double mat[4][4]);
+
+double libmv_reprojectionError(const libmv_Reconstruction* libmv_reconstruction);
+
+struct libmv_CameraIntrinsics* libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction *libmv_reconstruction);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_RECONSTRUCTION_H_
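
Editorial aside, not part of the patch: a minimal C++ sketch of how a caller might drive the solver API declared above. It assumes a libmv_Tracks handle already populated through the tracks API and a libmv_CameraIntrinsicsOptions prepared elsewhere (its fields are declared in intern/camera_intrinsics.h, outside this hunk); the helper names are hypothetical.

#include <stdio.h>
#include "intern/reconstruction.h"

/* Progress callback matching reconstruct_progress_update_cb. */
static void print_progress(void *customdata, double progress, const char *message) {
  (void) customdata;
  printf("%3d%% %s\n", (int) (progress * 100.0), message);
}

/* Hypothetical helper: solve from existing tracks and intrinsics options. */
static void solve_example(struct libmv_Tracks *tracks,
                          const struct libmv_CameraIntrinsicsOptions *intrinsics_options) {
  libmv_ReconstructionOptions options;
  options.select_keyframes = 1;  /* Let GRIC-based selection pick keyframes. */
  options.keyframe1 = 1;         /* Fallback values, overwritten by selection. */
  options.keyframe2 = 2;
  options.refine_intrinsics = LIBMV_REFINE_FOCAL_LENGTH;

  libmv_Reconstruction *reconstruction = libmv_solveReconstruction(
      tracks, intrinsics_options, &options, print_progress, NULL);

  if (libmv_reconstructionIsValid(reconstruction)) {
    printf("Average reprojection error: %f\n",
           libmv_reprojectionError(reconstruction));
  }
  libmv_reconstructionDestroy(reconstruction);
}
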
diff --git a/intern/libmv/intern/region.h b/intern/libmv/intern/region.h
new file mode 100644
index 00000000000..9f114bbad3b
--- /dev/null
+++ b/intern/libmv/intern/region.h
@@ -0,0 +1,43 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_REGION_H_
+#define LIBMV_C_API_REGION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_Region {
+ float min[2];
+ float max[2];
+} libmv_Region;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_REGION_H_
diff --git a/intern/libmv/intern/stub.cc b/intern/libmv/intern/stub.cc
new file mode 100644
index 00000000000..5d667baa880
--- /dev/null
+++ b/intern/libmv/intern/stub.cc
@@ -0,0 +1,403 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2013 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "libmv-capi.h"
+
+#include <cstdlib>
+#include <cstring>
+
+/* ************ Logging ************ */
+
+void libmv_initLogging(const char * /*argv0*/) {
+}
+
+void libmv_startDebugLogging(void) {
+}
+
+void libmv_setLoggingVerbosity(int /*verbosity*/) {
+}
+
+/* ************ Planar tracker ************ */
+
+/* TrackRegion (new planar tracker) */
+int libmv_trackRegion(const libmv_TrackRegionOptions * /*options*/,
+ const float * /*image1*/,
+ int /*image1_width*/,
+ int /*image1_height*/,
+ const float * /*image2*/,
+ int /*image2_width*/,
+ int /*image2_height*/,
+ const double *x1,
+ const double *y1,
+ libmv_TrackRegionResult *result,
+ double *x2,
+ double *y2) {
+ /* Convert to doubles for the libmv api. The four corners and the center. */
+ for (int i = 0; i < 5; ++i) {
+ x2[i] = x1[i];
+ y2[i] = y1[i];
+ }
+
+ result->termination = -1;
+ result->termination_reason = "Built without libmv support";
+ result->correlation = 0.0;
+
+ return false;
+}
+
+void libmv_samplePlanarPatchFloat(const float * /*image*/,
+ int /*width*/,
+ int /*height*/,
+ int /*channels*/,
+ const double * /*xs*/,
+ const double * /*ys*/,
+ int /*num_samples_x*/,
+ int /*num_samples_y*/,
+ const float * /*mask*/,
+ float * /*patch*/,
+ double * /*warped_position_x*/,
+ double * /*warped_position_y*/) {
+ /* TODO(sergey): implement */
+}
+
+void libmv_samplePlanarPatchByte(const unsigned char * /*image*/,
+ int /*width*/,
+ int /*height*/,
+ int /*channels*/,
+ const double * /*xs*/,
+ const double * /*ys*/,
+ int /*num_samples_x*/, int /*num_samples_y*/,
+ const float * /*mask*/,
+ unsigned char * /*patch*/,
+ double * /*warped_position_x*/,
+ double * /*warped_position_y*/) {
+ /* TODO(sergey): implement */
+}
+
+void libmv_floatImageDestroy(libmv_FloatImage* /*image*/)
+{
+}
+
+/* ************ Tracks ************ */
+
+libmv_Tracks *libmv_tracksNew(void) {
+ return NULL;
+}
+
+void libmv_tracksInsert(libmv_Tracks * /*libmv_tracks*/,
+ int /*image*/,
+ int /*track*/,
+ double /*x*/,
+ double /*y*/,
+ double /*weight*/) {
+}
+
+void libmv_tracksDestroy(libmv_Tracks * /*libmv_tracks*/) {
+}
+
+/* ************ Reconstruction solver ************ */
+
+libmv_Reconstruction *libmv_solveReconstruction(
+ const libmv_Tracks * /*libmv_tracks*/,
+ const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
+ libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
+ reconstruct_progress_update_cb /*progress_update_callback*/,
+ void * /*callback_customdata*/) {
+ return NULL;
+}
+
+libmv_Reconstruction *libmv_solveModal(
+ const libmv_Tracks * /*libmv_tracks*/,
+ const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
+ const libmv_ReconstructionOptions * /*libmv_reconstruction_options*/,
+ reconstruct_progress_update_cb /*progress_update_callback*/,
+ void * /*callback_customdata*/) {
+ return NULL;
+}
+
+int libmv_reconstructionIsValid(libmv_Reconstruction * /*libmv_reconstruction*/) {
+ return 0;
+}
+
+int libmv_reprojectionPointForTrack(
+ const libmv_Reconstruction * /*libmv_reconstruction*/,
+ int /*track*/,
+ double /*pos*/[3]) {
+ return 0;
+}
+
+double libmv_reprojectionErrorForTrack(
+ const libmv_Reconstruction * /*libmv_reconstruction*/,
+ int /*track*/) {
+ return 0.0;
+}
+
+double libmv_reprojectionErrorForImage(
+ const libmv_Reconstruction * /*libmv_reconstruction*/,
+ int /*image*/) {
+ return 0.0;
+}
+
+int libmv_reprojectionCameraForImage(
+ const libmv_Reconstruction * /*libmv_reconstruction*/,
+ int /*image*/,
+ double /*mat*/[4][4]) {
+ return 0;
+}
+
+double libmv_reprojectionError(
+ const libmv_Reconstruction * /*libmv_reconstruction*/) {
+ return 0.0;
+}
+
+void libmv_reconstructionDestroy(
+ struct libmv_Reconstruction * /*libmv_reconstruction*/) {
+}
+
+/* ************ Feature detector ************ */
+
+libmv_Features *libmv_detectFeaturesByte(const unsigned char * /*image_buffer*/,
+ int /*width*/,
+ int /*height*/,
+ int /*channels*/,
+ libmv_DetectOptions * /*options*/) {
+ return NULL;
+}
+
+struct libmv_Features *libmv_detectFeaturesFloat(
+ const float * /*image_buffer*/,
+ int /*width*/,
+ int /*height*/,
+ int /*channels*/,
+ libmv_DetectOptions * /*options*/) {
+ return NULL;
+}
+
+int libmv_countFeatures(const libmv_Features * /*libmv_features*/) {
+ return 0;
+}
+
+void libmv_getFeature(const libmv_Features * /*libmv_features*/,
+ int /*number*/,
+ double *x,
+ double *y,
+ double *score,
+ double *size) {
+ *x = 0.0;
+ *y = 0.0;
+ *score = 0.0;
+ *size = 0.0;
+}
+
+void libmv_featuresDestroy(struct libmv_Features * /*libmv_features*/) {
+}
+
+/* ************ Camera intrinsics ************ */
+
+libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction * /*libmv_reconstruction*/) {
+ return NULL;
+}
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+ const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/) {
+ return NULL;
+}
+
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+ const libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
+ return NULL;
+}
+
+void libmv_cameraIntrinsicsDestroy(
+ libmv_CameraIntrinsics * /*libmvIntrinsics*/) {
+}
+
+void libmv_cameraIntrinsicsUpdate(
+ const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/,
+ libmv_CameraIntrinsics * /*libmv_intrinsics*/) {
+}
+
+void libmv_cameraIntrinsicsSetThreads(
+ libmv_CameraIntrinsics * /*libmv_intrinsics*/,
+ int /*threads*/) {
+}
+
+void libmv_cameraIntrinsicsExtractOptions(
+ const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
+ libmv_CameraIntrinsicsOptions *camera_intrinsics_options) {
+ memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
+ camera_intrinsics_options->focal_length = 1.0;
+}
+
+void libmv_cameraIntrinsicsUndistortByte(
+ const libmv_CameraIntrinsics * /*libmv_intrinsics*/,
+ const unsigned char *source_image,
+ int width, int height,
+ float overscan, int channels,
+ unsigned char *destination_image) {
+ memcpy(destination_image, source_image,
+ channels * width * height * sizeof(unsigned char));
+}
+
+void libmv_cameraIntrinsicsUndistortFloat(
+ const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
+ const float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image) {
+ memcpy(destination_image, source_image,
+ channels * width * height * sizeof(float));
+}
+
+void libmv_cameraIntrinsicsDistortByte(
+ const struct libmv_CameraIntrinsics* /*libmv_intrinsics*/,
+ const unsigned char *source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ unsigned char *destination_image) {
+ memcpy(destination_image, source_image,
+ channels * width * height * sizeof(unsigned char));
+}
+
+void libmv_cameraIntrinsicsDistortFloat(
+ const libmv_CameraIntrinsics* /*libmv_intrinsics*/,
+ float* source_image,
+ int width,
+ int height,
+ float overscan,
+ int channels,
+ float* destination_image) {
+ memcpy(destination_image, source_image,
+ channels * width * height * sizeof(float));
+}
+
+/* ************ utils ************ */
+
+void libmv_cameraIntrinsicsApply(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1) {
+ double focal_length = libmv_camera_intrinsics_options->focal_length;
+ double principal_x = libmv_camera_intrinsics_options->principal_point_x;
+ double principal_y = libmv_camera_intrinsics_options->principal_point_y;
+ *x1 = x * focal_length + principal_x;
+ *y1 = y * focal_length + principal_y;
+}
+
+void libmv_cameraIntrinsicsInvert(
+ const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options,
+ double x,
+ double y,
+ double* x1,
+ double* y1) {
+ double focal_length = libmv_camera_intrinsics_options->focal_length;
+ double principal_x = libmv_camera_intrinsics_options->principal_point_x;
+ double principal_y = libmv_camera_intrinsics_options->principal_point_y;
+ *x1 = (x - principal_x) / focal_length;
+ *y1 = (y - principal_y) / focal_length;
+}
+
+void libmv_homography2DFromCorrespondencesEuc(/* const */ double (*x1)[2],
+ /* const */ double (*x2)[2],
+ int num_points,
+ double H[3][3]) {
+ memset(H, 0, sizeof(double[3][3]));
+ H[0][0] = 1.0f;
+ H[1][1] = 1.0f;
+ H[2][2] = 1.0f;
+}
+
+/* ************ autotrack ************ */
+
+libmv_AutoTrack* libmv_autoTrackNew(libmv_FrameAccessor* /*frame_accessor*/)
+{
+ return NULL;
+}
+
+void libmv_autoTrackDestroy(libmv_AutoTrack* /*libmv_autotrack*/)
+{
+}
+
+void libmv_autoTrackSetOptions(libmv_AutoTrack* /*libmv_autotrack*/,
+ const libmv_AutoTrackOptions* /*options*/)
+{
+}
+
+int libmv_autoTrackMarker(libmv_AutoTrack* /*libmv_autotrack*/,
+ const libmv_TrackRegionOptions* /*libmv_options*/,
+ libmv_Marker * /*libmv_tracker_marker*/,
+ libmv_TrackRegionResult* /*libmv_result*/)
+{
+ return 0;
+}
+
+void libmv_autoTrackAddMarker(libmv_AutoTrack* /*libmv_autotrack*/,
+ const libmv_Marker* /*libmv_marker*/)
+{
+}
+
+int libmv_autoTrackGetMarker(libmv_AutoTrack* /*libmv_autotrack*/,
+ int /*clip*/,
+ int /*frame*/,
+ int /*track*/,
+ libmv_Marker* /*libmv_marker*/)
+{
+ return 0;
+}
+
+/* ************ frame accessor ************ */
+
+libmv_FrameAccessor* libmv_FrameAccessorNew(
+ libmv_FrameAccessorUserData* /*user_data*/,
+ libmv_GetImageCallback /*get_image_callback*/,
+ libmv_ReleaseImageCallback /*release_image_callback*/)
+{
+ return NULL;
+}
+
+void libmv_FrameAccessorDestroy(libmv_FrameAccessor* /*frame_accessor*/)
+{
+}
+
+int64_t libmv_frameAccessorgetTransformKey(
+ const libmv_FrameTransform * /*transform*/)
+{
+ return 0;
+}
+
+void libmv_frameAccessorgetTransformRun(const libmv_FrameTransform* /*transform*/,
+ const libmv_FloatImage* /*input_image*/,
+ libmv_FloatImage* /*output_image*/)
+{
+}
+
diff --git a/intern/libmv/intern/track_region.cc b/intern/libmv/intern/track_region.cc
new file mode 100644
index 00000000000..24fbc78c1a1
--- /dev/null
+++ b/intern/libmv/intern/track_region.cc
@@ -0,0 +1,177 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/track_region.h"
+#include "intern/image.h"
+#include "intern/utildefines.h"
+#include "libmv/image/image.h"
+#include "libmv/tracking/track_region.h"
+
+/* Define this to generate PNG images with the content of the search areas
+ between which tracking failed. */
+#undef DUMP_FAILURE
+
+/* Define this to generate PNG images with the content of the search areas
+ on every iteration of tracking. */
+#undef DUMP_ALWAYS
+
+using libmv::FloatImage;
+using libmv::TrackRegionOptions;
+using libmv::TrackRegionResult;
+using libmv::TrackRegion;
+
+void libmv_configureTrackRegionOptions(
+ const libmv_TrackRegionOptions& options,
+ TrackRegionOptions* track_region_options) {
+ switch (options.motion_model) {
+#define LIBMV_CONVERT(the_model) \
+ case TrackRegionOptions::the_model: \
+ track_region_options->mode = TrackRegionOptions::the_model; \
+ break;
+ LIBMV_CONVERT(TRANSLATION)
+ LIBMV_CONVERT(TRANSLATION_ROTATION)
+ LIBMV_CONVERT(TRANSLATION_SCALE)
+ LIBMV_CONVERT(TRANSLATION_ROTATION_SCALE)
+ LIBMV_CONVERT(AFFINE)
+ LIBMV_CONVERT(HOMOGRAPHY)
+#undef LIBMV_CONVERT
+ }
+
+ track_region_options->minimum_correlation = options.minimum_correlation;
+ track_region_options->max_iterations = options.num_iterations;
+ track_region_options->sigma = options.sigma;
+ track_region_options->num_extra_points = 1;
+ track_region_options->image1_mask = NULL;
+ track_region_options->use_brute_initialization = options.use_brute;
+ /* TODO(keir): This will make some cases better, but may be a regression until
+ * the motion model is in. Since this is on trunk, enable it for now.
+ *
+ * TODO(sergey): This gives much worse results on Mango footage (see 04_2e),
+ * so disabling it for now until a proper prediction model is landed.
+ *
+ * The thing is, currently Blender sends the input coordinates as the guess to
+ * the region tracker, and in case of fast motion such an early-out ruins the
+ * track.
+ */
+ track_region_options->attempt_refine_before_brute = false;
+ track_region_options->use_normalized_intensities = options.use_normalization;
+}
+
+void libmv_regionTrackergetResult(const TrackRegionResult& track_region_result,
+ libmv_TrackRegionResult* result) {
+ result->termination = (int) track_region_result.termination;
+ result->termination_reason = "";
+ result->correlation = track_region_result.correlation;
+}
+
+int libmv_trackRegion(const libmv_TrackRegionOptions* options,
+ const float* image1,
+ int image1_width,
+ int image1_height,
+ const float* image2,
+ int image2_width,
+ int image2_height,
+ const double* x1,
+ const double* y1,
+ libmv_TrackRegionResult* result,
+ double* x2,
+ double* y2) {
+ double xx1[5], yy1[5];
+ double xx2[5], yy2[5];
+ bool tracking_result = false;
+
+ // Convert to doubles for the libmv api. The four corners and the center.
+ for (int i = 0; i < 5; ++i) {
+ xx1[i] = x1[i];
+ yy1[i] = y1[i];
+ xx2[i] = x2[i];
+ yy2[i] = y2[i];
+ }
+
+ TrackRegionOptions track_region_options;
+ FloatImage image1_mask;
+
+ libmv_configureTrackRegionOptions(*options, &track_region_options);
+ if (options->image1_mask) {
+ libmv_floatBufferToFloatImage(options->image1_mask,
+ image1_width,
+ image1_height,
+ 1,
+ &image1_mask);
+
+ track_region_options.image1_mask = &image1_mask;
+ }
+
+ // Convert from raw float buffers to libmv's FloatImage.
+ FloatImage old_patch, new_patch;
+ libmv_floatBufferToFloatImage(image1,
+ image1_width,
+ image1_height,
+ 1,
+ &old_patch);
+ libmv_floatBufferToFloatImage(image2,
+ image2_width,
+ image2_height,
+ 1,
+ &new_patch);
+
+ TrackRegionResult track_region_result;
+ TrackRegion(old_patch, new_patch,
+ xx1, yy1,
+ track_region_options,
+ xx2, yy2,
+ &track_region_result);
+
+ // Convert to floats for the blender api.
+ for (int i = 0; i < 5; ++i) {
+ x2[i] = xx2[i];
+ y2[i] = yy2[i];
+ }
+
+ // TODO(keir): Update the termination string with failure details.
+ if (track_region_result.termination == TrackRegionResult::CONVERGENCE ||
+ track_region_result.termination == TrackRegionResult::NO_CONVERGENCE) {
+ tracking_result = true;
+ }
+
+ // Debug dump of patches.
+#if defined(DUMP_FAILURE) || defined(DUMP_ALWAYS)
+ bool need_dump = !tracking_result;
+
+# ifdef DUMP_ALWAYS
+ need_dump = true;
+# endif
+
+ if (need_dump) {
+ libmv_saveImage(old_patch, "old_patch", x1[4], y1[4]);
+ libmv_saveImage(new_patch, "new_patch", x2[4], y2[4]);
+ if (options->image1_mask) {
+ libmv_saveImage(image1_mask, "mask", x2[4], y2[4]);
+ }
+ }
+#endif
+
+ return tracking_result;
+}
diff --git a/intern/libmv/intern/track_region.h b/intern/libmv/intern/track_region.h
new file mode 100644
index 00000000000..7ed3e443e40
--- /dev/null
+++ b/intern/libmv/intern/track_region.h
@@ -0,0 +1,81 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_TRACK_REGION_H_
+#define LIBMV_C_API_TRACK_REGION_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_TrackRegionOptions {
+ int motion_model;
+ int num_iterations;
+ int use_brute;
+ int use_normalization;
+ double minimum_correlation;
+ double sigma;
+ float *image1_mask;
+} libmv_TrackRegionOptions;
+
+typedef struct libmv_TrackRegionResult {
+ int termination;
+ const char* termination_reason;
+ double correlation;
+} libmv_TrackRegionResult;
+
+#ifdef __cplusplus
+namespace libmv {
+ struct TrackRegionOptions;
+ struct TrackRegionResult;
+}
+void libmv_configureTrackRegionOptions(
+ const libmv_TrackRegionOptions& options,
+ libmv::TrackRegionOptions* track_region_options);
+
+void libmv_regionTrackergetResult(
+ const libmv::TrackRegionResult& track_region_result,
+ libmv_TrackRegionResult* result);
+#endif
+
+int libmv_trackRegion(const libmv_TrackRegionOptions* options,
+ const float* image1,
+ int image1_width,
+ int image1_height,
+ const float* image2,
+ int image2_width,
+ int image2_height,
+ const double* x1,
+ const double* y1,
+ libmv_TrackRegionResult* result,
+ double* x2,
+ double* y2);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // LIBMV_C_API_TRACK_REGION_H_
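
Editorial sketch, not part of the patch: one possible way to call the planar tracker entry point on two single-channel float frames. The buffer and corner setup are placeholders, and motion_model 0 is assumed to map to the translation-only model following libmv's enum ordering.

#include <string.h>
#include "intern/track_region.h"

/* x1/y1 hold the four pattern corners plus the center as a fifth point;
 * new_x/new_y are seeded with the initial guess and receive the result. */
static int track_example(const float *frame1, const float *frame2,
                         int width, int height,
                         const double x1[5], const double y1[5],
                         double new_x[5], double new_y[5]) {
  libmv_TrackRegionOptions options;
  memset(&options, 0, sizeof(options));
  options.motion_model = 0;          /* Assumed: translation-only model. */
  options.num_iterations = 50;
  options.use_brute = 1;
  options.use_normalization = 0;
  options.minimum_correlation = 0.75;
  options.sigma = 0.9;
  options.image1_mask = NULL;

  /* Use the previous position as the initial guess. */
  memcpy(new_x, x1, sizeof(double[5]));
  memcpy(new_y, y1, sizeof(double[5]));

  libmv_TrackRegionResult result;
  return libmv_trackRegion(&options,
                           frame1, width, height,
                           frame2, width, height,
                           x1, y1, &result, new_x, new_y);
}
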
diff --git a/intern/libmv/intern/tracks.cc b/intern/libmv/intern/tracks.cc
new file mode 100644
index 00000000000..9b032b0760a
--- /dev/null
+++ b/intern/libmv/intern/tracks.cc
@@ -0,0 +1,52 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/tracks.h"
+#include "intern/utildefines.h"
+
+#include "libmv/simple_pipeline/tracks.h"
+
+using libmv::Marker;
+using libmv::Tracks;
+
+libmv_Tracks* libmv_tracksNew(void) {
+ Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
+
+ return (libmv_Tracks*) tracks;
+}
+
+void libmv_tracksDestroy(libmv_Tracks* libmv_tracks) {
+ LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
+}
+
+void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
+ int image,
+ int track,
+ double x,
+ double y,
+ double weight) {
+ ((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
+}
diff --git a/intern/libmv/intern/tracks.h b/intern/libmv/intern/tracks.h
new file mode 100644
index 00000000000..79f6cc99579
--- /dev/null
+++ b/intern/libmv/intern/tracks.h
@@ -0,0 +1,51 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_TRACKS_H_
+#define LIBMV_C_API_TRACKS_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_Tracks libmv_Tracks;
+
+libmv_Tracks* libmv_tracksNew(void);
+
+void libmv_tracksDestroy(libmv_Tracks* libmv_tracks);
+
+void libmv_tracksInsert(libmv_Tracks* libmv_tracks,
+ int image,
+ int track,
+ double x,
+ double y,
+ double weight);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // LIBMV_C_API_TRACKS_H_
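
Editorial sketch, not part of the patch: feeding 2D markers into the simple-pipeline tracks container declared above. The coordinates are placeholders for illustration only.

#include "intern/tracks.h"

/* Build a tiny two-frame track; the caller releases it with libmv_tracksDestroy(). */
static libmv_Tracks *tracks_example(void) {
  libmv_Tracks *tracks = libmv_tracksNew();
  /* Arguments: image (frame), track, x, y, weight. */
  libmv_tracksInsert(tracks, 0, 0, 120.0, 340.0, 1.0);
  libmv_tracksInsert(tracks, 1, 0, 123.5, 338.0, 1.0);
  return tracks;
}
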
diff --git a/intern/libmv/intern/tracksN.cc b/intern/libmv/intern/tracksN.cc
new file mode 100644
index 00000000000..9e1da88ef10
--- /dev/null
+++ b/intern/libmv/intern/tracksN.cc
@@ -0,0 +1,138 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "intern/tracksN.h"
+#include "intern/utildefines.h"
+#include "libmv/autotrack/marker.h"
+#include "libmv/autotrack/tracks.h"
+
+using mv::Marker;
+using mv::Tracks;
+
+void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
+ Marker *marker) {
+ marker->clip = libmv_marker.clip;
+ marker->frame = libmv_marker.frame;
+ marker->track = libmv_marker.track;
+ marker->center(0) = libmv_marker.center[0];
+ marker->center(1) = libmv_marker.center[1];
+ for (int i = 0; i < 4; i++) {
+ marker->patch.coordinates(i, 0) = libmv_marker.patch[i][0];
+ marker->patch.coordinates(i, 1) = libmv_marker.patch[i][1];
+ }
+ marker->search_region.min(0) = libmv_marker.search_region_min[0];
+ marker->search_region.min(1) = libmv_marker.search_region_min[1];
+ marker->search_region.max(0) = libmv_marker.search_region_max[0];
+ marker->search_region.max(1) = libmv_marker.search_region_max[1];
+ marker->weight = libmv_marker.weight;
+ marker->source = (Marker::Source) libmv_marker.source;
+ marker->status = (Marker::Status) libmv_marker.status;
+ marker->reference_clip = libmv_marker.reference_clip;
+ marker->reference_frame = libmv_marker.reference_frame;
+ marker->model_type = (Marker::ModelType) libmv_marker.model_type;
+ marker->model_id = libmv_marker.model_id;
+ marker->disabled_channels = libmv_marker.disabled_channels;
+}
+
+void libmv_markerToApiMarker(const Marker& marker,
+ libmv_Marker *libmv_marker) {
+ libmv_marker->clip = marker.clip;
+ libmv_marker->frame = marker.frame;
+ libmv_marker->track = marker.track;
+ libmv_marker->center[0] = marker.center(0);
+ libmv_marker->center[1] = marker.center(1);
+ for (int i = 0; i < 4; i++) {
+ libmv_marker->patch[i][0] = marker.patch.coordinates(i, 0);
+ libmv_marker->patch[i][1] = marker.patch.coordinates(i, 1);
+ }
+ libmv_marker->search_region_min[0] = marker.search_region.min(0);
+ libmv_marker->search_region_min[1] = marker.search_region.min(1);
+ libmv_marker->search_region_max[0] = marker.search_region.max(0);
+ libmv_marker->search_region_max[1] = marker.search_region.max(1);
+ libmv_marker->weight = marker.weight;
+ libmv_marker->source = (libmv_MarkerSource) marker.source;
+ libmv_marker->status = (libmv_MarkerStatus) marker.status;
+ libmv_marker->reference_clip = marker.reference_clip;
+ libmv_marker->reference_frame = marker.reference_frame;
+ libmv_marker->model_type = (libmv_MarkerModelType) marker.model_type;
+ libmv_marker->model_id = marker.model_id;
+ libmv_marker->disabled_channels = marker.disabled_channels;
+}
+
+libmv_TracksN* libmv_tracksNewN(void) {
+ Tracks* tracks = LIBMV_OBJECT_NEW(Tracks);
+
+ return (libmv_TracksN*) tracks;
+}
+
+void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks) {
+ LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
+}
+
+void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
+ const libmv_Marker* libmv_marker) {
+ Marker marker;
+ libmv_apiMarkerToMarker(*libmv_marker, &marker);
+ ((Tracks*) libmv_tracks)->AddMarker(marker);
+}
+
+void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
+ int clip,
+ int frame,
+ int track,
+ libmv_Marker* libmv_marker) {
+ Marker marker;
+ ((Tracks*) libmv_tracks)->GetMarker(clip, frame, track, &marker);
+ libmv_markerToApiMarker(marker, libmv_marker);
+}
+
+void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
+ int clip,
+ int frame,
+ int track) {
+ ((Tracks *) libmv_tracks)->RemoveMarker(clip, frame, track);
+}
+
+void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
+ int track) {
+ ((Tracks *) libmv_tracks)->RemoveMarkersForTrack(track);
+}
+
+int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks) {
+ return ((Tracks*) libmv_tracks)->MaxClip();
+}
+
+int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip) {
+ return ((Tracks*) libmv_tracks)->MaxFrame(clip);
+}
+
+int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks) {
+ return ((Tracks*) libmv_tracks)->MaxTrack();
+}
+
+int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks) {
+ return ((Tracks*) libmv_tracks)->NumMarkers();
+}
diff --git a/intern/libmv/intern/tracksN.h b/intern/libmv/intern/tracksN.h
new file mode 100644
index 00000000000..d0cb54e40bc
--- /dev/null
+++ b/intern/libmv/intern/tracksN.h
@@ -0,0 +1,129 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+// TODO(sergey): For the time being, while we are converting the simple
+// pipeline to the autotrack pipeline, we call this "tracks".
+// Once the porting is done, the N suffix will be removed.
+
+#ifndef LIBMV_C_API_TRACKSN_H_
+#define LIBMV_C_API_TRACKSN_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct libmv_TracksN libmv_TracksN;
+
+// Keep the order in these enums exactly the same as in mv::Marker,
+// otherwise the API will not convert the values properly.
+typedef enum libmv_MarkerSource {
+ LIBMV_MARKER_SOURCE_MANUAL,
+ LIBMV_MARKER_SOURCE_DETECTED,
+ LIBMV_MARKER_SOURCE_TRACKED,
+ LIBMV_MARKER_SOURCE_MATCHED,
+ LIBMV_MARKER_SOURCE_PREDICTED,
+} libmv_MarkerSource;
+
+typedef enum libmv_MarkerStatus {
+ LIBMV_MARKER_STATUS_UNKNOWN,
+ LIBMV_MARKER_STATUS_INLIER,
+ LIBMV_MARKER_STATUS_OUTLIER,
+} libmv_MarkerStatus;
+
+typedef enum libmv_MarkerModelType {
+ LIBMV_MARKER_MODEL_TYPE_POINT,
+ LIBMV_MARKER_MODEL_TYPE_PLANE,
+ LIBMV_MARKER_MODEL_TYPE_LINE,
+ LIBMV_MARKER_MODEL_TYPE_CUBE,
+} libmv_MarkerModelType;
+
+enum libmv_MarkerChannel {
+ LIBMV_MARKER_CHANNEL_R = (1 << 0),
+ LIBMV_MARKER_CHANNEL_G = (1 << 1),
+ LIBMV_MARKER_CHANNEL_B = (1 << 2),
+};
+
+typedef struct libmv_Marker {
+ int clip;
+ int frame;
+ int track;
+ float center[2];
+ float patch[4][2];
+ float search_region_min[2];
+ float search_region_max[2];
+ float weight;
+ libmv_MarkerSource source;
+ libmv_MarkerStatus status;
+ int reference_clip;
+ int reference_frame;
+ libmv_MarkerModelType model_type;
+ int model_id;
+ int disabled_channels;
+} libmv_Marker;
+
+#ifdef __cplusplus
+namespace mv {
+ struct Marker;
+}
+void libmv_apiMarkerToMarker(const libmv_Marker& libmv_marker,
+ mv::Marker *marker);
+
+void libmv_markerToApiMarker(const mv::Marker& marker,
+ libmv_Marker *libmv_marker);
+#endif
+
+libmv_TracksN* libmv_tracksNewN(void);
+
+void libmv_tracksDestroyN(libmv_TracksN* libmv_tracks);
+
+
+void libmv_tracksAddMarkerN(libmv_TracksN* libmv_tracks,
+ const libmv_Marker* libmv_marker);
+
+void libmv_tracksGetMarkerN(libmv_TracksN* libmv_tracks,
+ int clip,
+ int frame,
+ int track,
+ libmv_Marker* libmv_marker);
+
+void libmv_tracksRemoveMarkerN(libmv_TracksN* libmv_tracks,
+ int clip,
+ int frame,
+ int track);
+
+void libmv_tracksRemoveMarkersForTrack(libmv_TracksN* libmv_tracks,
+ int track);
+
+int libmv_tracksMaxClipN(libmv_TracksN* libmv_tracks);
+int libmv_tracksMaxFrameN(libmv_TracksN* libmv_tracks, int clip);
+int libmv_tracksMaxTrackN(libmv_TracksN* libmv_tracks);
+int libmv_tracksNumMarkersN(libmv_TracksN* libmv_tracks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif  // LIBMV_C_API_TRACKSN_H_
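
Editorial sketch, not part of the patch: adding one marker through the autotrack-era ("N") container declared above. All field values are placeholders; the patch corners are left zeroed for brevity.

#include <string.h>
#include "intern/tracksN.h"

static void add_marker_example(libmv_TracksN *tracks) {
  libmv_Marker marker;
  memset(&marker, 0, sizeof(marker));  /* Patch corners left zeroed. */
  marker.clip = 0;
  marker.frame = 1;
  marker.track = 0;
  marker.center[0] = 320.0f;
  marker.center[1] = 240.0f;
  marker.search_region_min[0] = 300.0f;
  marker.search_region_min[1] = 220.0f;
  marker.search_region_max[0] = 340.0f;
  marker.search_region_max[1] = 260.0f;
  marker.weight = 1.0f;
  marker.source = LIBMV_MARKER_SOURCE_MANUAL;
  marker.status = LIBMV_MARKER_STATUS_UNKNOWN;
  marker.model_type = LIBMV_MARKER_MODEL_TYPE_POINT;
  libmv_tracksAddMarkerN(tracks, &marker);
}
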
diff --git a/intern/libmv/intern/utildefines.h b/intern/libmv/intern/utildefines.h
new file mode 100644
index 00000000000..b1f11d6091e
--- /dev/null
+++ b/intern/libmv/intern/utildefines.h
@@ -0,0 +1,62 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2013 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_UTILDEFINES_H_
+#define LIBMV_C_API_UTILDEFINES_H_
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
+# define __func__ __FUNCTION__
+# define snprintf _snprintf
+#endif
+
+#ifdef WITH_LIBMV_GUARDED_ALLOC
+# include "MEM_guardedalloc.h"
+# define LIBMV_OBJECT_NEW OBJECT_GUARDED_NEW
+# define LIBMV_OBJECT_DELETE OBJECT_GUARDED_DELETE
+# define LIBMV_STRUCT_NEW(type, count) \
+ (type*)MEM_mallocN(sizeof(type) * count, __func__)
+# define LIBMV_STRUCT_DELETE(what) MEM_freeN(what)
+#else
+// Need this to keep libmv-capi potentially standalone.
+# if defined __GNUC__ || defined __sun
+# define LIBMV_OBJECT_NEW(type, args ...) \
+ new(malloc(sizeof(type))) type(args)
+# else
+# define LIBMV_OBJECT_NEW(type, ...) \
+ new(malloc(sizeof(type))) type(__VA_ARGS__)
+# endif
+# define LIBMV_OBJECT_DELETE(what, type) \
+ { \
+ if (what) { \
+ ((type*)(what))->~type(); \
+ free(what); \
+ } \
+ } (void)0
+# define LIBMV_STRUCT_NEW(type, count) (type*)malloc(sizeof(type) * count)
+# define LIBMV_STRUCT_DELETE(what) { if (what) free(what); } (void)0
+#endif
+
+#endif // LIBMV_C_API_UTILDEFINES_H_
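
Editorial note, not part of the patch: the wrappers in intern/ pair these helpers as sketched below (mirroring intern/tracks.cc). The guarded variants only take effect when WITH_LIBMV_GUARDED_ALLOC is defined at build time.

#include "intern/utildefines.h"
#include "libmv/simple_pipeline/tracks.h"

using libmv::Tracks;

// Placement-new allocation through the macro...
static Tracks *tracks_alloc_example(void) {
  return LIBMV_OBJECT_NEW(Tracks);
}

// ...and the matching explicit-destructor release.
static void tracks_free_example(Tracks *tracks) {
  LIBMV_OBJECT_DELETE(tracks, Tracks);
}
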
diff --git a/intern/libmv/libmv-capi.h b/intern/libmv/libmv-capi.h
new file mode 100644
index 00000000000..92e206a19b2
--- /dev/null
+++ b/intern/libmv/libmv-capi.h
@@ -0,0 +1,42 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2011 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_C_API_H
+#define LIBMV_C_API_H
+
+#include "intern/autotrack.h"
+#include "intern/camera_intrinsics.h"
+#include "intern/detector.h"
+#include "intern/frame_accessor.h"
+#include "intern/homography.h"
+#include "intern/image.h"
+#include "intern/logging.h"
+#include "intern/reconstruction.h"
+#include "intern/track_region.h"
+#include "intern/tracks.h"
+#include "intern/tracksN.h"
+
+#endif // LIBMV_C_API_H
diff --git a/intern/libmv/libmv/autotrack/autotrack.cc b/intern/libmv/libmv/autotrack/autotrack.cc
new file mode 100644
index 00000000000..4c7bdf1fde8
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/autotrack.cc
@@ -0,0 +1,291 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "libmv/autotrack/autotrack.h"
+#include "libmv/autotrack/quad.h"
+#include "libmv/autotrack/frame_accessor.h"
+#include "libmv/autotrack/predict_tracks.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+
+namespace mv {
+
+namespace {
+
+class DisableChannelsTransform : public FrameAccessor::Transform {
+ public:
+ DisableChannelsTransform(int disabled_channels)
+ : disabled_channels_(disabled_channels) { }
+
+ int64_t key() const {
+ return disabled_channels_;
+ }
+
+ void run(const FloatImage& input, FloatImage* output) const {
+ bool disable_red = (disabled_channels_ & Marker::CHANNEL_R) != 0,
+ disable_green = (disabled_channels_ & Marker::CHANNEL_G) != 0,
+ disable_blue = (disabled_channels_ & Marker::CHANNEL_B) != 0;
+
+ LG << "Disabling channels: "
+ << (disable_red ? "R " : "")
+ << (disable_green ? "G " : "")
+ << (disable_blue ? "B" : "");
+
+ // It's important to rescale the result appropriately so that e.g. if only
+ // blue is selected, it's not zeroed out.
+ float scale = (disable_red ? 0.0f : 0.2126f) +
+ (disable_green ? 0.0f : 0.7152f) +
+ (disable_blue ? 0.0f : 0.0722f);
+
+ output->Resize(input.Height(), input.Width(), 1);
+ for (int y = 0; y < input.Height(); y++) {
+ for (int x = 0; x < input.Width(); x++) {
+ float r = disable_red ? 0.0f : input(y, x, 0);
+ float g = disable_green ? 0.0f : input(y, x, 1);
+ float b = disable_blue ? 0.0f : input(y, x, 2);
+ (*output)(y, x, 0) = (0.2126f * r + 0.7152f * g + 0.0722f * b) / scale;
+ }
+ }
+ }
+
+ private:
+ // Bitfield representing visible channels, bits are from Marker::Channel.
+ int disabled_channels_;
+};
+
+template<typename QuadT, typename ArrayT>
+void QuadToArrays(const QuadT& quad, ArrayT* x, ArrayT* y) {
+ for (int i = 0; i < 4; ++i) {
+ x[i] = quad.coordinates(i, 0);
+ y[i] = quad.coordinates(i, 1);
+ }
+}
+
+void MarkerToArrays(const Marker& marker, double* x, double* y) {
+ Quad2Df offset_quad = marker.patch;
+ Vec2f origin = marker.search_region.Rounded().min;
+ offset_quad.coordinates.rowwise() -= origin.transpose();
+ QuadToArrays(offset_quad, x, y);
+ x[4] = marker.center.x() - origin(0);
+ y[4] = marker.center.y() - origin(1);
+}
+
+FrameAccessor::Key GetImageForMarker(const Marker& marker,
+ FrameAccessor* frame_accessor,
+ FloatImage* image) {
+ // TODO(sergey): Currently we pass float region to the accessor,
+ // but we don't want the accessor to decide the rounding, so we
+ // do rounding here.
+ // Ideally we would need to pass IntRegion to the frame accessor.
+ Region region = marker.search_region.Rounded();
+ libmv::scoped_ptr<FrameAccessor::Transform> transform = NULL;
+ if (marker.disabled_channels != 0) {
+ transform.reset(new DisableChannelsTransform(marker.disabled_channels));
+ }
+ return frame_accessor->GetImage(marker.clip,
+ marker.frame,
+ FrameAccessor::MONO,
+ 0, // No downscale for now.
+ &region,
+ transform.get(),
+ image);
+}
+
+} // namespace
+
+bool AutoTrack::TrackMarker(Marker* tracked_marker,
+ TrackRegionResult* result,
+ const TrackRegionOptions* track_options) {
+ // Try to predict the location of the second marker.
+ bool predicted_position = false;
+ if (PredictMarkerPosition(tracks_, tracked_marker)) {
+ LG << "Succesfully predicted!";
+ predicted_position = true;
+ } else {
+ LG << "Prediction failed; trying to track anyway.";
+ }
+
+ Marker reference_marker;
+ tracks_.GetMarker(tracked_marker->reference_clip,
+ tracked_marker->reference_frame,
+ tracked_marker->track,
+ &reference_marker);
+
+ // Convert markers into the format expected by TrackRegion.
+ double x1[5], y1[5];
+ MarkerToArrays(reference_marker, x1, y1);
+
+ double x2[5], y2[5];
+ MarkerToArrays(*tracked_marker, x2, y2);
+
+ // TODO(keir): Technically this could take a smaller slice from the source
+ // image instead of taking one the size of the search window.
+ FloatImage reference_image;
+ FrameAccessor::Key reference_key = GetImageForMarker(reference_marker,
+ frame_accessor_,
+ &reference_image);
+ if (!reference_key) {
+ LG << "Couldn't get frame for reference marker: " << reference_marker;
+ return false;
+ }
+
+ FloatImage tracked_image;
+ FrameAccessor::Key tracked_key = GetImageForMarker(*tracked_marker,
+ frame_accessor_,
+ &tracked_image);
+ if (!tracked_key) {
+ frame_accessor_->ReleaseImage(reference_key);
+ LG << "Couldn't get frame for tracked marker: " << tracked_marker;
+ return false;
+ }
+
+ // Store the original position before tracking, so we can calculate the offset later.
+ Vec2f original_center = tracked_marker->center;
+
+ // Do the tracking!
+ TrackRegionOptions local_track_region_options;
+ if (track_options) {
+ local_track_region_options = *track_options;
+ }
+ local_track_region_options.num_extra_points = 1; // For center point.
+ local_track_region_options.attempt_refine_before_brute = predicted_position;
+ TrackRegion(reference_image,
+ tracked_image,
+ x1, y1,
+ local_track_region_options,
+ x2, y2,
+ result);
+
+ // Copy results over the tracked marker.
+ Vec2f tracked_origin = tracked_marker->search_region.Rounded().min;
+ for (int i = 0; i < 4; ++i) {
+ tracked_marker->patch.coordinates(i, 0) = x2[i] + tracked_origin[0];
+ tracked_marker->patch.coordinates(i, 1) = y2[i] + tracked_origin[1];
+ }
+ tracked_marker->center(0) = x2[4] + tracked_origin[0];
+ tracked_marker->center(1) = y2[4] + tracked_origin[1];
+ Vec2f delta = tracked_marker->center - original_center;
+ tracked_marker->search_region.Offset(delta);
+ tracked_marker->source = Marker::TRACKED;
+ tracked_marker->status = Marker::UNKNOWN;
+ tracked_marker->reference_clip = reference_marker.clip;
+ tracked_marker->reference_frame = reference_marker.frame;
+
+ // Release the images from the accessor cache.
+ frame_accessor_->ReleaseImage(reference_key);
+ frame_accessor_->ReleaseImage(tracked_key);
+
+ // TODO(keir): Possibly the return here should get removed since the results
+ // are part of TrackResult. However, eventually the autotrack stuff will have
+ // extra status (e.g. prediction fail, etc) that should get included.
+ return true;
+}
+
+void AutoTrack::AddMarker(const Marker& marker) {
+ tracks_.AddMarker(marker);
+}
+
+void AutoTrack::SetMarkers(vector<Marker>* markers) {
+ tracks_.SetMarkers(markers);
+}
+
+bool AutoTrack::GetMarker(int clip, int frame, int track,
+ Marker* markers) const {
+ return tracks_.GetMarker(clip, frame, track, markers);
+}
+
+void AutoTrack::DetectAndTrack(const DetectAndTrackOptions& options) {
+ int num_clips = frame_accessor_->NumClips();
+ for (int clip = 0; clip < num_clips; ++clip) {
+ int num_frames = frame_accessor_->NumFrames(clip);
+ vector<Marker> previous_frame_markers;
+ // Q: How to decide track #s when detecting?
+ // Q: How to match markers from previous frame? set of prev frame tracks?
+ // Q: How to decide what markers should get tracked and which ones should not?
+ for (int frame = 0; frame < num_frames; ++frame) {
+ if (Cancelled()) {
+ LG << "Got cancel message while detecting and tracking...";
+ return;
+ }
+ // First, get or detect markers for this frame.
+ vector<Marker> this_frame_markers;
+ tracks_.GetMarkersInFrame(clip, frame, &this_frame_markers);
+ LG << "Clip " << clip << ", frame " << frame << " have "
+ << this_frame_markers.size();
+ if (this_frame_markers.size() < options.min_num_features) {
+ DetectFeaturesInFrame(clip, frame);
+ this_frame_markers.clear();
+ tracks_.GetMarkersInFrame(clip, frame, &this_frame_markers);
+ LG << "... detected " << this_frame_markers.size() << " features.";
+ }
+ if (previous_frame_markers.empty()) {
+ LG << "First frame; skipping tracking stage.";
+ previous_frame_markers.swap(this_frame_markers);
+ continue;
+ }
+ // Second, find tracks that should get tracked forward into this frame.
+ // To avoid tracking markers that are already tracked to this frame, make
+ // a sorted set of the tracks that exist in the last frame.
+ vector<int> tracks_in_this_frame;
+ for (int i = 0; i < this_frame_markers.size(); ++i) {
+ tracks_in_this_frame.push_back(this_frame_markers[i].track);
+ }
+ std::sort(tracks_in_this_frame.begin(),
+ tracks_in_this_frame.end());
+
+ // Find tracks in the previous frame that are not in this one.
+ vector<Marker*> previous_frame_markers_to_track;
+ int num_skipped = 0;
+ for (int i = 0; i < previous_frame_markers.size(); ++i) {
+ if (std::binary_search(tracks_in_this_frame.begin(),
+ tracks_in_this_frame.end(),
+ previous_frame_markers[i].track)) {
+ num_skipped++;
+ } else {
+ previous_frame_markers_to_track.push_back(&previous_frame_markers[i]);
+ }
+ }
+
+ // Finally track the markers from the last frame into this one.
+ // TODO(keir): Use OMP.
+ for (int i = 0; i < previous_frame_markers_to_track.size(); ++i) {
+ Marker this_frame_marker = *previous_frame_markers_to_track[i];
+ this_frame_marker.frame = frame;
+ LG << "Tracking: " << this_frame_marker;
+ TrackRegionResult result;
+ TrackMarker(&this_frame_marker, &result);
+ if (result.is_usable()) {
+ LG << "Success: " << this_frame_marker;
+ AddMarker(this_frame_marker);
+ this_frame_markers.push_back(this_frame_marker);
+ } else {
+ LG << "Failed to track: " << this_frame_marker;
+ }
+ }
+ // Keep this frame's markers as the "previous frame" for the next iteration.
+ previous_frame_markers.swap(this_frame_markers);
+ }
+ }
+}
+
+} // namespace mv
diff --git a/intern/libmv/libmv/autotrack/autotrack.h b/intern/libmv/libmv/autotrack/autotrack.h
new file mode 100644
index 00000000000..1d7422f54e7
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/autotrack.h
@@ -0,0 +1,226 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_AUTOTRACK_H_
+#define LIBMV_AUTOTRACK_AUTOTRACK_H_
+
+#include "libmv/autotrack/tracks.h"
+#include "libmv/autotrack/region.h"
+#include "libmv/tracking/track_region.h"
+
+namespace libmv {
+class CameraIntrinsics;
+};
+
+namespace mv {
+
+using libmv::CameraIntrinsics;
+using libmv::TrackRegionOptions;
+using libmv::TrackRegionResult;
+
+struct FrameAccessor;
+class OperationListener;
+
+// The coordinator of all tracking operations; keeps track of all state
+// relating to tracking and reconstruction; for example, 2D tracks and motion
+// models, reconstructed cameras, points, and planes; tracking settings; etc.
+//
+// Typical usage for full autotrack:
+//
+// AutoTrack auto_track(image_accessor);
+// auto_track.SetNumFramesInClip(0, 10);
+// auto_track.SetNumFramesInClip(1, 54);
+// auto_track.AutoTrack()
+//
+// It is also possible to specify options to control the reconstruction.
+// Furthermore, the individual methods of reconstruction are exposed to make it
+// possible to interact with the pipeline as it runs. For example, to track one
+// marker across frames,
+//
+// AutoTrack auto_track(image_accessor);
+// auto_track.SetNumFramesInClip(0, 10);
+// auto_track.SetNumFramesInClip(1, 54);
+// auto_track.AddMarker(...);
+// auto_track.TrackMarkerToFrame(int clip1, int frame1,
+// int clip2, int frame2,
+// options?)
+//
+class AutoTrack {
+ public:
+ struct Options {
+ // Default configuration for 2D tracking when calling TrackMarkerToFrame().
+ TrackRegionOptions track_region;
+
+ // Default search window for region tracking, in absolute frame pixels.
+ Region search_region;
+ };
+
+ AutoTrack(FrameAccessor* frame_accessor)
+ : frame_accessor_(frame_accessor) {}
+
+ // Marker manipulation.
+ // Clip manipulation.
+
+ // Set the number of clips. These clips will get accessed from the frame
+ // accessor, matches between frames found, and a reconstruction created.
+ //void SetNumFrames(int clip, int num_frames);
+
+ // Tracking & Matching
+
+ // Find the marker for the track in the frame indicated by the marker.
+ // Caller maintains ownership of *result and *tracked_marker.
+ bool TrackMarker(Marker* tracked_marker,
+ TrackRegionResult* result,
+ const TrackRegionOptions* track_options=NULL);
+
+ // Wrapper around Tracks API; however these may add additional processing.
+ void AddMarker(const Marker& tracked_marker);
+ void SetMarkers(vector<Marker>* markers);
+ bool GetMarker(int clip, int frame, int track, Marker* marker) const;
+
+ // TODO(keir): Implement frame matching! This could be very cool for loop
+ // closing and connecting across clips.
+ //void MatchFrames(int clip1, int frame1, int clip2, int frame2) {}
+
+ // Wrapper around the Reconstruction API.
+ // Returns the new ID.
+ int AddCameraIntrinsics(CameraIntrinsics* intrinsics) {
+ (void) intrinsics;
+ return 0;
+ } // XXX
+ int SetClipIntrinsics(int clip, int intrinsics) {
+ (void) clip;
+ (void) intrinsics;
+ return 0;
+ } // XXX
+
+ enum Motion {
+ GENERAL_CAMERA_MOTION,
+ TRIPOD_CAMERA_MOTION,
+ };
+ int SetClipMotion(int clip, Motion motion) {
+ (void) clip;
+ (void) motion;
+ return 0;
+ } // XXX
+
+ // Decide what to refine for the given intrinsics. bundle_options is from
+ // bundle.h (e.g. BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL_K1).
+ void SetIntrinsicsRefine(int intrinsics, int bundle_options) {
+ (void) intrinsics;
+ (void) bundle_options;
+ } // XXX
+
+ // Keyframe read/write.
+ struct ClipFrame {
+ int clip;
+ int frame;
+ };
+ const vector<ClipFrame>& keyframes() { return keyframes_; }
+ void ClearKeyframes() { keyframes_.clear(); }
+ void SetKeyframes(const vector<ClipFrame>& keyframes) {
+ keyframes_ = keyframes;
+ }
+
+ // What about reporting what happened? -- callbacks; maybe result struct.
+ void Reconstruct();
+
+ // Detect and track in 2D.
+ struct DetectAndTrackOptions {
+ int min_num_features;
+ };
+ void DetectAndTrack(const DetectAndTrackOptions& options);
+
+ struct DetectFeaturesInFrameOptions {
+ };
+ void DetectFeaturesInFrame(int clip, int frame,
+ const DetectFeaturesInFrameOptions* options=NULL) {
+ (void) clip;
+ (void) frame;
+ (void) options;
+ } // XXX
+
+ // Does not take ownership of the given listener, but keeps a reference to it.
+ void AddListener(OperationListener* listener) {(void) listener;} // XXX
+
+ // Create the initial reconstruction,
+ //void FindInitialReconstruction();
+
+ // State machine
+ //
+ // Question: Have explicit state? Or determine state from existing data?
+ // Conclusion: Determine state from existing data.
+ //
+ // Preliminary state thoughts
+ //
+ // No tracks or markers
+ // - Tracks empty.
+ //
+ // Initial tracks found
+ // - All images have at least 5 tracks
+ //
+ // Ran RANSAC on tracks to mark inliers / outliers.
+ // - All images have at least 8 "inlier" tracks
+ //
+ // Detector matching run to close loops and match across clips
+ // - At least 5 matching tracks between clips
+ //
+ // Initial reconstruction found (2 frames)?
+  //   - There exist two cameras with intrinsics / extrinsics
+ //
+ // Preliminary reconstruction finished
+ // - Poses for all frames in all clips estimated.
+ //
+ // Final reconstruction finished
+ // - Final reconstruction bundle adjusted.
+
+ // For now, expose options directly. In the future this may change.
+ Options options;
+
+ private:
+ bool Log();
+ bool Progress();
+ bool Cancelled() { return false; }
+
+ Tracks tracks_; // May be normalized camera coordinates or raw pixels.
+ //Reconstruction reconstruction_;
+
+ // TODO(keir): Add the motion models here.
+ //vector<MotionModel> motion_models_;
+
+ // TODO(keir): Should num_clips and num_frames get moved to FrameAccessor?
+ // TODO(keir): What about masking for clips and frames to prevent various
+ // things like reconstruction or tracking from happening on certain frames?
+ FrameAccessor* frame_accessor_;
+ //int num_clips_;
+ //vector<int> num_frames_; // Indexed by clip.
+
+ // The intrinsics for each clip, assuming each clip has fixed intrinsics.
+ // TODO(keir): Decide what the semantics should be for varying focal length.
+ vector<int> clip_intrinsics_;
+
+ vector<ClipFrame> keyframes_;
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_AUTOTRACK_H_
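
A short usage sketch of the single-marker tracking path declared above (GetMarker, TrackMarker, AddMarker); the clip, frame and track numbers are made up for illustration, and auto_track is assumed to already hold the frame-3 marker.

  #include "libmv/autotrack/autotrack.h"

  // Track the marker of track 7 from frame 3 to frame 4 of clip 0.
  void TrackOneMarkerForward(mv::AutoTrack* auto_track) {
    mv::Marker marker;
    if (!auto_track->GetMarker(0, 3, 7, &marker)) {
      return;  // Nothing to track.
    }
    marker.frame = 4;  // Ask the tracker to place this track in frame 4.
    mv::TrackRegionResult result;
    if (auto_track->TrackMarker(&marker, &result) && result.is_usable()) {
      auto_track->AddMarker(marker);  // Keep the successfully tracked marker.
    }
  }
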
diff --git a/intern/libmv/libmv/autotrack/callbacks.h b/intern/libmv/libmv/autotrack/callbacks.h
new file mode 100644
index 00000000000..e65841de3ce
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/callbacks.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_LISTENER_H_
+#define LIBMV_AUTOTRACK_LISTENER_H_
+
+namespace mv {
+
+struct OperationListener {
+ // All hooks return true to continue or false to indicate the operation
+ // should abort. Hooks should be thread safe (reentrant).
+ virtual bool Log(const string& message) = 0;
+ virtual bool Progress(double fraction) = 0;
+ virtual bool Cancelled() = 0;
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_LISTENER_H_
diff --git a/intern/libmv/libmv/autotrack/frame_accessor.h b/intern/libmv/libmv/autotrack/frame_accessor.h
new file mode 100644
index 00000000000..8de5d865cd7
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/frame_accessor.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
+#define LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
+
+#include <stdint.h>
+
+#include "libmv/image/image.h"
+
+namespace mv {
+
+struct Region;
+
+using libmv::FloatImage;
+
+// This is the abstraction to different sources of images that will be part of
+// a reconstruction. These may come from disk or they may come from Blender. In
+// most cases it's expected that the implementation provides some caching
+// otherwise performance will be terrible. Sometimes the images need to get
+// filtered, and this interface provides for that as well (and permits
+// implementations to cache filtered image pieces).
+struct FrameAccessor {
+ struct Transform {
+ virtual ~Transform() { }
+ // The key should depend on the transform arguments. Must be non-zero.
+ virtual int64_t key() const = 0;
+
+ // Apply the expected transform. Output is sized correctly already.
+ // TODO(keir): What about blurs that need to access pixels outside the ROI?
+ virtual void run(const FloatImage& input, FloatImage* output) const = 0;
+ };
+
+ enum InputMode {
+ MONO,
+ RGBA
+ };
+
+ typedef void* Key;
+
+ // Get a possibly-filtered version of a frame of a video. Downscale will
+ // cause the input image to get downscaled by 2^downscale for pyramid access.
+ // Region is always in original-image coordinates, and describes the
+ // requested area. The transform describes an (optional) transform to apply
+ // to the image before it is returned.
+ //
+ // When done with an image, you must call ReleaseImage with the returned key.
+ virtual Key GetImage(int clip,
+ int frame,
+ InputMode input_mode,
+ int downscale, // Downscale by 2^downscale.
+ const Region* region, // Get full image if NULL.
+ const Transform* transform, // May be NULL.
+ FloatImage* destination) = 0;
+
+ // Releases an image from the frame accessor. Non-caching implementations may
+ // free the image immediately; others may hold onto the image.
+ virtual void ReleaseImage(Key) = 0;
+
+ virtual bool GetClipDimensions(int clip, int* width, int* height) = 0;
+ virtual int NumClips() = 0;
+ virtual int NumFrames(int clip) = 0;
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_FRAME_ACCESSOR_H_
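
As a rough illustration of the contract above, a toy non-caching implementation that serves a single clip of preloaded images; it ignores regions, transforms and downscaling, so it is a sketch under those assumptions rather than something usable as-is.

  #include "libmv/autotrack/frame_accessor.h"
  #include "libmv/base/vector.h"

  struct InMemoryFrameAccessor : public mv::FrameAccessor {
    libmv::vector<libmv::FloatImage> frames;  // One clip, owned here.

    Key GetImage(int clip, int frame, InputMode input_mode, int downscale,
                 const mv::Region* region, const Transform* transform,
                 libmv::FloatImage* destination) {
      (void) clip; (void) input_mode; (void) downscale;
      (void) region; (void) transform;
      *destination = frames[frame];  // No caching or filtering; just copy.
      return (Key) &frames[frame];
    }
    void ReleaseImage(Key) {}  // Images stay owned by this accessor.
    bool GetClipDimensions(int clip, int* width, int* height) {
      (void) clip;
      *width = frames[0].Width();
      *height = frames[0].Height();
      return true;
    }
    int NumClips() { return 1; }
    int NumFrames(int clip) { (void) clip; return frames.size(); }
  };
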
diff --git a/intern/libmv/libmv/autotrack/marker.h b/intern/libmv/libmv/autotrack/marker.h
new file mode 100644
index 00000000000..bb803313af8
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/marker.h
@@ -0,0 +1,144 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_MARKER_H_
+#define LIBMV_AUTOTRACK_MARKER_H_
+
+#include <ostream>
+
+#include "libmv/autotrack/quad.h"
+#include "libmv/autotrack/region.h"
+#include "libmv/numeric/numeric.h"
+
+namespace mv {
+
+using libmv::Vec2f;
+
+// A marker is the 2D location of a tracked region (quad) in an image.
+// Note that some of this information could be normalized by having a
+// collection of inter-connected structs. Instead the "fat Marker" design below
+// trades memory for data structure simplicity.
+struct Marker {
+ int clip; // The clip this marker is from.
+ int frame; // The frame within the clip this marker is from.
+ int track; // The track this marker is from.
+
+ // The center of the marker in frame coordinates. This is typically, but not
+ // always, the same as the center of the patch.
+ Vec2f center;
+
+  // A frame-relative quad defining the part of the image the marker covers.
+ // For reference markers, the pixels in the patch are the tracking pattern.
+ Quad2Df patch;
+
+ // Some markers are less certain than others; the weight determines the
+ // amount this marker contributes to the error. 1.0 indicates normal
+ // contribution; 0.0 indicates a zero-weight track (and will be omitted from
+ // bundle adjustment).
+ float weight;
+
+ enum Source {
+ MANUAL, // The user placed this marker manually.
+ DETECTED, // A keypoint detector found this point.
+ TRACKED, // The tracking algorithm placed this marker.
+ MATCHED, // A matching algorithm (e.g. SIFT or SURF or ORB) found this.
+ PREDICTED, // A motion model predicted this marker. This is needed for
+ // handling occlusions in some cases where an imaginary marker
+ // is placed to keep camera motion smooth.
+ };
+ Source source;
+
+ // Markers may be inliers or outliers if the tracking fails; this allows
+ // visualizing the markers in the image.
+ enum Status {
+ UNKNOWN,
+ INLIER,
+ OUTLIER
+ };
+ Status status;
+
+ // When doing correlation tracking, where to search in the current frame for
+ // the pattern from the reference frame, in absolute frame coordinates.
+ Region search_region;
+
+ // For tracked and matched markers, indicates what the reference was.
+ int reference_clip;
+ int reference_frame;
+
+ // Model related information for non-point tracks.
+ //
+ // Some tracks are on a larger object, such as a plane or a line or perhaps
+  // another primitive (a rectangular prism). This captures the information
+  // needed to say, for example, that a collection of markers belongs to model #2
+ // (and model #2 is a plane).
+ enum ModelType {
+ POINT,
+ PLANE,
+ LINE,
+ CUBE
+ };
+ ModelType model_type;
+
+  // The model ID this track belongs to (e.g. the second model, which is a
+  // plane).
+ int model_id;
+
+ // TODO(keir): Add a "int model_argument" to capture that e.g. a marker is on
+ // the 3rd face of a cube.
+
+ enum Channel {
+ CHANNEL_R = (1 << 0),
+ CHANNEL_G = (1 << 1),
+ CHANNEL_B = (1 << 2),
+ };
+
+ // Channels from the original frame which this marker is unable to see.
+ int disabled_channels;
+
+ // Offset everything (center, patch, search) by the given delta.
+ template<typename T>
+ void Offset(const T& offset) {
+ center += offset.template cast<float>();
+ patch.coordinates.rowwise() += offset.template cast<int>();
+ search_region.Offset(offset);
+ }
+
+ // Shift the center to the given new position (and patch, search).
+ template<typename T>
+ void SetPosition(const T& new_center) {
+ Offset(new_center - center);
+ }
+};
+
+inline std::ostream& operator<<(std::ostream& out, const Marker& marker) {
+ out << "{"
+ << marker.clip << ", "
+ << marker.frame << ", "
+ << marker.track << ", ("
+ << marker.center.x() << ", "
+ << marker.center.y() << ")"
+ << "}";
+ return out;
+}
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_MARKER_H_
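
A tiny sketch showing how a Marker is filled in by hand and printed with the stream operator defined above; the field values are arbitrary.

  #include <iostream>
  #include "libmv/autotrack/marker.h"

  void PrintExampleMarker() {
    mv::Marker marker;
    marker.clip = 0;
    marker.frame = 12;
    marker.track = 3;
    marker.center.x() = 64.5f;
    marker.center.y() = 128.0f;
    marker.weight = 1.0f;                  // Full contribution to the error.
    marker.source = mv::Marker::TRACKED;
    marker.status = mv::Marker::UNKNOWN;
    std::cout << marker << "\n";           // Prints {0, 12, 3, (64.5, 128)}
  }
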
diff --git a/intern/libmv/libmv/autotrack/model.h b/intern/libmv/libmv/autotrack/model.h
new file mode 100644
index 00000000000..1165281cdac
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/model.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_MODEL_H_
+#define LIBMV_AUTOTRACK_MODEL_H_
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/autotrack/quad.h"
+
+namespace mv {
+
+struct Model {
+ enum ModelType {
+ POINT,
+ PLANE,
+ LINE,
+ CUBE
+ };
+
+ // ???
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_MODEL_H_
diff --git a/intern/libmv/libmv/autotrack/predict_tracks.cc b/intern/libmv/libmv/autotrack/predict_tracks.cc
new file mode 100644
index 00000000000..adc986a0033
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/predict_tracks.cc
@@ -0,0 +1,316 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "libmv/autotrack/marker.h"
+#include "libmv/autotrack/predict_tracks.h"
+#include "libmv/autotrack/tracks.h"
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/tracking/kalman_filter.h"
+
+namespace mv {
+
+namespace {
+
+using libmv::vector;
+using libmv::Vec2;
+
+// Implied time delta between steps. Set empirically by tweaking and seeing
+// what numbers did best at prediction.
+const double dt = 3.8;
+
+// State transition matrix.
+
+// The states for predicting a track are as follows:
+//
+// 0 - X position
+// 1 - X velocity
+// 2 - X acceleration
+// 3 - Y position
+// 4 - Y velocity
+// 5 - Y acceleration
+//
+// Note that in the velocity-only state transition matrix, the acceleration
+// component is ignored; so technically the system could be modelled with only
+// 4 states instead of 6. For ease of implementation, this keeps order 6.
+
+// Choose one or the other model from below (velocity or acceleration).
+
+// For a typical system having constant velocity. This gives smooth-appearing
+// predictions, but they are not always as accurate.
+const double velocity_state_transition_data[] = {
+ 1, dt, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 1, dt, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 1
+};
+
+// This 3rd-order system also models acceleration. This makes for "jerky"
+// predictions, but they tend to be more accurate.
+const double acceleration_state_transition_data[] = {
+ 1, dt, dt*dt/2, 0, 0, 0,
+ 0, 1, dt, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 1, dt, dt*dt/2,
+ 0, 0, 0, 0, 1, dt,
+ 0, 0, 0, 0, 0, 1
+};
+
+// This system attempts to add an angular velocity component. However, it's
+// total junk.
+const double angular_state_transition_data[] = {
+ 1, dt, -dt, 0, 0, 0, // Position x
+ 0, 1, 0, 0, 0, 0, // Velocity x
+ 0, 0, 1, 0, 0, 0, // Angular momentum
+ 0, 0, dt, 1, dt, 0, // Position y
+ 0, 0, 0, 0, 1, 0, // Velocity y
+ 0, 0, 0, 0, 0, 1 // Ignored
+};
+
+const double* state_transition_data = velocity_state_transition_data;
+
+// Observation matrix.
+const double observation_data[] = {
+ 1., 0., 0., 0., 0., 0.,
+ 0., 0., 0., 1., 0., 0.
+};
+
+// Process covariance.
+const double process_covariance_data[] = {
+ 35, 0, 0, 0, 0, 0,
+ 0, 5, 0, 0, 0, 0,
+ 0, 0, 5, 0, 0, 0,
+ 0, 0, 0, 35, 0, 0,
+ 0, 0, 0, 0, 5, 0,
+ 0, 0, 0, 0, 0, 5
+};
+
+// Measurement covariance.
+const double measurement_covariance_data[] = {
+ 0.01, 0.00,
+ 0.00, 0.01,
+};
+
+// Initial covariance.
+const double initial_covariance_data[] = {
+ 10, 0, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0, 0,
+ 0, 0, 1, 0, 0, 0,
+ 0, 0, 0, 10, 0, 0,
+ 0, 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 0, 1
+};
+
+typedef mv::KalmanFilter<double, 6, 2> TrackerKalman;
+
+TrackerKalman filter(state_transition_data,
+ observation_data,
+ process_covariance_data,
+ measurement_covariance_data);
+
+bool OrderByFrameLessThan(const Marker* a, const Marker* b) {
+ if (a->frame == b->frame) {
+ if (a->clip == b->clip) {
+ return a->track < b->track;
+ }
+ return a->clip < b->clip;
+ }
+  return a->frame < b->frame;
+}
+
+// Predicted must be after the previous markers (in the frame numbering sense).
+void RunPrediction(const vector<Marker*> previous_markers,
+ Marker* predicted_marker) {
+ TrackerKalman::State state;
+ state.mean << previous_markers[0]->center.x(), 0, 0,
+ previous_markers[0]->center.y(), 0, 0;
+ state.covariance = Eigen::Matrix<double, 6, 6, Eigen::RowMajor>(
+ initial_covariance_data);
+
+ int current_frame = previous_markers[0]->frame;
+ int target_frame = predicted_marker->frame;
+
+ bool predict_forward = current_frame < target_frame;
+ int frame_delta = predict_forward ? 1 : -1;
+
+ for (int i = 1; i < previous_markers.size(); ++i) {
+ // Step forward predicting the state until it is on the current marker.
+ int predictions = 0;
+ for (;
+ current_frame != previous_markers[i]->frame;
+ current_frame += frame_delta) {
+ filter.Step(&state);
+ predictions++;
+ LG << "Predicted point (frame " << current_frame << "): "
+ << state.mean(0) << ", " << state.mean(3);
+ }
+ // Log the error -- not actually used, but interesting.
+ Vec2 error = previous_markers[i]->center.cast<double>() -
+ Vec2(state.mean(0), state.mean(3));
+ LG << "Prediction error for " << predictions << " steps: ("
+ << error.x() << ", " << error.y() << "); norm: " << error.norm();
+ // Now that the state is predicted in the current frame, update the state
+ // based on the measurement from the current frame.
+ filter.Update(previous_markers[i]->center.cast<double>(),
+ Eigen::Matrix<double, 2, 2, Eigen::RowMajor>(
+ measurement_covariance_data),
+ &state);
+ LG << "Updated point: " << state.mean(0) << ", " << state.mean(3);
+ }
+  // At this point, all the prediction that is possible from the existing
+  // markers is done. Finally, predict until the target frame is reached.
+ for (; current_frame != target_frame; current_frame += frame_delta) {
+ filter.Step(&state);
+ LG << "Final predicted point (frame " << current_frame << "): "
+ << state.mean(0) << ", " << state.mean(3);
+ }
+
+ // The x and y positions are at 0 and 3; ignore acceleration and velocity.
+ predicted_marker->center.x() = state.mean(0);
+ predicted_marker->center.y() = state.mean(3);
+
+ // Take the patch from the last marker then shift it to match the prediction.
+ const Marker& last_marker = *previous_markers[previous_markers.size() - 1];
+ predicted_marker->patch = last_marker.patch;
+ Vec2f delta = predicted_marker->center - last_marker.center;
+ for (int i = 0; i < 4; ++i) {
+ predicted_marker->patch.coordinates.row(i) += delta;
+ }
+
+ // Alter the search area as well so it always corresponds to the center.
+ predicted_marker->search_region = last_marker.search_region;
+ predicted_marker->search_region.Offset(delta);
+}
+
+} // namespace
+
+bool PredictMarkerPosition(const Tracks& tracks, Marker* marker) {
+ // Get all markers for this clip and track.
+ vector<Marker> markers;
+ tracks.GetMarkersForTrackInClip(marker->clip, marker->track, &markers);
+
+ if (markers.empty()) {
+ LG << "No markers to predict from for " << *marker;
+ return false;
+ }
+
+ // Order the markers by frame within the clip.
+ vector<Marker*> boxed_markers(markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ boxed_markers[i] = &markers[i];
+ }
+ std::sort(boxed_markers.begin(), boxed_markers.end(), OrderByFrameLessThan);
+
+ // Find the insertion point for this marker among the returned ones.
+ int insert_at = -1; // If we find the exact frame
+ int insert_before = -1; // Otherwise...
+ for (int i = 0; i < boxed_markers.size(); ++i) {
+ if (boxed_markers[i]->frame == marker->frame) {
+ insert_at = i;
+ break;
+ }
+ if (boxed_markers[i]->frame > marker->frame) {
+ insert_before = i;
+ break;
+ }
+ }
+
+ // Forward starts at the marker or insertion point, and goes forward.
+ int forward_scan_begin, forward_scan_end;
+
+ // Backward scan starts at the marker or insertion point, and goes backward.
+ int backward_scan_begin, backward_scan_end;
+
+ // Determine the scanning ranges.
+ if (insert_at == -1 && insert_before == -1) {
+ // Didn't find an insertion point except the end.
+ forward_scan_begin = forward_scan_end = 0;
+ backward_scan_begin = markers.size() - 1;
+ backward_scan_end = 0;
+ } else if (insert_at != -1) {
+ // Found existing marker; scan before and after it.
+ forward_scan_begin = insert_at + 1;
+    forward_scan_end = markers.size() - 1;
+ backward_scan_begin = insert_at - 1;
+ backward_scan_end = 0;
+ } else {
+ // Didn't find existing marker but found an insertion point.
+ forward_scan_begin = insert_before;
+    forward_scan_end = markers.size() - 1;
+ backward_scan_begin = insert_before - 1;
+ backward_scan_end = 0;
+ }
+
+ const int num_consecutive_needed = 2;
+
+ if (forward_scan_begin <= forward_scan_end &&
+ forward_scan_end - forward_scan_begin > num_consecutive_needed) {
+ // TODO(keir): Finish this.
+ }
+
+ bool predict_forward = false;
+ if (backward_scan_end <= backward_scan_begin) {
+ // TODO(keir): Add smarter handling and detecting of consecutive frames!
+ predict_forward = true;
+ }
+
+ const int max_frames_to_predict_from = 20;
+ if (predict_forward) {
+ if (backward_scan_begin - backward_scan_end < num_consecutive_needed) {
+ // Not enough information to do a prediction.
+ LG << "Predicting forward impossible, not enough information";
+ return false;
+ }
+ LG << "Predicting forward";
+ int predict_begin =
+ std::max(backward_scan_begin - max_frames_to_predict_from, 0);
+ int predict_end = backward_scan_begin;
+ vector<Marker*> previous_markers;
+ for (int i = predict_begin; i <= predict_end; ++i) {
+ previous_markers.push_back(boxed_markers[i]);
+ }
+ RunPrediction(previous_markers, marker);
+ return true;
+ } else {
+ if (forward_scan_end - forward_scan_begin < num_consecutive_needed) {
+ // Not enough information to do a prediction.
+ LG << "Predicting backward impossible, not enough information";
+ return false;
+ }
+ LG << "Predicting backward";
+ int predict_begin =
+ std::min(forward_scan_begin + max_frames_to_predict_from,
+ forward_scan_end);
+ int predict_end = forward_scan_begin;
+ vector<Marker*> previous_markers;
+ for (int i = predict_begin; i >= predict_end; --i) {
+ previous_markers.push_back(boxed_markers[i]);
+ }
+ RunPrediction(previous_markers, marker);
+ return false;
+ }
+
+}
+
+} // namespace mv
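
Separate from the code above, a small sketch of what one predict step of the velocity-only model does to the 6-element state (x, vx, ax, y, vy, ay): position advances by velocity times dt, while velocity and the unused acceleration entries pass through unchanged.

  #include <Eigen/Core>

  // One predict step x' = F * x with the velocity-only transition above.
  Eigen::Matrix<double, 6, 1> PredictOneStep(
      const Eigen::Matrix<double, 6, 1>& state) {
    const double dt = 3.8;  // Same empirically chosen time delta as above.
    Eigen::Matrix<double, 6, 6> F;
    F << 1, dt, 0, 0,  0, 0,
         0,  1, 0, 0,  0, 0,
         0,  0, 1, 0,  0, 0,
         0,  0, 0, 1, dt, 0,
         0,  0, 0, 0,  1, 0,
         0,  0, 0, 0,  0, 1;
    return F * state;
  }
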
diff --git a/intern/libmv/libmv/autotrack/predict_tracks.h b/intern/libmv/libmv/autotrack/predict_tracks.h
new file mode 100644
index 00000000000..0a176d08378
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/predict_tracks.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
+#define LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
+
+namespace mv {
+
+class Tracks;
+struct Marker;
+
+// Predict the position of the given marker, and update it accordingly. The
+// existing position will be overwritten.
+bool PredictMarkerPosition(const Tracks& tracks, Marker* marker);
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_PREDICT_TRACKS_H_
diff --git a/intern/libmv/libmv/autotrack/predict_tracks_test.cc b/intern/libmv/libmv/autotrack/predict_tracks_test.cc
new file mode 100644
index 00000000000..fc90e260d94
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/predict_tracks_test.cc
@@ -0,0 +1,201 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "libmv/autotrack/predict_tracks.h"
+
+#include "libmv/autotrack/marker.h"
+#include "libmv/autotrack/tracks.h"
+#include "libmv/logging/logging.h"
+#include "testing/testing.h"
+
+namespace mv {
+
+void AddMarker(int frame, float x, float y, Tracks* tracks) {
+ Marker marker;
+ marker.clip = marker.track = 0;
+ marker.frame = frame;
+ marker.center.x() = x;
+ marker.center.y() = y;
+ marker.patch.coordinates << x - 1, y - 1,
+ x + 1, y - 1,
+ x + 1, y + 1,
+ x - 1, y + 1;
+ tracks->AddMarker(marker);
+}
+
+TEST(PredictMarkerPosition, EasyLinearMotion) {
+ Tracks tracks;
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(2, 3.0, 10.0, &tracks);
+ AddMarker(3, 4.0, 15.0, &tracks);
+ AddMarker(4, 5.0, 20.0, &tracks);
+ AddMarker(5, 6.0, 25.0, &tracks);
+ AddMarker(6, 7.0, 30.0, &tracks);
+ AddMarker(7, 8.0, 35.0, &tracks);
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 8;
+
+ PredictMarkerPosition(tracks, &predicted);
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 0.1);
+
+ // Check the patch coordinates as well.
+ double x = 9, y = 40.0;
+ Quad2Df expected_patch;
+ expected_patch.coordinates << x - 1, y - 1,
+ x + 1, y - 1,
+ x + 1, y + 1,
+ x - 1, y + 1;
+
+ error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
+ LG << "Patch error: " << error;
+ EXPECT_LT(error, 0.1);
+}
+
+TEST(PredictMarkerPosition, EasyBackwardLinearMotion) {
+ Tracks tracks;
+ AddMarker(8, 1.0, 0.0, &tracks);
+ AddMarker(7, 2.0, 5.0, &tracks);
+ AddMarker(6, 3.0, 10.0, &tracks);
+ AddMarker(5, 4.0, 15.0, &tracks);
+ AddMarker(4, 5.0, 20.0, &tracks);
+ AddMarker(3, 6.0, 25.0, &tracks);
+ AddMarker(2, 7.0, 30.0, &tracks);
+ AddMarker(1, 8.0, 35.0, &tracks);
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 0;
+
+ PredictMarkerPosition(tracks, &predicted);
+ LG << predicted;
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 0.1);
+
+ // Check the patch coordinates as well.
+ double x = 9.0, y = 40.0;
+ Quad2Df expected_patch;
+ expected_patch.coordinates << x - 1, y - 1,
+ x + 1, y - 1,
+ x + 1, y + 1,
+ x - 1, y + 1;
+
+ error = (expected_patch.coordinates - predicted.patch.coordinates).norm();
+ LG << "Patch error: " << error;
+ EXPECT_LT(error, 0.1);
+}
+
+TEST(PredictMarkerPosition, TwoFrameGap) {
+ Tracks tracks;
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(2, 3.0, 10.0, &tracks);
+ AddMarker(3, 4.0, 15.0, &tracks);
+ AddMarker(4, 5.0, 20.0, &tracks);
+ AddMarker(5, 6.0, 25.0, &tracks);
+ AddMarker(6, 7.0, 30.0, &tracks);
+ // Missing frame 7!
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 8;
+
+ PredictMarkerPosition(tracks, &predicted);
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 0.1);
+}
+
+TEST(PredictMarkerPosition, FourFrameGap) {
+ Tracks tracks;
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(2, 3.0, 10.0, &tracks);
+ AddMarker(3, 4.0, 15.0, &tracks);
+ // Missing frames 4, 5, 6, 7.
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 8;
+
+ PredictMarkerPosition(tracks, &predicted);
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 2.0); // Generous error due to larger prediction window.
+}
+
+TEST(PredictMarkerPosition, MultipleGaps) {
+ Tracks tracks;
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
+ AddMarker(2, 3.0, 10.0, &tracks);
+ // AddMarker(3, 4.0, 15.0, &tracks); // Note the 3-frame gap.
+ // AddMarker(4, 5.0, 20.0, &tracks);
+ // AddMarker(5, 6.0, 25.0, &tracks);
+ AddMarker(6, 7.0, 30.0, &tracks); // Intermediate measurement.
+ // AddMarker(7, 8.0, 35.0, &tracks);
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 8;
+
+ PredictMarkerPosition(tracks, &predicted);
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 1.0); // Generous error due to larger prediction window.
+}
+
+TEST(PredictMarkerPosition, MarkersInRandomOrder) {
+ Tracks tracks;
+
+ // This is the same as the easy, except that the tracks are randomly ordered.
+ AddMarker(0, 1.0, 0.0, &tracks);
+ AddMarker(2, 3.0, 10.0, &tracks);
+ AddMarker(7, 8.0, 35.0, &tracks);
+ AddMarker(5, 6.0, 25.0, &tracks);
+ AddMarker(4, 5.0, 20.0, &tracks);
+ AddMarker(3, 4.0, 15.0, &tracks);
+ AddMarker(6, 7.0, 30.0, &tracks);
+ AddMarker(1, 2.0, 5.0, &tracks);
+
+ Marker predicted;
+ predicted.clip = 0;
+ predicted.track = 0;
+ predicted.frame = 8;
+
+ PredictMarkerPosition(tracks, &predicted);
+ double error = (libmv::Vec2f(9.0, 40.0) - predicted.center).norm();
+ LG << "Got error: " << error;
+ EXPECT_LT(error, 0.1);
+}
+
+} // namespace mv
diff --git a/intern/libmv/libmv/autotrack/quad.h b/intern/libmv/libmv/autotrack/quad.h
new file mode 100644
index 00000000000..0c70f9882da
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/quad.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_QUAD_H_
+#define LIBMV_AUTOTRACK_QUAD_H_
+
+#include <Eigen/Core>
+
+namespace mv {
+
+template<typename T, int D>
+struct Quad {
+ // A quad is 4 points; generally in 2D or 3D.
+ //
+ // +----------> x
+ // |\.
+ // | \.
+ // | z (z goes into screen)
+ // |
+ // | r0----->r1
+ // | ^ |
+ // | | . |
+ // | | V
+ // | r3<-----r2
+ // | \.
+ // | \.
+ // v normal goes away (right handed).
+ // y
+ //
+ // Each row is one of the corners coordinates; either (x, y) or (x, y, z).
+ Eigen::Matrix<T, 4, D> coordinates;
+};
+
+typedef Quad<float, 2> Quad2Df;
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_QUAD_H_
diff --git a/intern/libmv/libmv/autotrack/reconstruction.h b/intern/libmv/libmv/autotrack/reconstruction.h
new file mode 100644
index 00000000000..e1d4e882cbd
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/reconstruction.h
@@ -0,0 +1,89 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_RECONSTRUCTION_H_
+#define LIBMV_AUTOTRACK_RECONSTRUCTION_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+namespace mv {
+
+using libmv::CameraIntrinsics;
+using libmv::vector;
+
+class Model;
+
+class CameraPose {
+ int clip;
+ int frame;
+ int intrinsics;
+ Mat3 R;
+ Vec3 t;
+};
+
+class Point {
+ int track;
+
+ // The coordinates of the point. Note that not all coordinates are always
+ // used; for example points on a plane only use the first two coordinates.
+ Vec3 X;
+};
+
+// A reconstruction for a set of tracks. The indexing for clip, frame, and
+// track should match that of a Tracks object, stored elsewhere.
+class Reconstruction {
+ public:
+ // All methods copy their input reference or take ownership of the pointer.
+ void AddCameraPose(const CameraPose& pose);
+ int AddCameraIntrinsics(CameraIntrinsics* intrinsics);
+ int AddPoint(const Point& point);
+ int AddModel(Model* model);
+
+ // Returns the corresponding pose or point or NULL if missing.
+ CameraPose* CameraPoseForFrame(int clip, int frame);
+ const CameraPose* CameraPoseForFrame(int clip, int frame) const;
+ Point* PointForTrack(int track);
+ const Point* PointForTrack(int track) const;
+
+ const vector<vector<CameraPose> >& camera_poses() const {
+ return camera_poses_;
+ }
+
+ private:
+ // Indexed by CameraPose::intrinsics. Owns the intrinsics objects.
+ vector<CameraIntrinsics*> camera_intrinsics_;
+
+ // Indexed by Marker::clip then by Marker::frame.
+ vector<vector<CameraPose> > camera_poses_;
+
+ // Indexed by Marker::track.
+ vector<Point> points_;
+
+ // Indexed by Marker::model_id. Owns model objects.
+ vector<Model*> models_;
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_RECONSTRUCTION_H_
diff --git a/intern/libmv/libmv/autotrack/region.h b/intern/libmv/libmv/autotrack/region.h
new file mode 100644
index 00000000000..b35d99eb60d
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/region.h
@@ -0,0 +1,67 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_REGION_H_
+#define LIBMV_AUTOTRACK_REGION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace mv {
+
+using libmv::Vec2f;
+
+// A region is a bounding box within an image.
+//
+// +----------> x
+// |
+// | (min.x, min.y) (max.x, min.y)
+// | +-------------------------+
+// | | |
+// | | |
+// | | |
+// | +-------------------------+
+// v (min.x, max.y) (max.x, max.y)
+// y
+//
+struct Region {
+ Vec2f min;
+ Vec2f max;
+
+ template<typename T>
+ void Offset(const T& offset) {
+ min += offset.template cast<float>();
+ max += offset.template cast<float>();
+ }
+
+ Region Rounded() const {
+ Region result;
+ result.min(0) = ceil(this->min(0));
+ result.min(1) = ceil(this->min(1));
+ result.max(0) = ceil(this->max(0));
+ result.max(1) = ceil(this->max(1));
+ return result;
+ }
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_REGION_H_
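
A short sketch of typical Region usage: building a symmetric search box around a point and snapping it to whole pixels with Rounded(); the 30-pixel half-size is an arbitrary example value.

  #include "libmv/autotrack/region.h"

  mv::Region SearchRegionAround(const libmv::Vec2f& center) {
    mv::Region region;
    region.min = center - libmv::Vec2f(30.0f, 30.0f);
    region.max = center + libmv::Vec2f(30.0f, 30.0f);
    return region.Rounded();  // Rounded() applies ceil() to all four bounds.
  }
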
diff --git a/intern/libmv/libmv/autotrack/tracks.cc b/intern/libmv/libmv/autotrack/tracks.cc
new file mode 100644
index 00000000000..174f264f3f2
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/tracks.cc
@@ -0,0 +1,193 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "libmv/autotrack/tracks.h"
+
+#include <algorithm>
+#include <vector>
+#include <iterator>
+
+#include "libmv/numeric/numeric.h"
+
+namespace mv {
+
+Tracks::Tracks(const Tracks& other) {
+ markers_ = other.markers_;
+}
+
+Tracks::Tracks(const vector<Marker>& markers) : markers_(markers) {}
+
+bool Tracks::GetMarker(int clip, int frame, int track, Marker* marker) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].clip == clip &&
+ markers_[i].frame == frame &&
+ markers_[i].track == track) {
+ *marker = markers_[i];
+ return true;
+ }
+ }
+ return false;
+}
+
+void Tracks::GetMarkersForTrack(int track, vector<Marker>* markers) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (track == markers_[i].track) {
+ markers->push_back(markers_[i]);
+ }
+ }
+}
+
+void Tracks::GetMarkersForTrackInClip(int clip,
+ int track,
+ vector<Marker>* markers) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (clip == markers_[i].clip &&
+ track == markers_[i].track) {
+ markers->push_back(markers_[i]);
+ }
+ }
+}
+
+void Tracks::GetMarkersInFrame(int clip,
+ int frame,
+ vector<Marker>* markers) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].clip == clip &&
+ markers_[i].frame == frame) {
+ markers->push_back(markers_[i]);
+ }
+ }
+}
+
+void Tracks::GetMarkersForTracksInBothImages(int clip1, int frame1,
+ int clip2, int frame2,
+ vector<Marker>* markers) const {
+ std::vector<int> image1_tracks;
+ std::vector<int> image2_tracks;
+
+ // Collect the tracks in each of the two images.
+ for (int i = 0; i < markers_.size(); ++i) {
+ int clip = markers_[i].clip;
+ int frame = markers_[i].frame;
+ if (clip == clip1 && frame == frame1) {
+ image1_tracks.push_back(markers_[i].track);
+ } else if (clip == clip2 && frame == frame2) {
+ image2_tracks.push_back(markers_[i].track);
+ }
+ }
+
+ // Intersect the two sets to find the tracks of interest.
+ std::sort(image1_tracks.begin(), image1_tracks.end());
+ std::sort(image2_tracks.begin(), image2_tracks.end());
+ std::vector<int> intersection;
+ std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
+ image2_tracks.begin(), image2_tracks.end(),
+ std::back_inserter(intersection));
+
+ // Scan through and get the relevant tracks from the two images.
+ for (int i = 0; i < markers_.size(); ++i) {
+ // Save markers that are in either frame and are in our candidate set.
+ if (((markers_[i].clip == clip1 &&
+ markers_[i].frame == frame1) ||
+ (markers_[i].clip == clip2 &&
+ markers_[i].frame == frame2)) &&
+ std::binary_search(intersection.begin(),
+ intersection.end(),
+ markers_[i].track)) {
+ markers->push_back(markers_[i]);
+ }
+ }
+}
+
+void Tracks::AddMarker(const Marker& marker) {
+ // TODO(keir): This is quadratic for repeated insertions. Fix this by adding
+ // a smarter data structure like a set<>.
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].clip == marker.clip &&
+ markers_[i].frame == marker.frame &&
+ markers_[i].track == marker.track) {
+ markers_[i] = marker;
+ return;
+ }
+ }
+ markers_.push_back(marker);
+}
+
+void Tracks::SetMarkers(vector<Marker>* markers) {
+ std::swap(markers_, *markers);
+}
+
+bool Tracks::RemoveMarker(int clip, int frame, int track) {
+ int size = markers_.size();
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].clip == clip &&
+ markers_[i].frame == frame &&
+ markers_[i].track == track) {
+ markers_[i] = markers_[size - 1];
+ markers_.resize(size - 1);
+ return true;
+ }
+ }
+ return false;
+}
+
+void Tracks::RemoveMarkersForTrack(int track) {
+ int size = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].track != track) {
+ markers_[size++] = markers_[i];
+ }
+ }
+ markers_.resize(size);
+}
+
+int Tracks::MaxClip() const {
+ int max_clip = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_clip = std::max(markers_[i].clip, max_clip);
+ }
+ return max_clip;
+}
+
+int Tracks::MaxFrame(int clip) const {
+ int max_frame = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].clip == clip) {
+ max_frame = std::max(markers_[i].frame, max_frame);
+ }
+ }
+ return max_frame;
+}
+
+int Tracks::MaxTrack() const {
+ int max_track = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_track = std::max(markers_[i].track, max_track);
+ }
+ return max_track;
+}
+
+int Tracks::NumMarkers() const {
+ return markers_.size();
+}
+
+} // namespace mv
diff --git a/intern/libmv/libmv/autotrack/tracks.h b/intern/libmv/libmv/autotrack/tracks.h
new file mode 100644
index 00000000000..0b7de91d211
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/tracks.h
@@ -0,0 +1,82 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#ifndef LIBMV_AUTOTRACK_TRACKS_H_
+#define LIBMV_AUTOTRACK_TRACKS_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/autotrack/marker.h"
+
+namespace mv {
+
+using libmv::vector;
+
+// The Tracks container stores correspondences between frames.
+class Tracks {
+ public:
+ Tracks() { }
+ Tracks(const Tracks &other);
+
+ // Create a tracks object with markers already initialized. Copies markers.
+ explicit Tracks(const vector<Marker>& markers);
+
+ // All getters append to the output argument vector.
+ bool GetMarker(int clip, int frame, int track, Marker* marker) const;
+ void GetMarkersForTrack(int track, vector<Marker>* markers) const;
+ void GetMarkersForTrackInClip(int clip,
+ int track,
+ vector<Marker>* markers) const;
+ void GetMarkersInFrame(int clip, int frame, vector<Marker>* markers) const;
+
+ // Get the markers in frame1 and frame2 which have a common track.
+ //
+ // This is not the same as the union of the markers in frame1 and
+ // frame2; each marker is for a track that appears in both images.
+ void GetMarkersForTracksInBothImages(int clip1, int frame1,
+ int clip2, int frame2,
+ vector<Marker>* markers) const;
+
+ void AddMarker(const Marker& marker);
+
+ // Moves the contents of *markers over top of the existing markers. This
+ // destroys *markers in the process (but avoids copies).
+ void SetMarkers(vector<Marker>* markers);
+ bool RemoveMarker(int clip, int frame, int track);
+ void RemoveMarkersForTrack(int track);
+
+ int MaxClip() const;
+ int MaxFrame(int clip) const;
+ int MaxTrack() const;
+ int NumMarkers() const;
+
+ const vector<Marker>& markers() const { return markers_; }
+
+ private:
+ vector<Marker> markers_;
+
+ // TODO(keir): Consider adding access-map data structures to avoid all the
+ // linear lookup penalties for the accessors.
+};
+
+} // namespace mv
+
+#endif // LIBMV_AUTOTRACK_TRACKS_H_
diff --git a/intern/libmv/libmv/autotrack/tracks_test.cc b/intern/libmv/libmv/autotrack/tracks_test.cc
new file mode 100644
index 00000000000..028b4a10913
--- /dev/null
+++ b/intern/libmv/libmv/autotrack/tracks_test.cc
@@ -0,0 +1,52 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+
+#include "libmv/autotrack/tracks.h"
+
+#include "testing/testing.h"
+#include "libmv/logging/logging.h"
+
+namespace mv {
+
+TEST(Tracks, MaxFrame) {
+ Marker marker;
+ Tracks tracks;
+
+ // Add some markers to clip 0.
+ marker.clip = 0;
+ marker.frame = 1;
+ tracks.AddMarker(marker);
+
+ // Add some markers to clip 1.
+ marker.clip = 1;
+ marker.frame = 1;
+ tracks.AddMarker(marker);
+
+ marker.clip = 1;
+ marker.frame = 12;
+ tracks.AddMarker(marker);
+
+ EXPECT_EQ(1, tracks.MaxFrame(0));
+ EXPECT_EQ(12, tracks.MaxFrame(1));
+}
+
+} // namespace mv
diff --git a/intern/libmv/libmv/base/aligned_malloc.cc b/intern/libmv/libmv/base/aligned_malloc.cc
new file mode 100644
index 00000000000..9141186f196
--- /dev/null
+++ b/intern/libmv/libmv/base/aligned_malloc.cc
@@ -0,0 +1,74 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/base/aligned_malloc.h"
+#include "libmv/logging/logging.h"
+
+#if !defined(__APPLE__) && !defined(__FreeBSD__) && !defined(__NetBSD__)
+// Needed for memalign on Linux and _aligned_malloc on Windows.
+# ifdef FREE_WINDOWS
+/* make sure _aligned_malloc is included */
+# ifdef __MSVCRT_VERSION__
+# undef __MSVCRT_VERSION__
+# endif
+
+# define __MSVCRT_VERSION__ 0x0700
+# endif // FREE_WINDOWS
+
+# include <malloc.h>
+#else
+// Apple's malloc is 16-byte aligned, and does not have malloc.h, so include
+// <cstdlib> instead.
+# include <cstdlib>
+#endif
+
+namespace libmv {
+
+void *aligned_malloc(int size, int alignment) {
+#ifdef _WIN32
+ return _aligned_malloc(size, alignment);
+#elif __APPLE__
+ // On Mac OS X, both the heap and the stack are guaranteed 16-byte aligned so
+ // they work natively with SSE types with no further work.
+ CHECK_EQ(alignment, 16);
+ return malloc(size);
+#elif defined(__FreeBSD__) || defined(__NetBSD__)
+ void *result;
+
+ if (posix_memalign(&result, alignment, size)) {
+ // non-zero means allocation error
+ // either no allocation or bad alignment value
+ return NULL;
+ }
+ return result;
+#else // This is for Linux.
+ return memalign(alignment, size);
+#endif
+}
+
+void aligned_free(void *ptr) {
+#ifdef _WIN32
+ _aligned_free(ptr);
+#else
+ free(ptr);
+#endif
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/base/aligned_malloc.h b/intern/libmv/libmv/base/aligned_malloc.h
new file mode 100644
index 00000000000..096ff6e2d7c
--- /dev/null
+++ b/intern/libmv/libmv/base/aligned_malloc.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_BASE_ALIGNED_MALLOC_H_
+#define LIBMV_BASE_ALIGNED_MALLOC_H_
+
+namespace libmv {
+
+// Allocate block of size bytes at least aligned to a given value.
+void *aligned_malloc(int size, int alignment);
+
+// Free memory allocated by aligned_malloc.
+void aligned_free(void *ptr);
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_ALIGNED_MALLOC_H_
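
A short sketch of the intended call pattern (hypothetical caller; the 16-byte
alignment matches the SSE-oriented guarantee discussed in aligned_malloc.cc):

  #include "libmv/base/aligned_malloc.h"

  void UseAlignedBuffer() {
    // Allocate 64 floats aligned to 16 bytes, suitable for SSE loads.
    float *buffer = static_cast<float *>(
        libmv::aligned_malloc(64 * sizeof(float), 16));
    // ... fill and process the buffer ...
    libmv::aligned_free(buffer);  // Always pair with aligned_malloc.
  }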
diff --git a/intern/libmv/libmv/base/id_generator.h b/intern/libmv/libmv/base/id_generator.h
new file mode 100644
index 00000000000..bf1eafd218e
--- /dev/null
+++ b/intern/libmv/libmv/base/id_generator.h
@@ -0,0 +1,37 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_ID_GENERATOR_H
+#define LIBMV_ID_GENERATOR_H
+
+namespace libmv {
+
+template <typename ID>
+class IdGenerator {
+ public:
+ IdGenerator() : next_(0) {}
+ ID Generate() { return next_++; }
+ private:
+ ID next_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_ID_GENERATOR_H
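
For illustration, a hypothetical caller handing out sequential ids with the
template above:

  #include "libmv/base/id_generator.h"

  void GenerateSomeIds() {
    libmv::IdGenerator<int> track_ids;
    int first = track_ids.Generate();   // 0
    int second = track_ids.Generate();  // 1
    (void)first;
    (void)second;
  }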
diff --git a/intern/libmv/libmv/base/scoped_ptr.h b/intern/libmv/libmv/base/scoped_ptr.h
new file mode 100644
index 00000000000..b9cd4854213
--- /dev/null
+++ b/intern/libmv/libmv/base/scoped_ptr.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_BASE_SCOPED_PTR_H
+#define LIBMV_BASE_SCOPED_PTR_H
+
+#include <cassert>
+#include <cstddef>
+
+namespace libmv {
+
+/**
+ * A handle for a heap-allocated resource that should be freed when it goes out
+ * of scope. This looks similar to the one found in TR1.
+ */
+template<typename T>
+class scoped_ptr {
+ public:
+ scoped_ptr(T *resource) : resource_(resource) {}
+ ~scoped_ptr() { reset(0); }
+
+ T *get() const { return resource_; }
+ T *operator->() const { return resource_; }
+ T &operator*() const { return *resource_; }
+
+ void reset(T *new_resource) {
+ if (sizeof(T)) {
+ delete resource_;
+ }
+ resource_ = new_resource;
+ }
+
+ T *release() {
+ T *released_resource = resource_;
+ resource_ = 0;
+ return released_resource;
+ }
+
+ private:
+ // No copying allowed.
+ T *resource_;
+};
+
+// Same as scoped_ptr but caller must allocate the data
+// with new[] and the destructor will free the memory
+// using delete[].
+template<typename T>
+class scoped_array {
+ public:
+ scoped_array(T *array) : array_(array) {}
+ ~scoped_array() { reset(NULL); }
+
+ T *get() const { return array_; }
+
+ T& operator[](std::ptrdiff_t i) const {
+ assert(i >= 0);
+ assert(array_ != NULL);
+ return array_[i];
+ }
+
+ void reset(T *new_array) {
+ if (sizeof(T)) {
+ delete [] array_;
+ }
+ array_ = new_array;
+ }
+
+ T *release() {
+ T *released_array = array_;
+ array_ = NULL;
+ return released_array;
+ }
+
+ private:
+ T *array_;
+
+ // Forbid comparison of different scoped_array types.
+ template <typename T2> bool operator==(scoped_array<T2> const& p2) const;
+ template <typename T2> bool operator!=(scoped_array<T2> const& p2) const;
+
+ // Disallow evil constructors
+ scoped_array(const scoped_array&);
+ void operator=(const scoped_array&);
+};
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_SCOPED_PTR_H
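
The test file that follows exercises scoped_ptr; scoped_array works the same
way but expects new[] / delete[]. A minimal sketch (hypothetical caller):

  #include "libmv/base/scoped_ptr.h"

  void UseScopedArray(int n) {
    libmv::scoped_array<float> weights(new float[n]);
    for (int i = 0; i < n; ++i) {
      weights[i] = 1.0f;  // operator[] asserts a non-NULL array and i >= 0.
    }
    // The array is released automatically when weights goes out of scope.
  }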
diff --git a/intern/libmv/libmv/base/scoped_ptr_test.cc b/intern/libmv/libmv/base/scoped_ptr_test.cc
new file mode 100644
index 00000000000..ce1d56b500a
--- /dev/null
+++ b/intern/libmv/libmv/base/scoped_ptr_test.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/base/scoped_ptr.h"
+#include "testing/testing.h"
+
+namespace libmv {
+namespace {
+
+struct FreeMe {
+ FreeMe(int *freed) : freed(freed) {}
+ ~FreeMe() { (*freed)++; }
+ int *freed;
+};
+
+TEST(ScopedPtr, NullDoesNothing) {
+ scoped_ptr<FreeMe> x(NULL);
+ // Does nothing.
+}
+
+TEST(ScopedPtr, FreesWhenOutOfScope) {
+ int frees = 0;
+ {
+ scoped_ptr<FreeMe> scoped(new FreeMe(&frees));
+ EXPECT_EQ(0, frees);
+ }
+ EXPECT_EQ(1, frees);
+}
+
+TEST(ScopedPtr, Operators) {
+ int tag = 123;
+ scoped_ptr<FreeMe> scoped(new FreeMe(&tag));
+ EXPECT_EQ(123, *(scoped->freed));
+ EXPECT_EQ(123, *((*scoped).freed));
+}
+
+TEST(ScopedPtr, Reset) {
+ int frees = 0;
+ scoped_ptr<FreeMe> scoped(new FreeMe(&frees));
+ EXPECT_EQ(0, frees);
+ scoped.reset(new FreeMe(&frees));
+ EXPECT_EQ(1, frees);
+}
+
+TEST(ScopedPtr, ReleaseAndGet) {
+ int frees = 0;
+ FreeMe *allocated = new FreeMe(&frees);
+ FreeMe *released = NULL;
+ {
+ scoped_ptr<FreeMe> scoped(allocated);
+ EXPECT_EQ(0, frees);
+ EXPECT_EQ(allocated, scoped.get());
+ released = scoped.release();
+ EXPECT_EQ(0, frees);
+ EXPECT_EQ(released, allocated);
+ }
+ EXPECT_EQ(0, frees);
+ delete released;
+}
+
+} // namespace
+} // namespace libmv
diff --git a/intern/libmv/libmv/base/vector.h b/intern/libmv/libmv/base/vector.h
new file mode 100644
index 00000000000..1931fb0b1f9
--- /dev/null
+++ b/intern/libmv/libmv/base/vector.h
@@ -0,0 +1,176 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Get an aligned vector implementation. Must be included before <vector>. The
+// Eigen guys went through some trouble to make a portable override for the
+// fixed size vector types.
+
+#ifndef LIBMV_BASE_VECTOR_H
+#define LIBMV_BASE_VECTOR_H
+
+#include <cstring>
+#include <new>
+
+#include <Eigen/Core>
+
+namespace libmv {
+
+// A simple container class, which guarantees 16 byte alignment needed for most
+// vectorization. Don't use this container for classes that cannot be copied
+// via memcpy.
+// FIXME: this class has some issues:
+// - doesn't support iterators.
+// - impedes compatibility with code using the STL.
+// - the STL already provides support for custom allocators, so this could be
+// replaced with a simple
+// template <T> class vector : std::vector<T, aligned_allocator> {} declaration
+// provided it doesn't break code relying on libmv::vector-specific behavior.
+template <typename T,
+ typename Allocator = Eigen::aligned_allocator<T> >
+class vector {
+ public:
+ ~vector() { clear(); }
+
+ vector() { init(); }
+ vector(int size) { init(); resize(size); }
+ vector(int size, const T & val) {
+ init();
+ resize(size);
+ std::fill(data_, data_+size_, val); }
+
+ // Copy constructor and assignment.
+ vector(const vector<T, Allocator> &rhs) {
+ init();
+ copy(rhs);
+ }
+ vector<T, Allocator> &operator=(const vector<T, Allocator> &rhs) {
+ if (&rhs != this) {
+ copy(rhs);
+ }
+ return *this;
+ }
+
+ /// Swaps the contents of two vectors in constant time.
+ void swap(vector<T, Allocator> &other) {
+ std::swap(allocator_, other.allocator_);
+ std::swap(size_, other.size_);
+ std::swap(capacity_, other.capacity_);
+ std::swap(data_, other.data_);
+ }
+
+ T *data() const { return data_; }
+ int size() const { return size_; }
+ int capacity() const { return capacity_; }
+ const T& back() const { return data_[size_ - 1]; }
+ T& back() { return data_[size_ - 1]; }
+ const T& front() const { return data_[0]; }
+ T& front() { return data_[0]; }
+ const T& operator[](int n) const { return data_[n]; }
+ T& operator[](int n) { return data_[n]; }
+ const T& at(int n) const { return data_[n]; }
+ T& at(int n) { return data_[n]; }
+ const T * begin() const { return data_; }
+ const T * end() const { return data_+size_; }
+ T * begin() { return data_; }
+ T * end() { return data_+size_; }
+
+ void resize(size_t size) {
+ reserve(size);
+ if (size > size_) {
+ construct(size_, size);
+ } else if (size < size_) {
+ destruct(size, size_);
+ }
+ size_ = size;
+ }
+
+ void push_back(const T &value) {
+ if (size_ == capacity_) {
+ reserve(size_ ? 2 * size_ : 1);
+ }
+ new (&data_[size_++]) T(value);
+ }
+
+ void pop_back() {
+ resize(size_ - 1);
+ }
+
+ void clear() {
+ destruct(0, size_);
+ deallocate();
+ init();
+ }
+
+ void reserve(unsigned int size) {
+ if (size > size_) {
+ T *data = static_cast<T *>(allocate(size));
+ memcpy(data, data_, sizeof(*data)*size_);
+ allocator_.deallocate(data_, capacity_);
+ data_ = data;
+ capacity_ = size;
+ }
+ }
+
+ bool empty() {
+ return size_ == 0;
+ }
+
+ private:
+ void construct(int start, int end) {
+ for (int i = start; i < end; ++i) {
+ new (&data_[i]) T;
+ }
+ }
+ void destruct(int start, int end) {
+ for (int i = start; i < end; ++i) {
+ data_[i].~T();
+ }
+ }
+ void init() {
+ size_ = 0;
+ data_ = 0;
+ capacity_ = 0;
+ }
+
+ void *allocate(int size) {
+ return size ? allocator_.allocate(size) : 0;
+ }
+
+ void deallocate() {
+ allocator_.deallocate(data_, capacity_);
+ data_ = 0;
+ }
+
+ void copy(const vector<T, Allocator> &rhs) {
+ resize(rhs.size());
+ for (int i = 0; i < rhs.size(); ++i) {
+ (*this)[i] = rhs[i];
+ }
+ }
+
+ Allocator allocator_;
+ size_t size_;
+ size_t capacity_;
+ T *data_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_VECTOR_H
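
The reason this container exists is alignment for fixed-size vectorizable Eigen
types. A brief sketch of typical use (hypothetical caller; Vec2 comes from
libmv/numeric/numeric.h):

  #include "libmv/base/vector.h"
  #include "libmv/numeric/numeric.h"

  void CollectPoints() {
    libmv::vector<libmv::Vec2> points;  // 16-byte aligned storage.
    libmv::Vec2 p;
    p << 1.0, 2.0;
    points.push_back(p);  // Grows by doubling the capacity.
    points.resize(4);     // Default-constructs the new elements.
  }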
diff --git a/intern/libmv/libmv/base/vector_test.cc b/intern/libmv/libmv/base/vector_test.cc
new file mode 100644
index 00000000000..f17718c3926
--- /dev/null
+++ b/intern/libmv/libmv/base/vector_test.cc
@@ -0,0 +1,223 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/base/vector.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+#include <algorithm>
+
+namespace {
+using namespace libmv;
+
+// This uses a Vec2, which is a fixed-size vectorizable Eigen type. It is
+// necessary to test vectorizable types to ensure that the alignment asserts
+// trigger if the alignment is not correct.
+TEST(VectorAlignmentTest, PushBack) {
+ Vec2 x1, x2;
+ x1 << 1, 2;
+ x2 << 3, 4;
+
+ vector<Vec2> vs;
+ vs.push_back(x1);
+ EXPECT_EQ(1, vs.size());
+ EXPECT_EQ(1, vs.capacity());
+
+ vs.push_back(x2);
+ EXPECT_EQ(2, vs.size());
+ EXPECT_EQ(2, vs.capacity());
+
+ // The following is necessary because of some bug in gtest; the expected
+ // parameter can't be a fixed size vectorizable type with alignment
+ // requirements.
+ Vec x1r = x1;
+ Vec x2r = x2;
+ EXPECT_EQ(x1r, vs[0]);
+ EXPECT_EQ(x2r, vs[1]);
+
+ vs.push_back(x2);
+ vs.push_back(x2);
+ vs.push_back(x2);
+ EXPECT_EQ(5, vs.size());
+ EXPECT_EQ(8, vs.capacity());
+}
+
+// Count construct and destruct calls to check that they happen as expected.
+int foo_construct_calls = 0;
+int foo_destruct_calls = 0;
+
+struct Foo {
+ public:
+ Foo() : value(5) { foo_construct_calls++; }
+ ~Foo() { foo_destruct_calls++; }
+ int value;
+};
+
+struct VectorTest : public testing::Test {
+ VectorTest() {
+ foo_construct_calls = 0;
+ foo_destruct_calls = 0;
+ }
+};
+
+TEST_F(VectorTest, EmptyVectorDoesNotConstruct) {
+ {
+ vector<Foo> v;
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(0, v.capacity());
+ }
+ EXPECT_EQ(0, foo_construct_calls);
+ EXPECT_EQ(0, foo_destruct_calls);
+}
+
+TEST_F(VectorTest, DestructorGetsCalled) {
+ {
+ vector<Foo> v;
+ v.resize(5);
+ }
+ EXPECT_EQ(5, foo_construct_calls);
+ EXPECT_EQ(5, foo_destruct_calls);
+}
+
+TEST_F(VectorTest, ReserveDoesNotCallConstructorsOrDestructors) {
+ vector<Foo> v;
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(0, v.capacity());
+ EXPECT_EQ(0, foo_construct_calls);
+ EXPECT_EQ(0, foo_destruct_calls);
+
+ v.reserve(5);
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(5, v.capacity());
+ EXPECT_EQ(0, foo_construct_calls);
+ EXPECT_EQ(0, foo_destruct_calls);
+}
+
+TEST_F(VectorTest, ResizeConstructsAndDestructsAsExpected) {
+ vector<Foo> v;
+
+ // Create one object.
+ v.resize(1);
+ EXPECT_EQ(1, v.size());
+ EXPECT_EQ(1, v.capacity());
+ EXPECT_EQ(1, foo_construct_calls);
+ EXPECT_EQ(0, foo_destruct_calls);
+ EXPECT_EQ(5, v[0].value);
+
+ // Create two more.
+ v.resize(3);
+ EXPECT_EQ(3, v.size());
+ EXPECT_EQ(3, v.capacity());
+ EXPECT_EQ(3, foo_construct_calls);
+ EXPECT_EQ(0, foo_destruct_calls);
+
+ // Delete the last one.
+ v.resize(2);
+ EXPECT_EQ(2, v.size());
+ EXPECT_EQ(3, v.capacity());
+ EXPECT_EQ(3, foo_construct_calls);
+ EXPECT_EQ(1, foo_destruct_calls);
+
+ // Delete the remaining two.
+ v.resize(0);
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(3, v.capacity());
+ EXPECT_EQ(3, foo_construct_calls);
+ EXPECT_EQ(3, foo_destruct_calls);
+}
+
+TEST_F(VectorTest, PushPopBack) {
+ vector<Foo> v;
+
+ Foo foo;
+ foo.value = 10;
+ v.push_back(foo);
+ EXPECT_EQ(1, v.size());
+ EXPECT_EQ(10, v.back().value);
+
+ v.pop_back();
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(1, foo_construct_calls);
+ EXPECT_EQ(1, foo_destruct_calls);
+}
+
+TEST_F(VectorTest, CopyConstructor) {
+ vector<int> a;
+ a.push_back(1);
+ a.push_back(5);
+ a.push_back(3);
+
+ vector<int> b(a);
+ EXPECT_EQ(a.size(), b.size());
+ //EXPECT_EQ(a.capacity(), b.capacity());
+ for (int i = 0; i < a.size(); ++i) {
+ EXPECT_EQ(a[i], b[i]);
+ }
+}
+
+TEST_F(VectorTest, OperatorEquals) {
+ vector<int> a, b;
+ a.push_back(1);
+ a.push_back(5);
+ a.push_back(3);
+
+ b = a;
+
+ EXPECT_EQ(a.size(), b.size());
+ //EXPECT_EQ(a.capacity(), b.capacity());
+ for (int i = 0; i < a.size(); ++i) {
+ EXPECT_EQ(a[i], b[i]);
+ }
+}
+
+TEST_F(VectorTest, STLFind) {
+ vector<int> a;
+ a.push_back(1);
+ a.push_back(5);
+ a.push_back(3);
+
+ // Find returns an int *.
+ EXPECT_EQ(std::find(&a[0], &a[2], 1) == &a[0], true);
+ EXPECT_EQ(std::find(&a[0], &a[2], 5) == &a[1], true);
+ EXPECT_EQ(std::find(&a[0], &a[2], 3) == &a[2], true);
+
+ // Find again, this time through begin()/end().
+ EXPECT_EQ(std::find(a.begin(), a.end(), 1) == &a[0], true);
+ EXPECT_EQ(std::find(a.begin(), a.end(), 5) == &a[1], true);
+ EXPECT_EQ(std::find(a.begin(), a.end(), 3) == &a[2], true);
+
+ // Search for values that are not in the vector.
+ EXPECT_EQ(std::find(a.begin(), a.end(), 0) == a.end(), true);
+ EXPECT_EQ(std::find(a.begin(), a.end(), 52) == a.end(), true);
+}
+
+TEST(Vector, swap) {
+ vector<int> a, b;
+ a.push_back(1);
+ a.push_back(2);
+ b.push_back(3);
+ a.swap(b);
+ EXPECT_EQ(1, a.size());
+ EXPECT_EQ(3, a[0]);
+ EXPECT_EQ(2, b.size());
+ EXPECT_EQ(1, b[0]);
+ EXPECT_EQ(2, b[1]);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/base/vector_utils.h b/intern/libmv/libmv/base/vector_utils.h
new file mode 100644
index 00000000000..c71e1bea951
--- /dev/null
+++ b/intern/libmv/libmv/base/vector_utils.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+
+#ifndef LIBMV_BASE_VECTOR_UTILS_H_
+#define LIBMV_BASE_VECTOR_UTILS_H_
+
+/// Delete every (pointer) element of a container, then clear the container.
+template <class Array>
+void DeleteElements(Array *array) {
+ for (int i = 0; i < array->size(); ++i) {
+ delete (*array)[i];
+ }
+ array->clear();
+}
+
+#endif // LIBMV_BASE_VECTOR_UTILS_H_
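
DeleteElements assumes a container of owning raw pointers. A short sketch of
the intended call pattern (hypothetical caller):

  #include "libmv/base/vector.h"
  #include "libmv/base/vector_utils.h"

  struct Blob { int id; };

  void OwnSomeBlobs() {
    libmv::vector<Blob *> blobs;
    blobs.push_back(new Blob());
    blobs.push_back(new Blob());
    DeleteElements(&blobs);  // Deletes each element, then clears the vector.
  }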
diff --git a/intern/libmv/libmv/image/array_nd.cc b/intern/libmv/libmv/image/array_nd.cc
new file mode 100644
index 00000000000..469a19aabf1
--- /dev/null
+++ b/intern/libmv/libmv/image/array_nd.cc
@@ -0,0 +1,108 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/image.h"
+#include <iostream>
+#include <cmath>
+
+namespace libmv {
+
+void FloatArrayToScaledByteArray(const Array3Df &float_array,
+ Array3Du *byte_array,
+ bool automatic_range_detection
+ ) {
+ byte_array->ResizeLike(float_array);
+ float minval = HUGE_VAL;
+ float maxval = -HUGE_VAL;
+ if (automatic_range_detection) {
+ for (int i = 0; i < float_array.Height(); ++i) {
+ for (int j = 0; j < float_array.Width(); ++j) {
+ for (int k = 0; k < float_array.Depth(); ++k) {
+ minval = std::min(minval, float_array(i, j, k));
+ maxval = std::max(maxval, float_array(i, j, k));
+ }
+ }
+ }
+ } else {
+ minval = 0;
+ maxval = 1;
+ }
+ for (int i = 0; i < float_array.Height(); ++i) {
+ for (int j = 0; j < float_array.Width(); ++j) {
+ for (int k = 0; k < float_array.Depth(); ++k) {
+ float unscaled = (float_array(i, j, k) - minval) / (maxval - minval);
+ (*byte_array)(i, j, k) = (unsigned char)(255 * unscaled);
+ }
+ }
+ }
+}
+
+void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
+ Array3Df *float_array) {
+ float_array->ResizeLike(byte_array);
+ for (int i = 0; i < byte_array.Height(); ++i) {
+ for (int j = 0; j < byte_array.Width(); ++j) {
+ for (int k = 0; k < byte_array.Depth(); ++k) {
+ (*float_array)(i, j, k) = float(byte_array(i, j, k)) / 255.0f;
+ }
+ }
+ }
+}
+
+void SplitChannels(const Array3Df &input,
+ Array3Df *channel0,
+ Array3Df *channel1,
+ Array3Df *channel2) {
+ assert(input.Depth() >= 3);
+ channel0->Resize(input.Height(), input.Width());
+ channel1->Resize(input.Height(), input.Width());
+ channel2->Resize(input.Height(), input.Width());
+ for (int row = 0; row < input.Height(); ++row) {
+ for (int column = 0; column < input.Width(); ++column) {
+ (*channel0)(row, column) = input(row, column, 0);
+ (*channel1)(row, column) = input(row, column, 1);
+ (*channel2)(row, column) = input(row, column, 2);
+ }
+ }
+}
+
+void PrintArray(const Array3Df &array) {
+ using namespace std;
+
+ printf("[\n");
+ for (int r = 0; r < array.Height(); ++r) {
+ printf("[");
+ for (int c = 0; c < array.Width(); ++c) {
+ if (array.Depth() == 1) {
+ printf("%11f, ", array(r, c));
+ } else {
+ printf("[");
+ for (int k = 0; k < array.Depth(); ++k) {
+ printf("%11f, ", array(r, c, k));
+ }
+ printf("],");
+ }
+ }
+ printf("],\n");
+ }
+ printf("]\n");
+}
+
+} // namespace libmv
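
A sketch of the float/byte conversion round trip implemented above
(hypothetical caller; the declarations live in array_nd.h, shown next):

  #include "libmv/image/array_nd.h"

  void ConvertRoundTrip() {
    libmv::Array3Df image(2, 2);  // Height, width; depth defaults to 1.
    image.Fill(0.5f);
    libmv::Array3Du bytes;
    // With automatic range detection off, [0, 1] is mapped onto [0, 255].
    libmv::FloatArrayToScaledByteArray(image, &bytes, false);
    libmv::Array3Df back;
    libmv::ByteArrayToScaledFloatArray(bytes, &back);  // Divides by 255.
  }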
diff --git a/intern/libmv/libmv/image/array_nd.h b/intern/libmv/libmv/image/array_nd.h
new file mode 100644
index 00000000000..e95e66aa2b3
--- /dev/null
+++ b/intern/libmv/libmv/image/array_nd.h
@@ -0,0 +1,497 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_ARRAY_ND_H
+#define LIBMV_IMAGE_ARRAY_ND_H
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+
+#include "libmv/image/tuple.h"
+
+namespace libmv {
+
+class BaseArray {};
+
+/// A multidimensional array class.
+template <typename T, int N>
+class ArrayND : public BaseArray {
+ public:
+ typedef T Scalar;
+
+ /// Type for the multidimensional indices.
+ typedef Tuple<int, N> Index;
+
+ /// Create an empty array.
+ ArrayND() : data_(NULL), own_data_(true) { Resize(Index(0)); }
+
+ /// Create an array with the specified shape.
+ ArrayND(const Index &shape) : data_(NULL), own_data_(true) { Resize(shape); }
+
+ /// Create an array with the specified shape.
+ ArrayND(int *shape) : data_(NULL), own_data_(true) { Resize(shape); }
+
+ /// Copy constructor.
+ ArrayND(const ArrayND<T, N> &b) : data_(NULL), own_data_(true) {
+ ResizeLike(b);
+ std::memcpy(Data(), b.Data(), sizeof(T) * Size());
+ }
+
+ ArrayND(int s0) : data_(NULL), own_data_(true) { Resize(s0); }
+ ArrayND(int s0, int s1) : data_(NULL), own_data_(true) { Resize(s0, s1); }
+ ArrayND(int s0, int s1, int s2) : data_(NULL), own_data_(true) {
+ Resize(s0, s1, s2);
+ }
+
+ ArrayND(T* data, int s0, int s1, int s2)
+ : shape_(0), strides_(0), data_(data), own_data_(false) {
+ Resize(s0, s1, s2);
+ }
+
+ /// Destructor deletes pixel data.
+ ~ArrayND() {
+ if (own_data_) {
+ delete [] data_;
+ }
+ }
+
+ /// Assignment copies pixel data.
+ ArrayND &operator=(const ArrayND<T, N> &b) {
+ assert(this != &b);
+ ResizeLike(b);
+ std::memcpy(Data(), b.Data(), sizeof(T) * Size());
+ return *this;
+ }
+
+ const Index &Shapes() const {
+ return shape_;
+ }
+
+ const Index &Strides() const {
+ return strides_;
+ }
+
+ /// Resize the array to new_shape; data is lost unless the shape matches.
+ void Resize(const Index &new_shape) {
+ if (data_ != NULL && shape_ == new_shape) {
+ // Don't bother reallocating if the shapes match.
+ return;
+ }
+ shape_.Reset(new_shape);
+ strides_(N - 1) = 1;
+ for (int i = N - 1; i > 0; --i) {
+ strides_(i - 1) = strides_(i) * shape_(i);
+ }
+ if (own_data_) {
+ delete [] data_;
+ data_ = NULL;
+ if (Size() > 0) {
+ data_ = new T[Size()];
+ }
+ }
+ }
+
+ template<typename D>
+ void ResizeLike(const ArrayND<D, N> &other) {
+ Resize(other.Shape());
+ }
+
+ /// Resizes the array to shape s. All data is lost.
+ void Resize(const int *new_shape_array) {
+ Resize(Index(new_shape_array));
+ }
+
+ /// Resize a 1D array to length s0.
+ void Resize(int s0) {
+ assert(N == 1);
+ int shape[] = {s0};
+ Resize(shape);
+ }
+
+ /// Resize a 2D array to shape (s0,s1).
+ void Resize(int s0, int s1) {
+ int shape[N] = {s0, s1};
+ for (int i = 2; i < N; ++i) {
+ shape[i] = 1;
+ }
+ Resize(shape);
+ }
+
+ // Match Eigen2's API.
+ void resize(int rows, int cols) {
+ Resize(rows, cols);
+ }
+
+ /// Resize a 3D array to shape (s0,s1,s2).
+ void Resize(int s0, int s1, int s2) {
+ assert(N == 3);
+ int shape[] = {s0, s1, s2};
+ Resize(shape);
+ }
+
+ template<typename D>
+ void CopyFrom(const ArrayND<D, N> &other) {
+ ResizeLike(other);
+ T *data = Data();
+ const D *other_data = other.Data();
+ for (int i = 0; i < Size(); ++i) {
+ data[i] = T(other_data[i]);
+ }
+ }
+
+ void Fill(T value) {
+ for (int i = 0; i < Size(); ++i) {
+ Data()[i] = value;
+ }
+ }
+
+ // Match Eigen's API.
+ void fill(T value) {
+ for (int i = 0; i < Size(); ++i) {
+ Data()[i] = value;
+ }
+ }
+
+ /// Return a tuple containing the length of each axis.
+ const Index &Shape() const {
+ return shape_;
+ }
+
+ /// Return the length of an axis.
+ int Shape(int axis) const {
+ return shape_(axis);
+ }
+
+ /// Return the distance between neighboring elements along axis.
+ int Stride(int axis) const {
+ return strides_(axis);
+ }
+
+ /// Return the number of elements of the array.
+ int Size() const {
+ int size = 1;
+ for (int i = 0; i < N; ++i)
+ size *= Shape(i);
+ return size;
+ }
+
+ /// Return the total amount of memory used by the array.
+ int MemorySizeInBytes() const {
+ return sizeof(*this) + Size() * sizeof(T);
+ }
+
+ /// Pointer to the first element of the array.
+ T *Data() { return data_; }
+
+ /// Constant pointer to the first element of the array.
+ const T *Data() const { return data_; }
+
+ /// Distance between the first element and the element at position index.
+ int Offset(const Index &index) const {
+ int offset = 0;
+ for (int i = 0; i < N; ++i)
+ offset += index(i) * Stride(i);
+ return offset;
+ }
+
+ /// 1D specialization.
+ int Offset(int i0) const {
+ assert(N == 1);
+ return i0 * Stride(0);
+ }
+
+ /// 2D specialization.
+ int Offset(int i0, int i1) const {
+ assert(N == 2);
+ return i0 * Stride(0) + i1 * Stride(1);
+ }
+
+ /// 3D specialization.
+ int Offset(int i0, int i1, int i2) const {
+ assert(N == 3);
+ return i0 * Stride(0) + i1 * Stride(1) + i2 * Stride(2);
+ }
+
+ /// Return a reference to the element at position index.
+ T &operator()(const Index &index) {
+ // TODO(pau) Boundary checking in debug mode.
+ return *( Data() + Offset(index) );
+ }
+
+ /// 1D specialization.
+ T &operator()(int i0) {
+ return *( Data() + Offset(i0) );
+ }
+
+ /// 2D specialization.
+ T &operator()(int i0, int i1) {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ return *(Data() + Offset(i0, i1));
+ }
+
+ /// 3D specialization.
+ T &operator()(int i0, int i1, int i2) {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ assert(0 <= i2 && i2 < Shape(2));
+ return *(Data() + Offset(i0, i1, i2));
+ }
+
+ /// Return a constant reference to the element at position index.
+ const T &operator()(const Index &index) const {
+ return *(Data() + Offset(index));
+ }
+
+ /// 1D specialization.
+ const T &operator()(int i0) const {
+ return *(Data() + Offset(i0));
+ }
+
+ /// 2D specialization.
+ const T &operator()(int i0, int i1) const {
+ assert(0 <= i0 && i0 < Shape(0));
+ assert(0 <= i1 && i1 < Shape(1));
+ return *(Data() + Offset(i0, i1));
+ }
+
+ /// 3D specialization.
+ const T &operator()(int i0, int i1, int i2) const {
+ return *(Data() + Offset(i0, i1, i2));
+ }
+
+ /// True if index is inside array.
+ bool Contains(const Index &index) const {
+ for (int i = 0; i < N; ++i)
+ if (index(i) < 0 || index(i) >= Shape(i))
+ return false;
+ return true;
+ }
+
+ /// 1D specialization.
+ bool Contains(int i0) const {
+ return 0 <= i0 && i0 < Shape(0);
+ }
+
+ /// 2D specialization.
+ bool Contains(int i0, int i1) const {
+ return 0 <= i0 && i0 < Shape(0)
+ && 0 <= i1 && i1 < Shape(1);
+ }
+
+ /// 3D specialization.
+ bool Contains(int i0, int i1, int i2) const {
+ return 0 <= i0 && i0 < Shape(0)
+ && 0 <= i1 && i1 < Shape(1)
+ && 0 <= i2 && i2 < Shape(2);
+ }
+
+ bool operator==(const ArrayND<T, N> &other) const {
+ if (shape_ != other.shape_) return false;
+ if (strides_ != other.strides_) return false;
+ for (int i = 0; i < Size(); ++i) {
+ if (this->Data()[i] != other.Data()[i])
+ return false;
+ }
+ return true;
+ }
+
+ bool operator!=(const ArrayND<T, N> &other) const {
+ return !(*this == other);
+ }
+
+ ArrayND<T, N> operator*(const ArrayND<T, N> &other) const {
+ assert(Shape() == other.Shape());
+ ArrayND<T, N> res;
+ res.ResizeLike(*this);
+ for (int i = 0; i < res.Size(); ++i) {
+ res.Data()[i] = Data()[i] * other.Data()[i];
+ }
+ return res;
+ }
+
+ protected:
+ /// The number of elements in each dimension.
+ Index shape_;
+
+ /// How to jump to neighbors in each dimension.
+ Index strides_;
+
+ /// Pointer to the first element of the array.
+ T *data_;
+
+ /// Whether this Array owns its data (true) or references external data.
+ bool own_data_;
+};
+
+/// 3D array (row, column, channel).
+template <typename T>
+class Array3D : public ArrayND<T, 3> {
+ typedef ArrayND<T, 3> Base;
+ public:
+ Array3D()
+ : Base() {
+ }
+ Array3D(int height, int width, int depth = 1)
+ : Base(height, width, depth) {
+ }
+ Array3D(T* data, int height, int width, int depth = 1)
+ : Base(data, height, width, depth) {
+ }
+
+ void Resize(int height, int width, int depth = 1) {
+ Base::Resize(height, width, depth);
+ }
+
+ int Height() const {
+ return Base::Shape(0);
+ }
+ int Width() const {
+ return Base::Shape(1);
+ }
+ int Depth() const {
+ return Base::Shape(2);
+ }
+
+ // Match Eigen2's API so that Array3D's and Mat*'s can work together via
+ // template magic.
+ int rows() const { return Height(); }
+ int cols() const { return Width(); }
+ int depth() const { return Depth(); }
+
+ int Get_Step() const { return Width()*Depth(); }
+
+ /// Enable accessing with 2 indices for grayscale images.
+ T &operator()(int i0, int i1, int i2 = 0) {
+ assert(0 <= i0 && i0 < Height());
+ assert(0 <= i1 && i1 < Width());
+ return Base::operator()(i0, i1, i2);
+ }
+ const T &operator()(int i0, int i1, int i2 = 0) const {
+ assert(0 <= i0 && i0 < Height());
+ assert(0 <= i1 && i1 < Width());
+ return Base::operator()(i0, i1, i2);
+ }
+};
+
+typedef Array3D<unsigned char> Array3Du;
+typedef Array3D<unsigned int> Array3Dui;
+typedef Array3D<int> Array3Di;
+typedef Array3D<float> Array3Df;
+typedef Array3D<short> Array3Ds;
+
+void SplitChannels(const Array3Df &input,
+ Array3Df *channel0,
+ Array3Df *channel1,
+ Array3Df *channel2);
+
+void PrintArray(const Array3Df &array);
+
+/** Convert a float array into a byte array by mapping values from [min, max]
+ * to [0, 255], i.e. subtracting min and scaling by 255 / (max - min).
+ * min and max are detected automatically if automatic_range_detection = true;
+ * otherwise the range [0, 1] is assumed.
+ * \note TODO: the automatic detection only covers the full range when the
+ * image contains at least one pixel at each bound. **/
+void FloatArrayToScaledByteArray(const Array3Df &float_array,
+ Array3Du *byte_array,
+ bool automatic_range_detection = false);
+
+//! Convert a byte array into a float array by dividing values by 255.
+void ByteArrayToScaledFloatArray(const Array3Du &byte_array,
+ Array3Df *float_array);
+
+template <typename AArrayType, typename BArrayType, typename CArrayType>
+void MultiplyElements(const AArrayType &a,
+ const BArrayType &b,
+ CArrayType *c) {
+ // This function does an element-wise multiply between
+ // the two Arrays A and B, and stores the result in C.
+ // A and B must have the same dimensions.
+ assert(a.Shape() == b.Shape());
+ c->ResizeLike(a);
+
+ // To perform the multiplication, a "current" index into the N-dimensions of
+ // the A and B matrix specifies which elements are being multiplied.
+ typename CArrayType::Index index;
+
+ // The index starts at the maximum value for each dimension
+ const typename CArrayType::Index& cShape = c->Shape();
+ for ( int i = 0; i < CArrayType::Index::SIZE; ++i )
+ index(i) = cShape(i) - 1;
+
+ // After each multiplication, the highest-dimensional index is decremented.
+ // If this takes it below zero, it resets to its maximum value and the index
+ // of the next lower dimension is decremented. This ripple-action continues
+ // until the entire new array has been calculated, which is indicated by
+ // dimension zero having a negative index.
+ while ( index(0) >= 0 ) {
+ (*c)(index) = a(index) * b(index);
+
+ int dimension = CArrayType::Index::SIZE - 1;
+ index(dimension) = index(dimension) - 1;
+ while ( dimension > 0 && index(dimension) < 0 ) {
+ index(dimension) = cShape(dimension) - 1;
+ index(dimension - 1) = index(dimension - 1) - 1;
+ --dimension;
+ }
+ }
+}
+
+template <typename TA, typename TB, typename TC>
+void MultiplyElements(const ArrayND<TA, 3> &a,
+ const ArrayND<TB, 3> &b,
+ ArrayND<TC, 3> *c) {
+ // Specialization for N==3
+ c->ResizeLike(a);
+ assert(a.Shape(0) == b.Shape(0));
+ assert(a.Shape(1) == b.Shape(1));
+ assert(a.Shape(2) == b.Shape(2));
+ for (int i = 0; i < a.Shape(0); ++i) {
+ for (int j = 0; j < a.Shape(1); ++j) {
+ for (int k = 0; k < a.Shape(2); ++k) {
+ (*c)(i, j, k) = TC(a(i, j, k) * b(i, j, k));
+ }
+ }
+ }
+}
+
+template <typename TA, typename TB, typename TC>
+void MultiplyElements(const Array3D<TA> &a,
+ const Array3D<TB> &b,
+ Array3D<TC> *c) {
+ // Specialization for N==3
+ c->ResizeLike(a);
+ assert(a.Shape(0) == b.Shape(0));
+ assert(a.Shape(1) == b.Shape(1));
+ assert(a.Shape(2) == b.Shape(2));
+ for (int i = 0; i < a.Shape(0); ++i) {
+ for (int j = 0; j < a.Shape(1); ++j) {
+ for (int k = 0; k < a.Shape(2); ++k) {
+ (*c)(i, j, k) = TC(a(i, j, k) * b(i, j, k));
+ }
+ }
+ }
+}
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_ARRAY_ND_H
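
One detail worth noting: the (data, s0, s1, s2) constructor wraps caller-owned
memory without taking ownership. A hypothetical sketch:

  #include "libmv/image/array_nd.h"

  void WrapExternalBuffer() {
    float pixels[2 * 3 * 1] = {0};  // Row-major height x width x depth.
    // own_data_ is false, so the destructor will not delete pixels.
    libmv::Array3Df view(pixels, 2, 3, 1);
    view(1, 2, 0) = 0.25f;  // Writes straight into pixels[5].
  }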
diff --git a/intern/libmv/libmv/image/array_nd_test.cc b/intern/libmv/libmv/image/array_nd_test.cc
new file mode 100644
index 00000000000..313f21b60e9
--- /dev/null
+++ b/intern/libmv/libmv/image/array_nd_test.cc
@@ -0,0 +1,324 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/array_nd.h"
+#include "testing/testing.h"
+
+using libmv::ArrayND;
+using libmv::Array3D;
+using libmv::Array3Df;
+
+namespace {
+
+TEST(ArrayND, EmptyConstructor) {
+ ArrayND<int, 2> a;
+
+ EXPECT_EQ(0, a.Shape(0));
+ EXPECT_EQ(0, a.Shape(1));
+}
+
+TEST(ArrayND, IndexConstructor) {
+ int s[] = {1, 2, 3};
+ ArrayND<int, 3>::Index shape(s);
+ ArrayND<int, 3> a(shape);
+
+ EXPECT_EQ(1, a.Shape(0));
+ EXPECT_EQ(2, a.Shape(1));
+ EXPECT_EQ(3, a.Shape(2));
+}
+
+TEST(ArrayND, PointerConstructor) {
+ int s[] = {1, 2, 3};
+ ArrayND<int, 3> a(s);
+
+ EXPECT_EQ(1, a.Shape(0));
+ EXPECT_EQ(2, a.Shape(1));
+ EXPECT_EQ(3, a.Shape(2));
+}
+
+TEST(ArrayND, CopyConstructor) {
+ int s[] = {1, 2, 3};
+ ArrayND<int, 3> a(s);
+ a(0, 1, 1) = 3;
+ a(0, 1, 2) = 3;
+ ArrayND<int, 3> b(a);
+ EXPECT_EQ(1, b.Shape(0));
+ EXPECT_EQ(2, b.Shape(1));
+ EXPECT_EQ(3, b.Shape(2));
+ EXPECT_EQ(3, b(0, 1, 1));
+ b(0, 1, 2) = 2;
+ EXPECT_EQ(3, a(0, 1, 2));
+}
+
+TEST(ArrayND, Assignation) {
+ int s[] = {1, 2, 3};
+ ArrayND<int, 3> a(s);
+ a(0, 1, 1) = 3;
+ a(0, 1, 2) = 3;
+ ArrayND<int, 3> b;
+ b = a;
+ EXPECT_EQ(1, b.Shape(0));
+ EXPECT_EQ(2, b.Shape(1));
+ EXPECT_EQ(3, b.Shape(2));
+ EXPECT_EQ(3, b(0, 1, 1));
+ b(0, 1, 2) = 2;
+ EXPECT_EQ(3, a(0, 1, 2));
+}
+
+TEST(ArrayND, Fill) {
+ int s[] = {2, 2};
+ ArrayND<int, 2> a(s);
+ a.Fill(42);
+ EXPECT_EQ(42, a(0, 0));
+ EXPECT_EQ(42, a(0, 1));
+ EXPECT_EQ(42, a(1, 0));
+ EXPECT_EQ(42, a(1, 1));
+}
+
+TEST(ArrayND, Size) {
+ int s[] = {1, 2, 3};
+ ArrayND<int, 3>::Index shape(s);
+ ArrayND<int, 3> a(shape);
+
+ int l[] = {0, 1, 2};
+ ArrayND<int, 3>::Index last(l);
+
+ EXPECT_EQ(a.Size(), a.Offset(last)+1);
+ EXPECT_TRUE(a.Contains(last));
+ EXPECT_FALSE(a.Contains(shape));
+}
+
+TEST(ArrayND, MemorySizeInBytes) {
+ int s[] = {2, 3};
+ ArrayND<int, 2>::Index shape(s);
+ ArrayND<int, 2> a(shape);
+
+ int size = 24 + sizeof(a);
+ EXPECT_EQ(size, a.MemorySizeInBytes());
+}
+
+TEST(ArrayND, Parenthesis) {
+ typedef ArrayND<int, 2>::Index Index;
+
+ int s[] = {3, 3};
+ ArrayND<int, 2> a(s);
+
+ *(a.Data()+0) = 0;
+ *(a.Data()+5) = 5;
+
+ int i1[] = {0, 0};
+ EXPECT_EQ(0, a(Index(i1)));
+ int i2[] = {1, 2};
+ EXPECT_EQ(5, a(Index(i2)));
+}
+
+TEST(ArrayND, 1DConstructor) {
+ ArrayND<int, 1> a(3);
+
+ EXPECT_EQ(3, a.Shape(0));
+}
+
+TEST(ArrayND, 2DConstructor) {
+ ArrayND<int, 2> a(1, 2);
+
+ EXPECT_EQ(1, a.Shape(0));
+ EXPECT_EQ(2, a.Shape(1));
+}
+
+TEST(ArrayND, 3DConstructor) {
+ ArrayND<int, 3> a(1, 2, 3);
+
+ EXPECT_EQ(1, a.Shape(0));
+ EXPECT_EQ(2, a.Shape(1));
+ EXPECT_EQ(3, a.Shape(2));
+}
+
+TEST(ArrayND, 1DAccessor) {
+ ArrayND<int, 1> a(3);
+ a(0) = 1;
+ a(1) = 2;
+
+ EXPECT_EQ(1, a(0));
+ EXPECT_EQ(2, a(1));
+ EXPECT_EQ(1, *(a.Data()));
+ EXPECT_EQ(2, *(a.Data() + a.Stride(0)));
+}
+
+TEST(ArrayND, 2DAccessor) {
+ ArrayND<int, 2> a(3, 3);
+ a(0, 0) = 1;
+ a(1, 1) = 2;
+
+ EXPECT_EQ(1, a(0, 0));
+ EXPECT_EQ(2, a(1, 1));
+ EXPECT_EQ(1, *(a.Data()));
+ EXPECT_EQ(2, *(a.Data() + a.Stride(0) + a.Stride(1)));
+}
+
+TEST(ArrayND, 3DAccessor) {
+ ArrayND<int, 3> a(3, 3, 3);
+ a(0, 0, 0) = 1;
+ a(1, 1, 1) = 2;
+
+ EXPECT_EQ(1, a(0, 0, 0));
+ EXPECT_EQ(2, a(1, 1, 1));
+ EXPECT_EQ(1, *(a.Data()));
+ EXPECT_EQ(2, *(a.Data() + a.Stride(0) + a.Stride(1) + a.Stride(2)));
+}
+
+TEST(ArrayND, CopyFrom) {
+ ArrayND<int, 3> a(2, 2, 1);
+ a(0, 0, 0) = 1;
+ a(0, 1, 0) = 2;
+ a(1, 0, 0) = 3;
+ a(1, 1, 0) = 4;
+ ArrayND<float, 3> b;
+ b.CopyFrom(a);
+ EXPECT_FLOAT_EQ(1.f, b(0, 0, 0));
+ EXPECT_FLOAT_EQ(2.f, b(0, 1, 0));
+ EXPECT_FLOAT_EQ(3.f, b(1, 0, 0));
+ EXPECT_FLOAT_EQ(4.f, b(1, 1, 0));
+}
+
+TEST(ArrayND, MultiplyElements) {
+ ArrayND<int, 3> a(2, 2, 1);
+ a(0, 0, 0) = 1;
+ a(0, 1, 0) = 2;
+ a(1, 0, 0) = 3;
+ a(1, 1, 0) = 4;
+ ArrayND<int, 3> b(2, 2, 1);
+ b(0, 0, 0) = 6;
+ b(0, 1, 0) = 5;
+ b(1, 0, 0) = 4;
+ b(1, 1, 0) = 3;
+ ArrayND<int, 3> c;
+ MultiplyElements(a, b, &c);
+ EXPECT_FLOAT_EQ(6, c(0, 0, 0));
+ EXPECT_FLOAT_EQ(10, c(0, 1, 0));
+ EXPECT_FLOAT_EQ(12, c(1, 0, 0));
+ EXPECT_FLOAT_EQ(12, c(1, 1, 0));
+}
+
+TEST(ArrayND, IsEqualOperator) {
+ ArrayND<int, 3> a(2, 2, 1);
+ a(0, 0, 0) = 1;
+ a(0, 1, 0) = 2;
+ a(1, 0, 0) = 3;
+ a(1, 1, 0) = 4;
+ ArrayND<int, 3> b(2, 2, 1);
+ b(0, 0, 0) = 1;
+ b(0, 1, 0) = 2;
+ b(1, 0, 0) = 3;
+ b(1, 1, 0) = 4;
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+ b(1, 1, 0) = 5;
+ EXPECT_TRUE(a != b);
+ EXPECT_FALSE(a == b);
+}
+
+TEST(Array3D, Sizes) {
+ Array3D<int> array;
+ EXPECT_EQ(array.Height(), 0);
+ EXPECT_EQ(array.Width(), 0);
+ EXPECT_EQ(array.Depth(), 0);
+ EXPECT_EQ(array.Shape(0), 0);
+}
+
+TEST(Array3D, CopyConstructor) {
+ Array3D<int> array(10, 10);
+ array(0, 0) = 1;
+ array(0, 1) = 1;
+ Array3D<int> copy(array);
+ EXPECT_EQ(copy.Height(), 10);
+ EXPECT_EQ(copy.Width(), 10);
+ EXPECT_EQ(copy.Depth(), 1);
+ EXPECT_EQ(copy(0, 0), 1);
+ copy(0, 1) = 2;
+ EXPECT_EQ(array(0, 1), 1);
+}
+
+TEST(Array3D, Assignation) {
+ Array3D<int> array(10, 10);
+ array(0, 0) = 1;
+ array(0, 1) = 1;
+ Array3D<int> copy;
+ copy = array;
+ EXPECT_EQ(copy.Height(), 10);
+ EXPECT_EQ(copy.Width(), 10);
+ EXPECT_EQ(copy.Depth(), 1);
+ EXPECT_EQ(copy(0, 0), 1);
+ copy(0, 1) = 2;
+ EXPECT_EQ(array(0, 1), 1);
+}
+
+TEST(Array3D, Parenthesis) {
+ Array3D<int> array(1, 2, 3);
+ array(0, 1, 0) = 3;
+ EXPECT_EQ(array(0, 1), 3);
+}
+
+TEST(Array3Df, SplitChannels) {
+ Array3Df array(1, 2, 3);
+ array(0, 0, 0) = 1;
+ array(0, 1, 0) = 1;
+ array(0, 0, 1) = 2;
+ array(0, 1, 1) = 2;
+ array(0, 0, 2) = 3;
+ array(0, 1, 2) = 3;
+ Array3Df c0, c1, c2;
+ SplitChannels(array, &c0, &c1, &c2);
+ for (int row = 0; row < 1; ++row) {
+ for (int column = 0; column < 2; ++column) {
+ EXPECT_EQ(array(row, column, 0), c0(row, column));
+ EXPECT_EQ(array(row, column, 1), c1(row, column));
+ EXPECT_EQ(array(row, column, 2), c2(row, column));
+ }
+ }
+}
+
+TEST(ArrayND, MultiplyElementsGeneric) {
+ ArrayND<double, 5> A;
+ ArrayND<int, 5> B;
+ ArrayND<double, 5> C;
+ int shape[] = {1, 3, 5, 7, 1};
+ A.Resize(shape);
+ B.Resize(shape);
+
+ A.Fill(1.1);
+ B.Fill(2);
+ MultiplyElements(A, B, &C);
+
+ ArrayND<double, 5>::Index cIndex;
+ for (int d0 = 0; d0 < shape[0]; ++d0)
+ for (int d1 = 0; d1 < shape[1]; ++d1)
+ for (int d2 = 0; d2 < shape[2]; ++d2)
+ for (int d3 = 0; d3 < shape[3]; ++d3)
+ for (int d4 = 0; d4 < shape[4]; ++d4) {
+ cIndex(0) = d0;
+ cIndex(1) = d1;
+ cIndex(2) = d2;
+ cIndex(3) = d3;
+ cIndex(4) = d4;
+ EXPECT_EQ(2.2, C(cIndex));
+ }
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/image/convolve.cc b/intern/libmv/libmv/image/convolve.cc
new file mode 100644
index 00000000000..464043581d2
--- /dev/null
+++ b/intern/libmv/libmv/image/convolve.cc
@@ -0,0 +1,344 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/convolve.h"
+
+#include <cmath>
+
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+// Compute a Gaussian kernel and derivative, such that you can take the
+// derivative of an image by convolving with the kernel horizontally then the
+// derivative vertically to get (eg) the y derivative.
+void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative) {
+ assert(sigma >= 0.0);
+
+ // 0.004 implies a 3 pixel kernel with 1 pixel sigma.
+ const float truncation_factor = 0.004f;
+
+ // Calculate the kernel size based on sigma such that it is odd.
+ float precisehalfwidth = GaussianInversePositive(truncation_factor, sigma);
+ int width = lround(2*precisehalfwidth);
+ if (width % 2 == 0) {
+ width++;
+ }
+ // Calculate the gaussian kernel and its derivative.
+ kernel->resize(width);
+ derivative->resize(width);
+ kernel->setZero();
+ derivative->setZero();
+ int halfwidth = width / 2;
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
+ (*kernel)(i + halfwidth) = Gaussian(i, sigma);
+ (*derivative)(i + halfwidth) = GaussianDerivative(i, sigma);
+ }
+ // Since images should not get brighter or darker, normalize.
+ NormalizeL1(kernel);
+
+ // Normalize the derivative differently. See
+ // www.cs.duke.edu/courses/spring03/cps296.1/handouts/Image%20Processing.pdf
+ double factor = 0.;
+ for (int i = -halfwidth; i <= halfwidth; ++i) {
+ factor -= i*(*derivative)(i+halfwidth);
+ }
+ *derivative /= factor;
+}
+
+template <int size, bool vertical>
+void FastConvolve(const Vec &kernel, int width, int height,
+ const float* src, int src_stride, int src_line_stride,
+ float* dst, int dst_stride) {
+ double coefficients[2 * size + 1];
+ for (int k = 0; k < 2 * size + 1; ++k) {
+ coefficients[k] = kernel(2 * size - k);
+ }
+ // Fast path: the kernel half-width is a compile-time constant here.
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ double sum = 0;
+ for (int k = -size; k <= size; ++k) {
+ if (vertical) {
+ if (y + k >= 0 && y + k < height) {
+ sum += src[k * src_line_stride] * coefficients[k + size];
+ }
+ } else {
+ if (x + k >= 0 && x + k < width) {
+ sum += src[k * src_stride] * coefficients[k + size];
+ }
+ }
+ }
+ dst[0] = static_cast<float>(sum);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+}
+
+template<bool vertical>
+void Convolve(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ int width = in.Width();
+ int height = in.Height();
+ Array3Df &out = *out_pointer;
+ if (plane == -1) {
+ out.ResizeLike(in);
+ plane = 0;
+ }
+
+ assert(kernel.size() % 2 == 1);
+ assert(&in != out_pointer);
+
+ int src_line_stride = in.Stride(0);
+ int src_stride = in.Stride(1);
+ int dst_stride = out.Stride(1);
+ const float* src = in.Data();
+ float* dst = out.Data() + plane;
+
+ // Use a dispatch table to make most convolutions used in practice use the
+ // fast path.
+ int half_width = kernel.size() / 2;
+ switch (half_width) {
+#define static_convolution(size) case size: \
+ FastConvolve<size, vertical>(kernel, width, height, src, src_stride, \
+ src_line_stride, dst, dst_stride); break;
+ static_convolution(1)
+ static_convolution(2)
+ static_convolution(3)
+ static_convolution(4)
+ static_convolution(5)
+ static_convolution(6)
+ static_convolution(7)
+#undef static_convolution
+ default:
+ int dynamic_size = kernel.size() / 2;
+ for (int y = 0; y < height; ++y) {
+ for (int x = 0; x < width; ++x) {
+ double sum = 0;
+ // Slow path: this loop cannot be unrolled.
+ for (int k = -dynamic_size; k <= dynamic_size; ++k) {
+ if (vertical) {
+ if (y + k >= 0 && y + k < height) {
+ sum += src[k * src_line_stride] *
+ kernel(2 * dynamic_size - (k + dynamic_size));
+ }
+ } else {
+ if (x + k >= 0 && x + k < width) {
+ sum += src[k * src_stride] *
+ kernel(2 * dynamic_size - (k + dynamic_size));
+ }
+ }
+ }
+ dst[0] = static_cast<float>(sum);
+ src += src_stride;
+ dst += dst_stride;
+ }
+ }
+ }
+}
+
+void ConvolveHorizontal(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ Convolve<false>(in, kernel, out_pointer, plane);
+}
+
+void ConvolveVertical(const Array3Df &in,
+ const Vec &kernel,
+ Array3Df *out_pointer,
+ int plane) {
+ Convolve<true>(in, kernel, out_pointer, plane);
+}
+
+void ConvolveGaussian(const Array3Df &in,
+ double sigma,
+ Array3Df *out_pointer) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+
+ Array3Df tmp;
+ ConvolveVertical(in, kernel, &tmp);
+ ConvolveHorizontal(tmp, kernel, out_pointer);
+}
+
+void ImageDerivatives(const Array3Df &in,
+ double sigma,
+ Array3Df *gradient_x,
+ Array3Df *gradient_y) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+ Array3Df tmp;
+
+ // Compute first derivative in x.
+ ConvolveVertical(in, kernel, &tmp);
+ ConvolveHorizontal(tmp, derivative, gradient_x);
+
+ // Compute first derivative in y.
+ ConvolveHorizontal(in, kernel, &tmp);
+ ConvolveVertical(tmp, derivative, gradient_y);
+}
+
+void BlurredImageAndDerivatives(const Array3Df &in,
+ double sigma,
+ Array3Df *blurred_image,
+ Array3Df *gradient_x,
+ Array3Df *gradient_y) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+ Array3Df tmp;
+
+ // Compute convolved image.
+ ConvolveVertical(in, kernel, &tmp);
+ ConvolveHorizontal(tmp, kernel, blurred_image);
+
+ // Compute first derivative in x (reusing vertical convolution above).
+ ConvolveHorizontal(tmp, derivative, gradient_x);
+
+ // Compute first derivative in y.
+ ConvolveHorizontal(in, kernel, &tmp);
+ ConvolveVertical(tmp, derivative, gradient_y);
+}
+
+// Compute the gaussian blur of an image and the derivatives of the blurred
+// image, and store the results in three channels. Since the blurred value and
+// gradients are closer in memory, this leads to better performance if all
+// three values are needed at the same time.
+void BlurredImageAndDerivativesChannels(const Array3Df &in,
+ double sigma,
+ Array3Df *blurred_and_gradxy) {
+ assert(in.Depth() == 1);
+
+ Vec kernel, derivative;
+ ComputeGaussianKernel(sigma, &kernel, &derivative);
+
+ // Compute convolved image.
+ Array3Df tmp;
+ ConvolveVertical(in, kernel, &tmp);
+ blurred_and_gradxy->Resize(in.Height(), in.Width(), 3);
+ ConvolveHorizontal(tmp, kernel, blurred_and_gradxy, 0);
+
+ // Compute first derivative in x.
+ ConvolveHorizontal(tmp, derivative, blurred_and_gradxy, 1);
+
+ // Compute first derivative in y.
+ ConvolveHorizontal(in, kernel, &tmp);
+ ConvolveVertical(tmp, derivative, blurred_and_gradxy, 2);
+}
+
+void BoxFilterHorizontal(const Array3Df &in,
+ int window_size,
+ Array3Df *out_pointer) {
+ Array3Df &out = *out_pointer;
+ out.ResizeLike(in);
+ int half_width = (window_size - 1) / 2;
+
+ for (int k = 0; k < in.Depth(); ++k) {
+ for (int i = 0; i < in.Height(); ++i) {
+ float sum = 0;
+ // Init sum.
+ for (int j = 0; j < half_width; ++j) {
+ sum += in(i, j, k);
+ }
+ // Fill left border.
+ for (int j = 0; j < half_width + 1; ++j) {
+ sum += in(i, j + half_width, k);
+ out(i, j, k) = sum;
+ }
+ // Fill interior.
+ for (int j = half_width + 1; j < in.Width()-half_width; ++j) {
+ sum -= in(i, j - half_width - 1, k);
+ sum += in(i, j + half_width, k);
+ out(i, j, k) = sum;
+ }
+ // Fill right border.
+ for (int j = in.Width() - half_width; j < in.Width(); ++j) {
+ sum -= in(i, j - half_width - 1, k);
+ out(i, j, k) = sum;
+ }
+ }
+ }
+}
+
+void BoxFilterVertical(const Array3Df &in,
+ int window_size,
+ Array3Df *out_pointer) {
+ Array3Df &out = *out_pointer;
+ out.ResizeLike(in);
+ int half_width = (window_size - 1) / 2;
+
+ for (int k = 0; k < in.Depth(); ++k) {
+ for (int j = 0; j < in.Width(); ++j) {
+ float sum = 0;
+ // Init sum.
+ for (int i = 0; i < half_width; ++i) {
+ sum += in(i, j, k);
+ }
+      // Fill top border.
+ for (int i = 0; i < half_width + 1; ++i) {
+ sum += in(i + half_width, j, k);
+ out(i, j, k) = sum;
+ }
+ // Fill interior.
+ for (int i = half_width + 1; i < in.Height()-half_width; ++i) {
+ sum -= in(i - half_width - 1, j, k);
+ sum += in(i + half_width, j, k);
+ out(i, j, k) = sum;
+ }
+      // Fill bottom border.
+ for (int i = in.Height() - half_width; i < in.Height(); ++i) {
+ sum -= in(i - half_width - 1, j, k);
+ out(i, j, k) = sum;
+ }
+ }
+ }
+}
+
+void BoxFilter(const Array3Df &in,
+ int box_width,
+ Array3Df *out) {
+ Array3Df tmp;
+ BoxFilterHorizontal(in, box_width, &tmp);
+ BoxFilterVertical(tmp, box_width, out);
+}
+
+void LaplaceFilter(unsigned char* src,
+ unsigned char* dst,
+ int width,
+ int height,
+ int strength) {
+ for (int y = 1; y < height-1; y++)
+ for (int x = 1; x < width-1; x++) {
+ const unsigned char* s = &src[y*width+x];
+ int l = 128 +
+ s[-width-1] + s[-width] + s[-width+1] +
+        s[-1] - 8*s[0] + s[1] +
+ s[ width-1] + s[ width] + s[ width+1];
+ int d = ((256-strength)*s[0] + strength*l) / 256;
+ if (d < 0) d=0;
+ if (d > 255) d=255;
+ dst[y*width+x] = d;
+ }
+}
+
+} // namespace libmv
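As a usage reference for the convolution helpers above, here is a minimal sketch (the function name ExampleBlurAndGradients, the image size and the sigma value are made up; the API is the one declared in convolve.h, the next file in this diff):

#include "libmv/image/convolve.h"
#include "libmv/image/image.h"

// Sketch only: blur a single-channel image and read back the packed blurred
// value and x/y gradients from the three output channels.
void ExampleBlurAndGradients() {
  libmv::FloatImage image(64, 64);  // height, width; depth defaults to 1
  image.Fill(1.0f);

  libmv::FloatImage packed;
  libmv::BlurredImageAndDerivativesChannels(image, /*sigma=*/0.9, &packed);

  // Channel 0 holds the blurred value, channel 1 d/dx, channel 2 d/dy.
  float blurred = packed(32, 32, 0);
  float gx = packed(32, 32, 1);
  float gy = packed(32, 32, 2);
  (void)blurred; (void)gx; (void)gy;
}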
diff --git a/intern/libmv/libmv/image/convolve.h b/intern/libmv/libmv/image/convolve.h
new file mode 100644
index 00000000000..d3b6da9794b
--- /dev/null
+++ b/intern/libmv/libmv/image/convolve.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2007, 2008, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_CONVOLVE_H_
+#define LIBMV_IMAGE_CONVOLVE_H_
+
+#include "libmv/image/image.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// TODO(keir): Find a better place for these functions. gaussian.h in numeric?
+
+// Zero mean Gaussian.
+inline double Gaussian(double x, double sigma) {
+ return 1/sqrt(2*M_PI*sigma*sigma) * exp(-(x*x/2/sigma/sigma));
+}
+// 2D gaussian (zero mean)
+// (9) in http://mathworld.wolfram.com/GaussianFunction.html
+inline double Gaussian2D(double x, double y, double sigma) {
+ return 1.0/(2.0*M_PI*sigma*sigma) * exp( -(x*x+y*y)/(2.0*sigma*sigma));
+}
+inline double GaussianDerivative(double x, double sigma) {
+ return -x / sigma / sigma * Gaussian(x, sigma);
+}
+// Solve the inverse of the Gaussian for positive x.
+inline double GaussianInversePositive(double y, double sigma) {
+ return sqrt(-2 * sigma * sigma * log(y * sigma * sqrt(2*M_PI)));
+}
+
+void ComputeGaussianKernel(double sigma, Vec *kernel, Vec *derivative);
+void ConvolveHorizontal(const FloatImage &in,
+ const Vec &kernel,
+ FloatImage *out_pointer,
+ int plane = -1);
+void ConvolveVertical(const FloatImage &in,
+ const Vec &kernel,
+ FloatImage *out_pointer,
+ int plane = -1);
+void ConvolveGaussian(const FloatImage &in,
+ double sigma,
+ FloatImage *out_pointer);
+
+void ImageDerivatives(const FloatImage &in,
+ double sigma,
+ FloatImage *gradient_x,
+ FloatImage *gradient_y);
+
+void BlurredImageAndDerivatives(const FloatImage &in,
+ double sigma,
+ FloatImage *blurred_image,
+ FloatImage *gradient_x,
+ FloatImage *gradient_y);
+
+// Blur and take the gradients of an image, storing the results inside the
+// three channels of blurred_and_gradxy.
+void BlurredImageAndDerivativesChannels(const FloatImage &in,
+ double sigma,
+ FloatImage *blurred_and_gradxy);
+
+void BoxFilterHorizontal(const FloatImage &in,
+ int window_size,
+ FloatImage *out_pointer);
+
+void BoxFilterVertical(const FloatImage &in,
+ int window_size,
+ FloatImage *out_pointer);
+
+void BoxFilter(const FloatImage &in,
+ int box_width,
+ FloatImage *out);
+
+/*!
+ Convolve \a src into \a dst with the discrete laplacian operator.
+
+ \a src and \a dst should be \a width x \a height images.
+    \a strength is an interpolation coefficient (0-256) between the original image and its Laplacian.
+
+ \note Make sure the search region is filtered with the same strength as the pattern.
+*/
+void LaplaceFilter(unsigned char* src,
+ unsigned char* dst,
+ int width,
+ int height,
+ int strength);
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_CONVOLVE_H_
+
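A small sketch of the inline Gaussian helpers above; the round trip follows directly from the formulas (ExampleGaussianRoundTrip and the sigma/x values are illustrative only):

#include <cassert>
#include <cmath>

#include "libmv/image/convolve.h"

// Sketch only: GaussianInversePositive recovers the positive x that yields a
// given density value under the zero-mean Gaussian.
void ExampleGaussianRoundTrip() {
  const double sigma = 2.0;
  const double x = 1.5;
  const double y = libmv::Gaussian(x, sigma);
  const double x_back = libmv::GaussianInversePositive(y, sigma);
  assert(std::fabs(x_back - x) < 1e-9);
}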
diff --git a/intern/libmv/libmv/image/convolve_test.cc b/intern/libmv/libmv/image/convolve_test.cc
new file mode 100644
index 00000000000..0cdef8e1e72
--- /dev/null
+++ b/intern/libmv/libmv/image/convolve_test.cc
@@ -0,0 +1,110 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+using namespace libmv;
+
+namespace {
+
+TEST(Convolve, ComputeGaussianKernel) {
+ Vec kernel, derivative;
+ ComputeGaussianKernel(1, &kernel, &derivative);
+ EXPECT_EQ(7, kernel.size());
+ // TODO(keir): Put in a more thorough test here!
+}
+
+TEST(Convolve, ConvolveGaussian) {
+ FloatImage im(10, 10);
+ im.Fill(1);
+ FloatImage blured;
+ ConvolveGaussian(im, 3, &blured);
+ EXPECT_NEAR(im(5, 5), 1, 1e-7);
+}
+
+TEST(Convolve, BoxFilterHorizontal) {
+ FloatImage im(10, 10), convolved, filtered;
+ im.Fill(1);
+ BoxFilterHorizontal(im, 3, &filtered);
+ Vec kernel(3);
+ kernel.setConstant(1.);
+ ConvolveHorizontal(im, kernel, &convolved);
+ EXPECT_EQ(filtered(5, 5), 3);
+ EXPECT_TRUE(filtered == convolved);
+}
+
+TEST(Convolve, BoxFilter) {
+ FloatImage image(5, 5), filtered;
+ // A single 1.0 inside a 5x5 image should expand to a 3x3 square.
+ image.Fill(0);
+ image(2, 2) = 1.0;
+ BoxFilter(image, 3, &filtered);
+ for (int j = 0; j < 5; j++) {
+ for (int i = 0; i < 5; i++) {
+ if (i == 0 || i == 4 || j == 0 || j == 4) {
+ EXPECT_EQ(0.0, filtered(j, i));
+ } else {
+ EXPECT_EQ(1.0, filtered(j, i));
+ }
+ }
+ }
+}
+
+TEST(Convolve, BlurredImageAndDerivativesChannelsFlat) {
+ FloatImage im(10, 10), blurred_and_derivatives;
+ im.Fill(1);
+ BlurredImageAndDerivativesChannels(im, 1.0, &blurred_and_derivatives);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 1.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
+}
+
+TEST(Convolve, BlurredImageAndDerivativesChannelsHorizontalSlope) {
+ FloatImage image(10, 10), blurred_and_derivatives;
+ for (int j = 0; j < 10; ++j) {
+ for (int i = 0; i < 10; ++i) {
+ image(j, i) = 2*i;
+ }
+ }
+ BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 2.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 0.0, 1e-7);
+}
+
+TEST(Convolve, BlurredImageAndDerivativesChannelsVerticalSlope) {
+ FloatImage image(10, 10), blurred_and_derivatives;
+ for (int j = 0; j < 10; ++j) {
+ for (int i = 0; i < 10; ++i) {
+ image(j, i) = 2*j;
+ }
+ }
+ BlurredImageAndDerivativesChannels(image, 0.9, &blurred_and_derivatives);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 0), 10.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 1), 0.0, 1e-7);
+ EXPECT_NEAR(blurred_and_derivatives(5, 5, 2), 2.0, 1e-7);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/image/correlation.h b/intern/libmv/libmv/image/correlation.h
new file mode 100644
index 00000000000..c354f7e891e
--- /dev/null
+++ b/intern/libmv/libmv/image/correlation.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2012 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_CORRELATION_H
+#define LIBMV_IMAGE_CORRELATION_H
+
+#include "libmv/logging/logging.h"
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+inline double PearsonProductMomentCorrelation(
+ const FloatImage &image_and_gradient1_sampled,
+ const FloatImage &image_and_gradient2_sampled) {
+ assert(image_and_gradient1_sampled.Width() ==
+ image_and_gradient2_sampled.Width());
+ assert(image_and_gradient1_sampled.Height() ==
+ image_and_gradient2_sampled.Height());
+
+ const int width = image_and_gradient1_sampled.Width(),
+ height = image_and_gradient1_sampled.Height();
+ double sX = 0, sY = 0, sXX = 0, sYY = 0, sXY = 0;
+
+ for (int r = 0; r < height; ++r) {
+ for (int c = 0; c < width; ++c) {
+ double x = image_and_gradient1_sampled(r, c, 0);
+ double y = image_and_gradient2_sampled(r, c, 0);
+ sX += x;
+ sY += y;
+ sXX += x * x;
+ sYY += y * y;
+ sXY += x * y;
+ }
+ }
+
+ // Normalize.
+ double N = width * height;
+ sX /= N;
+ sY /= N;
+ sXX /= N;
+ sYY /= N;
+ sXY /= N;
+
+ double var_x = sXX - sX * sX;
+ double var_y = sYY - sY * sY;
+ double covariance_xy = sXY - sX * sY;
+
+ double correlation = covariance_xy / sqrt(var_x * var_y);
+ LG << "Covariance xy: " << covariance_xy
+ << ", var 1: " << var_x << ", var 2: " << var_y
+ << ", correlation: " << correlation;
+ return correlation;
+}
+
+} // namespace libmv
+
+#endif  // LIBMV_IMAGE_CORRELATION_H
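A minimal sketch of calling PearsonProductMomentCorrelation on two patches (the patch size and fill pattern are made up; identical, textured patches should correlate to 1):

#include "libmv/image/correlation.h"
#include "libmv/image/image.h"

// Sketch only: correlate two identical 15x15 patches with non-zero variance.
double ExampleCorrelation() {
  libmv::FloatImage a(15, 15), b(15, 15);
  for (int r = 0; r < 15; ++r) {
    for (int c = 0; c < 15; ++c) {
      a(r, c) = b(r, c) = static_cast<float>(r * c);
    }
  }
  // Perfectly correlated patches; the result should be very close to 1.0.
  return libmv::PearsonProductMomentCorrelation(a, b);
}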
diff --git a/intern/libmv/libmv/image/image.h b/intern/libmv/libmv/image/image.h
new file mode 100644
index 00000000000..e0f200a4c93
--- /dev/null
+++ b/intern/libmv/libmv/image/image.h
@@ -0,0 +1,155 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_IMAGE_H
+#define LIBMV_IMAGE_IMAGE_H
+
+#include <cmath>
+
+#include "libmv/image/array_nd.h"
+
+namespace libmv {
+
+typedef Array3Du ByteImage; // For backwards compatibility.
+typedef Array3Df FloatImage;
+
+// Types added only to manage special 2D arrays for feature detection.
+typedef Array3Di IntImage;
+typedef Array3Ds ShortImage;
+
+// An image class that is a thin wrapper around Array3D's of various types.
+// TODO(keir): Decide if we should add reference counting semantics... Maybe it
+// is the best solution after all.
+class Image {
+ public:
+
+ // Create an image from an array. The image takes ownership of the array.
+ Image(Array3Du *array) : array_type_(BYTE), array_(array) {}
+ Image(Array3Df *array) : array_type_(FLOAT), array_(array) {}
+
+ Image(const Image &img): array_type_(NONE), array_(NULL) {
+ *this = img;
+ }
+
+ // Underlying data type.
+ enum DataType {
+ NONE,
+ BYTE,
+ FLOAT,
+ INT,
+ SHORT,
+ };
+
+ // Size in bytes that the image takes in memory.
+ int MemorySizeInBytes() {
+ int size;
+ switch (array_type_) {
+ case BYTE:
+ size = reinterpret_cast<Array3Du *>(array_)->MemorySizeInBytes();
+ break;
+ case FLOAT:
+ size = reinterpret_cast<Array3Df *>(array_)->MemorySizeInBytes();
+ break;
+ case INT:
+ size = reinterpret_cast<Array3Di *>(array_)->MemorySizeInBytes();
+ break;
+ case SHORT:
+ size = reinterpret_cast<Array3Ds *>(array_)->MemorySizeInBytes();
+ break;
+ default :
+ size = 0;
+ assert(0);
+ }
+ size += sizeof(*this);
+ return size;
+ }
+
+ ~Image() {
+ switch (array_type_) {
+ case BYTE:
+ delete reinterpret_cast<Array3Du *>(array_);
+
+ break;
+ case FLOAT:
+ delete reinterpret_cast<Array3Df *>(array_);
+
+ break;
+ case INT:
+ delete reinterpret_cast<Array3Di *>(array_);
+
+ break;
+ case SHORT:
+ delete reinterpret_cast<Array3Ds *>(array_);
+
+ break;
+ default:
+ assert(0);
+ }
+ }
+
+ Image& operator= (const Image& f) {
+ if (this != &f) {
+ array_type_ = f.array_type_;
+ switch (array_type_) {
+ case BYTE:
+ delete reinterpret_cast<Array3Du *>(array_);
+ array_ = new Array3Du(*(Array3Du *)f.array_);
+ break;
+ case FLOAT:
+ delete reinterpret_cast<Array3Df *>(array_);
+ array_ = new Array3Df(*(Array3Df *)f.array_);
+ break;
+ case INT:
+ delete reinterpret_cast<Array3Di *>(array_);
+ array_ = new Array3Di(*(Array3Di *)f.array_);
+ break;
+ case SHORT:
+ delete reinterpret_cast<Array3Ds *>(array_);
+ array_ = new Array3Ds(*(Array3Ds *)f.array_);
+ break;
+ default:
+ assert(0);
+ }
+ }
+ return *this;
+ }
+
+ Array3Du *AsArray3Du() const {
+ if (array_type_ == BYTE) {
+ return reinterpret_cast<Array3Du *>(array_);
+ }
+ return NULL;
+ }
+
+ Array3Df *AsArray3Df() const {
+ if (array_type_ == FLOAT) {
+ return reinterpret_cast<Array3Df *>(array_);
+ }
+ return NULL;
+ }
+
+ private:
+ DataType array_type_;
+ BaseArray *array_;
+};
+
+} // namespace libmv
+
+#endif  // LIBMV_IMAGE_IMAGE_H
diff --git a/intern/libmv/libmv/image/image_converter.h b/intern/libmv/libmv/image/image_converter.h
new file mode 100644
index 00000000000..b3a3fa2bf8c
--- /dev/null
+++ b/intern/libmv/libmv/image/image_converter.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_IMAGE_CONVERTER_H
+#define LIBMV_IMAGE_IMAGE_CONVERTER_H
+
+#include "libmv/image/array_nd.h"
+
+namespace libmv {
+
+// The factors come from http://www.easyrgb.com/
+// RGB to XYZ: Y is the luminance channel,
+// Y = R * 0.2126 + G * 0.7152 + B * 0.0722
+template<typename T>
+inline T RGB2GRAY(const T r, const T g, const T b) {
+ return static_cast<T>(r * 0.2126 + g * 0.7152 + b * 0.0722);
+}
+
+/*
+// Specialization for the uchar type (which does not want to work...)
+template<>
+inline unsigned char RGB2GRAY<unsigned char>(const unsigned char r,
+ const unsigned char g,
+ const unsigned char b) {
+ return (unsigned char)(r * 0.2126 + g * 0.7152 + b * 0.0722 +0.5);
+}*/
+
+template<class ImageIn, class ImageOut>
+void Rgb2Gray(const ImageIn &imaIn, ImageOut *imaOut) {
+  // It is fine to convert an RGBA image here as well;
+  // all the additional channels will simply be ignored.
+ assert(imaIn.Depth() >= 3);
+
+ imaOut->Resize(imaIn.Height(), imaIn.Width(), 1);
+ // Convert each RGB pixel into Gray value (luminance)
+
+ for (int j = 0; j < imaIn.Height(); ++j) {
+ for (int i = 0; i < imaIn.Width(); ++i) {
+ (*imaOut)(j, i) = RGB2GRAY(imaIn(j, i, 0) , imaIn(j, i, 1), imaIn(j, i, 2));
+ }
+ }
+}
+
+// Convert given float image to an unsigned char array of pixels.
+template<class Image>
+unsigned char *FloatImageToUCharArray(const Image &image) {
+ unsigned char *buffer =
+ new unsigned char[image.Width() * image.Height() * image.Depth()];
+
+ for (int y = 0; y < image.Height(); y++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int d = 0; d < image.Depth(); d++) {
+ int index = (y * image.Width() + x) * image.Depth() + d;
+ buffer[index] = 255.0 * image(y, x, d);
+ }
+ }
+ }
+
+ return buffer;
+}
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_IMAGE_CONVERTER_H
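A minimal sketch of Rgb2Gray (the image size and the constant fill are arbitrary; since the three factors sum to 1.0, a uniform 0.5 RGB image maps to a uniform 0.5 gray image):

#include "libmv/image/image.h"
#include "libmv/image/image_converter.h"

// Sketch only: collapse a 3-channel float image into single-channel luminance.
void ExampleRgb2Gray() {
  libmv::FloatImage rgb(4, 4, 3);  // height, width, channels
  rgb.Fill(0.5f);

  libmv::FloatImage gray;
  libmv::Rgb2Gray(rgb, &gray);  // gray is resized to 4 x 4 x 1
  // gray(0, 0) == 0.5f * (0.2126f + 0.7152f + 0.0722f) == 0.5f
}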
diff --git a/intern/libmv/libmv/image/image_drawing.h b/intern/libmv/libmv/image/image_drawing.h
new file mode 100644
index 00000000000..f50e48b75a3
--- /dev/null
+++ b/intern/libmv/libmv/image/image_drawing.h
@@ -0,0 +1,285 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+// Generic Image Processing Algorithm (GIPA)
+// Use an ImageModel class that must implement the following:
+//
+// ::Contains(int y, int x)   <= Tells whether the point (y, x) is inside the image
+// ::operator()(int y, int x) <= Write accessor for the pixel (y, x)
+// ::Width()
+// ::Height()
+
+#ifndef LIBMV_IMAGE_IMAGE_DRAWING_H
+#define LIBMV_IMAGE_IMAGE_DRAWING_H
+
+namespace libmv {
+
+/// Set the pixel (xc, yc) in the image to the given color, but only if the
+/// point is inside the image.
+template <class Image, class Color>
+inline void safePutPixel(int yc, int xc, const Color & col, Image *pim) {
+ if (!pim)
+ return;
+ if (pim->Contains(yc, xc)) {
+ (*pim)(yc, xc) = col;
+ }
+}
+/// Set the pixel (xc, yc) in the image to the given color, but only if the
+/// point is inside the image. This overload supports multi-channel colors.
+/// \note The color pointer col must have as many elements as the image depth.
+template <class Image, class Color>
+inline void safePutPixel(int yc, int xc, const Color *col, Image *pim) {
+ if (!pim)
+ return;
+ if (pim->Contains(yc, xc)) {
+ for (int i = 0; i < pim->Depth(); ++i)
+ (*pim)(yc, xc, i) = *(col + i);
+ }
+}
+
+// Bresenham-style approach to draw an ellipse.
+// http://raphaello.univ-fcomte.fr/ig/algorithme/ExemplesGLUt/BresenhamEllipse.htm
+// Rotation of the ellipse is supported; since the algorithm relies on
+// symmetry, each point is plotted under the four rotated reflections.
+template <class Image, class Color>
+void DrawEllipse(int xc, int yc, int radiusA, int radiusB,
+ const Color &col, Image *pim, double angle = 0.0) {
+ int a = radiusA;
+ int b = radiusB;
+
+ // Counter Clockwise rotation matrix.
+ double matXY[4] = { cos(angle), sin(angle),
+ -sin(angle), cos(angle)};
+ int x, y;
+ double d1, d2;
+ x = 0;
+ y = b;
+ d1 = b*b - a*a*b + a*a/4;
+
+ float rotX = (matXY[0] * x + matXY[1] * y);
+ float rotY = (matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (matXY[0] * x - matXY[1] * y);
+ rotY = (matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x - matXY[1] * y);
+ rotY = (-matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x + matXY[1] * y);
+ rotY = (-matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+
+ while (a*a*(y-.5) > b*b*(x+1)) {
+ if (d1 < 0) {
+ d1 += b*b*(2*x+3);
+ ++x;
+ } else {
+ d1 += b*b*(2*x+3) + a*a*(-2*y+2);
+ ++x;
+ --y;
+ }
+ rotX = (matXY[0] * x + matXY[1] * y);
+ rotY = (matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (matXY[0] * x - matXY[1] * y);
+ rotY = (matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x - matXY[1] * y);
+ rotY = (-matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x + matXY[1] * y);
+ rotY = (-matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ }
+ d2 = b*b*(x+.5)*(x+.5) + a*a*(y-1)*(y-1) - a*a*b*b;
+ while (y > 0) {
+ if (d2 < 0) {
+ d2 += b*b*(2*x+2) + a*a*(-2*y+3);
+ --y;
+ ++x;
+ } else {
+ d2 += a*a*(-2*y+3);
+ --y;
+ }
+ rotX = (matXY[0] * x + matXY[1] * y);
+ rotY = (matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (matXY[0] * x - matXY[1] * y);
+ rotY = (matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x - matXY[1] * y);
+ rotY = (-matXY[2] * x - matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ rotX = (-matXY[0] * x + matXY[1] * y);
+ rotY = (-matXY[2] * x + matXY[3] * y);
+ safePutPixel(yc + rotY, xc + rotX, col, pim);
+ }
+}
+
+// The Bresenham approach cannot draw concentric circles without holes,
+// so the Andres method is used instead.
+// http://fr.wikipedia.org/wiki/Algorithme_de_tracé_de_cercle_d'Andres.
+template <class Image, class Color>
+void DrawCircle(int x, int y, int radius, const Color &col, Image *pim) {
+ Image &im = *pim;
+ if ( im.Contains(y + radius, x + radius)
+ || im.Contains(y + radius, x - radius)
+ || im.Contains(y - radius, x + radius)
+ || im.Contains(y - radius, x - radius)) {
+ int x1 = 0;
+ int y1 = radius;
+ int d = radius - 1;
+ while (y1 >= x1) {
+ // Draw the point for each octant.
+ safePutPixel( y1 + y, x1 + x, col, pim);
+ safePutPixel( x1 + y, y1 + x, col, pim);
+ safePutPixel( y1 + y, -x1 + x, col, pim);
+ safePutPixel( x1 + y, -y1 + x, col, pim);
+ safePutPixel(-y1 + y, x1 + x, col, pim);
+ safePutPixel(-x1 + y, y1 + x, col, pim);
+ safePutPixel(-y1 + y, -x1 + x, col, pim);
+ safePutPixel(-x1 + y, -y1 + x, col, pim);
+ if (d >= 2 * x1) {
+ d = d - 2 * x1 - 1;
+ x1 += 1;
+ } else {
+ if (d <= 2 * (radius - y1)) {
+ d = d + 2 * y1 - 1;
+ y1 -= 1;
+ } else {
+ d = d + 2 * (y1 - x1 - 1);
+ y1 -= 1;
+ x1 += 1;
+ }
+ }
+ }
+ }
+}
+
+// Bresenham algorithm
+template <class Image, class Color>
+void DrawLine(int xa, int ya, int xb, int yb, const Color &col, Image *pim) {
+ Image &im = *pim;
+
+  // If one point is outside the image, replace it with the intersection of
+  // the line and the image border (either x = width or y = height).
+ if (!im.Contains(ya, xa) || !im.Contains(yb, xb)) {
+ int width = pim->Width();
+ int height = pim->Height();
+ const bool xdir = xa < xb, ydir = ya < yb;
+ float nx0 = xa, nx1 = xb, ny0 = ya, ny1 = yb,
+ &xleft = xdir?nx0:nx1, &yleft = xdir?ny0:ny1,
+ &xright = xdir?nx1:nx0, &yright = xdir?ny1:ny0,
+ &xup = ydir?nx0:nx1, &yup = ydir?ny0:ny1,
+ &xdown = ydir?nx1:nx0, &ydown = ydir?ny1:ny0;
+
+ if (xright < 0 || xleft >= width) return;
+ if (xleft < 0) {
+ yleft -= xleft*(yright - yleft)/(xright - xleft);
+ xleft = 0;
+ }
+ if (xright >= width) {
+ yright -= (xright - width)*(yright - yleft)/(xright - xleft);
+ xright = width - 1;
+ }
+ if (ydown < 0 || yup >= height) return;
+ if (yup < 0) {
+ xup -= yup*(xdown - xup)/(ydown - yup);
+ yup = 0;
+ }
+ if (ydown >= height) {
+ xdown -= (ydown - height)*(xdown - xup)/(ydown - yup);
+ ydown = height - 1;
+ }
+
+ xa = (int) xleft;
+ xb = (int) xright;
+ ya = (int) yleft;
+ yb = (int) yright;
+ }
+
+ int xbas, xhaut, ybas, yhaut;
+  // Order the endpoints so that ybas <= yhaut.
+ if (ya <= yb) {
+ xbas = xa;
+ ybas = ya;
+ xhaut = xb;
+ yhaut = yb;
+ } else {
+ xbas = xb;
+ ybas = yb;
+ xhaut = xa;
+ yhaut = ya;
+ }
+ // Initialize slope.
+ int x, y, dx, dy, incrmX, incrmY, dp, N, S;
+ dx = xhaut - xbas;
+ dy = yhaut - ybas;
+ if (dx > 0) { // If xhaut > xbas we will increment X.
+ incrmX = 1;
+ } else {
+ incrmX = -1; // else we will decrement X.
+ dx *= -1;
+ }
+  if (dy > 0) {  // Positive slope will increment Y.
+ incrmY = 1;
+ } else { // Negative slope.
+ incrmY = -1;
+ }
+ if (dx >= dy) {
+ dp = 2 * dy - dx;
+ S = 2 * dy;
+ N = 2 * (dy - dx);
+ y = ybas;
+ x = xbas;
+ while (x != xhaut) {
+ safePutPixel(y, x, col, pim);
+ x += incrmX;
+ if (dp <= 0) { // Go in direction of the South Pixel.
+ dp += S;
+ } else { // Go to the North.
+ dp += N;
+ y+=incrmY;
+ }
+ }
+ } else {
+ dp = 2 * dx - dy;
+ S = 2 * dx;
+ N = 2 * (dx - dy);
+ x = xbas;
+ y = ybas;
+ while (y < yhaut) {
+ safePutPixel(y, x, col, pim);
+ y += incrmY;
+ if (dp <= 0) { // Go in direction of the South Pixel.
+ dp += S;
+ } else { // Go to the North.
+ dp += N;
+ x += incrmX;
+ }
+ }
+ }
+ safePutPixel(y, x, col, pim);
+}
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_IMAGE_DRAWING_H
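A minimal sketch of the drawing templates above. GrayCanvas is a made-up type that satisfies the ImageModel interface described at the top of this header; any type providing Contains(), operator(), Width() and Height() works the same way:

#include "libmv/image/image_drawing.h"

// Sketch only: a tiny single-channel canvas fulfilling the ImageModel contract.
struct GrayCanvas {
  enum { kSize = 100 };
  float pixels[kSize][kSize];
  bool Contains(int y, int x) const {
    return 0 <= y && y < kSize && 0 <= x && x < kSize;
  }
  float &operator()(int y, int x) { return pixels[y][x]; }
  int Width() const { return kSize; }
  int Height() const { return kSize; }
};

void ExampleDrawing() {
  GrayCanvas canvas = {};                          // zero-initialized pixels
  libmv::DrawLine(10, 10, 90, 40, 1.0f, &canvas);  // (xa, ya, xb, yb, color)
  libmv::DrawCircle(50, 50, 20, 1.0f, &canvas);    // (x, y, radius, color)
}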
diff --git a/intern/libmv/libmv/image/image_test.cc b/intern/libmv/libmv/image/image_test.cc
new file mode 100644
index 00000000000..241f49f2244
--- /dev/null
+++ b/intern/libmv/libmv/image/image_test.cc
@@ -0,0 +1,45 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/image/image.h"
+#include "testing/testing.h"
+
+using libmv::Image;
+using libmv::Array3Df;
+
+namespace {
+
+TEST(Image, SimpleImageAccessors) {
+ Array3Df *array = new Array3Df(2, 3);
+ Image image(array);
+ EXPECT_EQ(array, image.AsArray3Df());
+ EXPECT_TRUE(NULL == image.AsArray3Du());
+}
+
+TEST(Image, MemorySizeInBytes) {
+ Array3Df *array = new Array3Df(2, 3);
+ Image image(array);
+ int size = sizeof(image) + array->MemorySizeInBytes();
+ EXPECT_EQ(size, image.MemorySizeInBytes());
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/image/sample.h b/intern/libmv/libmv/image/sample.h
new file mode 100644
index 00000000000..24eb9ccd57d
--- /dev/null
+++ b/intern/libmv/libmv/image/sample.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_SAMPLE_H_
+#define LIBMV_IMAGE_SAMPLE_H_
+
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+/// Nearest neighbor interpolation.
+template<typename T>
+inline T SampleNearest(const Array3D<T> &image,
+ float y, float x, int v = 0) {
+ const int i = int(round(y));
+ const int j = int(round(x));
+ return image(i, j, v);
+}
+
+inline void LinearInitAxis(float x, int size,
+ int *x1, int *x2,
+ float *dx) {
+ const int ix = static_cast<int>(x);
+ if (ix < 0) {
+ *x1 = 0;
+ *x2 = 0;
+ *dx = 1.0;
+ } else if (ix > size - 2) {
+ *x1 = size - 1;
+ *x2 = size - 1;
+ *dx = 1.0;
+ } else {
+ *x1 = ix;
+ *x2 = ix + 1;
+ *dx = *x2 - x;
+ }
+}
+
+/// Linear interpolation.
+template<typename T>
+inline T SampleLinear(const Array3D<T> &image, float y, float x, int v = 0) {
+ int x1, y1, x2, y2;
+ float dx, dy;
+
+ LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
+ LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
+
+ const T im11 = image(y1, x1, v);
+ const T im12 = image(y1, x2, v);
+ const T im21 = image(y2, x1, v);
+ const T im22 = image(y2, x2, v);
+
+ return T( dy * (dx * im11 + (1.0 - dx) * im12) +
+ (1 - dy) * (dx * im21 + (1.0 - dx) * im22));
+}
+
+/// Linear interpolation, of all channels. The sample is assumed to have the
+/// same size as the number of channels in image.
+template<typename T>
+inline void SampleLinear(const Array3D<T> &image, float y, float x, T *sample) {
+ int x1, y1, x2, y2;
+ float dx, dy;
+
+ LinearInitAxis(y, image.Height(), &y1, &y2, &dy);
+ LinearInitAxis(x, image.Width(), &x1, &x2, &dx);
+
+ for (int i = 0; i < image.Depth(); ++i) {
+ const T im11 = image(y1, x1, i);
+ const T im12 = image(y1, x2, i);
+ const T im21 = image(y2, x1, i);
+ const T im22 = image(y2, x2, i);
+
+ sample[i] = T( dy * (dx * im11 + (1.0 - dx) * im12) +
+ (1 - dy) * (dx * im21 + (1.0 - dx) * im22));
+ }
+}
+
+// Downsample all channels by 2. If the image has odd width or height, the last
+// row or column is ignored.
+// FIXME(MatthiasF): this implementation shouldn't be in an interface file
+inline void DownsampleChannelsBy2(const Array3Df &in, Array3Df *out) {
+ int height = in.Height() / 2;
+ int width = in.Width() / 2;
+ int depth = in.Depth();
+
+ out->Resize(height, width, depth);
+
+ // 2x2 box filter downsampling.
+ for (int r = 0; r < height; ++r) {
+ for (int c = 0; c < width; ++c) {
+ for (int k = 0; k < depth; ++k) {
+ (*out)(r, c, k) = (in(2 * r, 2 * c, k) +
+ in(2 * r + 1, 2 * c, k) +
+ in(2 * r, 2 * c + 1, k) +
+ in(2 * r + 1, 2 * c + 1, k)) / 4.0f;
+ }
+ }
+ }
+}
+
+// Sample a region centered at x,y in image with size extending by half_width
+// from x,y. Channels specifies the number of channels to sample from.
+inline void SamplePattern(const FloatImage &image,
+ double x, double y,
+ int half_width,
+ int channels,
+ FloatImage *sampled) {
+ sampled->Resize(2 * half_width + 1, 2 * half_width + 1, channels);
+ for (int r = -half_width; r <= half_width; ++r) {
+ for (int c = -half_width; c <= half_width; ++c) {
+ for (int i = 0; i < channels; ++i) {
+ (*sampled)(r + half_width, c + half_width, i) =
+ SampleLinear(image, y + r, x + c, i);
+ }
+ }
+ }
+}
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_SAMPLE_H_
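A minimal sketch of SamplePattern, which bilinearly samples a square patch around a sub-pixel position (the image size, fill value and sampling position are made up):

#include "libmv/image/image.h"
#include "libmv/image/sample.h"

// Sketch only: extract an 11x11, 3-channel patch centered at (x, y) = (31.3, 18.7).
void ExampleSamplePattern() {
  libmv::FloatImage image(64, 64, 3);
  image.Fill(0.25f);

  libmv::FloatImage patch;
  libmv::SamplePattern(image, /*x=*/31.3, /*y=*/18.7,
                       /*half_width=*/5, /*channels=*/3, &patch);
  // patch is now (2 * 5 + 1) x (2 * 5 + 1) x 3.
}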
diff --git a/intern/libmv/libmv/image/sample_test.cc b/intern/libmv/libmv/image/sample_test.cc
new file mode 100644
index 00000000000..c8a0ce470c2
--- /dev/null
+++ b/intern/libmv/libmv/image/sample_test.cc
@@ -0,0 +1,89 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/sample.h"
+#include "testing/testing.h"
+
+using namespace libmv;
+
+namespace {
+
+TEST(Image, Nearest) {
+ Array3Du image(2, 2);
+ image(0, 0) = 0;
+ image(0, 1) = 1;
+ image(1, 0) = 2;
+ image(1, 1) = 3;
+ EXPECT_EQ(0, SampleNearest(image, -0.4f, -0.4f));
+ EXPECT_EQ(0, SampleNearest(image, 0.4f, 0.4f));
+ EXPECT_EQ(3, SampleNearest(image, 0.6f, 0.6f));
+ EXPECT_EQ(3, SampleNearest(image, 1.4f, 1.4f));
+}
+
+TEST(Image, Linear) {
+ Array3Df image(2, 2);
+ image(0, 0) = 0;
+ image(0, 1) = 1;
+ image(1, 0) = 2;
+ image(1, 1) = 3;
+ EXPECT_EQ(1.5, SampleLinear(image, 0.5, 0.5));
+}
+
+TEST(Image, DownsampleBy2) {
+ Array3Df image(2, 2);
+ image(0, 0) = 0;
+ image(0, 1) = 1;
+ image(1, 0) = 2;
+ image(1, 1) = 3;
+ Array3Df resampled_image;
+ DownsampleChannelsBy2(image, &resampled_image);
+ ASSERT_EQ(1, resampled_image.Height());
+ ASSERT_EQ(1, resampled_image.Width());
+ ASSERT_EQ(1, resampled_image.Depth());
+ EXPECT_FLOAT_EQ(6./4., resampled_image(0, 0));
+}
+
+TEST(Image, DownsampleBy2MultiChannel) {
+ Array3Df image(2, 2, 3);
+ image(0, 0, 0) = 0;
+ image(0, 1, 0) = 1;
+ image(1, 0, 0) = 2;
+ image(1, 1, 0) = 3;
+
+ image(0, 0, 1) = 5;
+ image(0, 1, 1) = 6;
+ image(1, 0, 1) = 7;
+ image(1, 1, 1) = 8;
+
+ image(0, 0, 2) = 9;
+ image(0, 1, 2) = 10;
+ image(1, 0, 2) = 11;
+ image(1, 1, 2) = 12;
+
+ Array3Df resampled_image;
+ DownsampleChannelsBy2(image, &resampled_image);
+ ASSERT_EQ(1, resampled_image.Height());
+ ASSERT_EQ(1, resampled_image.Width());
+ ASSERT_EQ(3, resampled_image.Depth());
+ EXPECT_FLOAT_EQ((0+1+2+3)/4., resampled_image(0, 0, 0));
+ EXPECT_FLOAT_EQ((5+6+7+8)/4., resampled_image(0, 0, 1));
+ EXPECT_FLOAT_EQ((9+10+11+12)/4., resampled_image(0, 0, 2));
+}
+} // namespace
diff --git a/intern/libmv/libmv/image/tuple.h b/intern/libmv/libmv/image/tuple.h
new file mode 100644
index 00000000000..c8dc36f2e18
--- /dev/null
+++ b/intern/libmv/libmv/image/tuple.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_IMAGE_TUPLE_H
+#define LIBMV_IMAGE_TUPLE_H
+
+#include <algorithm>
+
+namespace libmv {
+
+// A vector of elements with fixed length and deep copy semantics.
+template <typename T, int N>
+class Tuple {
+ public:
+ enum { SIZE = N };
+ Tuple() {}
+ Tuple(T initial_value) { Reset(initial_value); }
+
+ template <typename D>
+ Tuple(D *values) { Reset(values); }
+
+ template <typename D>
+ Tuple(const Tuple<D, N> &b) { Reset(b); }
+
+ template <typename D>
+ Tuple& operator=(const Tuple<D, N>& b) {
+ Reset(b);
+ return *this;
+ }
+
+ template <typename D>
+ void Reset(const Tuple<D, N>& b) { Reset(b.Data()); }
+
+ template <typename D>
+ void Reset(D *values) {
+ for (int i = 0;i < N; i++) {
+ data_[i] = T(values[i]);
+ }
+ }
+
+ // Set all tuple values to the same thing.
+ void Reset(T value) {
+ for (int i = 0;i < N; i++) {
+ data_[i] = value;
+ }
+ }
+
+ // Pointer to the first element.
+ T *Data() { return &data_[0]; }
+ const T *Data() const { return &data_[0]; }
+
+ T &operator()(int i) { return data_[i]; }
+ const T &operator()(int i) const { return data_[i]; }
+
+ bool operator==(const Tuple<T, N> &other) const {
+ for (int i = 0; i < N; ++i) {
+ if ((*this)(i) != other(i)) {
+ return false;
+ }
+ }
+ return true;
+ }
+ bool operator!=(const Tuple<T, N> &other) const {
+ return !(*this == other);
+ }
+
+ private:
+ T data_[N];
+};
+
+} // namespace libmv
+
+#endif // LIBMV_IMAGE_TUPLE_H
diff --git a/intern/libmv/libmv/image/tuple_test.cc b/intern/libmv/libmv/image/tuple_test.cc
new file mode 100644
index 00000000000..df44e5638b5
--- /dev/null
+++ b/intern/libmv/libmv/image/tuple_test.cc
@@ -0,0 +1,83 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/image/tuple.h"
+#include "testing/testing.h"
+
+#include <algorithm>
+
+using libmv::Tuple;
+
+namespace {
+
+TEST(Tuple, InitConstantValue) {
+ Tuple<int, 3> t(5);
+ EXPECT_EQ(t(0), 5);
+  EXPECT_EQ(t(1), 5);
+  EXPECT_EQ(t(2), 5);
+}
+
+TEST(Tuple, InitFromPointer) {
+ float vals[3] = {1.0f, 2.0f, 3.0f};
+
+ Tuple<float, 3> t(vals);
+ for (int i = 0; i < 3; i++)
+ EXPECT_EQ(t(i), vals[i]);
+
+ Tuple<int, 3> b(t);
+ EXPECT_EQ(b(0), int(vals[0]));
+ EXPECT_EQ(b(1), int(vals[1]));
+ EXPECT_EQ(b(2), int(vals[2]));
+}
+
+TEST(Tuple, Swap) {
+ unsigned char vala[3] = {1, 2, 3};
+ unsigned char valb[3] = {4, 5, 6};
+
+ Tuple<unsigned char, 3> a(vala);
+ Tuple<unsigned char, 3> b(valb);
+
+ std::swap(a, b);
+
+ EXPECT_EQ(a(0), int(valb[0]));
+ EXPECT_EQ(a(1), int(valb[1]));
+ EXPECT_EQ(a(2), int(valb[2]));
+ EXPECT_EQ(b(0), int(vala[0]));
+ EXPECT_EQ(b(1), int(vala[1]));
+ EXPECT_EQ(b(2), int(vala[2]));
+}
+
+TEST(Tuple, IsEqualOperator) {
+ Tuple<int, 3> a;
+ a(0) = 1;
+ a(1) = 2;
+ a(2) = 3;
+ Tuple<int, 3> b;
+ b(0) = 1;
+ b(1) = 2;
+ b(2) = 3;
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+ b(1) = 5;
+ EXPECT_TRUE(a != b);
+ EXPECT_FALSE(a == b);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/logging/logging.h b/intern/libmv/libmv/logging/logging.h
new file mode 100644
index 00000000000..776d9d52f7a
--- /dev/null
+++ b/intern/libmv/libmv/logging/logging.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_LOGGING_LOGGING_H
+#define LIBMV_LOGGING_LOGGING_H
+
+#include <glog/logging.h>
+
+#define LG LOG(INFO)
+#define V0 LOG(INFO)
+#define V1 LOG(INFO)
+#define V2 LOG(INFO)
+
+#endif // LIBMV_LOGGING_LOGGING_H
diff --git a/intern/libmv/libmv/multiview/conditioning.cc b/intern/libmv/libmv/multiview/conditioning.cc
new file mode 100644
index 00000000000..0afbf119ea3
--- /dev/null
+++ b/intern/libmv/libmv/multiview/conditioning.cc
@@ -0,0 +1,99 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+
+// HZ 4.4.4 pag.109: Point conditioning (non isotropic)
+void PreconditionerFromPoints(const Mat &points, Mat3 *T) {
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(points, &mean, &variance);
+
+ double xfactor = sqrt(2.0 / variance(0));
+ double yfactor = sqrt(2.0 / variance(1));
+
+  // If the variance is equal to 0.0, set the scaling factor to identity;
+  // otherwise the division by zero would produce NaN values.
+ if (variance(0) < 1e-8)
+ xfactor = mean(0) = 1.0;
+ if (variance(1) < 1e-8)
+ yfactor = mean(1) = 1.0;
+
+ *T << xfactor, 0, -xfactor * mean(0),
+ 0, yfactor, -yfactor * mean(1),
+ 0, 0, 1;
+}
+// HZ 4.4.4 pag.107: Point conditioning (isotropic)
+void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T) {
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(points, &mean, &variance);
+
+ double var_norm = variance.norm();
+ double factor = sqrt(2.0 / var_norm);
+
+  // If the variance is equal to 0.0, set the scaling factor to identity;
+  // otherwise the division by zero would produce NaN values.
+ if (var_norm < 1e-8) {
+ factor = 1.0;
+ mean.setOnes();
+ }
+
+ *T << factor, 0, -factor * mean(0),
+ 0, factor, -factor * mean(1),
+ 0, 0, 1;
+}
+
+void ApplyTransformationToPoints(const Mat &points,
+ const Mat3 &T,
+ Mat *transformed_points) {
+ int n = points.cols();
+ transformed_points->resize(2, n);
+ Mat3X p(3, n);
+ EuclideanToHomogeneous(points, &p);
+ p = T * p;
+ HomogeneousToEuclidean(p, transformed_points);
+}
+
+void NormalizePoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T) {
+ PreconditionerFromPoints(points, T);
+ ApplyTransformationToPoints(points, *T, normalized_points);
+}
+
+void NormalizeIsotropicPoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T) {
+ IsotropicPreconditionerFromPoints(points, T);
+ ApplyTransformationToPoints(points, *T, normalized_points);
+}
+
+// Denormalize the results. See HZ page 109.
+void UnnormalizerT::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+ *H = T2.transpose() * (*H) * T1;
+}
+
+void UnnormalizerI::Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H) {
+ *H = T2.inverse() * (*H) * T1;
+}
+
+} // namespace libmv
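A minimal sketch of the conditioning entry points above, using the Eigen-backed Mat/Mat3 typedefs from libmv/numeric/numeric.h (the point coordinates are made up):

#include "libmv/multiview/conditioning.h"
#include "libmv/numeric/numeric.h"

// Sketch only: condition a set of 2D points and keep the transform T so the
// result can later be undone with UnnormalizerT or UnnormalizerI.
void ExampleNormalizePoints() {
  libmv::Mat points(2, 4);  // one Euclidean 2D point per column
  points << 0, 10, 10, 0,
            0,  0,  8, 8;

  libmv::Mat normalized;
  libmv::Mat3 T;
  libmv::NormalizePoints(points, &normalized, &T);
  // normalized now holds the points mapped by T, centered around the origin.
}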
diff --git a/intern/libmv/libmv/multiview/conditioning.h b/intern/libmv/libmv/multiview/conditioning.h
new file mode 100644
index 00000000000..8f3e3a76070
--- /dev/null
+++ b/intern/libmv/libmv/multiview/conditioning.h
@@ -0,0 +1,59 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_CONDITIONNING_H_
+#define LIBMV_MULTIVIEW_CONDITIONNING_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// Point conditioning (non isotropic)
+void PreconditionerFromPoints(const Mat &points, Mat3 *T);
+// Point conditioning (isotropic)
+void IsotropicPreconditionerFromPoints(const Mat &points, Mat3 *T);
+
+void ApplyTransformationToPoints(const Mat &points,
+ const Mat3 &T,
+ Mat *transformed_points);
+
+void NormalizePoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T);
+
+void NormalizeIsotropicPoints(const Mat &points,
+ Mat *normalized_points,
+ Mat3 *T);
+
+/// Use inverse for unnormalize
+struct UnnormalizerI {
+ // Denormalize the results. See HZ page 109.
+ static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+};
+
+/// Use transpose for unnormalize
+struct UnnormalizerT {
+ // Denormalize the results. See HZ page 109.
+ static void Unnormalize(const Mat3 &T1, const Mat3 &T2, Mat3 *H);
+};
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_CONDITIONNING_H_
diff --git a/intern/libmv/libmv/multiview/euclidean_resection.cc b/intern/libmv/libmv/multiview/euclidean_resection.cc
new file mode 100644
index 00000000000..245b027fb7c
--- /dev/null
+++ b/intern/libmv/libmv/multiview/euclidean_resection.cc
@@ -0,0 +1,774 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/euclidean_resection.h"
+
+#include <cmath>
+#include <limits>
+
+#include <Eigen/SVD>
+#include <Eigen/Geometry>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+namespace euclidean_resection {
+
+typedef unsigned int uint;
+
+bool EuclideanResection(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method) {
+ switch (method) {
+ case RESECTION_ANSAR_DANIILIDIS:
+ EuclideanResectionAnsarDaniilidis(x_camera, X_world, R, t);
+ break;
+ case RESECTION_EPNP:
+ return EuclideanResectionEPnP(x_camera, X_world, R, t);
+ break;
+ case RESECTION_PPNP:
+ return EuclideanResectionPPnP(x_camera, X_world, R, t);
+ break;
+ default:
+ LOG(FATAL) << "Unknown resection method.";
+ }
+ return false;
+}
+
+bool EuclideanResection(const Mat &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method) {
+ CHECK(x_image.rows() == 2 || x_image.rows() == 3)
+ << "Invalid size for x_image: "
+ << x_image.rows() << "x" << x_image.cols();
+
+ Mat2X x_camera;
+ if (x_image.rows() == 2) {
+ EuclideanToNormalizedCamera(x_image, K, &x_camera);
+ } else if (x_image.rows() == 3) {
+ HomogeneousToNormalizedCamera(x_image, K, &x_camera);
+ }
+ return EuclideanResection(x_camera, X_world, R, t, method);
+}
+
+void AbsoluteOrientation(const Mat3X &X,
+ const Mat3X &Xp,
+ Mat3 *R,
+ Vec3 *t) {
+ int num_points = X.cols();
+ Vec3 C = X.rowwise().sum() / num_points; // Centroid of X.
+ Vec3 Cp = Xp.rowwise().sum() / num_points; // Centroid of Xp.
+
+ // Normalize the two point sets.
+ Mat3X Xn(3, num_points), Xpn(3, num_points);
+ for (int i = 0; i < num_points; ++i) {
+ Xn.col(i) = X.col(i) - C;
+ Xpn.col(i) = Xp.col(i) - Cp;
+ }
+
+ // Construct the N matrix (pg. 635).
+ double Sxx = Xn.row(0).dot(Xpn.row(0));
+ double Syy = Xn.row(1).dot(Xpn.row(1));
+ double Szz = Xn.row(2).dot(Xpn.row(2));
+ double Sxy = Xn.row(0).dot(Xpn.row(1));
+ double Syx = Xn.row(1).dot(Xpn.row(0));
+ double Sxz = Xn.row(0).dot(Xpn.row(2));
+ double Szx = Xn.row(2).dot(Xpn.row(0));
+ double Syz = Xn.row(1).dot(Xpn.row(2));
+ double Szy = Xn.row(2).dot(Xpn.row(1));
+
+ Mat4 N;
+ N << Sxx + Syy + Szz, Syz - Szy, Szx - Sxz, Sxy - Syx,
+ Syz - Szy, Sxx - Syy - Szz, Sxy + Syx, Szx + Sxz,
+ Szx - Sxz, Sxy + Syx, -Sxx + Syy - Szz, Syz + Szy,
+ Sxy - Syx, Szx + Sxz, Syz + Szy, -Sxx - Syy + Szz;
+
+ // Find the unit quaternion q that maximizes qNq. It is the eigenvector
+  // corresponding to the largest eigenvalue.
+ Vec4 q = N.jacobiSvd(Eigen::ComputeFullU).matrixU().col(0);
+
+ // Retrieve the 3x3 rotation matrix.
+ Vec4 qq = q.array() * q.array();
+ double q0q1 = q(0) * q(1);
+ double q0q2 = q(0) * q(2);
+ double q0q3 = q(0) * q(3);
+ double q1q2 = q(1) * q(2);
+ double q1q3 = q(1) * q(3);
+ double q2q3 = q(2) * q(3);
+
+ (*R) << qq(0) + qq(1) - qq(2) - qq(3),
+ 2 * (q1q2 - q0q3),
+ 2 * (q1q3 + q0q2),
+ 2 * (q1q2+ q0q3),
+ qq(0) - qq(1) + qq(2) - qq(3),
+ 2 * (q2q3 - q0q1),
+ 2 * (q1q3 - q0q2),
+ 2 * (q2q3 + q0q1),
+ qq(0) - qq(1) - qq(2) + qq(3);
+
+ // Fix the handedness of the R matrix.
+ if (R->determinant() < 0) {
+ R->row(2) = -R->row(2);
+ }
+ // Compute the final translation.
+ *t = Cp - *R * C;
+}
+
+// Convert i and j indices of the original variables into their quadratic
+// permutation single index. It follows that t_ij = t_ji.
+static int IJToPointIndex(int i, int j, int num_points) {
+ // Always make sure that j is bigger than i. This handles t_ij = t_ji.
+ if (j < i) {
+ std::swap(i, j);
+ }
+ int idx;
+ int num_permutation_rows = num_points * (num_points - 1) / 2;
+
+ // All t_ii's are located at the end of the t vector after all t_ij's.
+ if (j == i) {
+ idx = num_permutation_rows + i;
+ } else {
+ int offset = (num_points - i - 1) * (num_points - i) / 2;
+ idx = (num_permutation_rows - offset + j - i - 1);
+ }
+ return idx;
+};
+
+// Convert i and j indexes of the solution for lambda to their linear indexes.
+static int IJToIndex(int i, int j, int num_lambda) {
+ if (j < i) {
+ std::swap(i, j);
+ }
+ int A = num_lambda * (num_lambda + 1) / 2;
+ int B = num_lambda - i;
+ int C = B * (B + 1) / 2;
+ int idx = A - C + j - i;
+ return idx;
+};
+
+static int Sign(double value) {
+ return (value < 0) ? -1 : 1;
+};
+
+// Organizes a square matrix into a single row constraint on the elements of
+// Lambda to create the constraints in equation (5) in "Linear Pose Estimation
+// from Points or Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no.
+// 5.
+static Vec MatrixToConstraint(const Mat &A,
+ int num_k_columns,
+ int num_lambda) {
+ Vec C(num_k_columns);
+ C.setZero();
+ int idx = 0;
+ for (int i = 0; i < num_lambda; ++i) {
+ for (int j = i; j < num_lambda; ++j) {
+ C(idx) = A(i, j);
+ if (i != j) {
+ C(idx) += A(j, i);
+ }
+ ++idx;
+ }
+ }
+ return C;
+}
+
+// Normalizes the columns of vectors.
+static void NormalizeColumnVectors(Mat3X *vectors) {
+ int num_columns = vectors->cols();
+ for (int i = 0; i < num_columns; ++i) {
+ vectors->col(i).normalize();
+ }
+}
+
+void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R,
+ Vec3 *t) {
+ CHECK(x_camera.cols() == X_world.cols());
+ CHECK(x_camera.cols() > 3);
+
+ int num_points = x_camera.cols();
+
+ // Copy the normalized camera coords into 3 vectors and normalize them so
+ // that they are unit vectors from the camera center.
+ Mat3X x_camera_unit(3, num_points);
+ x_camera_unit.block(0, 0, 2, num_points) = x_camera;
+ x_camera_unit.row(2).setOnes();
+ NormalizeColumnVectors(&x_camera_unit);
+
+ int num_m_rows = num_points * (num_points - 1) / 2;
+ int num_tt_variables = num_points * (num_points + 1) / 2;
+ int num_m_columns = num_tt_variables + 1;
+ Mat M(num_m_columns, num_m_columns);
+ M.setZero();
+ Matu ij_index(num_tt_variables, 2);
+
+ // Create the constraint equations for the t_ij variables (7) and arrange
+ // them into the M matrix (8). Also store the initial (i, j) indices.
+ int row = 0;
+ for (int i = 0; i < num_points; ++i) {
+ for (int j = i+1; j < num_points; ++j) {
+ M(row, row) = -2 * x_camera_unit.col(i).dot(x_camera_unit.col(j));
+ M(row, num_m_rows + i) = x_camera_unit.col(i).dot(x_camera_unit.col(i));
+ M(row, num_m_rows + j) = x_camera_unit.col(j).dot(x_camera_unit.col(j));
+ Vec3 Xdiff = X_world.col(i) - X_world.col(j);
+ double center_to_point_distance = Xdiff.norm();
+ M(row, num_m_columns - 1) =
+ - center_to_point_distance * center_to_point_distance;
+ ij_index(row, 0) = i;
+ ij_index(row, 1) = j;
+ ++row;
+ }
+ ij_index(i + num_m_rows, 0) = i;
+ ij_index(i + num_m_rows, 1) = i;
+ }
+
+ int num_lambda = num_points + 1; // Dimension of the null space of M.
+ Mat V = M.jacobiSvd(Eigen::ComputeFullV).matrixV().block(0,
+ num_m_rows,
+ num_m_columns,
+ num_lambda);
+
+ // TODO(vess): The number of constraint equations in K (num_k_rows) must be
+ // (num_points + 1) * (num_points + 2)/2. This creates a performance issue
+ // for more than 4 points. It is fine for 4 points at the moment with 18
+ // instead of 15 equations.
+ int num_k_rows = num_m_rows + num_points *
+ (num_points*(num_points-1)/2 - num_points+1);
+ int num_k_columns = num_lambda * (num_lambda + 1) / 2;
+ Mat K(num_k_rows, num_k_columns);
+ K.setZero();
+
+ // Construct the first part of the K matrix corresponding to (t_ii, t_jk) for
+ // i != j.
+ int counter_k_row = 0;
+ for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
+ for (int idx2 = 0; idx2 < num_m_rows; ++idx2) {
+ unsigned int i = ij_index(idx1, 0);
+ unsigned int j = ij_index(idx2, 0);
+ unsigned int k = ij_index(idx2, 1);
+
+ if (i != j && i != k) {
+ int idx3 = IJToPointIndex(i, j, num_points);
+ int idx4 = IJToPointIndex(i, k, num_points);
+
+ K.row(counter_k_row) =
+ MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
+ V.row(idx3).transpose() * V.row(idx4),
+ num_k_columns,
+ num_lambda);
+ ++counter_k_row;
+ }
+ }
+ }
+
+ // Construct the second part of the K matrix corresponding to (t_ii,t_jk) for
+ // j==k.
+ for (int idx1 = num_m_rows; idx1 < num_tt_variables; ++idx1) {
+ for (int idx2 = idx1 + 1; idx2 < num_tt_variables; ++idx2) {
+ unsigned int i = ij_index(idx1, 0);
+ unsigned int j = ij_index(idx2, 0);
+ unsigned int k = ij_index(idx2, 1);
+
+ int idx3 = IJToPointIndex(i, j, num_points);
+ int idx4 = IJToPointIndex(i, k, num_points);
+
+ K.row(counter_k_row) =
+ MatrixToConstraint(V.row(idx1).transpose() * V.row(idx2)-
+ V.row(idx3).transpose() * V.row(idx4),
+ num_k_columns,
+ num_lambda);
+ ++counter_k_row;
+ }
+ }
+ Vec L_sq = K.jacobiSvd(Eigen::ComputeFullV).matrixV().col(num_k_columns - 1);
+
+ // Pivot on the largest element for numerical stability. Afterwards recover
+ // the sign of the lambda solution.
+ double max_L_sq_value = fabs(L_sq(IJToIndex(0, 0, num_lambda)));
+ int max_L_sq_index = 1;
+ for (int i = 1; i < num_lambda; ++i) {
+ double abs_sq_value = fabs(L_sq(IJToIndex(i, i, num_lambda)));
+ if (max_L_sq_value < abs_sq_value) {
+ max_L_sq_value = abs_sq_value;
+ max_L_sq_index = i;
+ }
+ }
+ // Ensure positiveness of the largest value corresponding to lambda_ii.
+ L_sq = L_sq * Sign(L_sq(IJToIndex(max_L_sq_index,
+ max_L_sq_index,
+ num_lambda)));
+
+ Vec L(num_lambda);
+ L(max_L_sq_index) = sqrt(L_sq(IJToIndex(max_L_sq_index,
+ max_L_sq_index,
+ num_lambda)));
+
+ for (int i = 0; i < num_lambda; ++i) {
+ if (i != max_L_sq_index) {
+ L(i) = L_sq(IJToIndex(max_L_sq_index, i, num_lambda)) / L(max_L_sq_index);
+ }
+ }
+
+ // Correct the scale using the fact that the last constraint is equal to 1.
+ L = L / (V.row(num_m_columns - 1).dot(L));
+ Vec X = V * L;
+
+ // Recover the distances from the camera center to the 3D points Q.
+ Vec d(num_points);
+ d.setZero();
+ for (int c_point = num_m_rows; c_point < num_tt_variables; ++c_point) {
+ d(c_point - num_m_rows) = sqrt(X(c_point));
+ }
+
+ // Create the 3D points in the camera system.
+ Mat X_cam(3, num_points);
+ for (int c_point = 0; c_point < num_points; ++c_point) {
+ X_cam.col(c_point) = d(c_point) * x_camera_unit.col(c_point);
+ }
+ // Recover the camera translation and rotation.
+ AbsoluteOrientation(X_world, X_cam, R, t);
+}
+
+// Selects 4 virtual control points using mean and PCA.
+static void SelectControlPoints(const Mat3X &X_world,
+ Mat *X_centered,
+ Mat34 *X_control_points) {
+ size_t num_points = X_world.cols();
+
+ // The first virtual control point, C0, is the centroid.
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(X_world, &mean, &variance);
+ X_control_points->col(0) = mean;
+
+ // Computes PCA
+ X_centered->resize(3, num_points);
+ for (size_t c = 0; c < num_points; c++) {
+ X_centered->col(c) = X_world.col(c) - mean;
+ }
+ Mat3 X_centered_sq = (*X_centered) * X_centered->transpose();
+ Eigen::JacobiSVD<Mat3> X_centered_sq_svd(X_centered_sq, Eigen::ComputeFullU);
+ Vec3 w = X_centered_sq_svd.singularValues();
+ Mat3 u = X_centered_sq_svd.matrixU();
+ for (size_t c = 0; c < 3; c++) {
+ double k = sqrt(w(c) / num_points);
+ X_control_points->col(c + 1) = mean + k * u.col(c);
+ }
+}
+
+// Computes the barycentric coordinates for all real points
+static void ComputeBarycentricCoordinates(const Mat3X &X_world_centered,
+ const Mat34 &X_control_points,
+ Mat4X *alphas) {
+ size_t num_points = X_world_centered.cols();
+ Mat3 C2;
+ for (size_t c = 1; c < 4; c++) {
+ C2.col(c-1) = X_control_points.col(c) - X_control_points.col(0);
+ }
+
+ Mat3 C2inv = C2.inverse();
+ Mat3X a = C2inv * X_world_centered;
+
+ alphas->resize(4, num_points);
+ alphas->setZero();
+ alphas->block(1, 0, 3, num_points) = a;
+ for (size_t c = 0; c < num_points; c++) {
+ (*alphas)(0, c) = 1.0 - alphas->col(c).sum();
+ }
+}
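+
+// The barycentric coordinates computed above satisfy, for every point i (with
+// C_0..C_3 being the virtual control points):
+//
+//   X_world.col(i) = sum_j alphas(j, i) * C_j,   with   sum_j alphas(j, i) = 1.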
+
+// Estimates the coordinates of all real points in the camera coordinate frame
+static void ComputePointsCoordinatesInCameraFrame(
+ const Mat4X &alphas,
+ const Vec4 &betas,
+ const Eigen::Matrix<double, 12, 12> &U,
+ Mat3X *X_camera) {
+ size_t num_points = alphas.cols();
+
+ // Estimates the control points in the camera reference frame.
+ Mat34 C2b; C2b.setZero();
+ for (size_t cu = 0; cu < 4; cu++) {
+ for (size_t c = 0; c < 4; c++) {
+ C2b.col(c) += betas(cu) * U.block(11 - cu, c * 3, 1, 3).transpose();
+ }
+ }
+
+ // Estimates the 3D points in the camera reference frame
+ X_camera->resize(3, num_points);
+ for (size_t c = 0; c < num_points; c++) {
+ X_camera->col(c) = C2b * alphas.col(c);
+ }
+
+ // Check the sign of the z coordinate of the points (should be positive)
+ uint num_z_neg = 0;
+ for (size_t i = 0; i < X_camera->cols(); ++i) {
+ if ((*X_camera)(2, i) < 0) {
+ num_z_neg++;
+ }
+ }
+
+ // If more than 50% of z are negative, we change the signs
+ if (num_z_neg > 0.5 * X_camera->cols()) {
+ C2b = -C2b;
+ *X_camera = -(*X_camera);
+ }
+}
+
+bool EuclideanResectionEPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t) {
+ CHECK(x_camera.cols() == X_world.cols());
+ CHECK(x_camera.cols() > 3);
+ size_t num_points = X_world.cols();
+
+ // Select the control points.
+ Mat34 X_control_points;
+ Mat X_centered;
+ SelectControlPoints(X_world, &X_centered, &X_control_points);
+
+ // Compute the barycentric coordinates.
+ Mat4X alphas(4, num_points);
+ ComputeBarycentricCoordinates(X_centered, X_control_points, &alphas);
+
+ // Estimates the M matrix with the barycentric coordinates
+ Mat M(2 * num_points, 12);
+ Eigen::Matrix<double, 2, 12> sub_M;
+ for (size_t c = 0; c < num_points; c++) {
+ double a0 = alphas(0, c);
+ double a1 = alphas(1, c);
+ double a2 = alphas(2, c);
+ double a3 = alphas(3, c);
+ double ui = x_camera(0, c);
+ double vi = x_camera(1, c);
+ M.block(2*c, 0, 2, 12) <<
+ a0, 0, a0*(-ui), a1, 0, a1*(-ui), a2, 0, a2*(-ui), a3, 0, a3*(-ui),
+ 0, a0, a0*(-vi), 0, a1, a1*(-vi), 0, a2, a2*(-vi), 0, a3, a3*(-vi);
+ }
+
+ // TODO(julien): Avoid the transpose by rewriting the u2.block() calls.
+ Eigen::JacobiSVD<Mat> MtMsvd(M.transpose()*M, Eigen::ComputeFullU);
+ Eigen::Matrix<double, 12, 12> u2 = MtMsvd.matrixU().transpose();
+
+ // Estimate the L matrix.
+ Eigen::Matrix<double, 6, 3> dv1;
+ Eigen::Matrix<double, 6, 3> dv2;
+ Eigen::Matrix<double, 6, 3> dv3;
+ Eigen::Matrix<double, 6, 3> dv4;
+
+ dv1.row(0) = u2.block(11, 0, 1, 3) - u2.block(11, 3, 1, 3);
+ dv1.row(1) = u2.block(11, 0, 1, 3) - u2.block(11, 6, 1, 3);
+ dv1.row(2) = u2.block(11, 0, 1, 3) - u2.block(11, 9, 1, 3);
+ dv1.row(3) = u2.block(11, 3, 1, 3) - u2.block(11, 6, 1, 3);
+ dv1.row(4) = u2.block(11, 3, 1, 3) - u2.block(11, 9, 1, 3);
+ dv1.row(5) = u2.block(11, 6, 1, 3) - u2.block(11, 9, 1, 3);
+ dv2.row(0) = u2.block(10, 0, 1, 3) - u2.block(10, 3, 1, 3);
+ dv2.row(1) = u2.block(10, 0, 1, 3) - u2.block(10, 6, 1, 3);
+ dv2.row(2) = u2.block(10, 0, 1, 3) - u2.block(10, 9, 1, 3);
+ dv2.row(3) = u2.block(10, 3, 1, 3) - u2.block(10, 6, 1, 3);
+ dv2.row(4) = u2.block(10, 3, 1, 3) - u2.block(10, 9, 1, 3);
+ dv2.row(5) = u2.block(10, 6, 1, 3) - u2.block(10, 9, 1, 3);
+ dv3.row(0) = u2.block(9, 0, 1, 3) - u2.block(9, 3, 1, 3);
+ dv3.row(1) = u2.block(9, 0, 1, 3) - u2.block(9, 6, 1, 3);
+ dv3.row(2) = u2.block(9, 0, 1, 3) - u2.block(9, 9, 1, 3);
+ dv3.row(3) = u2.block(9, 3, 1, 3) - u2.block(9, 6, 1, 3);
+ dv3.row(4) = u2.block(9, 3, 1, 3) - u2.block(9, 9, 1, 3);
+ dv3.row(5) = u2.block(9, 6, 1, 3) - u2.block(9, 9, 1, 3);
+ dv4.row(0) = u2.block(8, 0, 1, 3) - u2.block(8, 3, 1, 3);
+ dv4.row(1) = u2.block(8, 0, 1, 3) - u2.block(8, 6, 1, 3);
+ dv4.row(2) = u2.block(8, 0, 1, 3) - u2.block(8, 9, 1, 3);
+ dv4.row(3) = u2.block(8, 3, 1, 3) - u2.block(8, 6, 1, 3);
+ dv4.row(4) = u2.block(8, 3, 1, 3) - u2.block(8, 9, 1, 3);
+ dv4.row(5) = u2.block(8, 6, 1, 3) - u2.block(8, 9, 1, 3);
+
+ Eigen::Matrix<double, 6, 10> L;
+ for (size_t r = 0; r < 6; r++) {
+ L.row(r) << dv1.row(r).dot(dv1.row(r)),
+ 2.0 * dv1.row(r).dot(dv2.row(r)),
+ dv2.row(r).dot(dv2.row(r)),
+ 2.0 * dv1.row(r).dot(dv3.row(r)),
+ 2.0 * dv2.row(r).dot(dv3.row(r)),
+ dv3.row(r).dot(dv3.row(r)),
+ 2.0 * dv1.row(r).dot(dv4.row(r)),
+ 2.0 * dv2.row(r).dot(dv4.row(r)),
+ 2.0 * dv3.row(r).dot(dv4.row(r)),
+ dv4.row(r).dot(dv4.row(r));
+ }
+ Vec6 rho;
+ rho << (X_control_points.col(0) - X_control_points.col(1)).squaredNorm(),
+ (X_control_points.col(0) - X_control_points.col(2)).squaredNorm(),
+ (X_control_points.col(0) - X_control_points.col(3)).squaredNorm(),
+ (X_control_points.col(1) - X_control_points.col(2)).squaredNorm(),
+ (X_control_points.col(1) - X_control_points.col(3)).squaredNorm(),
+ (X_control_points.col(2) - X_control_points.col(3)).squaredNorm();
+
+ // There are three possible solutions based on the three approximations of L
+ // (betas). Below, each one is solved for then the best one is chosen.
+ Mat3X X_camera;
+ Mat3 K; K.setIdentity();
+ vector<Mat3> Rs(3);
+ vector<Vec3> ts(3);
+ Vec rmse(3);
+
+ // At one point this threshold was 1e-3, and caused no end of problems in
+ // Blender by causing frames to not resect when they would have worked fine.
+ // When the resect failed, the projective followup is run leading to worse
+ // results, and often the dreaded "flipping" where objects get flipped
+ // between frames. Instead, disable the check for now, always succeeding. The
+ // ultimate check is always reprojection error anyway.
+ //
+ // TODO(keir): Decide if setting this to infinity, effectively disabling the
+ // check, is the right approach. So far this seems the case.
+ double kSuccessThreshold = std::numeric_limits<double>::max();
+
+ // Find the first possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_1 = [b00 b01 b02 b03]
+ Vec4 betas = Vec4::Zero();
+ Eigen::Matrix<double, 6, 4> l_6x4;
+ for (size_t r = 0; r < 6; r++) {
+ l_6x4.row(r) << L(r, 0), L(r, 1), L(r, 3), L(r, 6);
+ }
+ Eigen::JacobiSVD<Mat> svd_of_l4(l_6x4,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec4 b4 = svd_of_l4.solve(rho);
+ if ((l_6x4 * b4).isApprox(rho, kSuccessThreshold)) {
+ if (b4(0) < 0) {
+ b4 = -b4;
+ }
+ b4(0) = std::sqrt(b4(0));
+ betas << b4(0), b4(1) / b4(0), b4(2) / b4(0), b4(3) / b4(0);
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[0], &ts[0]);
+ rmse(0) = RootMeanSquareError(x_camera, X_world, K, Rs[0], ts[0]);
+ } else {
+ LOG(ERROR) << "First approximation of beta not good enough.";
+ ts[0].setZero();
+ rmse(0) = std::numeric_limits<double>::max();
+ }
+
+ // Find the second possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_2 = [b00 b01 b11]
+ betas.setZero();
+ Eigen::Matrix<double, 6, 3> l_6x3;
+ l_6x3 = L.block(0, 0, 6, 3);
+ Eigen::JacobiSVD<Mat> svdOfL3(l_6x3,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 b3 = svdOfL3.solve(rho);
+ VLOG(2) << " rho = " << rho;
+ VLOG(2) << " l_6x3 * b3 = " << l_6x3 * b3;
+ if ((l_6x3 * b3).isApprox(rho, kSuccessThreshold)) {
+ if (b3(0) < 0) {
+ betas(0) = std::sqrt(-b3(0));
+ betas(1) = (b3(2) < 0) ? std::sqrt(-b3(2)) : 0;
+ } else {
+ betas(0) = std::sqrt(b3(0));
+ betas(1) = (b3(2) > 0) ? std::sqrt(b3(2)) : 0;
+ }
+ if (b3(1) < 0) {
+ betas(0) = -betas(0);
+ }
+ betas(2) = 0;
+ betas(3) = 0;
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[1], &ts[1]);
+ rmse(1) = RootMeanSquareError(x_camera, X_world, K, Rs[1], ts[1]);
+ } else {
+ LOG(ERROR) << "Second approximation of beta not good enough.";
+ ts[1].setZero();
+ rmse(1) = std::numeric_limits<double>::max();
+ }
+
+ // Find the third possible solution for R, t corresponding to:
+ // Betas = [b00 b01 b11 b02 b12 b22 b03 b13 b23 b33]
+ // Betas_approx_3 = [b00 b01 b11 b02 b12]
+ betas.setZero();
+ Eigen::Matrix<double, 6, 5> l_6x5;
+ l_6x5 = L.block(0, 0, 6, 5);
+ Eigen::JacobiSVD<Mat> svdOfL5(l_6x5,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec5 b5 = svdOfL5.solve(rho);
+ if ((l_6x5 * b5).isApprox(rho, kSuccessThreshold)) {
+ if (b5(0) < 0) {
+ betas(0) = std::sqrt(-b5(0));
+ if (b5(2) < 0) {
+ betas(1) = std::sqrt(-b5(2));
+ } else {
+ b5(2) = 0;
+ }
+ } else {
+ betas(0) = std::sqrt(b5(0));
+ if (b5(2) > 0) {
+ betas(1) = std::sqrt(b5(2));
+ } else {
+ b5(2) = 0;
+ }
+ }
+ if (b5(1) < 0) {
+ betas(0) = -betas(0);
+ }
+ betas(2) = b5(3) / betas(0);
+ betas(3) = 0;
+ ComputePointsCoordinatesInCameraFrame(alphas, betas, u2, &X_camera);
+ AbsoluteOrientation(X_world, X_camera, &Rs[2], &ts[2]);
+ rmse(2) = RootMeanSquareError(x_camera, X_world, K, Rs[2], ts[2]);
+ } else {
+ LOG(ERROR) << "Third approximation of beta not good enough.";
+ ts[2].setZero();
+ rmse(2) = std::numeric_limits<double>::max();
+ }
+
+ // Finally, with all three solutions, select the (R, t) with the best RMSE.
+ VLOG(2) << "RMSE for solution 0: " << rmse(0);
+ VLOG(2) << "RMSE for solution 1: " << rmse(1);
+ VLOG(2) << "RMSE for solution 2: " << rmse(2);
+ size_t n = 0;
+ if (rmse(1) < rmse(0)) {
+ n = 1;
+ }
+ if (rmse(2) < rmse(n)) {
+ n = 2;
+ }
+ if (rmse(n) == std::numeric_limits<double>::max()) {
+ LOG(ERROR) << "All three possibilities failed. Reporting failure.";
+ return false;
+ }
+
+ VLOG(1) << "RMSE for best solution #" << n << ": " << rmse(n);
+ *R = Rs[n];
+ *t = ts[n];
+
+ // TODO(julien): Improve the solutions with non-linear refinement.
+ return true;
+}
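+
+// Note: RESECTION_EPNP is the default ResectionMethod of the EuclideanResection()
+// wrappers declared in euclidean_resection.h, so a minimal call sketch that
+// exercises this solver (hypothetical inputs) is simply:
+//
+//   Mat3 R; Vec3 t;
+//   if (!EuclideanResection(x_camera, X_world, &R, &t)) {
+//     // Resection failed; fall back or report the failure.
+//   }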
+
+/*
+
+ Straight from the paper:
+ http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
+
+ function [R T] = ppnp(P,S,tol)
+ % input
+ % P : matrix (nx3) image coordinates in camera reference [u v 1]
+ % S : matrix (nx3) coordinates in world reference [X Y Z]
+ % tol: exit threshold
+ %
+ % output
+ % R : matrix (3x3) rotation (world-to-camera)
+ % T : vector (3x1) translation (world-to-camera)
+ %
+ n = size(P,1);
+ Z = zeros(n);
+ e = ones(n,1);
+ A = eye(n)-((e*e')./n);
+ II = e./n;
+ err = +Inf;
+ E_old = 1000*ones(n,3);
+ while err>tol
+ [U,~,V] = svd(P'*Z*A*S);
+ VT = V';
+ R=U*[1 0 0; 0 1 0; 0 0 det(U*VT)]*VT;
+ PR = P*R;
+ c = (S-Z*PR)'*II;
+ Y = S-e*c';
+ Zmindiag = diag(PR*Y')./(sum(P.*P,2));
+ Zmindiag(Zmindiag<0)=0;
+ Z = diag(Zmindiag);
+ E = Y-Z*PR;
+ err = norm(E-E_old,'fro');
+ E_old = E;
+ end
+ T = -R*c;
+ end
+
+ */
+// TODO(keir): Re-do all the variable names and add comments matching the paper.
+// This implementation has too much of the terseness of the original. On the
+// other hand, it did work on the first try.
+bool EuclideanResectionPPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t) {
+ int n = x_camera.cols();
+ Mat Z = Mat::Zero(n, n);
+ Vec e = Vec::Ones(n);
+ Mat A = Mat::Identity(n, n) - (e * e.transpose() / n);
+ Vec II = e / n;
+
+ Mat P(n, 3);
+ P.col(0) = x_camera.row(0);
+ P.col(1) = x_camera.row(1);
+ P.col(2).setConstant(1.0);
+
+ Mat S = X_world.transpose();
+
+ double error = std::numeric_limits<double>::infinity();
+ Mat E_old = 1000 * Mat::Ones(n, 3);
+
+ Vec3 c;
+ Mat E(n, 3);
+
+ int iteration = 0;
+ double tolerance = 1e-5;
+ // TODO(keir): The limit of 100 can probably be reduced, but this will require
+ // some investigation.
+ while (error > tolerance && iteration < 100) {
+ Mat3 tmp = P.transpose() * Z * A * S;
+ Eigen::JacobiSVD<Mat3> svd(tmp, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Mat3 U = svd.matrixU();
+ Mat3 VT = svd.matrixV().transpose();
+ Vec3 s;
+ s << 1, 1, (U * VT).determinant();
+ *R = U * s.asDiagonal() * VT;
+ Mat PR = P * *R; // n x 3
+ c = (S - Z*PR).transpose() * II;
+ Mat Y = S - e*c.transpose(); // n x 3
+ Vec Zmindiag = (PR * Y.transpose()).diagonal()
+ .cwiseQuotient(P.rowwise().squaredNorm());
+ for (int i = 0; i < n; ++i) {
+ Zmindiag[i] = std::max(Zmindiag[i], 0.0);
+ }
+ Z = Zmindiag.asDiagonal();
+ E = Y - Z*PR;
+ error = (E - E_old).norm();
+ LOG(INFO) << "PPnP error(" << (iteration++) << "): " << error;
+ E_old = E;
+ }
+ *t = -*R*c;
+
+ // TODO(keir): Figure out what the failure cases are. Is it too many
+ // iterations? Spend some time going through the math figuring out if there
+ // is some way to detect that the algorithm is going crazy, and return false.
+ return true;
+}
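+
+// Each iteration of the loop above alternates three updates: the rotation R
+// (an orthogonal Procrustes step via the SVD of P'*Z*A*S), the translation
+// helper vector c, and the per-point depths on the diagonal of Z (clamped to
+// be non-negative). The loop stops once the residual matrix E changes by less
+// than `tolerance` in Frobenius norm, or after 100 iterations.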
+
+
+} // namespace euclidean_resection
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/euclidean_resection.h b/intern/libmv/libmv/multiview/euclidean_resection.h
new file mode 100644
index 00000000000..28eae92611c
--- /dev/null
+++ b/intern/libmv/libmv/multiview/euclidean_resection.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2010 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
+#define LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+namespace euclidean_resection {
+
+enum ResectionMethod {
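+ // The algorithm from "Linear Pose Estimation from Points or Lines" by
+ // Ansar, A. and Daniilidis, K., PAMI 2003. vol. 25, no. 5.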
+ RESECTION_ANSAR_DANIILIDIS,
+
+ // The "EPnP" algorithm by Lepetit et al.
+ // http://cvlab.epfl.ch/~lepetit/papers/lepetit_ijcv08.pdf
+ RESECTION_EPNP,
+
+ // The Procrustes PNP algorithm ("PPnP")
+ // http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
+ RESECTION_PPNP
+};
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera
+ * from 4 or more 3D points and their normalized images.
+ *
+ * \param x_camera Image points in normalized camera coordinates e.g. x_camera
+ * = inv(K) * x_image.
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ * \param method The resection method to use.
+ */
+bool EuclideanResection(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method = RESECTION_EPNP);
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera
+ * from 4 or more 3D points and their images.
+ *
+ * \param x_image Image points in non-normalized image coordinates. The
+ * coordinates are laid out one per column. The matrix can be
+ * 2xN or 3xN for Euclidean or homogeneous 2D coordinates.
+ * \param X_world 3D points in the world coordinate system
+ * \param K Intrinsic parameters camera matrix
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ * \param method Resection method
+ */
+bool EuclideanResection(const Mat &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ Mat3 *R, Vec3 *t,
+ ResectionMethod method = RESECTION_EPNP);
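+
+/**
+ * Example (illustrative only; variable names are hypothetical):
+ *
+ *   Mat x_image(2, N);    // 2D measurements, one point per column.
+ *   Mat3X X_world(3, N);  // Corresponding 3D points.
+ *   Mat3 K;               // Camera calibration matrix.
+ *   Mat3 R; Vec3 t;
+ *   if (EuclideanResection(x_image, X_world, K, &R, &t)) {
+ *     // R and t map world coordinates into the camera frame.
+ *   }
+ */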
+
+/**
+ * The absolute orientation algorithm recovers the transformation between a set
+ * of 3D points, X and Xp such that:
+ *
+ * Xp = R*X + t
+ *
+ * The recovery of the absolute orientation follows this article:
+ * Horn, Hilden, "Closed-form solution of absolute orientation using
+ * orthonormal matrices"
+ */
+void AbsoluteOrientation(const Mat3X &X,
+ const Mat3X &Xp,
+ Mat3 *R,
+ Vec3 *t);
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
+ * more 3D points and their images.
+ *
+ * \param x_camera Image points in normalized camera coordinates, e.g.
+ * x_camera=inv(K)*x_image
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ *
+ * This is the algorithm described in: "Linear Pose Estimation from Points or
+ * Lines", by Ansar, A. and Daniilidis, PAMI 2003. vol. 25, no. 5.
+ */
+void EuclideanResectionAnsarDaniilidis(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t);
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
+ * more 3D points and their images.
+ *
+ * \param x_camera Image points in normalized camera coordinates,
+ * e.g. x_camera = inv(K) * x_image
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ *
+ * This is the algorithm described in:
+ * "{EP$n$P: An Accurate $O(n)$ Solution to the P$n$P Problem", by V. Lepetit
+ * and F. Moreno-Noguer and P. Fua, IJCV 2009. vol. 81, no. 2
+ * \note: the non-linear optimization is not implemented here.
+ */
+bool EuclideanResectionEPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t);
+
+/**
+ * Computes the extrinsic parameters, R and t for a calibrated camera from 4 or
+ * more 3D points and their images.
+ *
+ * \param x_camera Image points in normalized camera coordinates,
+ * e.g. x_camera = inv(K) * x_image
+ * \param X_world 3D points in the world coordinate system
+ * \param R Solution for the camera rotation matrix
+ * \param t Solution for the camera translation vector
+ *
+ * Straight from the paper:
+ * http://www.diegm.uniud.it/fusiello/papers/3dimpvt12-b.pdf
+ */
+bool EuclideanResectionPPnP(const Mat2X &x_camera,
+ const Mat3X &X_world,
+ Mat3 *R, Vec3 *t);
+
+} // namespace euclidean_resection
+} // namespace libmv
+
+
+#endif /* LIBMV_MULTIVIEW_EUCLIDEAN_RESECTION_H_ */
diff --git a/intern/libmv/libmv/multiview/euclidean_resection_test.cc b/intern/libmv/libmv/multiview/euclidean_resection_test.cc
new file mode 100644
index 00000000000..5ec9dbda3cf
--- /dev/null
+++ b/intern/libmv/libmv/multiview/euclidean_resection_test.cc
@@ -0,0 +1,237 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/euclidean_resection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/projection.h"
+#include "testing/testing.h"
+
+using namespace libmv::euclidean_resection;
+using namespace libmv;
+
+// Generates all necessary inputs and expected outputs for EuclideanResection.
+void CreateCameraSystem(const Mat3& KK,
+ const Mat3X& x_image,
+ const Vec& X_distances,
+ const Mat3& R_input,
+ const Vec3& T_input,
+ Mat2X *x_camera,
+ Mat3X *X_world,
+ Mat3 *R_expected,
+ Vec3 *T_expected) {
+ int num_points = x_image.cols();
+
+ Mat3X x_unit_cam(3, num_points);
+ x_unit_cam = KK.inverse() * x_image;
+
+ // Create normalized camera coordinates to be used as an input to the PnP
+ // function, instead of using NormalizeColumnVectors(&x_unit_cam).
+ *x_camera = x_unit_cam.block(0, 0, 2, num_points);
+ for (int i = 0; i < num_points; ++i) {
+ x_unit_cam.col(i).normalize();
+ }
+
+ // Create the 3D points in the camera system.
+ Mat X_camera(3, num_points);
+ for (int i = 0; i < num_points; ++i) {
+ X_camera.col(i) = X_distances(i) * x_unit_cam.col(i);
+ }
+
+ // Apply the transformation to the camera 3D points
+ Mat translation_matrix(3, num_points);
+ translation_matrix.row(0).setConstant(T_input(0));
+ translation_matrix.row(1).setConstant(T_input(1));
+ translation_matrix.row(2).setConstant(T_input(2));
+
+ *X_world = R_input * X_camera + translation_matrix;
+
+ // Create the expected result for comparison.
+ *R_expected = R_input.transpose();
+ *T_expected = *R_expected * (-T_input);
+};
+
+TEST(AbsoluteOrientation, QuaternionSolution) {
+ int num_points = 4;
+ Mat X;
+ Mat Xp;
+ X = 100 * Mat::Random(3, num_points);
+
+ // Create a random translation and rotation.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 t_input;
+ t_input.setRandom();
+ t_input = 100 * t_input;
+
+ Mat translation_matrix(3, num_points);
+ translation_matrix.row(0).setConstant(t_input(0));
+ translation_matrix.row(1).setConstant(t_input(1));
+ translation_matrix.row(2).setConstant(t_input(2));
+
+ // Create the transformed 3D points Xp as Xp = R * X + t.
+ Xp = R_input * X + translation_matrix;
+
+ // Output variables.
+ Mat3 R;
+ Vec3 t;
+
+ AbsoluteOrientation(X, Xp, &R, &t);
+
+ EXPECT_MATRIX_NEAR(t, t_input, 1e-6);
+ EXPECT_MATRIX_NEAR(R, R_input, 1e-8);
+}
+
+TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
+ // In this test only the translation and rotation are random. The image
+ // points are selected from a real case and are well conditioned.
+ Vec2i image_dimensions;
+ image_dimensions << 1600, 1200;
+
+ Mat3 KK;
+ KK << 2796, 0, 804,
+ 0 , 2796, 641,
+ 0, 0, 1;
+
+ // The real image points.
+ int num_points = 4;
+ Mat3X x_image(3, num_points);
+ x_image << 1164.06, 734.948, 749.599, 430.727,
+ 681.386, 844.59, 496.315, 580.775,
+ 1, 1, 1, 1;
+
+
+ // A vector of the 4 distances to the 3D points.
+ Vec X_distances = 100 * Vec::Random(num_points).array().abs();
+
+ // Create the random camera motion R and t that resection should recover.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 T_input;
+ T_input.setRandom();
+ T_input = 100 * T_input;
+
+ // Create the camera system, also getting the expected result of the
+ // transformation.
+ Mat3 R_expected;
+ Vec3 T_expected;
+ Mat3X X_world;
+ Mat2X x_camera;
+ CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
+ &x_camera, &X_world, &R_expected, &T_expected);
+
+ // Finally, run the code under test.
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_ANSAR_DANIILIDIS);
+
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+
+ // For now, the EPnP doesn't have a non-linear optimization step and so is
+ // not precise enough with only 4 points.
+ //
+ // TODO(jmichot): Reenable this test when there is nonlinear refinement.
+#if 0
+ R_output.setIdentity();
+ T_output.setZero();
+
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_EPNP);
+
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+#endif
+}
+
+// TODO(jmichot): Reduce the code duplication here with the code above.
+TEST(EuclideanResection, Points6AllRandomInput) {
+ Mat3 KK;
+ KK << 2796, 0, 804,
+ 0 , 2796, 641,
+ 0, 0, 1;
+
+ // Create random image points for a 1600x1200 image.
+ int w = 1600;
+ int h = 1200;
+ int num_points = 6;
+ Mat3X x_image(3, num_points);
+ x_image.row(0) = w * Vec::Random(num_points).array().abs();
+ x_image.row(1) = h * Vec::Random(num_points).array().abs();
+ x_image.row(2).setOnes();
+
+ // Normalized camera coordinates to be used as an input to the PnP function.
+ Mat2X x_camera;
+ Vec X_distances = 100 * Vec::Random(num_points).array().abs();
+
+ // Create the random camera motion R and t that resection should recover.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 T_input;
+ T_input.setRandom();
+ T_input = 100 * T_input;
+
+ // Create the camera system.
+ Mat3 R_expected;
+ Vec3 T_expected;
+ Mat3X X_world;
+ CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
+ &x_camera, &X_world, &R_expected, &T_expected);
+
+ // Test each of the resection methods.
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_ANSAR_DANIILIDIS);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_EPNP);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_image, X_world, KK,
+ &R_output, &T_output);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+}
diff --git a/intern/libmv/libmv/multiview/fundamental.cc b/intern/libmv/libmv/multiview/fundamental.cc
new file mode 100644
index 00000000000..ea8594c8cc0
--- /dev/null
+++ b/intern/libmv/libmv/multiview/fundamental.cc
@@ -0,0 +1,544 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/fundamental.h"
+
+#include "ceres/ceres.h"
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/poly.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/triangulation.h"
+
+namespace libmv {
+
+static void EliminateRow(const Mat34 &P, int row, Mat *X) {
+ X->resize(2, 4);
+
+ int first_row = (row + 1) % 3;
+ int second_row = (row + 2) % 3;
+
+ for (int i = 0; i < 4; ++i) {
+ (*X)(0, i) = P(first_row, i);
+ (*X)(1, i) = P(second_row, i);
+ }
+}
+
+void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2) {
+ *P1 << Mat3::Identity(), Vec3::Zero();
+ Vec3 e2;
+ Mat3 Ft = F.transpose();
+ Nullspace(&Ft, &e2);
+ *P2 << CrossProductMatrix(e2) * F, e2;
+}
+
+// Adapted from vgg_F_from_P.
+void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F) {
+ Mat X[3];
+ Mat Y[3];
+ Mat XY;
+
+ for (int i = 0; i < 3; ++i) {
+ EliminateRow(P1, i, X + i);
+ EliminateRow(P2, i, Y + i);
+ }
+
+ for (int i = 0; i < 3; ++i) {
+ for (int j = 0; j < 3; ++j) {
+ VerticalStack(X[j], Y[i], &XY);
+ (*F)(i, j) = XY.determinant();
+ }
+ }
+}
+
+// HZ 11.1 pag.279 (x1 = x, x2 = x')
+// http://www.cs.unc.edu/~marc/tutorial/node54.html
+static double EightPointSolver(const Mat &x1, const Mat &x2, Mat3 *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 8);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ int n = x1.cols();
+ Mat A(n, 9);
+ for (int i = 0; i < n; ++i) {
+ A(i, 0) = x2(0, i) * x1(0, i);
+ A(i, 1) = x2(0, i) * x1(1, i);
+ A(i, 2) = x2(0, i);
+ A(i, 3) = x2(1, i) * x1(0, i);
+ A(i, 4) = x2(1, i) * x1(1, i);
+ A(i, 5) = x2(1, i);
+ A(i, 6) = x1(0, i);
+ A(i, 7) = x1(1, i);
+ A(i, 8) = 1;
+ }
+
+ Vec9 f;
+ double smaller_singular_value = Nullspace(&A, &f);
+ *F = Map<RMat3>(f.data());
+ return smaller_singular_value;
+}
+
+// HZ 11.1.1 pag.280
+void EnforceFundamentalRank2Constraint(Mat3 *F) {
+ Eigen::JacobiSVD<Mat3> USV(*F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 d = USV.singularValues();
+ d(2) = 0.0;
+ *F = USV.matrixU() * d.asDiagonal() * USV.matrixV().transpose();
+}
+
+// HZ 11.2 pag.281 (x1 = x, x2 = x')
+double NormalizedEightPointSolver(const Mat &x1,
+ const Mat &x2,
+ Mat3 *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 8);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ PreconditionerFromPoints(x1, &T1);
+ PreconditionerFromPoints(x2, &T2);
+ Mat x1_normalized, x2_normalized;
+ ApplyTransformationToPoints(x1, T1, &x1_normalized);
+ ApplyTransformationToPoints(x2, T2, &x2_normalized);
+
+ // Estimate the fundamental matrix.
+ double smaller_singular_value =
+ EightPointSolver(x1_normalized, x2_normalized, F);
+ EnforceFundamentalRank2Constraint(F);
+
+ // Denormalize the fundamental matrix.
+ *F = T2.transpose() * (*F) * T1;
+
+ return smaller_singular_value;
+}
+
+// Seven-point algorithm.
+// http://www.cs.unc.edu/~marc/tutorial/node55.html
+double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_EQ(x1.cols(), 7);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ // Build a 7 x 9 matrix from point matches, where each row is equivalent to
+ // the equation x'T*F*x = 0 for a single correspondence pair (x', x). The
+ // domain of the matrix is a 9 element vector corresponding to F. The
+ // nullspace should be rank two; the two dimensions correspond to the set of
+ // F matrices satisfying the epipolar geometry.
+ Matrix<double, 7, 9> A;
+ for (int ii = 0; ii < 7; ++ii) {
+ A(ii, 0) = x1(0, ii) * x2(0, ii); // 0 represents x coords,
+ A(ii, 1) = x1(1, ii) * x2(0, ii); // 1 represents y coords.
+ A(ii, 2) = x2(0, ii);
+ A(ii, 3) = x1(0, ii) * x2(1, ii);
+ A(ii, 4) = x1(1, ii) * x2(1, ii);
+ A(ii, 5) = x2(1, ii);
+ A(ii, 6) = x1(0, ii);
+ A(ii, 7) = x1(1, ii);
+ A(ii, 8) = 1.0;
+ }
+
+ // Find the two F matrices in the nullspace of A.
+ Vec9 f1, f2;
+ double s = Nullspace2(&A, &f1, &f2);
+ Mat3 F1 = Map<RMat3>(f1.data());
+ Mat3 F2 = Map<RMat3>(f2.data());
+
+ // Then, use the condition det(F) = 0 to determine F. In other words, solve
+ // det(F1 + a*F2) = 0 for a.
+ double a = F1(0, 0), j = F2(0, 0),
+ b = F1(0, 1), k = F2(0, 1),
+ c = F1(0, 2), l = F2(0, 2),
+ d = F1(1, 0), m = F2(1, 0),
+ e = F1(1, 1), n = F2(1, 1),
+ f = F1(1, 2), o = F2(1, 2),
+ g = F1(2, 0), p = F2(2, 0),
+ h = F1(2, 1), q = F2(2, 1),
+ i = F1(2, 2), r = F2(2, 2);
+
+ // Run fundamental_7point_coeffs.py to get the below coefficients.
+ // The coefficients are in ascending powers of alpha, i.e. P[N]*x^N.
+ double P[4] = {
+ a*e*i + b*f*g + c*d*h - a*f*h - b*d*i - c*e*g,
+ a*e*r + a*i*n + b*f*p + b*g*o + c*d*q + c*h*m + d*h*l + e*i*j + f*g*k -
+ a*f*q - a*h*o - b*d*r - b*i*m - c*e*p - c*g*n - d*i*k - e*g*l - f*h*j,
+ a*n*r + b*o*p + c*m*q + d*l*q + e*j*r + f*k*p + g*k*o + h*l*m + i*j*n -
+ a*o*q - b*m*r - c*n*p - d*k*r - e*l*p - f*j*q - g*l*n - h*j*o - i*k*m,
+ j*n*r + k*o*p + l*m*q - j*o*q - k*m*r - l*n*p,
+ };
+
+ // Solve for the roots of P[3]*x^3 + P[2]*x^2 + P[1]*x + P[0] = 0.
+ double roots[3];
+ int num_roots = SolveCubicPolynomial(P, roots);
+
+ // Build the fundamental matrix for each solution.
+ for (int kk = 0; kk < num_roots; ++kk) {
+ F->push_back(F1 + roots[kk] * F2);
+ }
+ return s;
+}
+
+double FundamentalFromCorrespondences7Point(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F) {
+ DCHECK_EQ(x1.rows(), 2);
+ DCHECK_GE(x1.cols(), 7);
+ DCHECK_EQ(x1.rows(), x2.rows());
+ DCHECK_EQ(x1.cols(), x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ PreconditionerFromPoints(x1, &T1);
+ PreconditionerFromPoints(x2, &T2);
+ Mat x1_normalized, x2_normalized;
+ ApplyTransformationToPoints(x1, T1, &x1_normalized);
+ ApplyTransformationToPoints(x2, T2, &x2_normalized);
+
+ // Estimate the fundamental matrix.
+ double smaller_singular_value =
+ FundamentalFrom7CorrespondencesLinear(x1_normalized, x2_normalized, &(*F));
+
+ for (int k = 0; k < F->size(); ++k) {
+ Mat3 & Fmat = (*F)[k];
+ // Denormalize the fundamental matrix.
+ Fmat = T2.transpose() * Fmat * T1;
+ }
+ return smaller_singular_value;
+}
+
+void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized) {
+ *F_normalized = F / FrobeniusNorm(F);
+ if ((*F_normalized)(2, 2) < 0) {
+ *F_normalized *= -1;
+ }
+}
+
+double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+ Vec3 x(x1(0), x1(1), 1.0);
+ Vec3 y(x2(0), x2(1), 1.0);
+
+ Vec3 F_x = F * x;
+ Vec3 Ft_y = F.transpose() * y;
+ double y_F_x = y.dot(F_x);
+
+ return Square(y_F_x) / ( F_x.head<2>().squaredNorm()
+ + Ft_y.head<2>().squaredNorm());
+}
+
+double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2) {
+ Vec3 x(x1(0), x1(1), 1.0);
+ Vec3 y(x2(0), x2(1), 1.0);
+
+ Vec3 F_x = F * x;
+ Vec3 Ft_y = F.transpose() * y;
+ double y_F_x = y.dot(F_x);
+
+ return Square(y_F_x) * ( 1 / F_x.head<2>().squaredNorm()
+ + 1 / Ft_y.head<2>().squaredNorm());
+}
+
+// HZ 9.6 pag 257 (formula 9.12)
+void EssentialFromFundamental(const Mat3 &F,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *E) {
+ *E = K2.transpose() * F * K1;
+}
+
+// HZ 9.6 pag 257 (formula 9.12)
+// Or http://ai.stanford.edu/~birch/projective/node20.html
+void FundamentalFromEssential(const Mat3 &E,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *F) {
+ *F = K2.inverse().transpose() * E * K1.inverse();
+}
+
+void RelativeCameraMotion(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *R,
+ Vec3 *t) {
+ *R = R2 * R1.transpose();
+ *t = t2 - (*R) * t1;
+}
+
+// HZ 9.6 pag 257
+void EssentialFromRt(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *E) {
+ Mat3 R;
+ Vec3 t;
+ RelativeCameraMotion(R1, t1, R2, t2, &R, &t);
+ Mat3 Tx = CrossProductMatrix(t);
+ *E = Tx * R;
+}
+
+// HZ 9.6 pag 259 (Result 9.19)
+void MotionFromEssential(const Mat3 &E,
+ std::vector<Mat3> *Rs,
+ std::vector<Vec3> *ts) {
+ Eigen::JacobiSVD<Mat3> USV(E, Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Mat3 U = USV.matrixU();
+ Mat3 Vt = USV.matrixV().transpose();
+
+ // Last column of U is undetermined since d = (a a 0).
+ if (U.determinant() < 0) {
+ U.col(2) *= -1;
+ }
+ // Last row of Vt is undetermined since d = (a a 0).
+ if (Vt.determinant() < 0) {
+ Vt.row(2) *= -1;
+ }
+
+ Mat3 W;
+ W << 0, -1, 0,
+ 1, 0, 0,
+ 0, 0, 1;
+
+ Mat3 U_W_Vt = U * W * Vt;
+ Mat3 U_Wt_Vt = U * W.transpose() * Vt;
+
+ Rs->resize(4);
+ (*Rs)[0] = U_W_Vt;
+ (*Rs)[1] = U_W_Vt;
+ (*Rs)[2] = U_Wt_Vt;
+ (*Rs)[3] = U_Wt_Vt;
+
+ ts->resize(4);
+ (*ts)[0] = U.col(2);
+ (*ts)[1] = -U.col(2);
+ (*ts)[2] = U.col(2);
+ (*ts)[3] = -U.col(2);
+}
+
+int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
+ const std::vector<Vec3> &ts,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2) {
+ DCHECK_EQ(4, Rs.size());
+ DCHECK_EQ(4, ts.size());
+
+ Mat34 P1, P2;
+ Mat3 R1;
+ Vec3 t1;
+ R1.setIdentity();
+ t1.setZero();
+ P_From_KRt(K1, R1, t1, &P1);
+ for (int i = 0; i < 4; ++i) {
+ const Mat3 &R2 = Rs[i];
+ const Vec3 &t2 = ts[i];
+ P_From_KRt(K2, R2, t2, &P2);
+ Vec3 X;
+ TriangulateDLT(P1, x1, P2, x2, &X);
+ double d1 = Depth(R1, t1, X);
+ double d2 = Depth(R2, t2, X);
+ // Test if the point is in front of both cameras.
+ if (d1 > 0 && d2 > 0) {
+ return i;
+ }
+ }
+ return -1;
+}
+
+bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2,
+ Mat3 *R,
+ Vec3 *t) {
+ std::vector<Mat3> Rs;
+ std::vector<Vec3> ts;
+ MotionFromEssential(E, &Rs, &ts);
+ int solution = MotionFromEssentialChooseSolution(Rs, ts, K1, x1, K2, x2);
+ if (solution >= 0) {
+ *R = Rs[solution];
+ *t = ts[solution];
+ return true;
+ } else {
+ return false;
+ }
+}
+
+void FundamentalToEssential(const Mat3 &F, Mat3 *E) {
+ Eigen::JacobiSVD<Mat3> svd(F, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+ // See Hartley & Zisserman page 294, result 11.1, which shows how to get the
+ // closest essential matrix to a matrix that is "almost" an essential matrix.
+ double a = svd.singularValues()(0);
+ double b = svd.singularValues()(1);
+ double s = (a + b) / 2.0;
+
+ LG << "Initial reconstruction's rotation is non-euclidean by "
+ << (((a - b) / std::max(a, b)) * 100) << "%; singular values:"
+ << svd.singularValues().transpose();
+
+ Vec3 diag;
+ diag << s, s, 0;
+
+ *E = svd.matrixU() * diag.asDiagonal() * svd.matrixV().transpose();
+}
+
+// Default settings for fundamental estimation which should be suitable
+// for a wide range of use cases.
+EstimateFundamentalOptions::EstimateFundamentalOptions(void) :
+ max_num_iterations(50),
+ expected_average_symmetric_distance(1e-16) {
+}
+
+namespace {
+// Cost functor which computes symmetric epipolar distance
+// used for fundamental matrix refinement.
+class FundamentalSymmetricEpipolarCostFunctor {
+ public:
+ FundamentalSymmetricEpipolarCostFunctor(const Vec2 &x,
+ const Vec2 &y)
+ : x_(x), y_(y) {}
+
+ template<typename T>
+ bool operator()(const T *fundamental_parameters, T *residuals) const {
+ typedef Eigen::Matrix<T, 3, 3> Mat3;
+ typedef Eigen::Matrix<T, 3, 1> Vec3;
+
+ Mat3 F(fundamental_parameters);
+
+ Vec3 x(T(x_(0)), T(x_(1)), T(1.0));
+ Vec3 y(T(y_(0)), T(y_(1)), T(1.0));
+
+ Vec3 F_x = F * x;
+ Vec3 Ft_y = F.transpose() * y;
+ T y_F_x = y.dot(F_x);
+
+ residuals[0] = y_F_x * T(1) / F_x.head(2).norm();
+ residuals[1] = y_F_x * T(1) / Ft_y.head(2).norm();
+
+ return true;
+ }
+
+ const Mat x_;
+ const Mat y_;
+};
+
+// Termination checking callback used for fundamental estimation.
+// It finishes the minimization as soon as the actual average symmetric
+// epipolar distance is less than or equal to the expected average value.
+class TerminationCheckingCallback : public ceres::IterationCallback {
+ public:
+ TerminationCheckingCallback(const Mat &x1, const Mat &x2,
+ const EstimateFundamentalOptions &options,
+ Mat3 *F)
+ : options_(options), x1_(x1), x2_(x2), F_(F) {}
+
+ virtual ceres::CallbackReturnType operator()(
+ const ceres::IterationSummary& summary) {
+ // If the step wasn't successful, there's nothing to do.
+ if (!summary.step_is_successful) {
+ return ceres::SOLVER_CONTINUE;
+ }
+
+ // Calculate average of symmetric epipolar distance.
+ double average_distance = 0.0;
+ for (int i = 0; i < x1_.cols(); i++) {
+ average_distance += SymmetricEpipolarDistance(*F_,
+ x1_.col(i),
+ x2_.col(i));
+ }
+ average_distance /= x1_.cols();
+
+ if (average_distance <= options_.expected_average_symmetric_distance) {
+ return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
+ }
+
+ return ceres::SOLVER_CONTINUE;
+ }
+
+ private:
+ const EstimateFundamentalOptions &options_;
+ const Mat &x1_;
+ const Mat &x2_;
+ Mat3 *F_;
+};
+} // namespace
+
+/* Fundamental transformation estimation. */
+bool EstimateFundamentalFromCorrespondences(
+ const Mat &x1,
+ const Mat &x2,
+ const EstimateFundamentalOptions &options,
+ Mat3 *F) {
+ // Step 1: Algebraic fundamental estimation.
+
+ // Assume algebraic estimation always succeeds.
+ NormalizedEightPointSolver(x1, x2, F);
+
+ LG << "Estimated matrix after algebraic estimation:\n" << *F;
+
+ // Step 2: Refine matrix using Ceres minimizer.
+ ceres::Problem problem;
+ for (int i = 0; i < x1.cols(); i++) {
+ FundamentalSymmetricEpipolarCostFunctor
+ *fundamental_symmetric_epipolar_cost_function =
+ new FundamentalSymmetricEpipolarCostFunctor(x1.col(i),
+ x2.col(i));
+
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<
+ FundamentalSymmetricEpipolarCostFunctor,
+ 2, // num_residuals
+ 9>(fundamental_symmetric_epipolar_cost_function),
+ NULL,
+ F->data());
+ }
+
+ // Configure the solve.
+ ceres::Solver::Options solver_options;
+ solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.max_num_iterations = options.max_num_iterations;
+ solver_options.update_state_every_iteration = true;
+
+ // Terminate if the average symmetric distance is good enough.
+ TerminationCheckingCallback callback(x1, x2, options, F);
+ solver_options.callbacks.push_back(&callback);
+
+ // Run the solve.
+ ceres::Solver::Summary summary;
+ ceres::Solve(solver_options, &problem, &summary);
+
+ VLOG(1) << "Summary:\n" << summary.FullReport();
+
+ LG << "Final refined matrix:\n" << *F;
+
+ return summary.IsSolutionUsable();
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/fundamental.h b/intern/libmv/libmv/multiview/fundamental.h
new file mode 100644
index 00000000000..51067aefc2b
--- /dev/null
+++ b/intern/libmv/libmv/multiview/fundamental.h
@@ -0,0 +1,187 @@
+// Copyright (c) 2007, 2008, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_FUNDAMENTAL_H_
+#define LIBMV_MULTIVIEW_FUNDAMENTAL_H_
+
+#include <vector>
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void ProjectionsFromFundamental(const Mat3 &F, Mat34 *P1, Mat34 *P2);
+void FundamentalFromProjections(const Mat34 &P1, const Mat34 &P2, Mat3 *F);
+
+/**
+ * The normalized 8-point fundamental matrix solver.
+ */
+double NormalizedEightPointSolver(const Mat &x1,
+ const Mat &x2,
+ Mat3 *F);
+
+/**
+ * 7 points (minimal case, points coordinates must be normalized before):
+ */
+double FundamentalFrom7CorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F);
+
+/**
+ * 7 points (points coordinates must be in image space):
+ */
+double FundamentalFromCorrespondences7Point(const Mat &x1,
+ const Mat &x2,
+ std::vector<Mat3> *F);
+
+/**
+ * 8 points (points coordinates must be in image space):
+ */
+double NormalizedEightPointSolver(const Mat &x1,
+ const Mat &x2,
+ Mat3 *F);
+
+/**
+ * Fundamental matrix utility function:
+ */
+void EnforceFundamentalRank2Constraint(Mat3 *F);
+
+void NormalizeFundamental(const Mat3 &F, Mat3 *F_normalized);
+
+/**
+ * Approximate squared reprojection error.
+ *
+ * See page 287 of HZ equation 11.9. This avoids triangulating the point,
+ * relying only on the entries in F.
+ */
+double SampsonDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
+
+/**
+ * Calculates the sum of the distances from the points to the epipolar lines.
+ *
+ * See page 288 of HZ equation 11.10.
+ */
+double SymmetricEpipolarDistance(const Mat &F, const Vec2 &x1, const Vec2 &x2);
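+
+/**
+ * As implemented in fundamental.cc, with e = x2' * F * x1 (homogeneous x1, x2),
+ * a = F * x1 and b = F' * x2, these two distances evaluate to:
+ *
+ *   SampsonDistance           = e^2 / (a_1^2 + a_2^2 + b_1^2 + b_2^2)
+ *   SymmetricEpipolarDistance = e^2 * (1 / (a_1^2 + a_2^2) + 1 / (b_1^2 + b_2^2))
+ */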
+
+/**
+ * Compute the relative camera motion between two cameras.
+ *
+ * Given the motion parameters of two cameras, computes the motion parameters
+ * of the second one assuming the first one to be at the origin.
+ * If T1 and T2 are the camera motions, the computed relative motion is
+ * T = T2 T1^{-1}
+ */
+void RelativeCameraMotion(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *R,
+ Vec3 *t);
+
+void EssentialFromFundamental(const Mat3 &F,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *E);
+
+void FundamentalFromEssential(const Mat3 &E,
+ const Mat3 &K1,
+ const Mat3 &K2,
+ Mat3 *F);
+
+void EssentialFromRt(const Mat3 &R1,
+ const Vec3 &t1,
+ const Mat3 &R2,
+ const Vec3 &t2,
+ Mat3 *E);
+
+void MotionFromEssential(const Mat3 &E,
+ std::vector<Mat3> *Rs,
+ std::vector<Vec3> *ts);
+
+/**
+ * Choose one of the four possible motion solutions from an essential matrix.
+ *
+ * Decides the right solution by checking that the triangulation of a match
+ * x1--x2 lies in front of the cameras. See HZ 9.6 pag 259 (9.6.3 Geometrical
+ * interpretation of the 4 solutions)
+ *
+ * \return index of the right solution or -1 if no solution.
+ */
+int MotionFromEssentialChooseSolution(const std::vector<Mat3> &Rs,
+ const std::vector<Vec3> &ts,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2);
+
+bool MotionFromEssentialAndCorrespondence(const Mat3 &E,
+ const Mat3 &K1,
+ const Vec2 &x1,
+ const Mat3 &K2,
+ const Vec2 &x2,
+ Mat3 *R,
+ Vec3 *t);
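+
+/**
+ * Sketch of a typical decomposition (hypothetical inputs):
+ *
+ *   Mat3 E;       // Essential matrix, e.g. from FundamentalToEssential().
+ *   Vec2 x1, x2;  // One corresponding point pair.
+ *   Mat3 R; Vec3 t;
+ *   if (MotionFromEssentialAndCorrespondence(E, K1, x1, K2, x2, &R, &t)) {
+ *     // (R, t) is the solution that places the triangulated match in front
+ *     // of both cameras.
+ *   }
+ */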
+
+/**
+ * Find closest essential matrix E to fundamental F
+ */
+void FundamentalToEssential(const Mat3 &F, Mat3 *E);
+
+/**
+ * This structure contains options that control how the fundamental
+ * estimation operates.
+ *
+ * Defaults should be suitable for a wide range of use cases, but
+ * better performance and accuracy might require tweaking.
+ */
+struct EstimateFundamentalOptions {
+ // Default constructor which sets up options for generic usage.
+ EstimateFundamentalOptions(void);
+
+ // Maximal number of iterations for refinement step.
+ int max_num_iterations;
+
+ // Expected average symmetric epipolar distance between the two sets of
+ // corresponding points under the estimated fundamental matrix.
+ //
+ // Refinement will finish as soon as the average symmetric epipolar
+ // distance is less than or equal to this value.
+ //
+ // This distance is measured in the same units as input points are.
+ double expected_average_symmetric_distance;
+};
+
+/**
+ * Fundamental transformation estimation.
+ *
+ * This function estimates the fundamental transformation from a list of 2D
+ * correspondences by doing algebraic estimation first, followed by
+ * refinement of the result.
+ */
+bool EstimateFundamentalFromCorrespondences(
+ const Mat &x1,
+ const Mat &x2,
+ const EstimateFundamentalOptions &options,
+ Mat3 *F);
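+
+/**
+ * Minimal usage sketch (assumes x1 and x2 are 2xN matrices of matching points):
+ *
+ *   EstimateFundamentalOptions options;  // Defaults: 50 iterations, 1e-16 distance.
+ *   Mat3 F;
+ *   if (EstimateFundamentalFromCorrespondences(x1, x2, options, &F)) {
+ *     // F maps points in image 1 to epipolar lines in image 2: x2' * F * x1 = 0.
+ *   }
+ */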
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_FUNDAMENTAL_H_
diff --git a/intern/libmv/libmv/multiview/fundamental_test.cc b/intern/libmv/libmv/multiview/fundamental_test.cc
new file mode 100644
index 00000000000..da0eb449b8f
--- /dev/null
+++ b/intern/libmv/libmv/multiview/fundamental_test.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/test_data_sets.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+namespace {
+
+using namespace libmv;
+
+TEST(Fundamental, FundamentalFromProjections) {
+ Mat34 P1_gt, P2_gt;
+ P1_gt << 1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0;
+ P2_gt << 1, 1, 1, 3,
+ 0, 2, 0, 3,
+ 0, 1, 1, 0;
+ Mat3 F_gt;
+ FundamentalFromProjections(P1_gt, P2_gt, &F_gt);
+
+ Mat34 P1, P2;
+ ProjectionsFromFundamental(F_gt, &P1, &P2);
+
+ Mat3 F;
+ FundamentalFromProjections(P1, P2, &F);
+
+ EXPECT_MATRIX_PROP(F_gt, F, 1e-6);
+}
+
+TEST(Fundamental, PreconditionerFromPoints) {
+ int n = 4;
+ Mat points(2, n);
+ points << 0, 0, 1, 1,
+ 0, 2, 1, 3;
+
+ Mat3 T;
+ PreconditionerFromPoints(points, &T);
+
+ Mat normalized_points;
+ ApplyTransformationToPoints(points, T, &normalized_points);
+
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(normalized_points, &mean, &variance);
+
+ EXPECT_NEAR(0, mean(0), 1e-8);
+ EXPECT_NEAR(0, mean(1), 1e-8);
+ EXPECT_NEAR(2, variance(0), 1e-8);
+ EXPECT_NEAR(2, variance(1), 1e-8);
+}
+
+TEST(Fundamental, EssentialFromFundamental) {
+ TwoViewDataSet d = TwoRealisticCameras();
+
+ Mat3 E_from_Rt;
+ EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E_from_Rt);
+
+ Mat3 E_from_F;
+ EssentialFromFundamental(d.F, d.K1, d.K2, &E_from_F);
+
+ EXPECT_MATRIX_PROP(E_from_Rt, E_from_F, 1e-6);
+}
+
+TEST(Fundamental, MotionFromEssential) {
+ TwoViewDataSet d = TwoRealisticCameras();
+
+ Mat3 E;
+ EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
+
+ Mat3 R;
+ Vec3 t;
+ RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
+ NormalizeL2(&t);
+
+ std::vector<Mat3> Rs;
+ std::vector<Vec3> ts;
+ MotionFromEssential(E, &Rs, &ts);
+ bool one_solution_is_correct = false;
+ for (size_t i = 0; i < Rs.size(); ++i) {
+ if (FrobeniusDistance(Rs[i], R) < 1e-8 && DistanceL2(ts[i], t) < 1e-8) {
+ one_solution_is_correct = true;
+ break;
+ }
+ }
+ EXPECT_TRUE(one_solution_is_correct);
+}
+
+TEST(Fundamental, MotionFromEssentialChooseSolution) {
+ TwoViewDataSet d = TwoRealisticCameras();
+
+ Mat3 E;
+ EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
+
+ Mat3 R;
+ Vec3 t;
+ RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
+ NormalizeL2(&t);
+
+ std::vector<Mat3> Rs;
+ std::vector<Vec3> ts;
+ MotionFromEssential(E, &Rs, &ts);
+
+ Vec2 x1, x2;
+ MatrixColumn(d.x1, 0, &x1);
+ MatrixColumn(d.x2, 0, &x2);
+ int solution = MotionFromEssentialChooseSolution(Rs, ts, d.K1, x1, d.K2, x2);
+
+ EXPECT_LE(0, solution);
+ EXPECT_LE(solution, 3);
+ EXPECT_LE(FrobeniusDistance(Rs[solution], R), 1e-8);
+ EXPECT_LE(DistanceL2(ts[solution], t), 1e-8);
+}
+
+TEST(Fundamental, MotionFromEssentialAndCorrespondence) {
+ TwoViewDataSet d = TwoRealisticCameras();
+
+ Mat3 E;
+ EssentialFromRt(d.R1, d.t1, d.R2, d.t2, &E);
+
+ Mat3 R;
+ Vec3 t;
+ RelativeCameraMotion(d.R1, d.t1, d.R2, d.t2, &R, &t);
+ NormalizeL2(&t);
+
+ Vec2 x1, x2;
+ MatrixColumn(d.x1, 0, &x1);
+ MatrixColumn(d.x2, 0, &x2);
+
+ Mat3 R_estimated;
+ Vec3 t_estimated;
+ MotionFromEssentialAndCorrespondence(E, d.K1, x1, d.K2, x2,
+ &R_estimated, &t_estimated);
+
+ EXPECT_LE(FrobeniusDistance(R_estimated, R), 1e-8);
+ EXPECT_LE(DistanceL2(t_estimated, t), 1e-8);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/multiview/homography.cc b/intern/libmv/libmv/multiview/homography.cc
new file mode 100644
index 00000000000..ce533a3ead2
--- /dev/null
+++ b/intern/libmv/libmv/multiview/homography.cc
@@ -0,0 +1,465 @@
+// Copyright (c) 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/homography.h"
+
+#include "ceres/ceres.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/homography_parameterization.h"
+
+namespace libmv {
+/** 2D Homography transformation estimation in the case that points are in
+ * euclidean coordinates.
+ *
+ * x = H y
+ * x and y vectors must have the same direction, so we can write
+ * crossproduct(|x|, H * |y|) = |0|
+ *
+ * | 0 -1 x2| |a b c| |y1| |0|
+ * | 1 0 -x1| * |d e f| * |y2| = |0|
+ * |-x2 x1 0| |g h 1| |1 | |0|
+ *
+ * That gives :
+ *
+ * (-d+x2*g)*y1 + (-e+x2*h)*y2 + -f+x2 |0|
+ * (a-x1*g)*y1 + (b-x1*h)*y2 + c-x1 = |0|
+ * (-x2*a+x1*d)*y1 + (-x2*b+x1*e)*y2 + -x2*c+x1*f |0|
+ */
+static bool Homography2DFromCorrespondencesLinearEuc(
+ const Mat &x1,
+ const Mat &x2,
+ Mat3 *H,
+ double expected_precision) {
+ assert(2 == x1.rows());
+ assert(4 <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ int n = x1.cols();
+ MatX8 L = Mat::Zero(n * 3, 8);
+ Mat b = Mat::Zero(n * 3, 1);
+ for (int i = 0; i < n; ++i) {
+ int j = 3 * i;
+ L(j, 0) = x1(0, i); // a
+ L(j, 1) = x1(1, i); // b
+ L(j, 2) = 1.0; // c
+ L(j, 6) = -x2(0, i) * x1(0, i); // g
+ L(j, 7) = -x2(0, i) * x1(1, i); // h
+ b(j, 0) = x2(0, i); // i
+
+ ++j;
+ L(j, 3) = x1(0, i); // d
+ L(j, 4) = x1(1, i); // e
+ L(j, 5) = 1.0; // f
+ L(j, 6) = -x2(1, i) * x1(0, i); // g
+ L(j, 7) = -x2(1, i) * x1(1, i); // h
+ b(j, 0) = x2(1, i); // i
+
+ // This ensures better stability
+ // TODO(julien) make a lite version without this 3rd set
+ ++j;
+ L(j, 0) = x2(1, i) * x1(0, i); // a
+ L(j, 1) = x2(1, i) * x1(1, i); // b
+ L(j, 2) = x2(1, i); // c
+ L(j, 3) = -x2(0, i) * x1(0, i); // d
+ L(j, 4) = -x2(0, i) * x1(1, i); // e
+ L(j, 5) = -x2(0, i); // f
+ }
+ // Solve Lx=B
+ Vec h = L.fullPivLu().solve(b);
+ Homography2DNormalizedParameterization<double>::To(h, H);
+ if ((L * h).isApprox(b, expected_precision)) {
+ return true;
+ } else {
+ return false;
+ }
+}
+
+/** 2D Homography transformation estimation in the case that points are in
+ * homogeneous coordinates.
+ *
+ * | 0 -x3 x2| |a b c| |y1| -x3*d+x2*g -x3*e+x2*h -x3*f+x2*1 |y1| (-x3*d+x2*g)*y1 (-x3*e+x2*h)*y2 (-x3*f+x2*1)*y3 |0|
+ * | x3 0 -x1| * |d e f| * |y2| = x3*a-x1*g x3*b-x1*h x3*c-x1*1 * |y2| = (x3*a-x1*g)*y1 (x3*b-x1*h)*y2 (x3*c-x1*1)*y3 = |0|
+ * |-x2 x1 0| |g h 1| |y3| -x2*a+x1*d -x2*b+x1*e -x2*c+x1*f |y3| (-x2*a+x1*d)*y1 (-x2*b+x1*e)*y2 (-x2*c+x1*f)*y3 |0|
+ * X = |a b c d e f g h|^t
+ */
+bool Homography2DFromCorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ Mat3 *H,
+ double expected_precision) {
+ if (x1.rows() == 2) {
+ return Homography2DFromCorrespondencesLinearEuc(x1, x2, H,
+ expected_precision);
+ }
+ assert(3 == x1.rows());
+ assert(4 <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ const int x = 0;
+ const int y = 1;
+ const int w = 2;
+ int n = x1.cols();
+ MatX8 L = Mat::Zero(n * 3, 8);
+ Mat b = Mat::Zero(n * 3, 1);
+ for (int i = 0; i < n; ++i) {
+ int j = 3 * i;
+ L(j, 0) = x2(w, i) * x1(x, i); // a
+ L(j, 1) = x2(w, i) * x1(y, i); // b
+ L(j, 2) = x2(w, i) * x1(w, i); // c
+ L(j, 6) = -x2(x, i) * x1(x, i); // g
+ L(j, 7) = -x2(x, i) * x1(y, i); // h
+ b(j, 0) = x2(x, i) * x1(w, i);
+
+ ++j;
+ L(j, 3) = x2(w, i) * x1(x, i); // d
+ L(j, 4) = x2(w, i) * x1(y, i); // e
+ L(j, 5) = x2(w, i) * x1(w, i); // f
+ L(j, 6) = -x2(y, i) * x1(x, i); // g
+ L(j, 7) = -x2(y, i) * x1(y, i); // h
+ b(j, 0) = x2(y, i) * x1(w, i);
+
+ // This ensures better stability
+ ++j;
+ L(j, 0) = x2(y, i) * x1(x, i); // a
+ L(j, 1) = x2(y, i) * x1(y, i); // b
+ L(j, 2) = x2(y, i) * x1(w, i); // c
+ L(j, 3) = -x2(x, i) * x1(x, i); // d
+ L(j, 4) = -x2(x, i) * x1(y, i); // e
+ L(j, 5) = -x2(x, i) * x1(w, i); // f
+ }
+ // Solve Lx=B
+ Vec h = L.fullPivLu().solve(b);
+ if ((L * h).isApprox(b, expected_precision)) {
+ Homography2DNormalizedParameterization<double>::To(h, H);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+// Default settings for homography estimation which should be suitable
+// for a wide range of use cases.
+EstimateHomographyOptions::EstimateHomographyOptions(void) :
+ use_normalization(true),
+ max_num_iterations(50),
+ expected_average_symmetric_distance(1e-16) {
+}
+
+namespace {
+void GetNormalizedPoints(const Mat &original_points,
+ Mat *normalized_points,
+ Mat3 *normalization_matrix) {
+ IsotropicPreconditionerFromPoints(original_points, normalization_matrix);
+ ApplyTransformationToPoints(original_points,
+ *normalization_matrix,
+ normalized_points);
+}
+
+// Cost functor which computes symmetric geometric distance
+// used for homography matrix refinement.
+class HomographySymmetricGeometricCostFunctor {
+ public:
+ HomographySymmetricGeometricCostFunctor(const Vec2 &x,
+ const Vec2 &y)
+ : x_(x), y_(y) { }
+
+ template<typename T>
+ bool operator()(const T *homography_parameters, T *residuals) const {
+ typedef Eigen::Matrix<T, 3, 3> Mat3;
+ typedef Eigen::Matrix<T, 3, 1> Vec3;
+
+ Mat3 H(homography_parameters);
+
+ Vec3 x(T(x_(0)), T(x_(1)), T(1.0));
+ Vec3 y(T(y_(0)), T(y_(1)), T(1.0));
+
+ Vec3 H_x = H * x;
+ Vec3 Hinv_y = H.inverse() * y;
+
+ H_x /= H_x(2);
+ Hinv_y /= Hinv_y(2);
+
+ // This is a forward error.
+ residuals[0] = H_x(0) - T(y_(0));
+ residuals[1] = H_x(1) - T(y_(1));
+
+ // This is a backward error.
+ residuals[2] = Hinv_y(0) - T(x_(0));
+ residuals[3] = Hinv_y(1) - T(x_(1));
+
+ return true;
+ }
+
+ const Vec2 x_;
+ const Vec2 y_;
+};
+
+// Termination checking callback used for homography estimation.
+// It finishes the minimization as soon as the actual average
+// symmetric geometric distance is less than or equal to the expected
+// average value.
+class TerminationCheckingCallback : public ceres::IterationCallback {
+ public:
+ TerminationCheckingCallback(const Mat &x1, const Mat &x2,
+ const EstimateHomographyOptions &options,
+ Mat3 *H)
+ : options_(options), x1_(x1), x2_(x2), H_(H) {}
+
+ virtual ceres::CallbackReturnType operator()(
+ const ceres::IterationSummary& summary) {
+ // If the step wasn't successful, there's nothing to do.
+ if (!summary.step_is_successful) {
+ return ceres::SOLVER_CONTINUE;
+ }
+
+ // Calculate average of symmetric geometric distance.
+ double average_distance = 0.0;
+ for (int i = 0; i < x1_.cols(); i++) {
+ average_distance += SymmetricGeometricDistance(*H_,
+ x1_.col(i),
+ x2_.col(i));
+ }
+ average_distance /= x1_.cols();
+
+ if (average_distance <= options_.expected_average_symmetric_distance) {
+ return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
+ }
+
+ return ceres::SOLVER_CONTINUE;
+ }
+
+ private:
+ const EstimateHomographyOptions &options_;
+ const Mat &x1_;
+ const Mat &x2_;
+ Mat3 *H_;
+};
+} // namespace
+
+/** 2D Homography transformation estimation in the case that points are in
+ * euclidean coordinates.
+ */
+bool EstimateHomography2DFromCorrespondences(
+ const Mat &x1,
+ const Mat &x2,
+ const EstimateHomographyOptions &options,
+ Mat3 *H) {
+ // TODO(sergey): Support homogeneous coordinates, not just euclidean.
+
+ assert(2 == x1.rows());
+ assert(4 <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ Mat3 T1 = Mat3::Identity(),
+ T2 = Mat3::Identity();
+
+ // Step 1: Algebraic homography estimation.
+ Mat x1_normalized, x2_normalized;
+
+ if (options.use_normalization) {
+ LG << "Estimating homography using normalization.";
+ GetNormalizedPoints(x1, &x1_normalized, &T1);
+ GetNormalizedPoints(x2, &x2_normalized, &T2);
+ } else {
+ x1_normalized = x1;
+ x2_normalized = x2;
+ }
+
+ // Assume algebraic estimation always succeeds.
+ Homography2DFromCorrespondencesLinear(x1_normalized, x2_normalized, H);
+
+ // Denormalize the homography matrix.
+ if (options.use_normalization) {
+ *H = T2.inverse() * (*H) * T1;
+ }
+
+ LG << "Estimated matrix after algebraic estimation:\n" << *H;
+
+ // Step 2: Refine matrix using Ceres minimizer.
+ ceres::Problem problem;
+ for (int i = 0; i < x1.cols(); i++) {
+ HomographySymmetricGeometricCostFunctor
+ *homography_symmetric_geometric_cost_function =
+ new HomographySymmetricGeometricCostFunctor(x1.col(i),
+ x2.col(i));
+
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<
+ HomographySymmetricGeometricCostFunctor,
+ 4, // num_residuals
+ 9>(homography_symmetric_geometric_cost_function),
+ NULL,
+ H->data());
+ }
+
+ // Configure the solver.
+ ceres::Solver::Options solver_options;
+ solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.max_num_iterations = options.max_num_iterations;
+ solver_options.update_state_every_iteration = true;
+
+ // Terminate if the average symmetric distance is good enough.
+ TerminationCheckingCallback callback(x1, x2, options, H);
+ solver_options.callbacks.push_back(&callback);
+
+ // Run the solve.
+ ceres::Solver::Summary summary;
+ ceres::Solve(solver_options, &problem, &summary);
+
+ VLOG(1) << "Summary:\n" << summary.FullReport();
+
+ LG << "Final refined matrix:\n" << *H;
+
+ return summary.IsSolutionUsable();
+}
+
+/**
+ * x2 ~ A * x1
+ * x2^t * Hi * A *x1 = 0
+ * H1 = H2 = H3 =
+ * | 0 0 0 1| |-x2w| |0 0 0 0| | 0 | | 0 0 1 0| |-x2z|
+ * | 0 0 0 0| -> | 0 | |0 0 1 0| -> |-x2z| | 0 0 0 0| -> | 0 |
+ * | 0 0 0 0| | 0 | |0-1 0 0| | x2y| |-1 0 0 0| | x2x|
+ * |-1 0 0 0| | x2x| |0 0 0 0| | 0 | | 0 0 0 0| | 0 |
+ * H4 = H5 = H6 =
+ * |0 0 0 0| | 0 | | 0 1 0 0| |-x2y| |0 0 0 0| | 0 |
+ * |0 0 0 1| -> |-x2w| |-1 0 0 0| -> | x2x| |0 0 0 0| -> | 0 |
+ * |0 0 0 0| | 0 | | 0 0 0 0| | 0 | |0 0 0 1| |-x2w|
+ * |0-1 0 0| | x2y| | 0 0 0 0| | 0 | |0 0-1 0| | x2z|
+ * |a b c d|
+ * A = |e f g h|
+ * |i j k l|
+ * |m n o 1|
+ *
+ * x2^t * H1 * A *x1 = (-x2w*a +x2x*m )*x1x + (-x2w*b +x2x*n )*x1y + (-x2w*c +x2x*o )*x1z + (-x2w*d +x2x*1 )*x1w = 0
+ * x2^t * H2 * A *x1 = (-x2z*e +x2y*i )*x1x + (-x2z*f +x2y*j )*x1y + (-x2z*g +x2y*k )*x1z + (-x2z*h +x2y*l )*x1w = 0
+ * x2^t * H3 * A *x1 = (-x2z*a +x2x*i )*x1x + (-x2z*b +x2x*j )*x1y + (-x2z*c +x2x*k )*x1z + (-x2z*d +x2x*l )*x1w = 0
+ * x2^t * H4 * A *x1 = (-x2w*e +x2y*m )*x1x + (-x2w*f +x2y*n )*x1y + (-x2w*g +x2y*o )*x1z + (-x2w*h +x2y*1 )*x1w = 0
+ * x2^t * H5 * A *x1 = (-x2y*a +x2x*e )*x1x + (-x2y*b +x2x*f )*x1y + (-x2y*c +x2x*g )*x1z + (-x2y*d +x2x*h )*x1w = 0
+ * x2^t * H6 * A *x1 = (-x2w*i +x2z*m )*x1x + (-x2w*j +x2z*n )*x1y + (-x2w*k +x2z*o )*x1z + (-x2w*l +x2z*1 )*x1w = 0
+ *
+ * X = |a b c d e f g h i j k l m n o|^t
+*/
+bool Homography3DFromCorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ Mat4 *H,
+ double expected_precision) {
+ assert(4 == x1.rows());
+ assert(5 <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+ const int x = 0;
+ const int y = 1;
+ const int z = 2;
+ const int w = 3;
+ int n = x1.cols();
+ MatX15 L = Mat::Zero(n * 6, 15);
+ Mat b = Mat::Zero(n * 6, 1);
+ for (int i = 0; i < n; ++i) {
+ int j = 6 * i;
+ L(j, 0) = -x2(w, i) * x1(x, i); // a
+ L(j, 1) = -x2(w, i) * x1(y, i); // b
+ L(j, 2) = -x2(w, i) * x1(z, i); // c
+ L(j, 3) = -x2(w, i) * x1(w, i); // d
+ L(j, 12) = x2(x, i) * x1(x, i); // m
+ L(j, 13) = x2(x, i) * x1(y, i); // n
+ L(j, 14) = x2(x, i) * x1(z, i); // o
+ b(j, 0) = -x2(x, i) * x1(w, i);
+
+ ++j;
+ L(j, 4) = -x2(z, i) * x1(x, i); // e
+ L(j, 5) = -x2(z, i) * x1(y, i); // f
+ L(j, 6) = -x2(z, i) * x1(z, i); // g
+ L(j, 7) = -x2(z, i) * x1(w, i); // h
+ L(j, 8) = x2(y, i) * x1(x, i); // i
+ L(j, 9) = x2(y, i) * x1(y, i); // j
+ L(j, 10) = x2(y, i) * x1(z, i); // k
+ L(j, 11) = x2(y, i) * x1(w, i); // l
+
+ ++j;
+ L(j, 0) = -x2(z, i) * x1(x, i); // a
+ L(j, 1) = -x2(z, i) * x1(y, i); // b
+ L(j, 2) = -x2(z, i) * x1(z, i); // c
+ L(j, 3) = -x2(z, i) * x1(w, i); // d
+ L(j, 8) = x2(x, i) * x1(x, i); // i
+ L(j, 9) = x2(x, i) * x1(y, i); // j
+ L(j, 10) = x2(x, i) * x1(z, i); // k
+ L(j, 11) = x2(x, i) * x1(w, i); // l
+
+ ++j;
+ L(j, 4) = -x2(w, i) * x1(x, i); // e
+ L(j, 5) = -x2(w, i) * x1(y, i); // f
+ L(j, 6) = -x2(w, i) * x1(z, i); // g
+ L(j, 7) = -x2(w, i) * x1(w, i); // h
+ L(j, 12) = x2(y, i) * x1(x, i); // m
+ L(j, 13) = x2(y, i) * x1(y, i); // n
+ L(j, 14) = x2(y, i) * x1(z, i); // o
+ b(j, 0) = -x2(y, i) * x1(w, i);
+
+ ++j;
+ L(j, 0) = -x2(y, i) * x1(x, i); // a
+ L(j, 1) = -x2(y, i) * x1(y, i); // b
+ L(j, 2) = -x2(y, i) * x1(z, i); // c
+ L(j, 3) = -x2(y, i) * x1(w, i); // d
+ L(j, 4) = x2(x, i) * x1(x, i); // e
+ L(j, 5) = x2(x, i) * x1(y, i); // f
+ L(j, 6) = x2(x, i) * x1(z, i); // g
+ L(j, 7) = x2(x, i) * x1(w, i); // h
+
+ ++j;
+ L(j, 8) = -x2(w, i) * x1(x, i); // i
+ L(j, 9) = -x2(w, i) * x1(y, i); // j
+ L(j, 10) = -x2(w, i) * x1(z, i); // k
+ L(j, 11) = -x2(w, i) * x1(w, i); // l
+ L(j, 12) = x2(z, i) * x1(x, i); // m
+ L(j, 13) = x2(z, i) * x1(y, i); // n
+ L(j, 14) = x2(z, i) * x1(z, i); // o
+ b(j, 0) = -x2(z, i) * x1(w, i);
+ }
+ // Solve Lx=B
+ Vec h = L.fullPivLu().solve(b);
+ if ((L * h).isApprox(b, expected_precision)) {
+ Homography3DNormalizedParameterization<double>::To(h, H);
+ return true;
+ } else {
+ return false;
+ }
+}
+
+double SymmetricGeometricDistance(const Mat3 &H,
+ const Vec2 &x1,
+ const Vec2 &x2) {
+ Vec3 x(x1(0), x1(1), 1.0);
+ Vec3 y(x2(0), x2(1), 1.0);
+
+ Vec3 H_x = H * x;
+ Vec3 Hinv_y = H.inverse() * y;
+
+ H_x /= H_x(2);
+ Hinv_y /= Hinv_y(2);
+
+ return (H_x.head<2>() - y.head<2>()).squaredNorm() +
+ (Hinv_y.head<2>() - x.head<2>()).squaredNorm();
+}
+
+} // namespace libmv
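
As a small sanity check on the symmetric cost that the Ceres-based refinement above minimizes, the following toy example (illustrative only) evaluates SymmetricGeometricDistance for a pure-translation homography and a correspondence that is off by one unit in the second image:

    #include "libmv/multiview/homography.h"

    using namespace libmv;

    void SymmetricDistanceToyExample() {
      Mat3 H;
      H << 1, 0, 2,   // pure translation: x2 = x1 + (2, 3)
           0, 1, 3,
           0, 0, 1;
      Vec2 x1(0.0, 0.0);
      Vec2 x2(3.0, 3.0);  // the exact image would be (2, 3); off by (1, 0)
      double d = SymmetricGeometricDistance(H, x1, x2);
      // Forward term:  H * x1 = (2, 3),    ||x2 - (2, 3)||^2 = 1.
      // Backward term: H^-1 * x2 = (1, 0), ||x1 - (1, 0)||^2 = 1.
      // So d == 2.0 up to floating point rounding.
      (void)d;
    }
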
diff --git a/intern/libmv/libmv/multiview/homography.h b/intern/libmv/libmv/multiview/homography.h
new file mode 100644
index 00000000000..6d810c845ed
--- /dev/null
+++ b/intern/libmv/libmv/multiview/homography.h
@@ -0,0 +1,145 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_H_
+#define LIBMV_MULTIVIEW_HOMOGRAPHY_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+/**
+ * 2D homography transformation estimation.
+ *
+ * This function estimates the homography transformation from a list of 2D
+ * correspondences which represent either:
+ *
+ * - 3D points on a plane, with a general moving camera.
+ * - 3D points with a rotating camera (pure rotation).
+ * - 3D points + different planar projections
+ *
+ * \param x1 The first 2xN or 3xN matrix of euclidean or homogeneous points.
+ * \param x2 The second 2xN or 3xN matrix of euclidean or homogeneous points.
+ * \param H The 3x3 homography transformation matrix (8 dof) such that
+ * x2 = H * x1 with |a b c|
+ * H = |d e f|
+ * |g h 1|
+ * \param expected_precision The expected precision, used for instance
+ * to accept matrices that are only approximately homographies.
+ *
+ * \return True if the transformation estimation has succeeded.
+ * \note There must be at least 4 non-collinear points.
+ */
+bool Homography2DFromCorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ Mat3 *H,
+ double expected_precision =
+ EigenDouble::dummy_precision());
+
+/**
+ * This structure contains options that control how the homography
+ * estimation operates.
+ *
+ * Defaults should be suitable for a wide range of use cases, but
+ * better performance and accuracy might require tweaking.
+ */
+struct EstimateHomographyOptions {
+ // Default constructor which sets up options for generic usage.
+ EstimateHomographyOptions(void);
+
+ // Normalize correspondences before estimating the homography
+ // in order to increase estimation stability.
+ //
+ // Normalization will make it so the centroid of the correspondences
+ // is at the coordinate origin and their average distance from
+ // the origin is sqrt(2).
+ //
+ // See:
+ // - R. Hartley and A. Zisserman. Multiple View Geometry in Computer
+ // Vision. Cambridge University Press, second edition, 2003.
+ // - https://www.cs.ubc.ca/grads/resources/thesis/May09/Dubrofsky_Elan.pdf
+ bool use_normalization;
+
+ // Maximal number of iterations for the refinement step.
+ int max_num_iterations;
+
+ // Expected average of symmetric geometric distance between
+ // actual destination points and original ones transformed by
+ // estimated homography matrix.
+ //
+ // Refinement will finish as soon as the average symmetric
+ // geometric distance is less than or equal to this value.
+ //
+ // This distance is measured in the same units as the input points.
+ double expected_average_symmetric_distance;
+};
+
+/**
+ * 2D homography transformation estimation.
+ *
+ * This function estimates the homography transformation from a list of 2D
+ * correspondences by doing algebraic estimation first, followed by
+ * refinement of the result.
+ */
+bool EstimateHomography2DFromCorrespondences(
+ const Mat &x1,
+ const Mat &x2,
+ const EstimateHomographyOptions &options,
+ Mat3 *H);
+
+/**
+ * 3D Homography transformation estimation.
+ *
+ * This function can be used in order to estimate the homography transformation
+ * from a list of 3D correspondences.
+ *
+ * \param[in] x1 The first 4xN matrix of homogeneous points
+ * \param[in] x2 The second 4xN matrix of homogeneous points
+ * \param[out] H The 4x4 homography transformation matrix (15 dof) such that
+ * x2 = H * x1 with |a b c d|
+ * H = |e f g h|
+ * |i j k l|
+ * |m n o 1|
+ * \param[in] expected_precision The expected precision, used for instance
+ * to accept matrices that are only approximately homographies.
+ *
+ * \return true if the transformation estimation has succeeded
+ *
+ * \note Needs at least 5 non-coplanar points
+ * \note Point coordinates must be given in homogeneous coordinates
+ */
+bool Homography3DFromCorrespondencesLinear(const Mat &x1,
+ const Mat &x2,
+ Mat4 *H,
+ double expected_precision =
+ EigenDouble::dummy_precision());
+
+/**
+ * Calculate symmetric geometric cost:
+ *
+ * D(H * x1, x2)^2 + D(H^-1 * x2, x1)^2
+ */
+double SymmetricGeometricDistance(const Mat3 &H,
+ const Vec2 &x1,
+ const Vec2 &x2);
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_H_
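
A minimal calling sketch for the 2D estimator declared above, with the option values spelled out; they match the defaults set by the EstimateHomographyOptions constructor, and the wrapper name FitHomography is illustrative only. x1 and x2 are 2xN matrices of euclidean correspondences.

    #include "libmv/multiview/homography.h"

    using namespace libmv;

    bool FitHomography(const Mat &x1, const Mat &x2, Mat3 *H) {
      EstimateHomographyOptions options;
      options.use_normalization = true;                      // default
      options.max_num_iterations = 50;                       // default
      options.expected_average_symmetric_distance = 1e-16;   // default
      return EstimateHomography2DFromCorrespondences(x1, x2, options, H);
    }
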
diff --git a/intern/libmv/libmv/multiview/homography_error.h b/intern/libmv/libmv/multiview/homography_error.h
new file mode 100644
index 00000000000..f8b9d45e73c
--- /dev/null
+++ b/intern/libmv/libmv/multiview/homography_error.h
@@ -0,0 +1,248 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_
+#define LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_
+
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+namespace homography {
+namespace homography2D {
+
+ /**
+ * Structure for estimating the asymmetric error between a vector x2 and the
+ * transformed x1 such that
+ * Error = ||x2 - Psi(H * x1)||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ * \note It should be distributed as Chi-squared with k = 2.
+ */
+struct AsymmetricError {
+ /**
+ * Computes the asymmetric residuals between a set of 2D points x2 and the
+ * transformed 2D point set x1 such that
+ * Residuals_i = x2_i - Psi(H * x1_i)
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[out] dx A 2xN matrix of column vectors of residuals errors
+ */
+ static void Residuals(const Mat &H, const Mat &x1,
+ const Mat &x2, Mat2X *dx) {
+ dx->resize(2, x1.cols());
+ Mat3X x2h_est;
+ if (x1.rows() == 2)
+ x2h_est = H * EuclideanToHomogeneous(static_cast<Mat2X>(x1));
+ else
+ x2h_est = H * x1;
+ dx->row(0) = x2h_est.row(0).array() / x2h_est.row(2).array();
+ dx->row(1) = x2h_est.row(1).array() / x2h_est.row(2).array();
+ if (x2.rows() == 2)
+ *dx = x2 - *dx;
+ else
+ *dx = HomogeneousToEuclidean(static_cast<Mat3X>(x2)) - *dx;
+ }
+ /**
+ * Computes the asymmetric residuals between a 2D point x2 and the transformed
+ * 2D point x1 such that
+ * Residuals = x2 - Psi(H * x1)
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[out] dx A vector of size 2 of the residual error
+ */
+ static void Residuals(const Mat &H, const Vec &x1,
+ const Vec &x2, Vec2 *dx) {
+ Vec3 x2h_est;
+ if (x1.rows() == 2)
+ x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
+ else
+ x2h_est = H * x1;
+ if (x2.rows() == 2)
+ *dx = x2 - x2h_est.head<2>() / x2h_est[2];
+ else
+ *dx = HomogeneousToEuclidean(static_cast<Vec3>(x2)) -
+ x2h_est.head<2>() / x2h_est[2];
+ }
+ /**
+ * Computes the squared norm of the residuals between a set of 2D points x2
+ * and the transformed 2D point set x1 such that
+ * Error = || x2 - Psi(H * x1) ||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \return The squared norm of the asymmetric residuals errors
+ */
+ static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
+ Mat2X dx;
+ Residuals(H, x1, x2, &dx);
+ return dx.squaredNorm();
+ }
+ /**
+ * Computes the squared norm of the residuals between a 2D point x2 and the
+ * transformed 2D point x1 such that rms = || x2 - Psi(H * x1) ||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \return The squared norm of the asymmetric residual error
+ */
+ static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ Vec2 dx;
+ Residuals(H, x1, x2, &dx);
+ return dx.squaredNorm();
+ }
+};
+
+ /**
+ * Structure for estimating the symmetric error
+ * between a vector x2 and the transformed x1 such that
+ * Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ * \note It should be distributed as Chi-squared with k = 4.
+ */
+struct SymmetricError {
+ /**
+ * Computes the squared norm of the residuals between x2 and the
+ * transformed x1 such that
+ * Error = ||x2 - Psi(H * x1)||^2 + ||x1 - Psi(H^-1 * x2)||^2
+ * where Psi is the function that transforms homogeneous to euclidean coords.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \return The squared norm of the symmetric residuals errors
+ */
+ static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ // TODO(keir): This is awesomely inefficient because it does a 3x3
+ // inversion for each evaluation.
+ Mat3 Hinv = H.inverse();
+ return AsymmetricError::Error(H, x1, x2) +
+ AsymmetricError::Error(Hinv, x2, x1);
+ }
+ // TODO(julien) Add residuals function \see AsymmetricError
+};
+ /**
+ * Structure for estimating the algebraic error (cross product)
+ * between a vector x2 and the transformed x1 such that
+ * Error = ||[x2] * H * x1||^2
+ * where [x2] is the skew matrix of x2.
+ */
+struct AlgebraicError {
+ // TODO(julien) Make an AlgebraicError2Rows and AlgebraicError3Rows
+
+ /**
+ * Computes the algebraic residuals (cross product) between a set of 2D
+ * points x2 and the transformed 2D point set x1 such that
+ * [x2] * H * x1 where [x2] is the skew matrix of x2.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[out] dx A 3xN matrix of column vectors of residuals errors
+ */
+ static void Residuals(const Mat &H, const Mat &x1,
+ const Mat &x2, Mat3X *dx) {
+ dx->resize(3, x1.cols());
+ Vec3 col;
+ for (int i = 0; i < x1.cols(); ++i) {
+ Residuals(H, x1.col(i), x2.col(i), &col);
+ dx->col(i) = col;
+ }
+ }
+ /**
+ * Computes the algebraic residuals (cross product) between a 2D point x2
+ * and the transformed 2D point x1 such that
+ * [x2] * H * x1 where [x2] is the skew matrix of x2.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[out] dx A vector of size 3 of the residual error
+ */
+ static void Residuals(const Mat &H, const Vec &x1,
+ const Vec &x2, Vec3 *dx) {
+ Vec3 x2h_est;
+ if (x1.rows() == 2)
+ x2h_est = H * EuclideanToHomogeneous(static_cast<Vec2>(x1));
+ else
+ x2h_est = H * x1;
+ if (x2.rows() == 2)
+ *dx = SkewMat(EuclideanToHomogeneous(static_cast<Vec2>(x2))) * x2h_est;
+ else
+ *dx = SkewMat(x2) * x2h_est;
+ // TODO(julien) This is inefficient since it creates an
+ // identical 3x3 skew matrix for each evaluation.
+ }
+ /**
+ * Computes the squared norm of the algebraic residuals between a set of 2D
+ * points x2 and the transformed 2D point set x1 such that
+ * [x2] * H * x1 where [x2] is the skew matrix of x2.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \param[in] x2 A set of 2D points (2xN or 3xN matrix of column vectors).
+ * \return The squared norm of the asymmetric residuals errors
+ */
+ static double Error(const Mat &H, const Mat &x1, const Mat &x2) {
+ Mat3X dx;
+ Residuals(H, x1, x2, &dx);
+ return dx.squaredNorm();
+ }
+ /**
+ * Computes the squared norm of the algebraic residuals between a 2D point x2
+ * and the transformed 2D point x1 such that
+ * [x2] * H * x1 where [x2] is the skew matrix of x2.
+ *
+ * \param[in] H The 3x3 homography matrix.
+ * The estimated homography should approximately hold the condition y = H x.
+ * \param[in] x1 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \param[in] x2 A 2D point (vector of size 2 or 3 (euclidean/homogeneous))
+ * \return The squared norm of the asymmetric residual error
+ */
+ static double Error(const Mat &H, const Vec &x1, const Vec &x2) {
+ Vec3 dx;
+ Residuals(H, x1, x2, &dx);
+ return dx.squaredNorm();
+ }
+};
+// TODO(keir): Add error based on ideal points.
+
+} // namespace homography2D
+// TODO(julien) add homography3D errors
+} // namespace homography
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_ERRORS_H_
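
A short sketch of how the error functors above are meant to be called; the wrapper name ScoreCorrespondence and the idea of returning the forward residual for inspection are illustrative assumptions, only the individual calls reflect the declared API.

    #include "libmv/multiview/homography_error.h"

    using namespace libmv;
    using namespace libmv::homography::homography2D;

    // Returns the symmetric transfer cost of a single correspondence and,
    // optionally, the forward residual vector for inspection.
    double ScoreCorrespondence(const Mat3 &H, const Vec2 &x1, const Vec2 &x2,
                               Vec2 *forward_residual) {
      // Per-coordinate forward residuals: x2 - Psi(H * x1).
      AsymmetricError::Residuals(H, x1, x2, forward_residual);
      // Forward plus backward transfer error; Chi-squared with k = 4.
      return SymmetricError::Error(H, x1, x2);
    }
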
diff --git a/intern/libmv/libmv/multiview/homography_parameterization.h b/intern/libmv/libmv/multiview/homography_parameterization.h
new file mode 100644
index 00000000000..ca8fbd8066e
--- /dev/null
+++ b/intern/libmv/libmv/multiview/homography_parameterization.h
@@ -0,0 +1,91 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_
+#define LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+/** A parameterization of the 2D homography matrix that uses 8 parameters so
+ * that the matrix is normalized (H(2,2) == 1).
+ * The homography matrix H is built from a list of 8 parameters (a, b,...g, h)
+ * as follows
+ * |a b c|
+ * H = |d e f|
+ * |g h 1|
+ */
+template<typename T = double>
+class Homography2DNormalizedParameterization {
+ public:
+ typedef Eigen::Matrix<T, 8, 1> Parameters; // a, b, ... g, h
+ typedef Eigen::Matrix<T, 3, 3> Parameterized; // H
+
+ /// Convert from the 8 parameters to a H matrix.
+ static void To(const Parameters &p, Parameterized *h) {
+ *h << p(0), p(1), p(2),
+ p(3), p(4), p(5),
+ p(6), p(7), 1.0;
+ }
+
+ /// Convert from a H matrix to the 8 parameters.
+ static void From(const Parameterized &h, Parameters *p) {
+ *p << h(0, 0), h(0, 1), h(0, 2),
+ h(1, 0), h(1, 1), h(1, 2),
+ h(2, 0), h(2, 1);
+ }
+};
+
+/** A parameterization of the 2D homography matrix that uses 15 parameters so
+ * that the matrix is normalized (H(3,3) == 1).
+ * The homography matrix H is built from a list of 15 parameters (a, b,...n, o)
+ * as follows
+ * |a b c d|
+ * H = |e f g h|
+ * |i j k l|
+ * |m n o 1|
+ */
+template<typename T = double>
+class Homography3DNormalizedParameterization {
+ public:
+ typedef Eigen::Matrix<T, 15, 1> Parameters; // a, b, ... n, o
+ typedef Eigen::Matrix<T, 4, 4> Parameterized; // H
+
+ /// Convert from the 15 parameters to a H matrix.
+ static void To(const Parameters &p, Parameterized *h) {
+ *h << p(0), p(1), p(2), p(3),
+ p(4), p(5), p(6), p(7),
+ p(8), p(9), p(10), p(11),
+ p(12), p(13), p(14), 1.0;
+ }
+
+ /// Convert from a H matrix to the 15 parameters.
+ static void From(const Parameterized &h, Parameters *p) {
+ *p << h(0, 0), h(0, 1), h(0, 2), h(0, 3),
+ h(1, 0), h(1, 1), h(1, 2), h(1, 3),
+ h(2, 0), h(2, 1), h(2, 2), h(2, 3),
+ h(3, 0), h(3, 1), h(3, 2);
+ }
+};
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_HOMOGRAPHY_PARAMETERIZATION_H_
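
An illustrative round trip through the 8-parameter 2D parameterization above: From() drops H(2,2) and To() restores it as 1, so a homography that is already normalized to H(2,2) == 1 is reproduced exactly. The function name is illustrative only.

    #include "libmv/multiview/homography_parameterization.h"
    #include "libmv/numeric/numeric.h"

    using namespace libmv;

    void ParameterizationRoundTrip() {
      Mat3 H;
      H << 3, -1,  4,
           6, -2, -3,
           1, -3,  1;  // already normalized: H(2, 2) == 1
      Homography2DNormalizedParameterization<double>::Parameters p;  // 8x1
      Homography2DNormalizedParameterization<double>::From(H, &p);
      Mat3 H_restored;
      Homography2DNormalizedParameterization<double>::To(p, &H_restored);
      // H_restored is identical to H.
      (void)H_restored;
    }
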
diff --git a/intern/libmv/libmv/multiview/homography_test.cc b/intern/libmv/libmv/multiview/homography_test.cc
new file mode 100644
index 00000000000..8d7266e3d11
--- /dev/null
+++ b/intern/libmv/libmv/multiview/homography_test.cc
@@ -0,0 +1,261 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "testing/testing.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/homography.h"
+
+namespace {
+using namespace libmv;
+
+namespace {
+
+// Check whether the homography transform H actually transforms the
+// given vectors x1 to x2. Used to check the validity of a reconstructed
+// homography matrix.
+// TODO(sergey): Consider using this in all tests since the possible homography
+// matrix is not fixed to a single value and different-looking matrices
+// might actually correspond to the same exact transform.
+void CheckHomography2DTransform(const Mat3 &H,
+ const Mat &x1,
+ const Mat &x2) {
+ for (int i = 0; i < x2.cols(); ++i) {
+ Vec3 x2_expected = x2.col(i);
+ Vec3 x2_observed = H * x1.col(i);
+ x2_observed /= x2_observed(2);
+ EXPECT_MATRIX_NEAR(x2_expected, x2_observed, 1e-8);
+ }
+}
+
+} // namespace
+
+TEST(Homography2DTest, Rotation45AndTranslationXY) {
+ Mat x1(3, 4);
+ x1 << 0, 1, 0, 5,
+ 0, 0, 2, 3,
+ 1, 1, 1, 1;
+
+ double angle = 45.0;
+ Mat3 m;
+ m << cos(angle), -sin(angle), -2,
+ sin(angle), cos(angle), 5,
+ 0, 0, 1;
+
+ Mat x2 = x1;
+ // Transform points with the ground truth matrix.
+ for (int i = 0; i < x2.cols(); ++i)
+ x2.col(i) = m * x1.col(i);
+
+ Mat3 homography_mat;
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
+ VLOG(1) << "Mat Homography2D ";
+ VLOG(1) << homography_mat;
+ VLOG(1) << "Mat GT ";
+ VLOG(1) << m;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+}
+
+TEST(Homography2DTest, AffineGeneral4) {
+ // TODO(julien) find why it doesn't work with 4 points!!!
+ Mat x1(3, 4);
+ x1 << 0, 1, 0, 2,
+ 0, 0, 1, 2,
+ 1, 1, 1, 1;
+ Mat3 m;
+ m << 3, -1, 4,
+ 6, -2, -3,
+ 0, 0, 1;
+
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i) {
+ x2.col(i) = m * x1.col(i);
+ }
+
+ Mat3 homography_mat;
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
+ VLOG(1) << "Mat Homography2D";
+ VLOG(1) << homography_mat;
+ CheckHomography2DTransform(homography_mat, x1, x2);
+
+ // Test with euclidean coordinates
+ Mat eX1, eX2;
+ HomogeneousToEuclidean(x1, &eX1);
+ HomogeneousToEuclidean(x2, &eX2);
+ homography_mat.setIdentity();
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(eX1, eX2, &homography_mat));
+
+ VLOG(1) << "Mat Homography2D ";
+ VLOG(1) << homography_mat;
+ CheckHomography2DTransform(homography_mat, x1, x2);
+}
+
+TEST(Homography2DTest, AffineGeneral5) {
+ Mat x1(3, 5);
+ x1 << 0, 1, 0, 2, 5,
+ 0, 0, 1, 2, 2,
+ 1, 1, 1, 1, 1;
+ Mat3 m;
+ m << 3, -1, 4,
+ 6, -2, -3,
+ 0, 0, 1;
+
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i)
+ x2.col(i) = m * x1.col(i);
+
+ Mat3 homography_mat;
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
+
+ VLOG(1) << "Mat Homography2D ";
+ VLOG(1) << homography_mat;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+
+ // Test with euclidean coordinates
+ Mat eX1, eX2;
+ HomogeneousToEuclidean(x1, &eX1);
+ HomogeneousToEuclidean(x2, &eX2);
+ homography_mat.setIdentity();
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(eX1, eX2, &homography_mat));
+
+ VLOG(1) << "Mat Homography2D ";
+ VLOG(1) << homography_mat;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+}
+
+TEST(Homography2DTest, HomographyGeneral) {
+ Mat x1(3, 4);
+ x1 << 0, 1, 0, 5,
+ 0, 0, 2, 3,
+ 1, 1, 1, 1;
+ Mat3 m;
+ m << 3, -1, 4,
+ 6, -2, -3,
+ 1, -3, 1;
+
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i)
+ x2.col(i) = m * x1.col(i);
+
+ Mat3 homography_mat;
+ EXPECT_TRUE(Homography2DFromCorrespondencesLinear(x1, x2, &homography_mat));
+
+ VLOG(1) << "Mat Homography2D ";
+ VLOG(1) << homography_mat;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+}
+
+TEST(Homography3DTest, RotationAndTranslationXYZ) {
+ Mat x1(4, 5);
+ x1 << 0, 0, 1, 5, 2,
+ 0, 1, 2, 3, 5,
+ 0, 2, 0, 1, 5,
+ 1, 1, 1, 1, 1;
+ Mat4 M;
+ M.setIdentity();
+ /*
+ M = AngleAxisd(45.0, Vector3f::UnitZ())
+ * AngleAxisd(25.0, Vector3f::UnitX())
+ * AngleAxisd(5.0, Vector3f::UnitZ());*/
+
+ // Rotation on x + translation
+ double angle = 45.0;
+ Mat4 rot;
+ rot << 1, 0, 0, 1,
+ 0, cos(angle), -sin(angle), 3,
+ 0, sin(angle), cos(angle), -2,
+ 0, 0, 0, 1;
+ M *= rot;
+ // Rotation on y
+ angle = 25.0;
+ rot << cos(angle), 0, sin(angle), 0,
+ 0, 1, 0, 0,
+ -sin(angle), 0, cos(angle), 0,
+ 0, 0, 0, 1;
+ M *= rot;
+ // Rotation on z
+ angle = 5.0;
+ rot << cos(angle), -sin(angle), 0, 0,
+ sin(angle), cos(angle), 0, 0,
+ 0, 0, 1, 0,
+ 0, 0, 0, 1;
+ M *= rot;
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i) {
+ x2.col(i) = M * x1.col(i);
+ }
+
+ Mat4 homography_mat;
+ EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
+
+ VLOG(1) << "Mat Homography3D " << homography_mat;
+ VLOG(1) << "Mat GT " << M;
+ EXPECT_MATRIX_NEAR(homography_mat, M, 1e-8);
+}
+
+TEST(Homography3DTest, AffineGeneral) {
+ Mat x1(4, 5);
+ x1 << 0, 0, 1, 5, 2,
+ 0, 1, 2, 3, 5,
+ 0, 2, 0, 1, 5,
+ 1, 1, 1, 1, 1;
+ Mat4 m;
+ m << 3, -1, 4, 1,
+ 6, -2, -3, -6,
+ 1, 0, 1, 2,
+ 0, 0, 0, 1;
+
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i) {
+ x2.col(i) = m * x1.col(i);
+ }
+
+ Mat4 homography_mat;
+ EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
+ VLOG(1) << "Mat Homography3D ";
+ VLOG(1) << homography_mat;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+}
+
+TEST(Homography3DTest, HomographyGeneral) {
+ Mat x1(4, 5);
+ x1 << 0, 0, 1, 5, 2,
+ 0, 1, 2, 3, 5,
+ 0, 2, 0, 1, 5,
+ 1, 1, 1, 1, 1;
+ Mat4 m;
+ m << 3, -1, 4, 1,
+ 6, -2, -3, -6,
+ 1, 0, 1, 2,
+ -3, 1, 0, 1;
+
+ Mat x2 = x1;
+ for (int i = 0; i < x2.cols(); ++i) {
+ x2.col(i) = m * x1.col(i);
+ }
+
+ Mat4 homography_mat;
+ EXPECT_TRUE(Homography3DFromCorrespondencesLinear(x1, x2, &homography_mat));
+ VLOG(1) << "Mat Homography3D";
+ VLOG(1) << homography_mat;
+ EXPECT_MATRIX_NEAR(homography_mat, m, 1e-8);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/multiview/nviewtriangulation.h b/intern/libmv/libmv/multiview/nviewtriangulation.h
new file mode 100644
index 00000000000..f4614ab1a5c
--- /dev/null
+++ b/intern/libmv/libmv/multiview/nviewtriangulation.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Compute a 3D position of a point from several images of it. In particular,
+// compute the projective point X in R^4 such that x = PX.
+//
+// Algorithm is the standard DLT; for derivation see appendix of Keir's thesis.
+
+#ifndef LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
+#define LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
+// output, X, is a homogeneous four-vector.
+template<typename T>
+void NViewTriangulate(const Matrix<T, 2, Dynamic> &x,
+ const vector<Matrix<T, 3, 4> > &Ps,
+ Matrix<T, 4, 1> *X) {
+ int nviews = x.cols();
+ assert(nviews == Ps.size());
+
+ Matrix<T, Dynamic, Dynamic> design(3*nviews, 4 + nviews);
+ design.setConstant(0.0);
+ for (int i = 0; i < nviews; i++) {
+ design.template block<3, 4>(3*i, 0) = -Ps[i];
+ design(3*i + 0, 4 + i) = x(0, i);
+ design(3*i + 1, 4 + i) = x(1, i);
+ design(3*i + 2, 4 + i) = 1.0;
+ }
+ Matrix<T, Dynamic, 1> X_and_alphas;
+ Nullspace(&design, &X_and_alphas);
+ X->resize(4);
+ *X = X_and_alphas.head(4);
+}
+
+// x's are 2D coordinates (x,y,1) in each image; Ps are projective cameras. The
+// output, X, is a homogeneous four-vector.
+// This method uses the algebraic distance approximation.
+// Note that this method works better when the 2D points are normalized
+// with an isotropic normalization.
+template<typename T>
+void NViewTriangulateAlgebraic(const Matrix<T, 2, Dynamic> &x,
+ const vector<Matrix<T, 3, 4> > &Ps,
+ Matrix<T, 4, 1> *X) {
+ int nviews = x.cols();
+ assert(nviews == Ps.size());
+
+ Matrix<T, Dynamic, 4> design(2*nviews, 4);
+ for (int i = 0; i < nviews; i++) {
+ design.template block<2, 4>(2*i, 0) = SkewMatMinimal(x.col(i)) * Ps[i];
+ }
+ X->resize(4);
+ Nullspace(&design, X);
+}
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_NVIEWTRIANGULATION_H
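
For reference, a sketch of the linear system that NViewTriangulate assembles in its loop, with \tilde{x}_i = (x_i, y_i, 1)^\top the homogeneous image point in view i:

    \begin{bmatrix}
      -P_1   & \tilde{x}_1 &             &        \\
      -P_2   &             & \tilde{x}_2 &        \\
      \vdots &             &             & \ddots \\
      -P_n   &             &             & \tilde{x}_n
    \end{bmatrix}
    \begin{bmatrix} X \\ \alpha_1 \\ \alpha_2 \\ \vdots \\ \alpha_n \end{bmatrix} = 0,
    \qquad \text{i.e.}\qquad P_i X = \alpha_i \tilde{x}_i \ \text{for each view } i.

The nullspace vector returned by Nullspace() therefore contains X in its first four entries, followed by the per-view scale factors \alpha_i, which is why the code keeps only X_and_alphas.head(4).
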
diff --git a/intern/libmv/libmv/multiview/nviewtriangulation_test.cc b/intern/libmv/libmv/multiview/nviewtriangulation_test.cc
new file mode 100644
index 00000000000..5a4d8499753
--- /dev/null
+++ b/intern/libmv/libmv/multiview/nviewtriangulation_test.cc
@@ -0,0 +1,94 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/nviewtriangulation.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/test_data_sets.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+namespace {
+
+using namespace libmv;
+
+TEST(NViewTriangulate, FiveViews) {
+ int nviews = 5;
+ int npoints = 6;
+ NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
+
+ // Collect P matrices together.
+ vector<Mat34> Ps(nviews);
+ for (int j = 0; j < nviews; ++j) {
+ Ps[j] = d.P(j);
+ }
+
+ for (int i = 0; i < npoints; ++i) {
+ // Collect the image of point i in each frame.
+ Mat2X xs(2, nviews);
+ for (int j = 0; j < nviews; ++j) {
+ xs.col(j) = d.x[j].col(i);
+ }
+ Vec4 X;
+ NViewTriangulate(xs, Ps, &X);
+
+ // Check reprojection error. Should be nearly zero.
+ for (int j = 0; j < nviews; ++j) {
+ Vec3 x_reprojected = Ps[j]*X;
+ x_reprojected /= x_reprojected(2);
+ double error = (x_reprojected.head(2) - xs.col(j)).norm();
+ EXPECT_NEAR(error, 0.0, 1e-9);
+ }
+ }
+}
+
+TEST(NViewTriangulateAlgebraic, FiveViews) {
+ int nviews = 5;
+ int npoints = 6;
+ NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
+
+ // Collect P matrices together.
+ vector<Mat34> Ps(nviews);
+ for (int j = 0; j < nviews; ++j) {
+ Ps[j] = d.P(j);
+ }
+
+ for (int i = 0; i < npoints; ++i) {
+ // Collect the image of point i in each frame.
+ Mat2X xs(2, nviews);
+ for (int j = 0; j < nviews; ++j) {
+ xs.col(j) = d.x[j].col(i);
+ }
+ Vec4 X;
+ NViewTriangulateAlgebraic(xs, Ps, &X);
+
+ // Check reprojection error. Should be nearly zero.
+ for (int j = 0; j < nviews; ++j) {
+ Vec3 x_reprojected = Ps[j]*X;
+ x_reprojected /= x_reprojected(2);
+ double error = (x_reprojected.head<2>() - xs.col(j)).norm();
+ EXPECT_NEAR(error, 0.0, 1e-9);
+ }
+ }
+}
+} // namespace
diff --git a/intern/libmv/libmv/multiview/panography.cc b/intern/libmv/libmv/multiview/panography.cc
new file mode 100644
index 00000000000..b62802948c4
--- /dev/null
+++ b/intern/libmv/libmv/multiview/panography.cc
@@ -0,0 +1,125 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+
+#include "libmv/multiview/panography.h"
+
+namespace libmv {
+
+static bool Build_Minimal2Point_PolynomialFactor(
+ const Mat & x1, const Mat & x2,
+ double * P) { // P must be a double[4]
+ assert(2 == x1.rows());
+ assert(2 == x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ // Set up the variables of the input problem:
+ Vec xx1 = (x1.col(0)).transpose();
+ Vec yx1 = (x1.col(1)).transpose();
+
+ double a12 = xx1.dot(yx1);
+ Vec xx2 = (x2.col(0)).transpose();
+ Vec yx2 = (x2.col(1)).transpose();
+ double b12 = xx2.dot(yx2);
+
+ double a1 = xx1.squaredNorm();
+ double a2 = yx1.squaredNorm();
+
+ double b1 = xx2.squaredNorm();
+ double b2 = yx2.squaredNorm();
+
+ // Build the 3rd degree polynomial in F = f^2.
+ //
+ // f^6 * p + f^4 * q + f^2 * r + s = 0;
+ //
+ // Coefficients in ascending powers of alpha, i.e. P[N]*x^N.
+ // Run panography_coeffs.py to get the below coefficients.
+ P[0] = b1*b2*a12*a12-a1*a2*b12*b12;
+ P[1] = -2*a1*a2*b12+2*a12*b1*b2+b1*a12*a12+b2*a12*a12-a1*b12*b12-a2*b12*b12;
+ P[2] = b1*b2-a1*a2-2*a1*b12-2*a2*b12+2*a12*b1+2*a12*b2+a12*a12-b12*b12;
+ P[3] = b1+b2-2*b12-a1-a2+2*a12;
+
+ // If P[3] equals 0 we have ill-conditioned data.
+ return (P[3] != 0.0);
+}
+
+// This implements a minimal solution (2 points) for panoramic stitching:
+//
+// http://www.cs.ubc.ca/~mbrown/minimal/minimal.html
+//
+// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
+// Stitching. CVPR07.
+void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
+ vector<double> *fs) {
+ // Build Polynomial factor to get squared focal value.
+ double P[4];
+ Build_Minimal2Point_PolynomialFactor(x1, x2, &P[0]);
+
+ // Solve it by using F = f^2 and a Cubic polynomial solver
+ //
+ // F^3 * p + F^2 * q + F^1 * r + s = 0
+ //
+ double roots[3];
+ int num_roots = SolveCubicPolynomial(P, roots);
+ for (int i = 0; i < num_roots; ++i) {
+ if (roots[i] > 0.0) {
+ fs->push_back(sqrt(roots[i]));
+ }
+ }
+}
+
+// Compute the 3x3 rotation matrix that fits two 3D point clouds in the least
+// square sense. The method is from:
+//
+// K. Arun, T. Huang and D. Blostein. Least-squares fitting of two 3-D point
+// sets. IEEE Transactions on Pattern Analysis and Machine Intelligence,
+// 9:698-700, 1987.
+void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
+ const double focal,
+ Mat3 *R) {
+ assert(3 == x1.rows());
+ assert(2 <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ // Build simplified K matrix
+ Mat3 K(Mat3::Identity() * 1.0/focal);
+ K(2, 2)= 1.0;
+
+ // Build the correlation matrix; equation (22) in [1].
+ Mat3 C = Mat3::Zero();
+ for (int i = 0; i < x1.cols(); ++i) {
+ Mat r1i = (K * x1.col(i)).normalized();
+ Mat r2i = (K * x2.col(i)).normalized();
+ C += r2i * r1i.transpose();
+ }
+
+ // Solve for rotation. Equations (24) and (25) in [1].
+ Eigen::JacobiSVD<Mat> svd(C, Eigen::ComputeThinU | Eigen::ComputeThinV);
+ Mat3 scale = Mat3::Identity();
+ scale(2, 2) = ((svd.matrixU() * svd.matrixV().transpose()).determinant() > 0.0)
+ ? 1.0
+ : -1.0;
+
+ (*R) = svd.matrixU() * scale * svd.matrixV().transpose();
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/panography.h b/intern/libmv/libmv/multiview/panography.h
new file mode 100644
index 00000000000..6e87bd71304
--- /dev/null
+++ b/intern/libmv/libmv/multiview/panography.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+
+#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_H
+#define LIBMV_MULTIVIEW_PANOGRAPHY_H
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/poly.h"
+#include "libmv/base/vector.h"
+
+namespace libmv {
+
+// This implements a minimal solution (2 points) for panoramic stitching:
+//
+// http://www.cs.ubc.ca/~mbrown/minimal/minimal.html
+//
+// [1] M. Brown and R. Hartley and D. Nister. Minimal Solutions for Panoramic
+// Stitching. CVPR07.
+//
+// The 2-point algorithm solves for the rotation of the camera with a single
+// focal length (4 degrees of freedom).
+//
+// Compute from 1 to 3 possible focal lengths for 2 point correspondences.
+// Suppose that the cameras share the same optical center and focal lengths:
+//
+// Image 1 => H*x = x' => Image 2
+// x (u1j) x' (u2j)
+// a (u11) a' (u21)
+// b (u12) b' (u22)
+//
+// The return values are 1 to 3 possible values for the focal lengths such
+// that:
+//
+// [f 0 0]
+// K = [0 f 0]
+// [0 0 1]
+//
+void F_FromCorrespondance_2points(const Mat &x1, const Mat &x2,
+ vector<double> *fs);
+
+// Compute the 3x3 rotation matrix that fits two 3D point clouds in the
+// least-squares sense. The method is from:
+//
+// K. S. Arun, T. S. Huang and S. D. Blostein. Least-squares fitting of two
+// 3-D point sets. IEEE Transactions on Pattern Analysis and Machine
+// Intelligence, 9(5):698-700, 1987.
+//
+// Given the calibration matrices K1, K2, solve for the rotation from
+// corresponding image rays:
+//
+// R = arg min || x2 - R * x1 ||.
+//
+// In the case of panography, where both views share the same camera center,
+// the homography between them is
+//
+// H = K2 * R * K1.inverse();
+//
+// For the full explanation, see Section 8, "Solving for Rotation", in [1].
+//
+// Parameters:
+//
+// x1 : Point cloud A (3D coords)
+// x2 : Point cloud B (3D coords)
+//
+// [f 0 0]
+// K1 = [0 f 0]
+// [0 0 1]
+//
+// K2 (the same form as K1, but may have different f)
+//
+// Returns: A rotation matrix that minimizes
+//
+// R = arg min || x2 - R * x1 ||
+//
+void GetR_FixedCameraCenter(const Mat &x1, const Mat &x2,
+ const double focal,
+ Mat3 *R);
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_PANOGRAPHY_H
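A minimal usage sketch of the two declarations above, assuming the libmv Mat/Mat3/vector types and EuclideanToHomogeneous() from projection.h; the helper name is illustrative. It mirrors what TwoPointSolver::Solve in panography_kernel.cc (added below) does: one homography candidate per recovered focal length, composed as H = K * R * K^-1 for a shared camera center.

  #include "libmv/multiview/panography.h"
  #include "libmv/multiview/projection.h"

  libmv::vector<libmv::Mat3> HomographiesFromTwoPoints(const libmv::Mat &x1,
                                                       const libmv::Mat &x2) {
    // Candidate focal lengths from the 2-point minimal solver.
    libmv::vector<double> fs;
    libmv::F_FromCorrespondance_2points(x1, x2, &fs);

    // Homogeneous image points, as expected by GetR_FixedCameraCenter.
    libmv::Mat x1h, x2h;
    libmv::EuclideanToHomogeneous(x1, &x1h);
    libmv::EuclideanToHomogeneous(x2, &x2h);

    libmv::vector<libmv::Mat3> Hs;
    for (int i = 0; i < fs.size(); ++i) {
      libmv::Mat3 K = libmv::Mat3::Identity() * fs[i];
      K(2, 2) = 1.0;
      libmv::Mat3 R;
      libmv::GetR_FixedCameraCenter(x1h, x2h, fs[i], &R);
      Hs.push_back(K * R * K.inverse());  // Shared-center homography.
    }
    return Hs;
  }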
diff --git a/intern/libmv/libmv/multiview/panography_kernel.cc b/intern/libmv/libmv/multiview/panography_kernel.cc
new file mode 100644
index 00000000000..8fdc9e79aed
--- /dev/null
+++ b/intern/libmv/libmv/multiview/panography_kernel.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/panography_kernel.h"
+#include "libmv/multiview/panography.h"
+
+namespace libmv {
+namespace panography {
+namespace kernel {
+
+void TwoPointSolver::Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs) {
+ // Solve for the focal lengths.
+ vector<double> fs;
+ F_FromCorrespondance_2points(x1, x2, &fs);
+
+ // Then solve for the rotations and homographies.
+ Mat x1h, x2h;
+ EuclideanToHomogeneous(x1, &x1h);
+ EuclideanToHomogeneous(x2, &x2h);
+ for (int i = 0; i < fs.size(); ++i) {
+ Mat3 K1 = Mat3::Identity() * fs[i];
+ K1(2, 2) = 1.0;
+
+ Mat3 R;
+ GetR_FixedCameraCenter(x1h, x2h, fs[i], &R);
+ R /= R(2, 2);
+
+ (*Hs).push_back(K1 * R * K1.inverse());
+ }
+}
+
+} // namespace kernel
+} // namespace panography
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/panography_kernel.h b/intern/libmv/libmv/multiview/panography_kernel.h
new file mode 100644
index 00000000000..a6adbd54b20
--- /dev/null
+++ b/intern/libmv/libmv/multiview/panography_kernel.h
@@ -0,0 +1,54 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H
+#define LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H
+
+#include "libmv/base/vector.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/two_view_kernel.h"
+#include "libmv/multiview/homography_error.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+namespace panography {
+namespace kernel {
+
+struct TwoPointSolver {
+ enum { MINIMUM_SAMPLES = 2 };
+ static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *Hs);
+};
+
+typedef two_view::kernel::Kernel<
+ TwoPointSolver, homography::homography2D::AsymmetricError, Mat3>
+ UnnormalizedKernel;
+
+typedef two_view::kernel::Kernel<
+ two_view::kernel::NormalizedSolver<TwoPointSolver, UnnormalizerI>,
+ homography::homography2D::AsymmetricError,
+ Mat3>
+ Kernel;
+
+} // namespace kernel
+} // namespace panography
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_PANOGRAPHY_KERNEL_H
diff --git a/intern/libmv/libmv/multiview/panography_test.cc b/intern/libmv/libmv/multiview/panography_test.cc
new file mode 100644
index 00000000000..f6faf0f6022
--- /dev/null
+++ b/intern/libmv/libmv/multiview/panography_test.cc
@@ -0,0 +1,144 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/panography.h"
+#include "libmv/multiview/panography_kernel.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+namespace libmv {
+namespace {
+
+TEST(Panography, PrintSomeSharedFocalEstimationValues) {
+ Mat x1(2, 2), x2(2, 2);
+ x1<< 158, 78,
+ 124, 113;
+ x2<< 300, 214,
+ 125, 114;
+
+ // Normalize data (set principal point 0,0 and image border to 1.0).
+ x1.block<1, 2>(0, 0) /= 320;
+ x1.block<1, 2>(1, 0) /= 240;
+ x2.block<1, 2>(0, 0) /= 320;
+ x2.block<1, 2>(1, 0) /= 240;
+ x1+=Mat2::Constant(0.5);
+ x2+=Mat2::Constant(0.5);
+
+ vector<double> fs;
+ F_FromCorrespondance_2points(x1, x2, &fs);
+
+ // Assert we found a valid solution.
+ EXPECT_EQ(1, fs.size());
+ EXPECT_NEAR(1.01667, fs[0], 1e-3);
+}
+
+TEST(Panography, GetR_FixedCameraCenterWithIdentity) {
+ Mat x1(3, 3);
+ x1 << 0.5, 0.6, 0.7,
+ 0.5, 0.5, 0.4,
+ 10.0, 10.0, 10.0;
+
+ Mat3 R;
+ GetR_FixedCameraCenter(x1, x1, 1.0, &R);
+ R /= R(2, 2);
+ EXPECT_MATRIX_NEAR(Mat3::Identity(), R, 1e-8);
+ LOG(INFO) << "R \n" << R;
+}
+
+TEST(Panography, Homography_GetR_Test_PitchY30) {
+ int n = 3;
+
+ Mat x1(3, n);
+ x1 << 0.5, 0.6, 0.7,
+ 0.5, 0.5, 0.4,
+ 10, 10, 10;
+
+ Mat x2 = x1;
+ const double alpha = 30.0 * M_PI / 180.0;
+ Mat3 rotY;
+ rotY << cos(alpha), 0, -sin(alpha),
+ 0, 1, 0,
+ sin(alpha), 0, cos(alpha);
+
+ for (int i = 0; i < n; ++i) {
+ x2.block<3, 1>(0, i) = rotY * x1.col(i);
+ }
+
+ Mat3 R;
+ GetR_FixedCameraCenter(x1, x2, 1.0, &R);
+
+ // Assert that residuals are small enough
+ for (int i = 0; i < n; ++i) {
+ Vec residuals = (R * x1.col(i)) - x2.col(i);
+ EXPECT_NEAR(0, residuals.norm(), 1e-6);
+ }
+
+ // Check that the rotation angle about Y is the expected one.
+ // Use the Euler-angle approximation to recover the angle.
+ double pitch_y = asin(R(2, 0)) * 180.0 / M_PI;
+ EXPECT_NEAR(30, pitch_y, 1e-4);
+}
+
+TEST(MinimalPanoramic, Real_Case_Kernel) {
+ const int n = 2;
+ Mat x1(2, n); // From image 0.jpg
+ x1<< 158, 78,
+ 124, 113;
+
+ Mat x2(2, n); // From image 3.jpg
+ x2<< 300, 214,
+ 125, 114;
+
+ Mat3 Ground_TruthHomography;
+ Ground_TruthHomography<< 1, 0.02, 129.83,
+ -0.02, 1.012, 0.07823,
+ 0, 0, 1;
+
+ vector<Mat3> Hs;
+
+ libmv::panography::kernel::TwoPointSolver::Solve(x1, x2, &Hs);
+
+ LOG(INFO) << "Got " << Hs.size() << " solutions.";
+ for (int j = 0; j < Hs.size(); ++j) {
+ Mat3 H = Hs[j];
+
+ EXPECT_MATRIX_NEAR(H, Ground_TruthHomography, 1e-1);
+
+ Mat x1h, x2h;
+ EuclideanToHomogeneous(x1, &x1h);
+ EuclideanToHomogeneous(x2, &x2h);
+
+ // Assert that residuals are small enough
+ for (int i = 0; i < n; ++i) {
+ Vec x1p = H * x1h.col(i);
+ Vec residuals = x1p/x1p(2) - x2h.col(i);
+ EXPECT_MATRIX_NEAR_ZERO(residuals, 1e-5);
+ }
+ }
+}
+
+} // namespace
+} // namespace libmv
+
+// TODO(pmoulon): Add a real test case based on images.
+// TODO(pmoulon): Add a check for the actual f value for the real images.
+// TODO(pmoulon): Add a test that has some inliers and outliers.
diff --git a/intern/libmv/libmv/multiview/projection.cc b/intern/libmv/libmv/multiview/projection.cc
new file mode 100644
index 00000000000..f8bece3de68
--- /dev/null
+++ b/intern/libmv/libmv/multiview/projection.cc
@@ -0,0 +1,224 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P) {
+ P->block<3, 3>(0, 0) = R;
+ P->col(3) = t;
+ (*P) = K * (*P);
+}
+
+void KRt_From_P(const Mat34 &P, Mat3 *Kp, Mat3 *Rp, Vec3 *tp) {
+ // Decompose using the RQ decomposition, HZ A4.1.1, page 579.
+ Mat3 K = P.block(0, 0, 3, 3);
+
+ Mat3 Q;
+ Q.setIdentity();
+
+ // Set K(2,1) to zero.
+ if (K(2, 1) != 0) {
+ double c = -K(2, 2);
+ double s = K(2, 1);
+ double l = sqrt(c * c + s * s);
+ c /= l;
+ s /= l;
+ Mat3 Qx;
+ Qx << 1, 0, 0,
+ 0, c, -s,
+ 0, s, c;
+ K = K * Qx;
+ Q = Qx.transpose() * Q;
+ }
+ // Set K(2,0) to zero.
+ if (K(2, 0) != 0) {
+ double c = K(2, 2);
+ double s = K(2, 0);
+ double l = sqrt(c * c + s * s);
+ c /= l;
+ s /= l;
+ Mat3 Qy;
+ Qy << c, 0, s,
+ 0, 1, 0,
+ -s, 0, c;
+ K = K * Qy;
+ Q = Qy.transpose() * Q;
+ }
+ // Set K(1,0) to zero.
+ if (K(1, 0) != 0) {
+ double c = -K(1, 1);
+ double s = K(1, 0);
+ double l = sqrt(c * c + s * s);
+ c /= l;
+ s /= l;
+ Mat3 Qz;
+ Qz << c, -s, 0,
+ s, c, 0,
+ 0, 0, 1;
+ K = K * Qz;
+ Q = Qz.transpose() * Q;
+ }
+
+ Mat3 R = Q;
+
+ // Ensure that the diagonal is positive.
+ // TODO(pau) Change this to ensure that:
+ // - K(0,0) > 0
+ // - K(2,2) = 1
+ // - det(R) = 1
+ if (K(2, 2) < 0) {
+ K = -K;
+ R = -R;
+ }
+ if (K(1, 1) < 0) {
+ Mat3 S;
+ S << 1, 0, 0,
+ 0, -1, 0,
+ 0, 0, 1;
+ K = K * S;
+ R = S * R;
+ }
+ if (K(0, 0) < 0) {
+ Mat3 S;
+ S << -1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1;
+ K = K * S;
+ R = S * R;
+ }
+
+ // Compute translation.
+ Vec p(3);
+ p << P(0, 3), P(1, 3), P(2, 3);
+ // TODO(pau) This should be done by a SolveLinearSystem(A, b, &x) call.
+ // TODO(keir) use the eigen LU solver syntax...
+ Vec3 t = K.inverse() * p;
+
+ // scale K so that K(2,2) = 1
+ K = K / K(2, 2);
+
+ *Kp = K;
+ *Rp = R;
+ *tp = t;
+}
+
+void ProjectionShiftPrincipalPoint(const Mat34 &P,
+ const Vec2 &principal_point,
+ const Vec2 &principal_point_new,
+ Mat34 *P_new) {
+ Mat3 T;
+ T << 1, 0, principal_point_new(0) - principal_point(0),
+ 0, 1, principal_point_new(1) - principal_point(1),
+ 0, 0, 1;
+ *P_new = T * P;
+}
+
+void ProjectionChangeAspectRatio(const Mat34 &P,
+ const Vec2 &principal_point,
+ double aspect_ratio,
+ double aspect_ratio_new,
+ Mat34 *P_new) {
+ Mat3 T;
+ T << 1, 0, 0,
+ 0, aspect_ratio_new / aspect_ratio, 0,
+ 0, 0, 1;
+ Mat34 P_temp;
+
+ ProjectionShiftPrincipalPoint(P, principal_point, Vec2(0, 0), &P_temp);
+ P_temp = T * P_temp;
+ ProjectionShiftPrincipalPoint(P_temp, Vec2(0, 0), principal_point, P_new);
+}
+
+void HomogeneousToEuclidean(const Mat &H, Mat *X) {
+ int d = H.rows() - 1;
+ int n = H.cols();
+ X->resize(d, n);
+ for (size_t i = 0; i < n; ++i) {
+ double h = H(d, i);
+ for (int j = 0; j < d; ++j) {
+ (*X)(j, i) = H(j, i) / h;
+ }
+ }
+}
+
+void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e) {
+ e->resize(2, h.cols());
+ e->row(0) = h.row(0).array() / h.row(2).array();
+ e->row(1) = h.row(1).array() / h.row(2).array();
+}
+void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e) {
+ e->resize(3, h.cols());
+ e->row(0) = h.row(0).array() / h.row(3).array();
+ e->row(1) = h.row(1).array() / h.row(3).array();
+ e->row(2) = h.row(2).array() / h.row(3).array();
+}
+
+void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X) {
+ double w = H(2);
+ *X << H(0) / w, H(1) / w;
+}
+
+void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X) {
+ double w = H(3);
+ *X << H(0) / w, H(1) / w, H(2) / w;
+}
+
+void EuclideanToHomogeneous(const Mat &X, Mat *H) {
+ int d = X.rows();
+ int n = X.cols();
+ H->resize(d + 1, n);
+ H->block(0, 0, d, n) = X;
+ H->row(d).setOnes();
+}
+
+void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H) {
+ *H << X(0), X(1), 1;
+}
+
+void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H) {
+ *H << X(0), X(1), X(2), 1;
+}
+
+// TODO(julien) Call conditioning.h/ApplyTransformationToPoints ?
+void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n) {
+ Mat3X x_image_h;
+ EuclideanToHomogeneous(x, &x_image_h);
+ Mat3X x_camera_h = K.inverse() * x_image_h;
+ HomogeneousToEuclidean(x_camera_h, n);
+}
+
+void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n) {
+ Mat3X x_camera_h = K.inverse() * x;
+ HomogeneousToEuclidean(x_camera_h, n);
+}
+
+double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X) {
+ return (R*X)(2) + t(2);
+}
+
+double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X) {
+ Vec3 Xe = X.head<3>() / X(3);
+ return Depth(R, t, Xe);
+}
+
+} // namespace libmv
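The RQ decomposition in KRt_From_P above zeroes the lower-triangular entries of K one Givens rotation at a time. A standalone sketch of a single step (plain Eigen, illustrative values) shows why post-multiplying by the Qx built from K(2,1) and K(2,2) annihilates K(2,1):

  #include <cmath>
  #include <cstdio>
  #include <Eigen/Dense>

  int main() {
    Eigen::Matrix3d K;
    K << 10,  1, 30,
          0, 20, 40,
          0,  3,  5;  // K(2, 1) deliberately non-zero.

    // Same choice of c and s as in KRt_From_P.
    double c = -K(2, 2), s = K(2, 1);
    double l = std::sqrt(c * c + s * s);
    c /= l;
    s /= l;

    Eigen::Matrix3d Qx;
    Qx << 1, 0,  0,
          0, c, -s,
          0, s,  c;

    // Row 2 of K dotted with column 1 of Qx is K(2,1)*c + K(2,2)*s = 0.
    Eigen::Matrix3d Kp = K * Qx;
    std::printf("K(2, 1) after the Givens step: %g\n", Kp(2, 1));
    return 0;
  }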
diff --git a/intern/libmv/libmv/multiview/projection.h b/intern/libmv/libmv/multiview/projection.h
new file mode 100644
index 00000000000..3220bc2dbbc
--- /dev/null
+++ b/intern/libmv/libmv/multiview/projection.h
@@ -0,0 +1,231 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_PROJECTION_H_
+#define LIBMV_MULTIVIEW_PROJECTION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void P_From_KRt(const Mat3 &K, const Mat3 &R, const Vec3 &t, Mat34 *P);
+void KRt_From_P(const Mat34 &P, Mat3 *K, Mat3 *R, Vec3 *t);
+
+// Applies a change of basis to the image coordinates of the projection matrix
+// so that the principal point becomes principal_point_new.
+void ProjectionShiftPrincipalPoint(const Mat34 &P,
+ const Vec2 &principal_point,
+ const Vec2 &principal_point_new,
+ Mat34 *P_new);
+
+// Applies a change of basis to the image coordinates of the projection matrix
+// so that the aspect ratio becomes aspect_ratio_new. This is done by
+// stretching the y axis. The aspect ratio is defined as the ratio between
+// the focal lengths of the y and the x axis.
+void ProjectionChangeAspectRatio(const Mat34 &P,
+ const Vec2 &principal_point,
+ double aspect_ratio,
+ double aspect_ratio_new,
+ Mat34 *P_new);
+
+void HomogeneousToEuclidean(const Mat &H, Mat *X);
+void HomogeneousToEuclidean(const Mat3X &h, Mat2X *e);
+void HomogeneousToEuclidean(const Mat4X &h, Mat3X *e);
+void HomogeneousToEuclidean(const Vec3 &H, Vec2 *X);
+void HomogeneousToEuclidean(const Vec4 &H, Vec3 *X);
+inline Vec2 HomogeneousToEuclidean(const Vec3 &h) {
+ return h.head<2>() / h(2);
+}
+inline Vec3 HomogeneousToEuclidean(const Vec4 &h) {
+ return h.head<3>() / h(3);
+}
+inline Mat2X HomogeneousToEuclidean(const Mat3X &h) {
+ Mat2X e(2, h.cols());
+ e.row(0) = h.row(0).array() / h.row(2).array();
+ e.row(1) = h.row(1).array() / h.row(2).array();
+ return e;
+}
+
+void EuclideanToHomogeneous(const Mat &X, Mat *H);
+inline Mat3X EuclideanToHomogeneous(const Mat2X &x) {
+ Mat3X h(3, x.cols());
+ h.block(0, 0, 2, x.cols()) = x;
+ h.row(2).setOnes();
+ return h;
+}
+inline void EuclideanToHomogeneous(const Mat2X &x, Mat3X *h) {
+ h->resize(3, x.cols());
+ h->block(0, 0, 2, x.cols()) = x;
+ h->row(2).setOnes();
+}
+inline Mat4X EuclideanToHomogeneous(const Mat3X &x) {
+ Mat4X h(4, x.cols());
+ h.block(0, 0, 3, x.cols()) = x;
+ h.row(3).setOnes();
+ return h;
+}
+inline void EuclideanToHomogeneous(const Mat3X &x, Mat4X *h) {
+ h->resize(4, x.cols());
+ h->block(0, 0, 3, x.cols()) = x;
+ h->row(3).setOnes();
+}
+void EuclideanToHomogeneous(const Vec2 &X, Vec3 *H);
+void EuclideanToHomogeneous(const Vec3 &X, Vec4 *H);
+inline Vec3 EuclideanToHomogeneous(const Vec2 &x) {
+ return Vec3(x(0), x(1), 1);
+}
+inline Vec4 EuclideanToHomogeneous(const Vec3 &x) {
+ return Vec4(x(0), x(1), x(2), 1);
+}
+// Conversion from image coordinates to normalized camera coordinates
+void EuclideanToNormalizedCamera(const Mat2X &x, const Mat3 &K, Mat2X *n);
+void HomogeneousToNormalizedCamera(const Mat3X &x, const Mat3 &K, Mat2X *n);
+
+inline Vec2 Project(const Mat34 &P, const Vec3 &X) {
+ Vec4 HX;
+ HX << X, 1.0;
+ Vec3 hx = P * HX;
+ return hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Vec4 &X, Vec3 *x) {
+ *x = P * X;
+}
+
+inline void Project(const Mat34 &P, const Vec4 &X, Vec2 *x) {
+ Vec3 hx = P * X;
+ *x = hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Vec3 &X, Vec3 *x) {
+ Vec4 HX;
+ HX << X, 1.0;
+ Project(P, HX, x);
+}
+
+inline void Project(const Mat34 &P, const Vec3 &X, Vec2 *x) {
+ Vec3 hx;
+ Project(P, X, &hx);
+ *x = hx.head<2>() / hx(2);
+}
+
+inline void Project(const Mat34 &P, const Mat4X &X, Mat2X *x) {
+ x->resize(2, X.cols());
+ for (int c = 0; c < X.cols(); ++c) {
+ Vec3 hx = P * X.col(c);
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat4X &X) {
+ Mat2X x;
+ Project(P, X, &x);
+ return x;
+}
+
+inline void Project(const Mat34 &P, const Mat3X &X, Mat2X *x) {
+ x->resize(2, X.cols());
+ for (int c = 0; c < X.cols(); ++c) {
+ Vec4 HX;
+ HX << X.col(c), 1.0;
+ Vec3 hx = P * HX;
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline void Project(const Mat34 &P, const Mat3X &X, const Vecu &ids, Mat2X *x) {
+ x->resize(2, ids.size());
+ Vec4 HX;
+ Vec3 hx;
+ for (int c = 0; c < ids.size(); ++c) {
+ HX << X.col(ids[c]), 1.0;
+ hx = P * HX;
+ x->col(c) = hx.head<2>() / hx(2);
+ }
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat3X &X) {
+ Mat2X x(2, X.cols());
+ Project(P, X, &x);
+ return x;
+}
+
+inline Mat2X Project(const Mat34 &P, const Mat3X &X, const Vecu &ids) {
+ Mat2X x(2, ids.size());
+ Project(P, X, ids, &x);
+ return x;
+}
+
+double Depth(const Mat3 &R, const Vec3 &t, const Vec3 &X);
+double Depth(const Mat3 &R, const Vec3 &t, const Vec4 &X);
+
+/**
+* Returns true if the homogeneous 3D point X is in front of
+* the camera P.
+*/
+inline bool isInFrontOfCamera(const Mat34 &P, const Vec4 &X) {
+ double condition_1 = P.row(2).dot(X) * X[3];
+ double condition_2 = X[2] * X[3];
+ return condition_1 > 0 && condition_2 > 0;
+}
+
+inline bool isInFrontOfCamera(const Mat34 &P, const Vec3 &X) {
+ Vec4 X_homo;
+ X_homo.segment<3>(0) = X;
+ X_homo(3) = 1;
+ return isInFrontOfCamera( P, X_homo);
+}
+
+/**
+* Transforms a 2D point from pixel image coordinates to a 2D point in
+* normalized image coordinates.
+*/
+inline Vec2 ImageToNormImageCoordinates(Mat3 &Kinverse, Vec2 &x) {
+ Vec3 x_h = Kinverse*EuclideanToHomogeneous(x);
+ return HomogeneousToEuclidean( x_h );
+}
+
+/// Estimates the root mean square error (2D)
+inline double RootMeanSquareError(const Mat2X &x_image,
+ const Mat4X &X_world,
+ const Mat34 &P) {
+ size_t num_points = x_image.cols();
+ Mat2X dx = Project(P, X_world) - x_image;
+ return dx.norm() / num_points;
+}
+
+/// Estimates the root mean square error (2D)
+inline double RootMeanSquareError(const Mat2X &x_image,
+ const Mat3X &X_world,
+ const Mat3 &K,
+ const Mat3 &R,
+ const Vec3 &t) {
+ Mat34 P;
+ P_From_KRt(K, R, t, &P);
+ size_t num_points = x_image.cols();
+ Mat2X dx = Project(P, X_world) - x_image;
+ return dx.norm() / num_points;
+}
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_PROJECTION_H_
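A small usage sketch of the declarations above (the wrapper name and flow are illustrative, not part of the header): build P from K, R, t, reject points behind the camera with Depth(), and project the rest.

  #include "libmv/multiview/projection.h"

  bool ProjectIfVisible(const libmv::Mat3 &K, const libmv::Mat3 &R,
                        const libmv::Vec3 &t, const libmv::Vec3 &X,
                        libmv::Vec2 *x) {
    libmv::Mat34 P;
    libmv::P_From_KRt(K, R, t, &P);
    if (libmv::Depth(R, t, X) <= 0) {
      return false;  // The point is behind the camera.
    }
    *x = libmv::Project(P, X);  // Euclidean pixel coordinates.
    return true;
  }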
diff --git a/intern/libmv/libmv/multiview/projection_test.cc b/intern/libmv/libmv/multiview/projection_test.cc
new file mode 100644
index 00000000000..c060bfb0681
--- /dev/null
+++ b/intern/libmv/libmv/multiview/projection_test.cc
@@ -0,0 +1,115 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+namespace {
+using namespace libmv;
+
+TEST(Projection, P_From_KRt) {
+ Mat3 K, Kp;
+ K << 10, 1, 30,
+ 0, 20, 40,
+ 0, 0, 1;
+
+ Mat3 R, Rp;
+ R << 1, 0, 0,
+ 0, 1, 0,
+ 0, 0, 1;
+
+ Vec3 t, tp;
+ t << 1, 2, 3;
+
+ Mat34 P;
+ P_From_KRt(K, R, t, &P);
+ KRt_From_P(P, &Kp, &Rp, &tp);
+
+ EXPECT_MATRIX_NEAR(K, Kp, 1e-8);
+ EXPECT_MATRIX_NEAR(R, Rp, 1e-8);
+ EXPECT_MATRIX_NEAR(t, tp, 1e-8);
+
+ // TODO(keir): Change the code to ensure det(R) == 1, which is not currently
+ // the case. Also add a test for that here.
+}
+
+Vec4 GetRandomPoint() {
+ Vec4 X;
+ X.setRandom();
+ X(3) = 1;
+ return X;
+}
+
+TEST(Projection, isInFrontOfCamera) {
+ Mat34 P;
+ P << 1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0;
+
+ Vec4 X_front = GetRandomPoint();
+ Vec4 X_back = GetRandomPoint();
+ X_front(2) = 10; // Any point with positive Z is in front of the camera.
+ X_back(2) = -10; // Any point with negative Z is behind the camera.
+
+ bool res_front = isInFrontOfCamera(P, X_front);
+ bool res_back = isInFrontOfCamera(P, X_back);
+
+ EXPECT_EQ(true, res_front);
+ EXPECT_EQ(false, res_back);
+}
+
+TEST(AutoCalibration, ProjectionShiftPrincipalPoint) {
+ Mat34 P1, P2;
+ P1 << 1, 0, 0, 0,
+ 0, 1, 0, 0,
+ 0, 0, 1, 0;
+ P2 << 1, 0, 3, 0,
+ 0, 1, 4, 0,
+ 0, 0, 1, 0;
+ Mat34 P1_computed, P2_computed;
+ ProjectionShiftPrincipalPoint(P1, Vec2(0, 0), Vec2(3, 4), &P2_computed);
+ ProjectionShiftPrincipalPoint(P2, Vec2(3, 4), Vec2(0, 0), &P1_computed);
+
+ EXPECT_MATRIX_EQ(P1, P1_computed);
+ EXPECT_MATRIX_EQ(P2, P2_computed);
+}
+
+TEST(AutoCalibration, ProjectionChangeAspectRatio) {
+ Mat34 P1, P2;
+ P1 << 1, 0, 3, 0,
+ 0, 1, 4, 0,
+ 0, 0, 1, 0;
+ P2 << 1, 0, 3, 0,
+ 0, 2, 4, 0,
+ 0, 0, 1, 0;
+ Mat34 P1_computed, P2_computed;
+ ProjectionChangeAspectRatio(P1, Vec2(3, 4), 1, 2, &P2_computed);
+ ProjectionChangeAspectRatio(P2, Vec2(3, 4), 2, 1, &P1_computed);
+
+ EXPECT_MATRIX_EQ(P1, P1_computed);
+ EXPECT_MATRIX_EQ(P2, P2_computed);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/multiview/resection.h b/intern/libmv/libmv/multiview/resection.h
new file mode 100644
index 00000000000..c142d6deeb2
--- /dev/null
+++ b/intern/libmv/libmv/multiview/resection.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Compute the projection matrix from a set of 3D points X and their
+// projections x = PX in 2D. This is useful for resectioning a camera against
+// an already reconstructed point cloud.
+//
+// The algorithm is the standard DLT as described in Hartley & Zisserman,
+// page 179.
+
+#ifndef LIBMV_MULTIVIEW_RESECTION_H
+#define LIBMV_MULTIVIEW_RESECTION_H
+
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+namespace resection {
+
+// x's are 2D image coordinates, (x, y), and X's are homogeneous four-vectors.
+template<typename T>
+void Resection(const Matrix<T, 2, Dynamic> &x,
+ const Matrix<T, 4, Dynamic> &X,
+ Matrix<T, 3, 4> *P) {
+ int N = x.cols();
+ assert(X.cols() == N);
+
+ Matrix<T, Dynamic, 12> design(2*N, 12);
+ design.setZero();
+ for (int i = 0; i < N; i++) {
+ T xi = x(0, i);
+ T yi = x(1, i);
+ // See equation (7.2) on page 179 of H&Z.
+ design.template block<1, 4>(2*i, 4) = -X.col(i).transpose();
+ design.template block<1, 4>(2*i, 8) = yi*X.col(i).transpose();
+ design.template block<1, 4>(2*i + 1, 0) = X.col(i).transpose();
+ design.template block<1, 4>(2*i + 1, 8) = -xi*X.col(i).transpose();
+ }
+ Matrix<T, 12, 1> p;
+ Nullspace(&design, &p);
+ reshape(p, 3, 4, P);
+}
+
+} // namespace resection
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_RESECTION_H
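A usage sketch under the assumptions stated in the header comment (at least six 2D-3D correspondences for the 11-degree-of-freedom DLT; the wrapper name is illustrative). World points are passed as homogeneous 4-vectors, and the recovered P is defined only up to scale.

  #include "libmv/multiview/resection.h"

  libmv::Mat34 CameraFromCorrespondences(const libmv::Mat2X &x,    // 2 x N pixels.
                                         const libmv::Mat3X &X) {  // 3 x N points.
    libmv::Mat4X X_homogeneous(4, X.cols());
    X_homogeneous.block(0, 0, 3, X.cols()) = X;
    X_homogeneous.row(3).setOnes();

    libmv::Mat34 P;
    libmv::resection::Resection(x, X_homogeneous, &P);
    return P;  // Defined only up to scale.
  }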
diff --git a/intern/libmv/libmv/multiview/resection_test.cc b/intern/libmv/libmv/multiview/resection_test.cc
new file mode 100644
index 00000000000..368e2281cfa
--- /dev/null
+++ b/intern/libmv/libmv/multiview/resection_test.cc
@@ -0,0 +1,61 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/resection.h"
+#include "libmv/multiview/test_data_sets.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+#include "libmv/logging/logging.h"
+
+namespace {
+
+using namespace libmv;
+using namespace libmv::resection;
+
+TEST(Resection, ThreeViews) {
+ int nviews = 5;
+ int npoints = 6;
+ NViewDataSet d = NRealisticCamerasFull(nviews, npoints);
+ for (int i = 0; i < nviews; ++i) {
+ Mat4X X(4, npoints);
+ X.block(0, 0, 3, npoints) = d.X;
+ X.row(3).setOnes();
+ const Mat2X &x = d.x[i];
+ Mat34 P;
+ Resection(x, X, &P);
+ Mat34 P_expected = d.P(i);
+
+ // Because the P matrices are only defined up to scale, normalize them
+ // (and fix the overall sign) before comparing.
+ P_expected *= 1/P_expected.array().abs().sum();
+ P *= 1/P.array().abs().sum();
+ if (!((P(0, 0) > 0 && P_expected(0, 0) > 0) ||
+ (P(0, 0) < 0 && P_expected(0, 0) < 0))) {
+ P *= -1;
+ }
+
+ EXPECT_MATRIX_NEAR(P_expected, P, 1e-9);
+ }
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/multiview/test_data_sets.cc b/intern/libmv/libmv/multiview/test_data_sets.cc
new file mode 100644
index 00000000000..110bde6f762
--- /dev/null
+++ b/intern/libmv/libmv/multiview/test_data_sets.cc
@@ -0,0 +1,196 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/test_data_sets.h"
+
+#include <cmath>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/fundamental.h"
+
+namespace libmv {
+
+TwoViewDataSet TwoRealisticCameras(bool same_K) {
+ TwoViewDataSet d;
+
+ d.K1 << 320, 0, 160,
+ 0, 320, 120,
+ 0, 0, 1;
+ if (same_K) {
+ d.K2 = d.K1;
+ } else {
+ d.K2 << 360, 0, 170,
+ 0, 360, 110,
+ 0, 0, 1;
+ }
+ d.R1 = RotationAroundZ(-0.1);
+ d.R2 = RotationAroundX(-0.1);
+ d.t1 << 1, 1, 10;
+ d.t2 << -2, -1, 10;
+ P_From_KRt(d.K1, d.R1, d.t1, &d.P1);
+ P_From_KRt(d.K2, d.R2, d.t2, &d.P2);
+
+ FundamentalFromProjections(d.P1, d.P2, &d.F);
+
+ d.X.resize(3, 30);
+ d.X.setRandom();
+
+ Project(d.P1, d.X, &d.x1);
+ Project(d.P2, d.X, &d.x2);
+
+ return d;
+}
+
+nViewDatasetConfigator::nViewDatasetConfigator(int fx, int fy,
+ int cx, int cy,
+ double distance,
+ double jitter_amount) {
+ _fx = fx;
+ _fy = fy;
+ _cx = cx;
+ _cy = cy;
+ _dist = distance;
+ _jitter_amount = jitter_amount;
+}
+
+NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
+ const nViewDatasetConfigator config) {
+ NViewDataSet d;
+ d.n = nviews;
+ d.K.resize(nviews);
+ d.R.resize(nviews);
+ d.t.resize(nviews);
+ d.C.resize(nviews);
+ d.x.resize(nviews);
+ d.x_ids.resize(nviews);
+
+ d.X.resize(3, npoints);
+ d.X.setRandom();
+ d.X *= 0.6;
+
+ Vecu all_point_ids(npoints);
+ for (size_t j = 0; j < npoints; ++j)
+ all_point_ids[j] = j;
+
+ for (size_t i = 0; i < nviews; ++i) {
+ Vec3 camera_center, t, jitter, lookdir;
+
+ double theta = i * 2 * M_PI / nviews;
+ camera_center << sin(theta), 0.0, cos(theta);
+ camera_center *= config._dist;
+ d.C[i] = camera_center;
+
+ jitter.setRandom();
+ jitter *= config._jitter_amount / camera_center.norm();
+ lookdir = -camera_center + jitter;
+
+ d.K[i] << config._fx, 0, config._cx,
+ 0, config._fy, config._cy,
+ 0, 0, 1;
+ d.R[i] = LookAt(lookdir);
+ d.t[i] = -d.R[i] * camera_center;
+ d.x[i] = Project(d.P(i), d.X);
+ d.x_ids[i] = all_point_ids;
+ }
+ return d;
+}
+
+
+NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
+ float view_ratio, unsigned min_projections,
+ const nViewDatasetConfigator config) {
+ assert(view_ratio <= 1.0);
+ assert(view_ratio > 0.0);
+ assert(min_projections <= npoints);
+ NViewDataSet d;
+ d.n = nviews;
+ d.K.resize(nviews);
+ d.R.resize(nviews);
+ d.t.resize(nviews);
+ d.C.resize(nviews);
+ d.x.resize(nviews);
+ d.x_ids.resize(nviews);
+
+ d.X.resize(3, npoints);
+ d.X.setRandom();
+ d.X *= 0.6;
+
+ Mat visibility(nviews, npoints);
+ visibility.setZero();
+ Mat randoms(nviews, npoints);
+ randoms.setRandom();
+ randoms = (randoms.array() + 1)/2.0;
+ unsigned num_visibles = 0;
+ for (size_t i = 0; i < nviews; ++i) {
+ num_visibles = 0;
+ for (size_t j = 0; j < npoints; j++) {
+ if (randoms(i, j) <= view_ratio) {
+ visibility(i, j) = true;
+ num_visibles++;
+ }
+ }
+ if (num_visibles < min_projections) {
+ // Force additional projections to be visible until the minimum is reached.
+ unsigned num_projections_to_add = min_projections - num_visibles;
+ for (size_t j = 0; j < npoints && num_projections_to_add > 0; ++j) {
+ if (!visibility(i, j)) {
+ visibility(i, j) = true;
+ num_projections_to_add--;
+ num_visibles++;
+ }
+ }
+ }
+ d.x_ids[i].resize(num_visibles);
+ d.x[i].resize(2, num_visibles);
+ }
+
+ size_t j_visible = 0;
+ Vec3 X;
+ for (size_t i = 0; i < nviews; ++i) {
+ Vec3 camera_center, t, jitter, lookdir;
+
+ double theta = i * 2 * M_PI / nviews;
+ camera_center << sin(theta), 0.0, cos(theta);
+ camera_center *= config._dist;
+ d.C[i] = camera_center;
+
+ jitter.setRandom();
+ jitter *= config._jitter_amount / camera_center.norm();
+ lookdir = -camera_center + jitter;
+
+ d.K[i] << config._fx, 0, config._cx,
+ 0, config._fy, config._cy,
+ 0, 0, 1;
+ d.R[i] = LookAt(lookdir);
+ d.t[i] = -d.R[i] * camera_center;
+ j_visible = 0;
+ for (size_t j = 0; j < npoints; j++) {
+ if (visibility(i, j)) {
+ X = d.X.col(j);
+ d.x[i].col(j_visible) = Project(d.P(i), X);
+ d.x_ids[i][j_visible] = j;
+ j_visible++;
+ }
+ }
+ }
+ return d;
+}
+
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/test_data_sets.h b/intern/libmv/libmv/multiview/test_data_sets.h
new file mode 100644
index 00000000000..cf01663ca02
--- /dev/null
+++ b/intern/libmv/libmv/multiview/test_data_sets.h
@@ -0,0 +1,105 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
+#define LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+struct TwoViewDataSet {
+ Mat3 K1, K2; // Internal parameters.
+ Mat3 R1, R2; // Rotation.
+ Vec3 t1, t2; // Translation.
+ Mat34 P1, P2; // Projection matrix, P = K(R|t)
+ Mat3 F; // Fundamental matrix.
+ Mat3X X; // 3D points.
+ Mat2X x1, x2; // Projected points.
+};
+
+// Two cameras at (-1,-1,-10) and (2,1,-10) looking approximately towards z+.
+TwoViewDataSet TwoRealisticCameras(bool same_K = false);
+
+// An N-view metric dataset. An important difference between this and the
+// other reconstruction data types is that all points are seen by all
+// cameras.
+struct NViewDataSet {
+ vector<Mat3> K; // Internal parameters (fx, fy, etc).
+ vector<Mat3> R; // Rotation.
+ vector<Vec3> t; // Translation.
+ vector<Vec3> C; // Camera centers.
+ Mat3X X; // 3D points.
+ vector<Mat2X> x; // Projected points; may have noise added.
+ vector<Vecu> x_ids; // Indexes of points corresponding to the projections
+
+ int n; // Actual number of cameras.
+
+ Mat34 P(int i) {
+ assert(i < n);
+ return K[i] * HStack(R[i], t[i]);
+ }
+ Mat3 F(int i, int j) {
+ Mat3 F_;
+ FundamentalFromProjections(P(i), P(j), &F_);
+ return F_;
+ }
+ void Reproject() {
+ for (int i = 0; i < n; ++i) {
+ x[i] = Project(P(i), X);
+ }
+ }
+ // TODO(keir): Add gaussian jitter functions.
+};
+
+struct nViewDatasetConfigator {
+ /// Internal camera parameters
+ int _fx;
+ int _fy;
+ int _cx;
+ int _cy;
+
+ /// Camera random position parameters
+ double _dist;
+ double _jitter_amount;
+
+ nViewDatasetConfigator(int fx = 1000, int fy = 1000,
+ int cx = 500, int cy = 500,
+ double distance = 1.5,
+ double jitter_amount = 0.01);
+};
+
+NViewDataSet NRealisticCamerasFull(int nviews, int npoints,
+ const nViewDatasetConfigator
+ config = nViewDatasetConfigator());
+
+// Generates sparse projections (not all points are projected)
+NViewDataSet NRealisticCamerasSparse(int nviews, int npoints,
+ float view_ratio = 0.6,
+ unsigned min_projections = 3,
+ const nViewDatasetConfigator
+ config = nViewDatasetConfigator());
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_TEST_DATA_SETS_H_
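A usage sketch of the synthetic dataset above (the function name is illustrative): generate a scene and read back per-view cameras, pairwise fundamental matrices, and projections.

  #include "libmv/multiview/test_data_sets.h"

  void InspectSyntheticScene() {
    const int nviews = 4, npoints = 20;
    libmv::NViewDataSet d = libmv::NRealisticCamerasFull(nviews, npoints);

    libmv::Mat34 P0 = d.P(0);      // Camera 0, P = K[R|t].
    libmv::Mat3 F01 = d.F(0, 1);   // Fundamental matrix between views 0 and 1.
    libmv::Mat2X x0 = d.x[0];      // Projections of all points into view 0.
    (void)P0; (void)F01; (void)x0;
  }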
diff --git a/intern/libmv/libmv/multiview/triangulation.cc b/intern/libmv/libmv/multiview/triangulation.cc
new file mode 100644
index 00000000000..4d146c8f21b
--- /dev/null
+++ b/intern/libmv/libmv/multiview/triangulation.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/multiview/triangulation.h"
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/multiview/projection.h"
+
+namespace libmv {
+
+// HZ 12.2, page 312.
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec4 *X_homogeneous) {
+ Mat4 design;
+ for (int i = 0; i < 4; ++i) {
+ design(0, i) = x1(0) * P1(2, i) - P1(0, i);
+ design(1, i) = x1(1) * P1(2, i) - P1(1, i);
+ design(2, i) = x2(0) * P2(2, i) - P2(0, i);
+ design(3, i) = x2(1) * P2(2, i) - P2(1, i);
+ }
+ Nullspace(&design, X_homogeneous);
+}
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec3 *X_euclidean) {
+ Vec4 X_homogeneous;
+ TriangulateDLT(P1, x1, P2, x2, &X_homogeneous);
+ HomogeneousToEuclidean(X_homogeneous, X_euclidean);
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/multiview/triangulation.h b/intern/libmv/libmv/multiview/triangulation.h
new file mode 100644
index 00000000000..be878890242
--- /dev/null
+++ b/intern/libmv/libmv/multiview/triangulation.h
@@ -0,0 +1,38 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_TRIANGULATION_H_
+#define LIBMV_MULTIVIEW_TRIANGULATION_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec4 *X_homogeneous);
+
+void TriangulateDLT(const Mat34 &P1, const Vec2 &x1,
+ const Mat34 &P2, const Vec2 &x2,
+ Vec3 *X_euclidean);
+
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_TRIANGULATION_H_
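A usage sketch combining the DLT above with the depth helper from projection.h (the wrapper name, and passing R and t alongside each P, are illustrative assumptions): keep a triangulated point only if it has positive depth in both views.

  #include "libmv/multiview/projection.h"
  #include "libmv/multiview/triangulation.h"

  bool TriangulateChecked(const libmv::Mat34 &P1, const libmv::Vec2 &x1,
                          const libmv::Mat34 &P2, const libmv::Vec2 &x2,
                          const libmv::Mat3 &R1, const libmv::Vec3 &t1,
                          const libmv::Mat3 &R2, const libmv::Vec3 &t2,
                          libmv::Vec3 *X) {
    libmv::TriangulateDLT(P1, x1, P2, x2, X);
    // Cheirality: the point must lie in front of both cameras.
    return libmv::Depth(R1, t1, *X) > 0 && libmv::Depth(R2, t2, *X) > 0;
  }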
diff --git a/intern/libmv/libmv/multiview/triangulation_test.cc b/intern/libmv/libmv/multiview/triangulation_test.cc
new file mode 100644
index 00000000000..66d1ee25a62
--- /dev/null
+++ b/intern/libmv/libmv/multiview/triangulation_test.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include <iostream>
+
+#include "libmv/multiview/triangulation.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/test_data_sets.h"
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+namespace {
+using namespace libmv;
+
+TEST(Triangulation, TriangulateDLT) {
+ TwoViewDataSet d = TwoRealisticCameras();
+
+ for (int i = 0; i < d.X.cols(); ++i) {
+ Vec2 x1, x2;
+ MatrixColumn(d.x1, i, &x1);
+ MatrixColumn(d.x2, i, &x2);
+ Vec3 X_estimated, X_gt;
+ MatrixColumn(d.X, i, &X_gt);
+ TriangulateDLT(d.P1, x1, d.P2, x2, &X_estimated);
+ EXPECT_NEAR(0, DistanceLInfinity(X_estimated, X_gt), 1e-8);
+ }
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/multiview/two_view_kernel.h b/intern/libmv/libmv/multiview/two_view_kernel.h
new file mode 100644
index 00000000000..7af0ed5ddab
--- /dev/null
+++ b/intern/libmv/libmv/multiview/two_view_kernel.h
@@ -0,0 +1,137 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_MULTIVIEW_TWO_VIEW_KERNEL_H_
+#define LIBMV_MULTIVIEW_TWO_VIEW_KERNEL_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/conditioning.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+namespace two_view {
+namespace kernel {
+
+template<typename Solver, typename Unnormalizer>
+struct NormalizedSolver {
+ enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
+ static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
+ assert(2 == x1.rows());
+ assert(MINIMUM_SAMPLES <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ Mat x1_normalized, x2_normalized;
+ NormalizePoints(x1, &x1_normalized, &T1);
+ NormalizePoints(x2, &x2_normalized, &T2);
+
+ Solver::Solve(x1_normalized, x2_normalized, models);
+
+ for (int i = 0; i < models->size(); ++i) {
+ Unnormalizer::Unnormalize(T1, T2, &(*models)[i]);
+ }
+ }
+};
+
+template<typename Solver, typename Unnormalizer>
+struct IsotropicNormalizedSolver {
+ enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
+ static void Solve(const Mat &x1, const Mat &x2, vector<Mat3> *models) {
+ assert(2 == x1.rows());
+ assert(MINIMUM_SAMPLES <= x1.cols());
+ assert(x1.rows() == x2.rows());
+ assert(x1.cols() == x2.cols());
+
+ // Normalize the data.
+ Mat3 T1, T2;
+ Mat x1_normalized, x2_normalized;
+ NormalizeIsotropicPoints(x1, &x1_normalized, &T1);
+ NormalizeIsotropicPoints(x2, &x2_normalized, &T2);
+
+ Solver::Solve(x1_normalized, x2_normalized, models);
+
+ for (int i = 0; i < models->size(); ++i) {
+ Unnormalizer::Unnormalize(T1, T2, &(*models)[i]);
+ }
+ }
+};
+// This is one example (targeted at solvers that operate on correspondences
+// between two views) that shows the "kernel" part of a robust fitting
+// problem:
+//
+// 1. The model; Mat3 in the case of the F or H matrix.
+// 2. The minimum number of samples needed to fit; 7 or 8 (or 4).
+// 3. A way to convert samples to a model.
+// 4. A way to convert a sample and a model to an error.
+//
+// Of particular note is that the kernel does not expose what the samples are.
+// All the robust fitting algorithm sees is that there is some number of
+// samples; it is able to fit subsets of them (via the kernel) and check their
+// error, but can never access the samples themselves.
+//
+// The Kernel objects must satisfy the following concept so that the robust
+// fitting algorithm can fit this type of relation:
+//
+// 1. Kernel::Model
+// 2. Kernel::MINIMUM_SAMPLES
+// 3. Kernel::Fit(vector<int>, vector<Kernel::Model> *)
+// 4. Kernel::Error(int, Model) -> error
+//
+// The fit routine must not clear existing entries in the vector of models; it
+// should append new solutions to the end.
+template<typename SolverArg,
+ typename ErrorArg,
+ typename ModelArg = Mat3>
+class Kernel {
+ public:
+ Kernel(const Mat &x1, const Mat &x2) : x1_(x1), x2_(x2) {}
+ typedef SolverArg Solver;
+ typedef ModelArg Model;
+ enum { MINIMUM_SAMPLES = Solver::MINIMUM_SAMPLES };
+ void Fit(const vector<int> &samples, vector<Model> *models) const {
+ Mat x1 = ExtractColumns(x1_, samples);
+ Mat x2 = ExtractColumns(x2_, samples);
+ Solver::Solve(x1, x2, models);
+ }
+ double Error(int sample, const Model &model) const {
+ return ErrorArg::Error(model,
+ static_cast<Vec>(x1_.col(sample)),
+ static_cast<Vec>(x2_.col(sample)));
+ }
+ int NumSamples() const {
+ return x1_.cols();
+ }
+ static void Solve(const Mat &x1, const Mat &x2, vector<Model> *models) {
+ // By offering this, Kernel types can be passed to templates.
+ Solver::Solve(x1, x2, models);
+ }
+ protected:
+ const Mat &x1_;
+ const Mat &x2_;
+};
+
+} // namespace kernel
+} // namespace two_view
+} // namespace libmv
+
+#endif // LIBMV_MULTIVIEW_TWO_VIEW_KERNEL_H_
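A sketch of how a robust estimator would drive any type satisfying the Kernel concept described above (the function is illustrative, not a libmv API): fit models from a minimal sample of Kernel::MINIMUM_SAMPLES indices, then score every sample against each hypothesis with Error().

  #include <algorithm>
  #include <limits>

  #include "libmv/base/vector.h"

  template <typename Kernel>
  double BestModelScore(const Kernel &kernel,
                        const libmv::vector<int> &minimal_sample) {
    // minimal_sample.size() is expected to be Kernel::MINIMUM_SAMPLES.
    libmv::vector<typename Kernel::Model> models;
    kernel.Fit(minimal_sample, &models);          // 3. Samples -> models.

    double best = std::numeric_limits<double>::max();
    for (int m = 0; m < models.size(); ++m) {
      double sum = 0.0;
      for (int i = 0; i < kernel.NumSamples(); ++i) {
        sum += kernel.Error(i, models[m]);        // 4. (Sample, model) -> error.
      }
      best = std::min(best, sum);
    }
    return best;
  }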
diff --git a/intern/libmv/libmv/numeric/dogleg.h b/intern/libmv/libmv/numeric/dogleg.h
new file mode 100644
index 00000000000..bf6f996ddaf
--- /dev/null
+++ b/intern/libmv/libmv/numeric/dogleg.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// A simple implementation of Powell's dogleg nonlinear minimization.
+//
+// [1] K. Madsen, H. Nielsen, O. Tingleff. Methods for Non-linear Least
+// Squares Problems.
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// TODO(keir): Cite the Lourakis' dogleg paper.
+
+#ifndef LIBMV_NUMERIC_DOGLEG_H
+#define LIBMV_NUMERIC_DOGLEG_H
+
+#include <cmath>
+#include <cstdio>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
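// Illustrative sketch (not part of this header) of the Function concept the
// solver below expects, inferred from its typedefs: the functor provides
// FMatrixType and XMatrixType typedefs and maps parameters to residuals. The
// residual functor and call sequence here are examples only.
//
//   struct HimmelblauResidual {
//     typedef Vec2 FMatrixType;   // Residual vector type, f(x).
//     typedef Vec2 XMatrixType;   // Parameter vector type, x.
//     Vec2 operator()(const Vec2 &x) const {
//       Vec2 fx;
//       fx << x(0) * x(0) + x(1) - 11.0,
//             x(0) + x(1) * x(1) - 7.0;
//       return fx;
//     }
//   };
//
//   HimmelblauResidual f;
//   Dogleg<HimmelblauResidual> solver(f);
//   Vec2 x(1.0, 1.0);
//   solver.minimize(&x);  // x converges to one of the minima of f.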
+template<typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> > >
+class Dogleg {
+ public:
+ typedef typename Function::XMatrixType::RealScalar Scalar;
+ typedef typename Function::FMatrixType FVec;
+ typedef typename Function::XMatrixType Parameters;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ typedef Matrix<typename JMatrixType::RealScalar,
+ JMatrixType::ColsAtCompileTime,
+ JMatrixType::ColsAtCompileTime> AMatrixType;
+
+ enum Status {
+ RUNNING,
+ GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
+ RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / ||x||
+ TRUST_REGION_TOO_SMALL, // eps > radius / ||x||
+ ERROR_TOO_SMALL, // eps > ||f(x)||
+ HIT_MAX_ITERATIONS,
+ };
+
+ enum Step {
+ DOGLEG,
+ GAUSS_NEWTON,
+ STEEPEST_DESCENT,
+ };
+
+ Dogleg(const Function &f)
+ : f_(f), df_(f) {}
+
+ struct SolverParameters {
+ SolverParameters()
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_trust_radius(1e0),
+ max_iterations(500) {}
+ Scalar gradient_threshold; // eps > max(J'*f(x))
+ Scalar relative_step_threshold; // eps > ||dx|| / ||x||
+ Scalar error_threshold; // eps > ||f(x)||
+ Scalar initial_trust_radius; // Initial trust region radius.
+ int max_iterations; // Maximum number of solver iterations.
+ };
+
+ struct Results {
+ Scalar error_magnitude; // ||f(x)||
+ Scalar gradient_magnitude; // ||J'f(x)||
+ int iterations;
+ Status status;
+ };
+
+ Status Update(const Parameters &x, const SolverParameters &params,
+ JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ *J = df_(x);
+ // TODO(keir): In the case of m = n, avoid computing A and just do J^-1 directly.
+ *A = (*J).transpose() * (*J);
+ *error = f_(x);
+ *g = (*J).transpose() * *error;
+ if (g->array().abs().maxCoeff() < params.gradient_threshold) {
+ return GRADIENT_TOO_SMALL;
+ } else if (error->array().abs().maxCoeff() < params.error_threshold) {
+ return ERROR_TOO_SMALL;
+ }
+ return RUNNING;
+ }
+
+ Step SolveDoglegDirection(const Parameters &dx_sd,
+ const Parameters &dx_gn,
+ Scalar radius,
+ Scalar alpha,
+ Parameters *dx_dl,
+ Scalar *beta) {
+ Parameters a, b_minus_a;
+ // Solve for Dogleg step dx_dl.
+ if (dx_gn.norm() < radius) {
+ *dx_dl = dx_gn;
+ return GAUSS_NEWTON;
+
+ } else if (alpha * dx_sd.norm() > radius) {
+ *dx_dl = (radius / dx_sd.norm()) * dx_sd;
+ return STEEPEST_DESCENT;
+
+ } else {
+      a = alpha * dx_sd;
+      const Parameters &b = dx_gn;
+      b_minus_a = b - a;
+ Scalar Mbma2 = b_minus_a.squaredNorm();
+ Scalar Ma2 = a.squaredNorm();
+ Scalar c = a.dot(b_minus_a);
+ Scalar radius2 = radius*radius;
+ if (c <= 0) {
+ *beta = (-c + sqrt(c*c + Mbma2*(radius2 - Ma2)))/(Mbma2);
+ } else {
+ *beta = (radius2 - Ma2) /
+ (c + sqrt(c*c + Mbma2*(radius2 - Ma2)));
+ }
+ *dx_dl = alpha * dx_sd + (*beta) * (dx_gn - alpha*dx_sd);
+ return DOGLEG;
+ }
+ }
+
+ Results minimize(Parameters *x_and_min) {
+ SolverParameters params;
+ return minimize(params, x_and_min);
+ }
+
+ Results minimize(const SolverParameters &params, Parameters *x_and_min) {
+ Parameters &x = *x_and_min;
+ JMatrixType J;
+ AMatrixType A;
+ FVec error;
+ Parameters g;
+
+ Results results;
+ results.status = Update(x, params, &J, &A, &error, &g);
+
+ Scalar radius = params.initial_trust_radius;
+ bool x_updated = true;
+
+ Parameters x_new;
+ Parameters dx_sd; // Steepest descent step.
+ Parameters dx_dl; // Dogleg step.
+ Parameters dx_gn; // Gauss-Newton step.
+ printf("iteration ||f(x)|| max(g) radius\n");
+ int i = 0;
+ for (; results.status == RUNNING && i < params.max_iterations; ++i) {
+ printf("%9d %12g %12g %12g",
+ i, f_(x).norm(), g.array().abs().maxCoeff(), radius);
+
+ //LG << "iteration: " << i;
+ //LG << "||f(x)||: " << f_(x).norm();
+ //LG << "max(g): " << g.cwise().abs().maxCoeff();
+ //LG << "radius: " << radius;
+ // Eqn 3.19 from [1]
+ Scalar alpha = g.squaredNorm() / (J*g).squaredNorm();
+
+ // Solve for steepest descent direction dx_sd.
+ dx_sd = -g;
+
+ // Solve for Gauss-Newton direction dx_gn.
+ if (x_updated) {
+ // TODO(keir): See Appendix B of [1] for discussion of when A is
+ // singular and there are many solutions. Solving that involves the SVD
+ // and is slower, but should still work.
+ Solver solver(A);
+ dx_gn = solver.solve(-g);
+ if (!(A * dx_gn).isApprox(-g)) {
+ LOG(ERROR) << "Failed to solve normal eqns. TODO: Solve via SVD.";
+ return results;
+ }
+ x_updated = false;
+ }
+
+ // Solve for dogleg direction dx_dl.
+ Scalar beta = 0;
+ Step step = SolveDoglegDirection(dx_sd, dx_gn, radius, alpha,
+ &dx_dl, &beta);
+
+ Scalar e3 = params.relative_step_threshold;
+ if (dx_dl.norm() < e3*(x.norm() + e3)) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+ }
+
+ x_new = x + dx_dl;
+ Scalar actual = f_(x).squaredNorm() - f_(x_new).squaredNorm();
+ Scalar predicted = 0;
+ if (step == GAUSS_NEWTON) {
+ predicted = f_(x).squaredNorm();
+ } else if (step == STEEPEST_DESCENT) {
+ predicted = radius * (2*alpha*g.norm() - radius) / 2 / alpha;
+ } else if (step == DOGLEG) {
+ predicted = 0.5 * alpha * (1-beta)*(1-beta)*g.squaredNorm() +
+ beta*(2-beta)*f_(x).squaredNorm();
+ }
+ Scalar rho = actual / predicted;
+
+ if (step == GAUSS_NEWTON) printf(" GAUSS");
+ if (step == STEEPEST_DESCENT) printf(" STEE");
+ if (step == DOGLEG) printf(" DOGL");
+
+ printf(" %12g %12g %12g\n", rho, actual, predicted);
+
+ if (rho > 0) {
+ // Accept update because the linear model is a good fit.
+ x = x_new;
+ results.status = Update(x, params, &J, &A, &error, &g);
+ x_updated = true;
+ }
+ if (rho > 0.75) {
+ radius = std::max(radius, 3*dx_dl.norm());
+ } else if (rho < 0.25) {
+ radius /= 2;
+ if (radius < e3 * (x.norm() + e3)) {
+ results.status = TRUST_REGION_TOO_SMALL;
+ }
+ }
+ }
+ if (results.status == RUNNING) {
+ results.status = HIT_MAX_ITERATIONS;
+ }
+ results.error_magnitude = error.norm();
+ results.gradient_magnitude = g.norm();
+ results.iterations = i;
+ return results;
+ }
+
+ private:
+ const Function &f_;
+ Jacobian df_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_DOGLEG_H
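For reference, the beta computed in SolveDoglegDirection() places the dogleg point on the trust-region boundary. Following [1], with a = alpha*dx_sd, b = dx_gn and trust radius Delta (the code's radius variable):

$$
\|a + \beta\,(b - a)\|^2 = \Delta^2
\;\Longrightarrow\;
\beta = \frac{-c + \sqrt{c^2 + \|b-a\|^2\,(\Delta^2 - \|a\|^2)}}{\|b-a\|^2}
      = \frac{\Delta^2 - \|a\|^2}{c + \sqrt{c^2 + \|b-a\|^2\,(\Delta^2 - \|a\|^2)}},
\qquad c = a^\top (b - a).
$$

The two branches on the sign of c are algebraically equivalent forms of the same positive root, selected to avoid cancellation.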
diff --git a/intern/libmv/libmv/numeric/dogleg_test.cc b/intern/libmv/libmv/numeric/dogleg_test.cc
new file mode 100644
index 00000000000..90c46c31672
--- /dev/null
+++ b/intern/libmv/libmv/numeric/dogleg_test.cc
@@ -0,0 +1,95 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "testing/testing.h"
+#include "libmv/numeric/dogleg.h"
+
+using namespace libmv;
+
+namespace {
+
+class F {
+ public:
+ typedef Vec4 FMatrixType;
+ typedef Vec3 XMatrixType;
+ Vec4 operator()(const Vec3 &x) const {
+ double x1 = x.x() - 2;
+ double y1 = x.y() - 5;
+ double z1 = x.z();
+ Vec4 fx; fx << x1*x1 + z1*z1,
+ y1*y1 + z1*z1,
+ z1*z1,
+ x1*x1;
+ return fx;
+ }
+};
+
+TEST(Dogleg, SimpleCase) {
+ Vec3 x; x << 0.76026643, -30.01799744, 0.55192142;
+ F f;
+ Dogleg<F>::SolverParameters params;
+ Dogleg<F> lm(f);
+ /* TODO(sergey): Better error handling. */
+ /* Dogleg<F>::Results results = */ lm.minimize(params, &x);
+ Vec3 expected_min_x; expected_min_x << 2, 5, 0;
+
+ EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
+}
+
+// Example 3.2 from [1]; page 11 of the pdf, 20 of the document. This is a
+// tricky problem because of the singular Jacobian near the origin.
+class F32 {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+ Vec2 operator()(const Vec2 &x) const {
+ double x1 = x(0);
+ double x2 = 10*x(0)/(x(0) + 0.1) + 2*x(1)*x(1);
+ Vec2 fx; fx << x1, x2;
+ return fx;
+ }
+};
+
+class JF32 {
+ public:
+ JF32(const F32 &f) { (void) f; }
+ Mat2 operator()(const Vec2 &x) {
+ Mat2 J; J << 1, 0,
+                 1./pow(x(0) + 0.1, 2), 4*x(1);
+ return J;
+ }
+};
+
+// TODO(keir): Re-enable this when the dogleg code properly handles singular
+// normal equations.
+/*
+TEST(Dogleg, Example32) {
+ Vec2 x; x << 3, 1;
+ F32 f;
+ CheckJacobian<F32, JF32>(f, x);
+ Dogleg<F32, JF32> dogleg(f);
+ Dogleg<F32, JF32>::Results results = dogleg.minimize(&x);
+ Vec2 expected_min_x; expected_min_x << 0, 0;
+
+ EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
+}
+*/
+
+} // namespace
diff --git a/intern/libmv/libmv/numeric/function_derivative.h b/intern/libmv/libmv/numeric/function_derivative.h
new file mode 100644
index 00000000000..9820885f04e
--- /dev/null
+++ b/intern/libmv/libmv/numeric/function_derivative.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_NUMERIC_DERIVATIVE_H
+#define LIBMV_NUMERIC_DERIVATIVE_H
+
+#include <cmath>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+// Numeric derivative of a function.
+// TODO(keir): Consider adding a quadratic approximation.
+
+enum NumericJacobianMode {
+ CENTRAL,
+ FORWARD,
+};
+
+template<typename Function, NumericJacobianMode mode = CENTRAL>
+class NumericJacobian {
+ public:
+ typedef typename Function::XMatrixType Parameters;
+ typedef typename Function::XMatrixType::RealScalar XScalar;
+ typedef typename Function::FMatrixType FMatrixType;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime>
+ JMatrixType;
+
+ NumericJacobian(const Function &f) : f_(f) {}
+
+ // TODO(keir): Perhaps passing the jacobian back by value is not a good idea.
+ JMatrixType operator()(const Parameters &x) {
+ // Empirically determined constant.
+ Parameters eps = x.array().abs() * XScalar(1e-5);
+    // To handle cases where a parameter is exactly zero, instead use the mean
+ // eps for the other dimensions.
+ XScalar mean_eps = eps.sum() / eps.rows();
+ if (mean_eps == XScalar(0)) {
+ // TODO(keir): Do something better here.
+ mean_eps = 1e-8; // ~sqrt(machine precision).
+ }
+    // TODO(keir): Eliminate this needless function evaluation for the
+ // central difference case.
+ FMatrixType fx = f_(x);
+ const int rows = fx.rows();
+ const int cols = x.rows();
+ JMatrixType jacobian(rows, cols);
+ Parameters x_plus_delta = x;
+ for (int c = 0; c < cols; ++c) {
+ if (eps(c) == XScalar(0)) {
+ eps(c) = mean_eps;
+ }
+ x_plus_delta(c) = x(c) + eps(c);
+ jacobian.col(c) = f_(x_plus_delta);
+
+ XScalar one_over_h = 1 / eps(c);
+ if (mode == CENTRAL) {
+ x_plus_delta(c) = x(c) - eps(c);
+ jacobian.col(c) -= f_(x_plus_delta);
+ one_over_h /= 2;
+ } else {
+ jacobian.col(c) -= fx;
+ }
+ x_plus_delta(c) = x(c);
+ jacobian.col(c) = jacobian.col(c) * one_over_h;
+ }
+ return jacobian;
+ }
+ private:
+ const Function &f_;
+};
+
+template<typename Function, typename Jacobian>
+bool CheckJacobian(const Function &f, const typename Function::XMatrixType &x) {
+ Jacobian j_analytic(f);
+ NumericJacobian<Function> j_numeric(f);
+
+ typename NumericJacobian<Function>::JMatrixType J_numeric = j_numeric(x);
+ typename NumericJacobian<Function>::JMatrixType J_analytic = j_analytic(x);
+ LG << J_numeric - J_analytic;
+ return true;
+}
+
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_DERIVATIVE_H
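The CENTRAL/FORWARD distinction above matters in practice: central differences carry O(h^2) truncation error while forward differences are only O(h), at the cost of one extra function evaluation per parameter. A minimal standalone sketch (plain C++, independent of libmv) showing the gap on d/dx sin(x) with a step comparable to the 1e-5 relative step used above:

#include <cmath>
#include <cstdio>

int main() {
  const double x = 1.0;
  const double h = 1e-5;                // Comparable to NumericJacobian's step scale.
  const double exact = std::cos(x);     // d/dx sin(x) = cos(x).
  const double forward = (std::sin(x + h) - std::sin(x)) / h;
  const double central = (std::sin(x + h) - std::sin(x - h)) / (2.0 * h);
  std::printf("forward error: %g\n", std::fabs(forward - exact));  // ~4e-6
  std::printf("central error: %g\n", std::fabs(central - exact));  // ~1e-11
  return 0;
}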
diff --git a/intern/libmv/libmv/numeric/function_derivative_test.cc b/intern/libmv/libmv/numeric/function_derivative_test.cc
new file mode 100644
index 00000000000..8d976d3e9a0
--- /dev/null
+++ b/intern/libmv/libmv/numeric/function_derivative_test.cc
@@ -0,0 +1,57 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "testing/testing.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/function_derivative.h"
+
+using namespace libmv;
+
+namespace {
+
+class F {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec3 XMatrixType;
+ Vec2 operator()(const Vec3 &x) const {
+ Vec2 fx;
+ fx << 0.19*x(0) + 0.19*x(1)*x(1) + x(2),
+ 3*sin(x(0)) + 2*cos(x(1));
+ return fx;
+ }
+ Mat23 J(const Vec3 &x) const {
+ Mat23 jacobian;
+ jacobian << 0.19, 2*0.19*x(1), 1.0,
+ 3*cos(x(0)), -2*sin(x(1)), 0;
+ return jacobian;
+ }
+};
+
+TEST(FunctionDerivative, SimpleCase) {
+ Vec3 x; x << 0.76026643, 0.01799744, 0.55192142;
+ F f;
+ NumericJacobian<F, CENTRAL> J(f);
+ EXPECT_MATRIX_NEAR(f.J(x), J(x), 1e-8);
+ NumericJacobian<F, FORWARD> J_forward(f);
+ // Forward difference is very inaccurate.
+ EXPECT_MATRIX_NEAR(f.J(x), J_forward(x), 1e-5);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/numeric/levenberg_marquardt.h b/intern/libmv/libmv/numeric/levenberg_marquardt.h
new file mode 100644
index 00000000000..2af9a62cf7b
--- /dev/null
+++ b/intern/libmv/libmv/numeric/levenberg_marquardt.h
@@ -0,0 +1,183 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// A simple implementation of Levenberg-Marquardt.
+//
+// [1] K. Madsen, H. Nielsen, O. Tingleff. Methods for Non-linear Least
+// Squares Problems.
+// http://www2.imm.dtu.dk/pubdb/views/edoc_download.php/3215/pdf/imm3215.pdf
+//
+// TODO(keir): Cite Lourakis' dogleg paper.
+
+#ifndef LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
+#define LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
+
+#include <cmath>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/function_derivative.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+template<typename Function,
+ typename Jacobian = NumericJacobian<Function>,
+ typename Solver = Eigen::PartialPivLU<
+ Matrix<typename Function::FMatrixType::RealScalar,
+ Function::XMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> > >
+class LevenbergMarquardt {
+ public:
+ typedef typename Function::XMatrixType::RealScalar Scalar;
+ typedef typename Function::FMatrixType FVec;
+ typedef typename Function::XMatrixType Parameters;
+ typedef Matrix<typename Function::FMatrixType::RealScalar,
+ Function::FMatrixType::RowsAtCompileTime,
+ Function::XMatrixType::RowsAtCompileTime> JMatrixType;
+ typedef Matrix<typename JMatrixType::RealScalar,
+ JMatrixType::ColsAtCompileTime,
+ JMatrixType::ColsAtCompileTime> AMatrixType;
+
+ // TODO(keir): Some of these knobs can be derived from each other and
+ // removed, instead of requiring the user to set them.
+ enum Status {
+ RUNNING,
+ GRADIENT_TOO_SMALL, // eps > max(J'*f(x))
+ RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / ||x||
+ ERROR_TOO_SMALL, // eps > ||f(x)||
+ HIT_MAX_ITERATIONS,
+ };
+
+ LevenbergMarquardt(const Function &f)
+ : f_(f), df_(f) {}
+
+ struct SolverParameters {
+ SolverParameters()
+ : gradient_threshold(1e-16),
+ relative_step_threshold(1e-16),
+ error_threshold(1e-16),
+ initial_scale_factor(1e-3),
+ max_iterations(100) {}
+ Scalar gradient_threshold; // eps > max(J'*f(x))
+ Scalar relative_step_threshold; // eps > ||dx|| / ||x||
+ Scalar error_threshold; // eps > ||f(x)||
+ Scalar initial_scale_factor; // Initial u for solving normal equations.
+ int max_iterations; // Maximum number of solver iterations.
+ };
+
+ struct Results {
+ Scalar error_magnitude; // ||f(x)||
+ Scalar gradient_magnitude; // ||J'f(x)||
+ int iterations;
+ Status status;
+ };
+
+ Status Update(const Parameters &x, const SolverParameters &params,
+ JMatrixType *J, AMatrixType *A, FVec *error, Parameters *g) {
+ *J = df_(x);
+ *A = (*J).transpose() * (*J);
+ *error = -f_(x);
+ *g = (*J).transpose() * *error;
+ if (g->array().abs().maxCoeff() < params.gradient_threshold) {
+ return GRADIENT_TOO_SMALL;
+ } else if (error->norm() < params.error_threshold) {
+ return ERROR_TOO_SMALL;
+ }
+ return RUNNING;
+ }
+
+ Results minimize(Parameters *x_and_min) {
+ SolverParameters params;
+    return minimize(params, x_and_min);
+ }
+
+ Results minimize(const SolverParameters &params, Parameters *x_and_min) {
+ Parameters &x = *x_and_min;
+ JMatrixType J;
+ AMatrixType A;
+ FVec error;
+ Parameters g;
+
+ Results results;
+ results.status = Update(x, params, &J, &A, &error, &g);
+
+ Scalar u = Scalar(params.initial_scale_factor*A.diagonal().maxCoeff());
+ Scalar v = 2;
+
+ Parameters dx, x_new;
+ int i;
+ for (i = 0; results.status == RUNNING && i < params.max_iterations; ++i) {
+ VLOG(3) << "iteration: " << i;
+ VLOG(3) << "||f(x)||: " << f_(x).norm();
+ VLOG(3) << "max(g): " << g.array().abs().maxCoeff();
+ VLOG(3) << "u: " << u;
+ VLOG(3) << "v: " << v;
+
+ AMatrixType A_augmented = A + u*AMatrixType::Identity(J.cols(), J.cols());
+ Solver solver(A_augmented);
+ dx = solver.solve(g);
+ bool solved = (A_augmented * dx).isApprox(g);
+ if (!solved) {
+ LOG(ERROR) << "Failed to solve";
+ }
+ if (solved && dx.norm() <= params.relative_step_threshold * x.norm()) {
+ results.status = RELATIVE_STEP_SIZE_TOO_SMALL;
+ break;
+ }
+ if (solved) {
+ x_new = x + dx;
+ // Rho is the ratio of the actual reduction in error to the reduction
+ // in error that would be obtained if the problem was linear.
+ // See [1] for details.
+ Scalar rho((error.squaredNorm() - f_(x_new).squaredNorm())
+ / dx.dot(u*dx + g));
+ if (rho > 0) {
+ // Accept the Gauss-Newton step because the linear model fits well.
+ x = x_new;
+ results.status = Update(x, params, &J, &A, &error, &g);
+ Scalar tmp = Scalar(2*rho-1);
+ u = u*std::max(1/3., 1 - (tmp*tmp*tmp));
+ v = 2;
+ continue;
+ }
+ }
+ // Reject the update because either the normal equations failed to solve
+ // or the local linear model was not good (rho < 0). Instead, increase u
+ // to move closer to gradient descent.
+ u *= v;
+ v *= 2;
+ }
+ if (results.status == RUNNING) {
+ results.status = HIT_MAX_ITERATIONS;
+ }
+ results.error_magnitude = error.norm();
+ results.gradient_magnitude = g.norm();
+ results.iterations = i;
+ return results;
+ }
+
+ private:
+ const Function &f_;
+ Jacobian df_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_LEVENBERG_MARQUARDT_H
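For reference, with error = -f(x) and g = J' * error as computed in Update(), each iteration above solves the damped normal equations and accepts or rejects the step by the gain ratio rho, following [1]:

$$
(J^\top J + \mu I)\,\delta = -J^\top f(x),
\qquad
\rho = \frac{\|f(x)\|^2 - \|f(x+\delta)\|^2}{\delta^\top(\mu\,\delta + g)}.
$$

On acceptance (rho > 0) the damping is relaxed, mu <- mu*max(1/3, 1 - (2*rho - 1)^3) and nu <- 2; on rejection mu <- mu*nu and nu <- 2*nu, which pushes the next step toward gradient descent.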
diff --git a/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc b/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc
new file mode 100644
index 00000000000..fc3f9ebbb29
--- /dev/null
+++ b/intern/libmv/libmv/numeric/levenberg_marquardt_test.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2007, 2008, 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "testing/testing.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+
+using namespace libmv;
+
+namespace {
+
+class F {
+ public:
+ typedef Vec4 FMatrixType;
+ typedef Vec3 XMatrixType;
+ Vec4 operator()(const Vec3 &x) const {
+ double x1 = x.x() - 2;
+ double y1 = x.y() - 5;
+ double z1 = x.z();
+ Vec4 fx; fx << x1*x1 + z1*z1,
+ y1*y1 + z1*z1,
+ z1*z1,
+ x1*x1;
+ return fx;
+ }
+};
+
+TEST(LevenbergMarquardt, SimpleCase) {
+ Vec3 x(0.76026643, -30.01799744, 0.55192142);
+ F f;
+ LevenbergMarquardt<F>::SolverParameters params;
+ LevenbergMarquardt<F> lm(f);
+ /* TODO(sergey): Better error handling. */
+ /* LevenbergMarquardt<F>::Results results = */ lm.minimize(params, &x);
+ Vec3 expected_min_x(2, 5, 0);
+
+ EXPECT_MATRIX_NEAR(expected_min_x, x, 1e-5);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/numeric/numeric.cc b/intern/libmv/libmv/numeric/numeric.cc
new file mode 100644
index 00000000000..9007663c8e2
--- /dev/null
+++ b/intern/libmv/libmv/numeric/numeric.cc
@@ -0,0 +1,136 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+Mat3 RotationAroundX(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << 1, 0, 0,
+ 0, c, -s,
+ 0, s, c;
+ return R;
+}
+
+Mat3 RotationAroundY(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << c, 0, s,
+ 0, 1, 0,
+ -s, 0, c;
+ return R;
+}
+
+Mat3 RotationAroundZ(double angle) {
+ double c, s;
+ sincos(angle, &s, &c);
+ Mat3 R;
+ R << c, -s, 0,
+ s, c, 0,
+ 0, 0, 1;
+ return R;
+}
+
+
+Mat3 RotationRodrigues(const Vec3 &axis) {
+ double theta = axis.norm();
+ Vec3 w = axis / theta;
+ Mat3 W = CrossProductMatrix(w);
+
+ return Mat3::Identity() + sin(theta) * W + (1 - cos(theta)) * W * W;
+}
+
+
+Mat3 LookAt(Vec3 center) {
+ Vec3 zc = center.normalized();
+ Vec3 xc = Vec3::UnitY().cross(zc).normalized();
+ Vec3 yc = zc.cross(xc);
+ Mat3 R;
+ R.row(0) = xc;
+ R.row(1) = yc;
+ R.row(2) = zc;
+ return R;
+}
+
+Mat3 CrossProductMatrix(const Vec3 &x) {
+ Mat3 X;
+ X << 0, -x(2), x(1),
+ x(2), 0, -x(0),
+ -x(1), x(0), 0;
+ return X;
+}
+
+void MeanAndVarianceAlongRows(const Mat &A,
+ Vec *mean_pointer,
+ Vec *variance_pointer) {
+ Vec &mean = *mean_pointer;
+ Vec &variance = *variance_pointer;
+ int n = A.rows();
+ int m = A.cols();
+ mean.resize(n);
+ variance.resize(n);
+
+ for (int i = 0; i < n; ++i) {
+ mean(i) = 0;
+ variance(i) = 0;
+ for (int j = 0; j < m; ++j) {
+ double x = A(i, j);
+ mean(i) += x;
+ variance(i) += x * x;
+ }
+ }
+
+ mean /= m;
+ for (int i = 0; i < n; ++i) {
+ variance(i) = variance(i) / m - Square(mean(i));
+ }
+}
+
+void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked) {
+  assert(left.rows() == right.rows());
+ int n = left.rows();
+ int m1 = left.cols();
+ int m2 = right.cols();
+
+ stacked->resize(n, m1 + m2);
+ stacked->block(0, 0, n, m1) = left;
+ stacked->block(0, m1, n, m2) = right;
+}
+
+void MatrixColumn(const Mat &A, int i, Vec2 *v) {
+ assert(A.rows() == 2);
+ *v << A(0, i), A(1, i);
+}
+void MatrixColumn(const Mat &A, int i, Vec3 *v) {
+ assert(A.rows() == 3);
+ *v << A(0, i), A(1, i), A(2, i);
+}
+void MatrixColumn(const Mat &A, int i, Vec4 *v) {
+ assert(A.rows() == 4);
+ *v << A(0, i), A(1, i), A(2, i), A(3, i);
+}
+
+} // namespace libmv
+
diff --git a/intern/libmv/libmv/numeric/numeric.h b/intern/libmv/libmv/numeric/numeric.h
new file mode 100644
index 00000000000..55d4c7d4651
--- /dev/null
+++ b/intern/libmv/libmv/numeric/numeric.h
@@ -0,0 +1,502 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Matrix and vector classes, based on Eigen2.
+//
+// Avoid using Eigen2 classes directly; instead typedef them here.
+
+#ifndef LIBMV_NUMERIC_NUMERIC_H
+#define LIBMV_NUMERIC_NUMERIC_H
+
+#include <Eigen/Cholesky>
+#include <Eigen/Core>
+#include <Eigen/Eigenvalues>
+#include <Eigen/Geometry>
+#include <Eigen/LU>
+#include <Eigen/QR>
+#include <Eigen/SVD>
+
+#if !defined(__MINGW64__)
+# if defined(_WIN32) || defined(__APPLE__) || \
+ defined(__FreeBSD__) || defined(__NetBSD__)
+static void sincos(double x, double *sinx, double *cosx) {
+ *sinx = sin(x);
+ *cosx = cos(x);
+}
+# endif
+#endif // !__MINGW64__
+
+#if (defined(WIN32) || defined(WIN64)) && !defined(__MINGW32__)
+inline long lround(double d) {
+ return (long)(d>0 ? d+0.5 : ceil(d-0.5));
+}
+# if _MSC_VER < 1800
+inline int round(double d) {
+ return (d>0) ? int(d+0.5) : int(d-0.5);
+}
+# endif // _MSC_VER < 1800
+typedef unsigned int uint;
+#endif // _WIN32
+
+namespace libmv {
+
+typedef Eigen::MatrixXd Mat;
+typedef Eigen::VectorXd Vec;
+
+typedef Eigen::MatrixXf Matf;
+typedef Eigen::VectorXf Vecf;
+
+typedef Eigen::Matrix<unsigned int, Eigen::Dynamic, Eigen::Dynamic> Matu;
+typedef Eigen::Matrix<unsigned int, Eigen::Dynamic, 1> Vecu;
+typedef Eigen::Matrix<unsigned int, 2, 1> Vec2u;
+
+typedef Eigen::Matrix<double, 2, 2> Mat2;
+typedef Eigen::Matrix<double, 2, 3> Mat23;
+typedef Eigen::Matrix<double, 3, 3> Mat3;
+typedef Eigen::Matrix<double, 3, 4> Mat34;
+typedef Eigen::Matrix<double, 3, 5> Mat35;
+typedef Eigen::Matrix<double, 4, 1> Mat41;
+typedef Eigen::Matrix<double, 4, 3> Mat43;
+typedef Eigen::Matrix<double, 4, 4> Mat4;
+typedef Eigen::Matrix<double, 4, 6> Mat46;
+typedef Eigen::Matrix<float, 2, 2> Mat2f;
+typedef Eigen::Matrix<float, 2, 3> Mat23f;
+typedef Eigen::Matrix<float, 3, 3> Mat3f;
+typedef Eigen::Matrix<float, 3, 4> Mat34f;
+typedef Eigen::Matrix<float, 3, 5> Mat35f;
+typedef Eigen::Matrix<float, 4, 3> Mat43f;
+typedef Eigen::Matrix<float, 4, 4> Mat4f;
+typedef Eigen::Matrix<float, 4, 6> Mat46f;
+
+typedef Eigen::Matrix<double, 3, 3, Eigen::RowMajor> RMat3;
+typedef Eigen::Matrix<double, 4, 4, Eigen::RowMajor> RMat4;
+
+typedef Eigen::Matrix<double, 2, Eigen::Dynamic> Mat2X;
+typedef Eigen::Matrix<double, 3, Eigen::Dynamic> Mat3X;
+typedef Eigen::Matrix<double, 4, Eigen::Dynamic> Mat4X;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 2> MatX2;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 3> MatX3;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 4> MatX4;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 5> MatX5;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 6> MatX6;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 7> MatX7;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 8> MatX8;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 9> MatX9;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 15> MatX15;
+typedef Eigen::Matrix<double, Eigen::Dynamic, 16> MatX16;
+
+typedef Eigen::Vector2d Vec2;
+typedef Eigen::Vector3d Vec3;
+typedef Eigen::Vector4d Vec4;
+typedef Eigen::Matrix<double, 5, 1> Vec5;
+typedef Eigen::Matrix<double, 6, 1> Vec6;
+typedef Eigen::Matrix<double, 7, 1> Vec7;
+typedef Eigen::Matrix<double, 8, 1> Vec8;
+typedef Eigen::Matrix<double, 9, 1> Vec9;
+typedef Eigen::Matrix<double, 10, 1> Vec10;
+typedef Eigen::Matrix<double, 11, 1> Vec11;
+typedef Eigen::Matrix<double, 12, 1> Vec12;
+typedef Eigen::Matrix<double, 13, 1> Vec13;
+typedef Eigen::Matrix<double, 14, 1> Vec14;
+typedef Eigen::Matrix<double, 15, 1> Vec15;
+typedef Eigen::Matrix<double, 16, 1> Vec16;
+typedef Eigen::Matrix<double, 17, 1> Vec17;
+typedef Eigen::Matrix<double, 18, 1> Vec18;
+typedef Eigen::Matrix<double, 19, 1> Vec19;
+typedef Eigen::Matrix<double, 20, 1> Vec20;
+
+typedef Eigen::Vector2f Vec2f;
+typedef Eigen::Vector3f Vec3f;
+typedef Eigen::Vector4f Vec4f;
+
+typedef Eigen::VectorXi VecXi;
+
+typedef Eigen::Vector2i Vec2i;
+typedef Eigen::Vector3i Vec3i;
+typedef Eigen::Vector4i Vec4i;
+
+typedef Eigen::Matrix<float,
+ Eigen::Dynamic,
+ Eigen::Dynamic,
+ Eigen::RowMajor> RMatf;
+
+typedef Eigen::NumTraits<double> EigenDouble;
+
+using Eigen::Map;
+using Eigen::Dynamic;
+using Eigen::Matrix;
+
+// Find U, s, and VT such that
+//
+// A = U * diag(s) * VT
+//
+template <typename TMat, typename TVec>
+inline void SVD(TMat *A, Vec *s, Mat *U, Mat *VT) {
+ assert(0);
+}
+
+// Solve the linear system Ax = 0 via SVD. Store the solution in x, such that
+// ||x|| = 1.0. Return the singular value corresponding to the solution.
+// Destroys A and resizes x if necessary.
+// TODO(maclean): Take the SVD of the transpose instead of this zero padding.
+template <typename TMat, typename TVec>
+double Nullspace(TMat *A, TVec *nullspace) {
+ Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
+ (*nullspace) = svd.matrixV().col(A->cols()-1);
+ if (A->rows() >= A->cols())
+ return svd.singularValues()(A->cols()-1);
+ else
+ return 0.0;
+}
+
+// Solve the linear system Ax = 0 via SVD. Finds two solutions, x1 and x2, such
+// that x1 is the best solution and x2 is the next best solution (in the L2
+// norm sense). Store the solution in x1 and x2, such that ||x|| = 1.0. Return
+// the singular value corresponding to the solution x1. Destroys A and resizes
+// x if necessary.
+template <typename TMat, typename TVec1, typename TVec2>
+double Nullspace2(TMat *A, TVec1 *x1, TVec2 *x2) {
+ Eigen::JacobiSVD<TMat> svd(*A, Eigen::ComputeFullV);
+ *x1 = svd.matrixV().col(A->cols() - 1);
+ *x2 = svd.matrixV().col(A->cols() - 2);
+ if (A->rows() >= A->cols())
+ return svd.singularValues()(A->cols()-1);
+ else
+ return 0.0;
+}
+
+// In place transpose for square matrices.
+template<class TA>
+inline void TransposeInPlace(TA *A) {
+ *A = A->transpose().eval();
+}
+
+template<typename TVec>
+inline double NormL1(const TVec &x) {
+ return x.array().abs().sum();
+}
+
+template<typename TVec>
+inline double NormL2(const TVec &x) {
+ return x.norm();
+}
+
+template<typename TVec>
+inline double NormLInfinity(const TVec &x) {
+ return x.array().abs().maxCoeff();
+}
+
+template<typename TVec>
+inline double DistanceL1(const TVec &x, const TVec &y) {
+ return (x - y).array().abs().sum();
+}
+
+template<typename TVec>
+inline double DistanceL2(const TVec &x, const TVec &y) {
+ return (x - y).norm();
+}
+template<typename TVec>
+inline double DistanceLInfinity(const TVec &x, const TVec &y) {
+ return (x - y).array().abs().maxCoeff();
+}
+
+// Normalize a vector with the L1 norm, and return the norm before it was
+// normalized.
+template<typename TVec>
+inline double NormalizeL1(TVec *x) {
+ double norm = NormL1(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Normalize a vector with the L2 norm, and return the norm before it was
+// normalized.
+template<typename TVec>
+inline double NormalizeL2(TVec *x) {
+ double norm = NormL2(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Normalize a vector with the L^Infinity norm, and return the norm before it
+// was normalized.
+template<typename TVec>
+inline double NormalizeLInfinity(TVec *x) {
+ double norm = NormLInfinity(*x);
+ *x /= norm;
+ return norm;
+}
+
+// Return the square of a number.
+template<typename T>
+inline T Square(T x) {
+ return x * x;
+}
+
+Mat3 RotationAroundX(double angle);
+Mat3 RotationAroundY(double angle);
+Mat3 RotationAroundZ(double angle);
+
+// Returns the rotation matrix of a rotation of angle |axis| around axis.
+// This is computed using the Rodrigues formula, see:
+// http://mathworld.wolfram.com/RodriguesRotationFormula.html
+Mat3 RotationRodrigues(const Vec3 &axis);
+
+// Make a rotation matrix such that center becomes the direction of the
+// positive z-axis, and y is oriented close to up.
+Mat3 LookAt(Vec3 center);
+
+// Return a diagonal matrix from a vector containing the diagonal values.
+template <typename TVec>
+inline Mat Diag(const TVec &x) {
+ return x.asDiagonal();
+}
+
+template<typename TMat>
+inline double FrobeniusNorm(const TMat &A) {
+ return sqrt(A.array().abs2().sum());
+}
+
+template<typename TMat>
+inline double FrobeniusDistance(const TMat &A, const TMat &B) {
+ return FrobeniusNorm(A - B);
+}
+
+inline Vec3 CrossProduct(const Vec3 &x, const Vec3 &y) {
+ return x.cross(y);
+}
+
+Mat3 CrossProductMatrix(const Vec3 &x);
+
+void MeanAndVarianceAlongRows(const Mat &A,
+ Vec *mean_pointer,
+ Vec *variance_pointer);
+
+#if _WIN32
+ // TODO(bomboze): un-#if this for both platforms once tested under Windows
+ /* This solution was extensively discussed here
+ http://forum.kde.org/viewtopic.php?f=74&t=61940 */
+ #define SUM_OR_DYNAMIC(x, y) (x == Eigen::Dynamic || y == Eigen::Dynamic) ? Eigen::Dynamic : (x+y)
+
+ template<typename Derived1, typename Derived2>
+ struct hstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = Derived1::RowsAtCompileTime,
+ ColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::ColsAtCompileTime,
+ Derived2::ColsAtCompileTime),
+ Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = Derived1::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxColsAtCompileTime,
+ Derived2::MaxColsAtCompileTime)
+ };
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime> type;
+ };
+
+ template<typename Derived1, typename Derived2>
+ typename hstack_return<Derived1, Derived2>::type
+ HStack(const Eigen::MatrixBase<Derived1>& lhs,
+ const Eigen::MatrixBase<Derived2>& rhs) {
+ typename hstack_return<Derived1, Derived2>::type res;
+ res.resize(lhs.rows(), lhs.cols()+rhs.cols());
+ res << lhs, rhs;
+ return res;
+ };
+
+
+ template<typename Derived1, typename Derived2>
+ struct vstack_return {
+ typedef typename Derived1::Scalar Scalar;
+ enum {
+ RowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::RowsAtCompileTime,
+ Derived2::RowsAtCompileTime),
+ ColsAtCompileTime = Derived1::ColsAtCompileTime,
+ Options = Derived1::Flags&Eigen::RowMajorBit ? Eigen::RowMajor : 0,
+ MaxRowsAtCompileTime = SUM_OR_DYNAMIC(Derived1::MaxRowsAtCompileTime,
+ Derived2::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Derived1::MaxColsAtCompileTime
+ };
+ typedef Eigen::Matrix<Scalar,
+ RowsAtCompileTime,
+ ColsAtCompileTime,
+ Options,
+ MaxRowsAtCompileTime,
+ MaxColsAtCompileTime> type;
+ };
+
+ template<typename Derived1, typename Derived2>
+ typename vstack_return<Derived1, Derived2>::type
+ VStack(const Eigen::MatrixBase<Derived1>& lhs,
+ const Eigen::MatrixBase<Derived2>& rhs) {
+ typename vstack_return<Derived1, Derived2>::type res;
+ res.resize(lhs.rows()+rhs.rows(), lhs.cols());
+ res << lhs, rhs;
+ return res;
+ };
+
+
+#else // _WIN32
+
+ // Since it is not possible to typedef privately here, use a macro.
+ // Always take dynamic columns if either side is dynamic.
+ #define COLS \
+ ((ColsLeft == Eigen::Dynamic || ColsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic : (ColsLeft + ColsRight))
+
+ // Same as above, except that prefer fixed size if either is fixed.
+ #define ROWS \
+ ((RowsLeft == Eigen::Dynamic && RowsRight == Eigen::Dynamic) \
+ ? Eigen::Dynamic \
+ : ((RowsLeft == Eigen::Dynamic) \
+ ? RowsRight \
+ : RowsLeft \
+ ) \
+ )
+
+ // TODO(keir): Add a static assert if both rows are at compiletime.
+ template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+ Eigen::Matrix<T, ROWS, COLS>
+ HStack(const Eigen::Matrix<T, RowsLeft, ColsLeft> &left,
+ const Eigen::Matrix<T, RowsRight, ColsRight> &right) {
+ assert(left.rows() == right.rows());
+ int n = left.rows();
+ int m1 = left.cols();
+ int m2 = right.cols();
+
+ Eigen::Matrix<T, ROWS, COLS> stacked(n, m1 + m2);
+ stacked.block(0, 0, n, m1) = left;
+ stacked.block(0, m1, n, m2) = right;
+ return stacked;
+ }
+
+ // Reuse the above macros by swapping the order of Rows and Cols. Nasty, but
+ // the duplication is worse.
+ // TODO(keir): Add a static assert if both rows are at compiletime.
+ // TODO(keir): Mail eigen list about making this work for general expressions
+ // rather than only matrix types.
+ template<typename T, int RowsLeft, int RowsRight, int ColsLeft, int ColsRight>
+ Eigen::Matrix<T, COLS, ROWS>
+ VStack(const Eigen::Matrix<T, ColsLeft, RowsLeft> &top,
+ const Eigen::Matrix<T, ColsRight, RowsRight> &bottom) {
+ assert(top.cols() == bottom.cols());
+ int n1 = top.rows();
+ int n2 = bottom.rows();
+ int m = top.cols();
+
+ Eigen::Matrix<T, COLS, ROWS> stacked(n1 + n2, m);
+ stacked.block(0, 0, n1, m) = top;
+ stacked.block(n1, 0, n2, m) = bottom;
+ return stacked;
+ }
+ #undef COLS
+ #undef ROWS
+#endif // _WIN32
+
+
+
+void HorizontalStack(const Mat &left, const Mat &right, Mat *stacked);
+
+template<typename TTop, typename TBot, typename TStacked>
+void VerticalStack(const TTop &top, const TBot &bottom, TStacked *stacked) {
+ assert(top.cols() == bottom.cols());
+ int n1 = top.rows();
+ int n2 = bottom.rows();
+ int m = top.cols();
+
+ stacked->resize(n1 + n2, m);
+ stacked->block(0, 0, n1, m) = top;
+ stacked->block(n1, 0, n2, m) = bottom;
+}
+
+void MatrixColumn(const Mat &A, int i, Vec2 *v);
+void MatrixColumn(const Mat &A, int i, Vec3 *v);
+void MatrixColumn(const Mat &A, int i, Vec4 *v);
+
+template <typename TMat, typename TCols>
+TMat ExtractColumns(const TMat &A, const TCols &columns) {
+ TMat compressed(A.rows(), columns.size());
+ for (int i = 0; i < columns.size(); ++i) {
+ compressed.col(i) = A.col(columns[i]);
+ }
+ return compressed;
+}
+
+template <typename TMat, typename TDest>
+void reshape(const TMat &a, int rows, int cols, TDest *b) {
+ assert(a.rows()*a.cols() == rows*cols);
+ b->resize(rows, cols);
+ for (int i = 0; i < rows; i++) {
+ for (int j = 0; j < cols; j++) {
+ (*b)(i, j) = a[cols*i + j];
+ }
+ }
+}
+
+inline bool isnan(double i) {
+#ifdef WIN32
+ return _isnan(i) > 0;
+#else
+ return std::isnan(i);
+#endif
+}
+
+/// Ceil function that has the same behaviour for positive
+/// and negative values
+template <typename FloatType>
+FloatType ceil0(const FloatType& value) {
+ FloatType result = std::ceil(std::fabs(value));
+ return (value < 0.0) ? -result : result;
+}
+
+/// Returns the skew anti-symmetric matrix of a vector
+inline Mat3 SkewMat(const Vec3 &x) {
+ Mat3 skew;
+ skew << 0 , -x(2), x(1),
+ x(2), 0 , -x(0),
+ -x(1), x(0), 0;
+ return skew;
+}
+/// Returns the skew anti-symmetric matrix of a vector with only
+/// the first two (independent) rows
+inline Mat23 SkewMatMinimal(const Vec2 &x) {
+ Mat23 skew;
+ skew << 0, -1, x(1),
+ 1, 0, -x(0);
+ return skew;
+}
+
+/// Returns the rotation matrix built from a given vector of Euler angles
+inline Mat3 RotationFromEulerVector(Vec3 euler_vector) {
+ double theta = euler_vector.norm();
+ if (theta == 0.0) {
+ return Mat3::Identity();
+ }
+ Vec3 w = euler_vector / theta;
+ Mat3 w_hat = CrossProductMatrix(w);
+ return Mat3::Identity() + w_hat*sin(theta) + w_hat*w_hat*(1 - cos(theta));
+}
+} // namespace libmv
+
+#endif // LIBMV_NUMERIC_NUMERIC_H
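A small standalone sketch (plain Eigen, independent of libmv) of the idea behind Nullspace() above: the right singular vector associated with the smallest singular value of A minimizes ||Ax|| over all unit-norm x.

#include <iostream>

#include <Eigen/Dense>

int main() {
  Eigen::MatrixXd A(3, 4);
  A << 1, 2, 3, 4,
       2, 4, 6, 8,   // Second row is a multiple of the first: A is rank deficient.
       0, 1, 0, 1;
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullV);
  Eigen::VectorXd x = svd.matrixV().col(A.cols() - 1);
  std::cout << "||Ax|| = " << (A * x).norm()       // Essentially zero.
            << ", ||x|| = " << x.norm() << "\n";   // Exactly one.
  return 0;
}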
diff --git a/intern/libmv/libmv/numeric/numeric_test.cc b/intern/libmv/libmv/numeric/numeric_test.cc
new file mode 100644
index 00000000000..0cdfaf33ab2
--- /dev/null
+++ b/intern/libmv/libmv/numeric/numeric_test.cc
@@ -0,0 +1,439 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/numeric/numeric.h"
+#include "testing/testing.h"
+
+using namespace libmv;
+
+namespace {
+
+TEST(Numeric, DynamicSizedNullspace) {
+ Mat A(3, 4);
+ A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
+ 0.42016166, 0.97863392, 0.33711682, 0.14479271,
+ 0.51016811, 0.66528302, 0.54395496, 0.57794893;
+ Vec x;
+ double s = Nullspace(&A, &x);
+ EXPECT_NEAR(0.0, s, 1e-15);
+ EXPECT_NEAR(0.0, (A * x).norm(), 1e-15);
+ EXPECT_NEAR(1.0, x.norm(), 1e-15);
+}
+
+TEST(Numeric, FixedSizeMatrixNullspace) {
+ Mat34 A;
+ A << 0.76026643, 0.01799744, 0.55192142, 0.8699745,
+ 0.42016166, 0.97863392, 0.33711682, 0.14479271,
+ 0.51016811, 0.66528302, 0.54395496, 0.57794893;
+ Vec x;
+ double s = Nullspace(&A, &x);
+ EXPECT_NEAR(0.0, s, 1e-15);
+ EXPECT_NEAR(0.0, (A * x).norm(), 1e-15);
+ EXPECT_NEAR(1.0, x.norm(), 1e-15);
+}
+
+TEST(Numeric, NullspaceMatchesLapackSVD) {
+ Mat43 A;
+ A << 0.76026643, 0.01799744, 0.55192142,
+ 0.8699745, 0.42016166, 0.97863392,
+ 0.33711682, 0.14479271, 0.51016811,
+ 0.66528302, 0.54395496, 0.57794893;
+ Vec x;
+ double s = Nullspace(&A, &x);
+ EXPECT_NEAR(1.0, x.norm(), 1e-15);
+ EXPECT_NEAR(0.206694992663, s, 1e-9);
+ EXPECT_NEAR(0.206694992663, (A * x).norm(), 1e-9);
+
+ EXPECT_NEAR(-0.64999717, x(0), 1e-8);
+ EXPECT_NEAR(-0.18452646, x(1), 1e-8);
+ EXPECT_NEAR(0.7371931, x(2), 1e-8);
+}
+
+TEST(Numeric, Nullspace2) {
+ Mat43 A;
+ A << 0.76026643, 0.01799744, 0.55192142,
+ 0.8699745, 0.42016166, 0.97863392,
+ 0.33711682, 0.14479271, 0.51016811,
+ 0.66528302, 0.54395496, 0.57794893;
+ Vec3 x1, x2;
+ double s = Nullspace2(&A, &x1, &x2);
+ EXPECT_NEAR(1.0, x1.norm(), 1e-15);
+ EXPECT_NEAR(0.206694992663, s, 1e-9);
+ EXPECT_NEAR(0.206694992663, (A * x1).norm(), 1e-9);
+
+ EXPECT_NEAR(-0.64999717, x1(0), 1e-8);
+ EXPECT_NEAR(-0.18452646, x1(1), 1e-8);
+ EXPECT_NEAR( 0.7371931, x1(2), 1e-8);
+
+ if (x2(0) < 0) {
+ x2 *= -1;
+ }
+ EXPECT_NEAR( 0.34679618, x2(0), 1e-8);
+ EXPECT_NEAR(-0.93519689, x2(1), 1e-8);
+ EXPECT_NEAR( 0.07168809, x2(2), 1e-8);
+}
+
+TEST(Numeric, TinyMatrixSquareTranspose) {
+ Mat2 A;
+ A << 1.0, 2.0, 3.0, 4.0;
+ libmv::TransposeInPlace(&A);
+ EXPECT_EQ(1.0, A(0, 0));
+ EXPECT_EQ(3.0, A(0, 1));
+ EXPECT_EQ(2.0, A(1, 0));
+ EXPECT_EQ(4.0, A(1, 1));
+}
+
+TEST(Numeric, NormalizeL1) {
+ Vec2 x;
+ x << 1, 2;
+ double l1 = NormalizeL1(&x);
+ EXPECT_DOUBLE_EQ(3., l1);
+ EXPECT_DOUBLE_EQ(1./3., x(0));
+ EXPECT_DOUBLE_EQ(2./3., x(1));
+}
+
+TEST(Numeric, NormalizeL2) {
+ Vec2 x;
+ x << 1, 2;
+ double l2 = NormalizeL2(&x);
+ EXPECT_DOUBLE_EQ(sqrt(5.0), l2);
+ EXPECT_DOUBLE_EQ(1./sqrt(5.), x(0));
+ EXPECT_DOUBLE_EQ(2./sqrt(5.), x(1));
+}
+
+TEST(Numeric, Diag) {
+ Vec x(2);
+ x << 1, 2;
+ Mat D = Diag(x);
+ EXPECT_EQ(1, D(0, 0));
+ EXPECT_EQ(0, D(0, 1));
+ EXPECT_EQ(0, D(1, 0));
+ EXPECT_EQ(2, D(1, 1));
+}
+
+TEST(Numeric, Determinant) {
+ Mat A(2, 2);
+ A << 1, 2,
+ -1, 3;
+ double detA = A.determinant();
+ EXPECT_NEAR(5, detA, 1e-8);
+
+ Mat B(4, 4);
+ B << 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 10, 11,
+ 12, 13, 14, 15;
+ double detB = B.determinant();
+ EXPECT_NEAR(0, detB, 1e-8);
+
+ Mat3 C;
+ C << 0, 1, 2,
+ 3, 4, 5,
+ 6, 7, 1;
+ double detC = C.determinant();
+ EXPECT_NEAR(21, detC, 1e-8);
+}
+
+TEST(Numeric, Inverse) {
+ Mat A(2, 2), A1;
+ A << 1, 2,
+ -1, 3;
+ Mat I = A * A.inverse();
+
+ EXPECT_NEAR(1, I(0, 0), 1e-8);
+ EXPECT_NEAR(0, I(0, 1), 1e-8);
+ EXPECT_NEAR(0, I(1, 0), 1e-8);
+ EXPECT_NEAR(1, I(1, 1), 1e-8);
+
+ Mat B(4, 4), B1;
+ B << 0, 1, 2, 3,
+ 4, 5, 6, 7,
+ 8, 9, 2, 11,
+ 12, 13, 14, 4;
+ Mat I2 = B * B.inverse();
+ EXPECT_NEAR(1, I2(0, 0), 1e-8);
+ EXPECT_NEAR(0, I2(0, 1), 1e-8);
+ EXPECT_NEAR(0, I2(0, 2), 1e-8);
+ EXPECT_NEAR(0, I2(1, 0), 1e-8);
+ EXPECT_NEAR(1, I2(1, 1), 1e-8);
+ EXPECT_NEAR(0, I2(1, 2), 1e-8);
+ EXPECT_NEAR(0, I2(2, 0), 1e-8);
+ EXPECT_NEAR(0, I2(2, 1), 1e-8);
+ EXPECT_NEAR(1, I2(2, 2), 1e-8);
+}
+
+TEST(Numeric, MeanAndVarianceAlongRows) {
+ int n = 4;
+ Mat points(2, n);
+ points << 0, 0, 1, 1,
+ 0, 2, 1, 3;
+
+ Vec mean, variance;
+ MeanAndVarianceAlongRows(points, &mean, &variance);
+
+ EXPECT_NEAR(0.5, mean(0), 1e-8);
+ EXPECT_NEAR(1.5, mean(1), 1e-8);
+ EXPECT_NEAR(0.25, variance(0), 1e-8);
+ EXPECT_NEAR(1.25, variance(1), 1e-8);
+}
+
+TEST(Numeric, HorizontalStack) {
+ Mat x(2, 1), y(2, 1), z;
+ x << 1, 2;
+ y << 3, 4;
+
+ HorizontalStack(x, y, &z);
+
+ EXPECT_EQ(2, z.cols());
+ EXPECT_EQ(2, z.rows());
+ EXPECT_EQ(1, z(0, 0));
+ EXPECT_EQ(2, z(1, 0));
+ EXPECT_EQ(3, z(0, 1));
+ EXPECT_EQ(4, z(1, 1));
+}
+
+TEST(Numeric, HStack) {
+ Mat x(2, 1), y(2, 1), z(2, 2);
+ x << 1, 2;
+ y << 3, 4;
+ z << 1, 3,
+ 2, 4;
+ Vec2 xC = x, yC = y;
+
+ Mat2 xy = HStack(x, y);
+ EXPECT_MATRIX_EQ(z, xy);
+
+ EXPECT_MATRIX_EQ(z, HStack(x, y));
+ EXPECT_MATRIX_EQ(z, HStack(x, yC));
+ EXPECT_MATRIX_EQ(z, HStack(xC, y));
+ EXPECT_MATRIX_EQ(z, HStack(xC, yC));
+}
+
+// TODO(keir): Need some way of verifying that the compile time types of the
+// resulting stacked matrices properly propagate the fixed dimensions.
+TEST(Numeric, VStack) {
+ Mat x(2, 2), y(2, 2), z(4, 2);
+ x << 1, 2,
+ 3, 4;
+ y << 10, 20,
+ 30, 40;
+ z << 1, 2,
+ 3, 4,
+ 10, 20,
+ 30, 40;
+ Mat2 xC = x, yC = y;
+
+ Mat xy = VStack(x, y);
+ EXPECT_MATRIX_EQ(z, xy);
+
+ EXPECT_MATRIX_EQ(z, VStack(x, y));
+ EXPECT_MATRIX_EQ(z, VStack(x, yC));
+ EXPECT_MATRIX_EQ(z, VStack(xC, y));
+ EXPECT_MATRIX_EQ(z, VStack(xC, yC));
+}
+
+TEST(Numeric, VerticalStack) {
+ Mat x(1, 2), y(1, 2), z;
+ x << 1, 2;
+ y << 3, 4;
+ VerticalStack(x, y, &z);
+
+ EXPECT_EQ(2, z.cols());
+ EXPECT_EQ(2, z.rows());
+ EXPECT_EQ(1, z(0, 0));
+ EXPECT_EQ(2, z(0, 1));
+ EXPECT_EQ(3, z(1, 0));
+ EXPECT_EQ(4, z(1, 1));
+}
+
+TEST(Numeric, CrossProduct) {
+ Vec3 x, y, z;
+ x << 1, 0, 0;
+ y << 0, 1, 0;
+ z << 0, 0, 1;
+ Vec3 xy = CrossProduct(x, y);
+ Vec3 yz = CrossProduct(y, z);
+ Vec3 zx = CrossProduct(z, x);
+ EXPECT_NEAR(0, DistanceLInfinity(xy, z), 1e-8);
+ EXPECT_NEAR(0, DistanceLInfinity(yz, x), 1e-8);
+ EXPECT_NEAR(0, DistanceLInfinity(zx, y), 1e-8);
+}
+
+TEST(Numeric, CrossProductMatrix) {
+ Vec3 x, y;
+ x << 1, 2, 3;
+ y << 2, 3, 4;
+ Vec3 xy = CrossProduct(x, y);
+ Vec3 yx = CrossProduct(y, x);
+ Mat3 X = CrossProductMatrix(x);
+ Vec3 Xy, Xty;
+ Xy = X * y;
+ Xty = X.transpose() * y;
+ EXPECT_NEAR(0, DistanceLInfinity(xy, Xy), 1e-8);
+ EXPECT_NEAR(0, DistanceLInfinity(yx, Xty), 1e-8);
+}
+
+TEST(Numeric, MatrixColumn) {
+ Mat A2(2, 3);
+ Vec2 v2;
+ A2 << 1, 2, 3,
+ 4, 5, 6;
+ MatrixColumn(A2, 1, &v2);
+ EXPECT_EQ(2, v2(0));
+ EXPECT_EQ(5, v2(1));
+
+ Mat A3(3, 3);
+ Vec3 v3;
+ A3 << 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9;
+ MatrixColumn(A3, 1, &v3);
+ EXPECT_EQ(2, v3(0));
+ EXPECT_EQ(5, v3(1));
+ EXPECT_EQ(8, v3(2));
+
+ Mat A4(4, 3);
+ Vec4 v4;
+ A4 << 1, 2, 3,
+ 4, 5, 6,
+ 7, 8, 9,
+ 10, 11, 12;
+ MatrixColumn(A4, 1, &v4);
+ EXPECT_EQ( 2, v4(0));
+ EXPECT_EQ( 5, v4(1));
+ EXPECT_EQ( 8, v4(2));
+ EXPECT_EQ(11, v4(3));
+}
+
+// This used to give a compile error with FLENS.
+TEST(Numeric, TinyMatrixView) {
+ Mat34 P;
+ Mat K = P.block(0, 0, 3, 3);
+}
+
+// This gives a compile error.
+TEST(Numeric, Mat3MatProduct) {
+ Mat3 A;
+ Mat3 B;
+ Mat C = A * B;
+}
+
+// This gives a compile error.
+TEST(Numeric, Vec3Negative) {
+ Vec3 y; y << 1, 2, 3;
+ Vec3 x = -y;
+ EXPECT_EQ(-1, x(0));
+ EXPECT_EQ(-2, x(1));
+ EXPECT_EQ(-3, x(2));
+}
+
+// This gives a compile error.
+TEST(Numeric, Vec3VecInteroperability) {
+ Vec y(3);
+ y << 1, 2, 3;
+ Vec3 x = y + y;
+ EXPECT_EQ(2, x(0));
+ EXPECT_EQ(4, x(1));
+ EXPECT_EQ(6, x(2));
+}
+
+// This segfaults inside lapack.
+TEST(Numeric, DeterminantLU7) {
+ Mat A(5, 5);
+ A << 1, 0, 0, 0, 0,
+ 0, 1, 0, 0, 0,
+ 0, 0, 1, 0, 0,
+ 0, 0, 0, 1, 0,
+ 0, 0, 0, 0, 1;
+ EXPECT_NEAR(1, A.determinant(), 1e-8);
+}
+
+// This segfaults inside lapack.
+TEST(Numeric, DeterminantLU) {
+ Mat A(2, 2);
+ A << 1, 2,
+ -1, 3;
+ EXPECT_NEAR(5, A.determinant(), 1e-8);
+}
+
+// This does unexpected things.
+// Keir: Not with eigen2!
+TEST(Numeric, InplaceProduct) {
+ Mat2 K, S;
+ K << 1, 0,
+ 0, 1;
+ S << 1, 0,
+ 0, 1;
+ K = K * S;
+ EXPECT_MATRIX_NEAR(Mat2::Identity(), K, 1e-8);
+}
+
+TEST(Numeric, ExtractColumns) {
+ Mat2X A(2, 5);
+ A << 1, 2, 3, 4, 5,
+ 6, 7, 8, 9, 10;
+ Vec2i columns; columns << 0, 2;
+ Mat2X extracted = ExtractColumns(A, columns);
+ EXPECT_NEAR(1, extracted(0, 0), 1e-15);
+ EXPECT_NEAR(3, extracted(0, 1), 1e-15);
+ EXPECT_NEAR(6, extracted(1, 0), 1e-15);
+ EXPECT_NEAR(8, extracted(1, 1), 1e-15);
+}
+
+TEST(Numeric, RotationRodrigues) {
+ Vec3 x, y, z;
+ x << 1, 0, 0;
+ y << 0, 1, 0;
+ z << 0, 0, 1;
+
+ Mat3 rodrigues_x = RotationRodrigues(x);
+ Mat3 rodrigues_y = RotationRodrigues(y);
+ Mat3 rodrigues_z = RotationRodrigues(z);
+
+ Mat3 Rx = RotationAroundX(1);
+ Mat3 Ry = RotationAroundY(1);
+ Mat3 Rz = RotationAroundZ(1);
+
+ EXPECT_MATRIX_NEAR(Rx, rodrigues_x, 1e-15);
+ EXPECT_MATRIX_NEAR(Ry, rodrigues_y, 1e-15);
+ EXPECT_MATRIX_NEAR(Rz, rodrigues_z, 1e-15);
+}
+
+TEST(Numeric, LookAt) {
+ // Simple orthogonality check.
+ Vec3 e; e << 1, 2, 3;
+ Mat3 R = LookAt(e), I = Mat3::Identity();
+ Mat3 RRT = R*R.transpose();
+ Mat3 RTR = R.transpose()*R;
+
+ EXPECT_MATRIX_NEAR(I, RRT, 1e-15);
+ EXPECT_MATRIX_NEAR(I, RTR, 1e-15);
+}
+
+TEST(Numeric, Reshape) {
+ Vec4 x; x << 1, 2, 3, 4;
+ Mat2 M, M_expected;
+ reshape(x, 2, 2, &M);
+ M_expected << 1, 2,
+ 3, 4;
+ EXPECT_MATRIX_NEAR(M_expected, M, 1e-15);
+}
+
+} // namespace
diff --git a/intern/libmv/libmv/numeric/poly.cc b/intern/libmv/libmv/numeric/poly.cc
new file mode 100644
index 00000000000..376403616c3
--- /dev/null
+++ b/intern/libmv/libmv/numeric/poly.cc
@@ -0,0 +1,23 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Routines for solving polynomials.
+
+// TODO(keir): Add a solver for degree > 3 polynomials.
diff --git a/intern/libmv/libmv/numeric/poly.h b/intern/libmv/libmv/numeric/poly.h
new file mode 100644
index 00000000000..76ba062d475
--- /dev/null
+++ b/intern/libmv/libmv/numeric/poly.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_NUMERIC_POLY_H_
+#define LIBMV_NUMERIC_POLY_H_
+
+#include <cmath>
+#include <stdio.h>
+
+namespace libmv {
+
+// Solve the cubic polynomial
+//
+// x^3 + a*x^2 + b*x + c = 0
+//
+// The number of roots (from zero to three) is returned. If the number of roots
+// is less than three, then higher numbered x's are not changed. For example,
+// if there are 2 roots, only x0 and x1 are set.
+//
+// The GSL cubic solver was used as a reference for this routine.
+template<typename Real>
+int SolveCubicPolynomial(Real a, Real b, Real c,
+ Real *x0, Real *x1, Real *x2) {
+ Real q = a * a - 3 * b;
+ Real r = 2 * a * a * a - 9 * a * b + 27 * c;
+
+ Real Q = q / 9;
+ Real R = r / 54;
+
+ Real Q3 = Q * Q * Q;
+ Real R2 = R * R;
+
+ Real CR2 = 729 * r * r;
+ Real CQ3 = 2916 * q * q * q;
+
+ if (R == 0 && Q == 0) {
+    // Triple root in one place.
+ *x0 = *x1 = *x2 = -a / 3;
+ return 3;
+
+ } else if (CR2 == CQ3) {
+ // This test is actually R2 == Q3, written in a form suitable for exact
+ // computation with integers.
+ //
+ // Due to finite precision some double roots may be missed, and considered
+ // to be a pair of complex roots z = x +/- epsilon i close to the real
+ // axis.
+ Real sqrtQ = sqrt(Q);
+ if (R > 0) {
+ *x0 = -2 * sqrtQ - a / 3;
+ *x1 = sqrtQ - a / 3;
+ *x2 = sqrtQ - a / 3;
+ } else {
+ *x0 = -sqrtQ - a / 3;
+ *x1 = -sqrtQ - a / 3;
+ *x2 = 2 * sqrtQ - a / 3;
+ }
+ return 3;
+
+ } else if (CR2 < CQ3) {
+ // This case is equivalent to R2 < Q3.
+ Real sqrtQ = sqrt(Q);
+ Real sqrtQ3 = sqrtQ * sqrtQ * sqrtQ;
+ Real theta = acos(R / sqrtQ3);
+ Real norm = -2 * sqrtQ;
+ *x0 = norm * cos(theta / 3) - a / 3;
+ *x1 = norm * cos((theta + 2.0 * M_PI) / 3) - a / 3;
+ *x2 = norm * cos((theta - 2.0 * M_PI) / 3) - a / 3;
+
+ // Put the roots in ascending order.
+ if (*x0 > *x1) {
+ std::swap(*x0, *x1);
+ }
+ if (*x1 > *x2) {
+ std::swap(*x1, *x2);
+ if (*x0 > *x1) {
+ std::swap(*x0, *x1);
+ }
+ }
+ return 3;
+ }
+ Real sgnR = (R >= 0 ? 1 : -1);
+ Real A = -sgnR * pow(fabs(R) + sqrt(R2 - Q3), 1.0/3.0);
+ Real B = Q / A;
+ *x0 = A + B - a / 3;
+ return 1;
+}
+
+// The coefficients are in ascending powers, i.e. coeffs[N]*x^N.
+template<typename Real>
+int SolveCubicPolynomial(const Real *coeffs, Real *solutions) {
+ if (coeffs[0] == 0.0) {
+ // TODO(keir): This is a quadratic not a cubic. Implement a quadratic
+ // solver!
+ return 0;
+ }
+ Real a = coeffs[2] / coeffs[3];
+ Real b = coeffs[1] / coeffs[3];
+ Real c = coeffs[0] / coeffs[3];
+ return SolveCubicPolynomial(a, b, c,
+ solutions + 0,
+ solutions + 1,
+ solutions + 2);
+}
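+
+// A minimal usage sketch (the helper name is hypothetical and not part of
+// libmv): solves (x - 1)(x - 2)(x - 3) = 0, i.e. x^3 - 6x^2 + 11x - 6 = 0,
+// using the ascending-power overload above.
+inline int SolveExampleCubic(double *roots) {
+  // Coefficients in ascending powers: coeffs[N] multiplies x^N.
+  const double coeffs[4] = {-6.0, 11.0, -6.0, 1.0};
+  // Expected to return 3, with roots {1, 2, 3} in ascending order.
+  return SolveCubicPolynomial(coeffs, roots);
+}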
+} // namespace libmv
+#endif // LIBMV_NUMERIC_POLY_H_
diff --git a/intern/libmv/libmv/numeric/poly_test.cc b/intern/libmv/libmv/numeric/poly_test.cc
new file mode 100644
index 00000000000..ea50383190f
--- /dev/null
+++ b/intern/libmv/libmv/numeric/poly_test.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/poly.h"
+#include "testing/testing.h"
+
+using namespace libmv;
+
+namespace {
+
+// Find the polynomial coefficients of x in the equation
+//
+// (x - a)(x - b)(x - c) == 0
+//
+// by expanding to
+//
+// x^3 - (c+b+a) * x^2 + (a*b+(b+a)*c) * x - a*b*c = 0.
+// = p = q = r
+void CoeffsForCubicZeros(double a, double b, double c,
+ double *p, double *q, double *r) {
+ *p = -(c + b + a);
+ *q = (a * b + (b + a) * c);
+ *r = -a * b * c;
+}
+// Find the polynomial coefficients of x in the equation
+//
+// (x - a)(x - b)(x - c)(x - d) == 0
+//
+// by expanding to
+//
+// x^4 - (d+c+b+a) * x^3 + (d*(c+b+a) + a*b+(b+a)*c) * x^2
+// - (d*(a*b+(b+a)*c)+a*b*c) * x + a*b*c*d = 0.
+void CoeffsForQuarticZeros(double a, double b, double c, double d,
+ double *p, double *q, double *r, double *s) {
+ *p = -(d + c + b + a);
+ *q = (d * (c + b + a) + a * b + (b + a) * c);
+ *r = -(d * (a * b + (b + a) * c) + a * b * c);
+ *s = a * b * c * d;
+}
+
+TEST(Poly, SolveCubicPolynomial) {
+ double a, b, c, aa, bb, cc;
+ double p, q, r;
+
+ a = 1; b = 2; c = 3;
+ CoeffsForCubicZeros(a, b, c, &p, &q, &r);
+ ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
+ EXPECT_NEAR(a, aa, 1e-10);
+ EXPECT_NEAR(b, bb, 1e-10);
+ EXPECT_NEAR(c, cc, 1e-10);
+
+ a = 0; b = 1; c = 3;
+ CoeffsForCubicZeros(a, b, c, &p, &q, &r);
+ ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
+ EXPECT_NEAR(a, aa, 1e-10);
+ EXPECT_NEAR(b, bb, 1e-10);
+ EXPECT_NEAR(c, cc, 1e-10);
+
+ a = -10; b = 0; c = 1;
+ CoeffsForCubicZeros(a, b, c, &p, &q, &r);
+ ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
+ EXPECT_NEAR(a, aa, 1e-10);
+ EXPECT_NEAR(b, bb, 1e-10);
+ EXPECT_NEAR(c, cc, 1e-10);
+
+ a = -8; b = 1; c = 3;
+ CoeffsForCubicZeros(a, b, c, &p, &q, &r);
+ ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
+ EXPECT_NEAR(a, aa, 1e-10);
+ EXPECT_NEAR(b, bb, 1e-10);
+ EXPECT_NEAR(c, cc, 1e-10);
+
+ a = 28; b = 28; c = 105;
+ CoeffsForCubicZeros(a, b, c, &p, &q, &r);
+ ASSERT_EQ(3, SolveCubicPolynomial(p, q, r, &aa, &bb, &cc));
+ EXPECT_NEAR(a, aa, 1e-10);
+ EXPECT_NEAR(b, bb, 1e-10);
+ EXPECT_NEAR(c, cc, 1e-10);
+}
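+
+// A small sanity check (added for illustration; not part of the original
+// suite) for the otherwise-unused quartic helper above: the expansion of
+// (x - 1)(x - 2)(x - 3)(x - 4) is x^4 - 10x^3 + 35x^2 - 50x + 24.
+TEST(Poly, CoeffsForQuarticZeros) {
+  double p, q, r, s;
+  CoeffsForQuarticZeros(1, 2, 3, 4, &p, &q, &r, &s);
+  EXPECT_NEAR(-10, p, 1e-10);
+  EXPECT_NEAR(35, q, 1e-10);
+  EXPECT_NEAR(-50, r, 1e-10);
+  EXPECT_NEAR(24, s, 1e-10);
+}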
+} // namespace
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.cc b/intern/libmv/libmv/simple_pipeline/bundle.cc
new file mode 100644
index 00000000000..e61650fb371
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/bundle.cc
@@ -0,0 +1,658 @@
+// Copyright (c) 2011, 2012, 2013 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/bundle.h"
+
+#include <map>
+
+#include "ceres/ceres.h"
+#include "ceres/rotation.h"
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/distortion_models.h"
+
+#ifdef _OPENMP
+# include <omp.h>
+#endif
+
+namespace libmv {
+
+// The intrinsics need to get combined into a single parameter block; use these
+// enums to index instead of numeric constants.
+enum {
+ // Camera calibration values.
+ OFFSET_FOCAL_LENGTH,
+ OFFSET_PRINCIPAL_POINT_X,
+ OFFSET_PRINCIPAL_POINT_Y,
+
+ // Distortion model coefficients.
+ OFFSET_K1,
+ OFFSET_K2,
+ OFFSET_K3,
+ OFFSET_P1,
+ OFFSET_P2,
+
+ // Maximal possible offset.
+ OFFSET_MAX,
+};
+
+#define FIRST_DISTORTION_COEFFICIENT OFFSET_K1
+#define LAST_DISTORTION_COEFFICIENT OFFSET_P2
+#define NUM_DISTORTION_COEFFICIENTS \
+ (LAST_DISTORTION_COEFFICIENT - FIRST_DISTORTION_COEFFICIENT + 1)
+
+namespace {
+
+// Cost functor which computes the reprojection error of a 3D point X
+// on a camera defined by an angle-axis rotation and its translation
+// (which are in the same block for optimization reasons).
+//
+// This functor uses a radial distortion model.
+struct OpenCVReprojectionError {
+ OpenCVReprojectionError(const DistortionModelType distortion_model,
+ const double observed_x,
+ const double observed_y,
+ const double weight)
+ : distortion_model_(distortion_model),
+ observed_x_(observed_x), observed_y_(observed_y),
+ weight_(weight) {}
+
+ template <typename T>
+ bool operator()(const T* const intrinsics,
+ const T* const R_t, // Rotation denoted by angle axis
+ // followed by translation
+ const T* const X, // Point coordinates 3x1.
+ T* residuals) const {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
+
+ // Compute projective coordinates: x = RX + t.
+ T x[3];
+
+ ceres::AngleAxisRotatePoint(R_t, X, x);
+ x[0] += R_t[3];
+ x[1] += R_t[4];
+ x[2] += R_t[5];
+
+ // Prevent points from going behind the camera.
+ if (x[2] < T(0)) {
+ return false;
+ }
+
+ // Compute normalized coordinates: x /= x[2].
+ T xn = x[0] / x[2];
+ T yn = x[1] / x[2];
+
+ T predicted_x, predicted_y;
+
+ // Apply distortion to the normalized points to get (xd, yd).
+ // TODO(keir): Do early bailouts for zero distortion; these are expensive
+ // jet operations.
+ switch (distortion_model_) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const T& k1 = intrinsics[OFFSET_K1];
+ const T& k2 = intrinsics[OFFSET_K2];
+ const T& k3 = intrinsics[OFFSET_K3];
+ const T& p1 = intrinsics[OFFSET_P1];
+ const T& p2 = intrinsics[OFFSET_P2];
+
+ ApplyPolynomialDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2, k3,
+ p1, p2,
+ xn, yn,
+ &predicted_x,
+ &predicted_y);
+ break;
+ }
+ case DISTORTION_MODEL_DIVISION:
+ {
+ const T& k1 = intrinsics[OFFSET_K1];
+ const T& k2 = intrinsics[OFFSET_K2];
+
+ ApplyDivisionDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2,
+ xn, yn,
+ &predicted_x,
+ &predicted_y);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown distortion model";
+ }
+
+ // The error is the difference between the predicted and observed position.
+ residuals[0] = (predicted_x - T(observed_x_)) * weight_;
+ residuals[1] = (predicted_y - T(observed_y_)) * weight_;
+ return true;
+ }
+
+ const DistortionModelType distortion_model_;
+ const double observed_x_;
+ const double observed_y_;
+ const double weight_;
+};
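+
+// For reference, the functor above is wrapped for Ceres by the calls to
+// AddResidualBlock() below as
+//
+//   new ceres::AutoDiffCostFunction<
+//       OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(...)
+//
+// where the template arguments are the residual size (2), the packed
+// intrinsics block (OFFSET_MAX), the camera R_t block (6) and the 3D
+// point (3).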
+
+// Print a message to the log about which camera intrinsics are going
+// to be optimized.
+void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
+ if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
+ LOG(INFO) << "Bundling only camera positions.";
+ } else {
+ std::string bundling_message = "";
+
+#define APPEND_BUNDLING_INTRINSICS(name, flag) \
+ if (bundle_intrinsics & flag) { \
+ if (!bundling_message.empty()) { \
+ bundling_message += ", "; \
+ } \
+ bundling_message += name; \
+ } (void)0
+
+ APPEND_BUNDLING_INTRINSICS("f", BUNDLE_FOCAL_LENGTH);
+ APPEND_BUNDLING_INTRINSICS("px, py", BUNDLE_PRINCIPAL_POINT);
+ APPEND_BUNDLING_INTRINSICS("k1", BUNDLE_RADIAL_K1);
+ APPEND_BUNDLING_INTRINSICS("k2", BUNDLE_RADIAL_K2);
+ APPEND_BUNDLING_INTRINSICS("p1", BUNDLE_TANGENTIAL_P1);
+ APPEND_BUNDLING_INTRINSICS("p2", BUNDLE_TANGENTIAL_P2);
+
+ LOG(INFO) << "Bundling " << bundling_message << ".";
+ }
+}
+
+// Pack intrinsics from the object into an array for easier
+// and faster minimization.
+void PackIntrinsicsIntoArray(const CameraIntrinsics &intrinsics,
+ double ceres_intrinsics[OFFSET_MAX]) {
+ ceres_intrinsics[OFFSET_FOCAL_LENGTH] = intrinsics.focal_length();
+ ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X] = intrinsics.principal_point_x();
+ ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y] = intrinsics.principal_point_y();
+
+ int num_distortion_parameters = intrinsics.num_distortion_parameters();
+ assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
+
+ const double *distortion_parameters = intrinsics.distortion_parameters();
+ for (int i = 0; i < num_distortion_parameters; ++i) {
+ ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i] =
+ distortion_parameters[i];
+ }
+}
+
+// Unpack intrinsics back from an array to an object.
+void UnpackIntrinsicsFromArray(const double ceres_intrinsics[OFFSET_MAX],
+ CameraIntrinsics *intrinsics) {
+ intrinsics->SetFocalLength(ceres_intrinsics[OFFSET_FOCAL_LENGTH],
+ ceres_intrinsics[OFFSET_FOCAL_LENGTH]);
+
+ intrinsics->SetPrincipalPoint(ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X],
+ ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y]);
+
+ int num_distortion_parameters = intrinsics->num_distortion_parameters();
+ assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
+
+ double *distortion_parameters = intrinsics->distortion_parameters();
+ for (int i = 0; i < num_distortion_parameters; ++i) {
+ distortion_parameters[i] =
+ ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i];
+ }
+}
+
+// Get a vector of the cameras' rotations, denoted by angle axis,
+// concatenated with their translations into a single block.
+//
+// The element with index i corresponds to the rotation+translation of
+// the camera at image i.
+vector<Vec6> PackCamerasRotationAndTranslation(
+ const Tracks &tracks,
+ const EuclideanReconstruction &reconstruction) {
+ vector<Vec6> all_cameras_R_t;
+ int max_image = tracks.MaxImage();
+
+ all_cameras_R_t.resize(max_image + 1);
+
+ for (int i = 0; i <= max_image; i++) {
+ const EuclideanCamera *camera = reconstruction.CameraForImage(i);
+
+ if (!camera) {
+ continue;
+ }
+
+ ceres::RotationMatrixToAngleAxis(&camera->R(0, 0),
+ &all_cameras_R_t[i](0));
+ all_cameras_R_t[i].tail<3>() = camera->t;
+ }
+ return all_cameras_R_t;
+}
+
+// Convert camera rotations from angle axis back to rotation matrices.
+void UnpackCamerasRotationAndTranslation(
+ const Tracks &tracks,
+ const vector<Vec6> &all_cameras_R_t,
+ EuclideanReconstruction *reconstruction) {
+ int max_image = tracks.MaxImage();
+
+ for (int i = 0; i <= max_image; i++) {
+ EuclideanCamera *camera = reconstruction->CameraForImage(i);
+
+ if (!camera) {
+ continue;
+ }
+
+ ceres::AngleAxisToRotationMatrix(&all_cameras_R_t[i](0),
+ &camera->R(0, 0));
+ camera->t = all_cameras_R_t[i].tail<3>();
+ }
+}
+
+// Converts a sparse CRSMatrix to an Eigen matrix so it can be used
+// throughout the pipeline.
+//
+// TODO(sergey): currently uses dense Eigen matrices; it would be best
+// to use sparse Eigen matrices.
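+//
+// As an illustration of the CRS layout (hypothetical data): the 2x3 matrix
+//
+//   [1 0 2]
+//   [0 3 0]
+//
+// is stored as rows = {0, 2, 3}, cols = {0, 2, 1}, values = {1, 2, 3},
+// and this function expands it back into a dense Eigen matrix.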
+void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
+ Mat *eigen_matrix) {
+ eigen_matrix->resize(crs_matrix.num_rows, crs_matrix.num_cols);
+ eigen_matrix->setZero();
+
+ for (int row = 0; row < crs_matrix.num_rows; ++row) {
+ int start = crs_matrix.rows[row];
+ int end = crs_matrix.rows[row + 1] - 1;
+
+ for (int i = start; i <= end; i++) {
+ int col = crs_matrix.cols[i];
+ double value = crs_matrix.values[i];
+
+ (*eigen_matrix)(row, col) = value;
+ }
+ }
+}
+
+void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction,
+ vector<Vec6> *all_cameras_R_t,
+ ceres::Problem *problem,
+ BundleEvaluation *evaluation) {
+ int max_track = tracks.MaxTrack();
+ // The number of camera rotations equals the number of translations.
+ int num_cameras = all_cameras_R_t->size();
+ int num_points = 0;
+
+ vector<EuclideanPoint*> minimized_points;
+ for (int i = 0; i <= max_track; i++) {
+ EuclideanPoint *point = reconstruction->PointForTrack(i);
+ if (point) {
+ // We need to know whether the track has constant zero weight;
+ // if so, it will not have a parameter block in the problem.
+ //
+ // Getting all markers for a track is not so bad currently, since
+ // this code is only used by keyframe selection, when there are
+ // not many tracks and only 2 frames anyway.
+ vector<Marker> markers_of_track = tracks.MarkersForTrack(i);
+ for (int j = 0; j < markers_of_track.size(); j++) {
+ if (markers_of_track.at(j).weight != 0.0) {
+ minimized_points.push_back(point);
+ num_points++;
+ break;
+ }
+ }
+ }
+ }
+
+ LG << "Number of cameras " << num_cameras;
+ LG << "Number of points " << num_points;
+
+ evaluation->num_cameras = num_cameras;
+ evaluation->num_points = num_points;
+
+ if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
+ ceres::CRSMatrix evaluated_jacobian;
+ ceres::Problem::EvaluateOptions eval_options;
+
+ // Cameras go first in the ordering.
+ int max_image = tracks.MaxImage();
+ for (int i = 0; i <= max_image; i++) {
+ const EuclideanCamera *camera = reconstruction->CameraForImage(i);
+ if (camera) {
+ double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
+
+ // All cameras are variable now.
+ problem->SetParameterBlockVariable(current_camera_R_t);
+
+ eval_options.parameter_blocks.push_back(current_camera_R_t);
+ }
+ }
+
+ // Points go at the end of the ordering.
+ for (int i = 0; i < minimized_points.size(); i++) {
+ EuclideanPoint *point = minimized_points.at(i);
+ eval_options.parameter_blocks.push_back(&point->X(0));
+ }
+
+ problem->Evaluate(eval_options,
+ NULL, NULL, NULL,
+ &evaluated_jacobian);
+
+ CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
+ }
+}
+
+// This is a utility function to bundle only the 3D positions of
+// the given markers list.
+//
+// The main purpose of this function is to adjust the positions of tracks
+// which have constant zero weight and so far were only using
+// algebraic intersection to obtain their 3D positions.
+//
+// At this point we only need to bundle point positions; the cameras
+// are to stay completely still here.
+void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
+ const vector<Marker> &markers,
+ vector<Vec6> &all_cameras_R_t,
+ double ceres_intrinsics[OFFSET_MAX],
+ EuclideanReconstruction *reconstruction) {
+ ceres::Problem::Options problem_options;
+ ceres::Problem problem(problem_options);
+ int num_residuals = 0;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
+ EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ if (camera == NULL || point == NULL) {
+ continue;
+ }
+
+ // Rotation of the camera denoted in angle axis, followed by
+ // the camera translation.
+ double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
+
+ problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
+ OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
+ new OpenCVReprojectionError(
+ distortion_model,
+ marker.x,
+ marker.y,
+ 1.0)),
+ NULL,
+ ceres_intrinsics,
+ current_camera_R_t,
+ &point->X(0));
+
+ problem.SetParameterBlockConstant(current_camera_R_t);
+ num_residuals++;
+ }
+
+ LG << "Number of residuals: " << num_residuals;
+ if (!num_residuals) {
+ LG << "Skipping running minimizer with zero residuals";
+ return;
+ }
+
+ problem.SetParameterBlockConstant(ceres_intrinsics);
+
+ // Configure the solver.
+ ceres::Solver::Options options;
+ options.use_nonmonotonic_steps = true;
+ options.preconditioner_type = ceres::SCHUR_JACOBI;
+ options.linear_solver_type = ceres::ITERATIVE_SCHUR;
+ options.use_explicit_schur_complement = true;
+ options.use_inner_iterations = true;
+ options.max_num_iterations = 100;
+
+#ifdef _OPENMP
+ options.num_threads = omp_get_max_threads();
+ options.num_linear_solver_threads = omp_get_max_threads();
+#endif
+
+ // Solve!
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
+
+ LG << "Final report:\n" << summary.FullReport();
+
+}
+
+} // namespace
+
+void EuclideanBundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction) {
+ PolynomialCameraIntrinsics empty_intrinsics;
+ EuclideanBundleCommonIntrinsics(tracks,
+ BUNDLE_NO_INTRINSICS,
+ BUNDLE_NO_CONSTRAINTS,
+ reconstruction,
+ &empty_intrinsics,
+ NULL);
+}
+
+void EuclideanBundleCommonIntrinsics(
+ const Tracks &tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction *reconstruction,
+ CameraIntrinsics *intrinsics,
+ BundleEvaluation *evaluation) {
+ LG << "Original intrinsics: " << *intrinsics;
+ vector<Marker> markers = tracks.AllMarkers();
+
+ // N-th element denotes whether track N is a constant zero-weighted track.
+ vector<bool> zero_weight_tracks_flags(tracks.MaxTrack() + 1, true);
+
+ // Residual blocks with 10 parameters are unwieldy with Ceres, so pack the
+ // intrinsics into a single block and rely on local parameterizations to
+ // control which intrinsics are allowed to vary.
+ double ceres_intrinsics[OFFSET_MAX];
+ PackIntrinsicsIntoArray(*intrinsics, ceres_intrinsics);
+
+ // Convert camera rotations to angle axis and merge them with translations
+ // into a single parameter block for maximal minimization speed.
+ //
+ // The block for minimization has the following structure:
+ // <3 elements for angle-axis> <3 elements for translation>
+ vector<Vec6> all_cameras_R_t =
+ PackCamerasRotationAndTranslation(tracks, *reconstruction);
+
+ // Parameterization used to restrict camera motion for modal solvers.
+ ceres::SubsetParameterization *constant_translation_parameterization = NULL;
+ if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
+ std::vector<int> constant_translation;
+
+ // First three elements are rotation, last three are translation.
+ constant_translation.push_back(3);
+ constant_translation.push_back(4);
+ constant_translation.push_back(5);
+
+ constant_translation_parameterization =
+ new ceres::SubsetParameterization(6, constant_translation);
+ }
+
+ // Add residual blocks to the problem.
+ ceres::Problem::Options problem_options;
+ ceres::Problem problem(problem_options);
+ int num_residuals = 0;
+ bool have_locked_camera = false;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ EuclideanCamera *camera = reconstruction->CameraForImage(marker.image);
+ EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ if (camera == NULL || point == NULL) {
+ continue;
+ }
+
+ // Rotation of the camera denoted in angle axis, followed by
+ // the camera translation.
+ double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
+
+ // Skip the residual block for markers which have absolutely
+ // no effect on the final solution.
+ // This keeps Ceres from going crazy.
+ if (marker.weight != 0.0) {
+ problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
+ OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
+ new OpenCVReprojectionError(
+ intrinsics->GetDistortionModelType(),
+ marker.x,
+ marker.y,
+ marker.weight)),
+ NULL,
+ ceres_intrinsics,
+ current_camera_R_t,
+ &point->X(0));
+
+ // We lock the first camera to better deal with scene orientation ambiguity.
+ if (!have_locked_camera) {
+ problem.SetParameterBlockConstant(current_camera_R_t);
+ have_locked_camera = true;
+ }
+
+ if (bundle_constraints & BUNDLE_NO_TRANSLATION) {
+ problem.SetParameterization(current_camera_R_t,
+ constant_translation_parameterization);
+ }
+
+ zero_weight_tracks_flags[marker.track] = false;
+ num_residuals++;
+ }
+ }
+ LG << "Number of residuals: " << num_residuals;
+
+ if (!num_residuals) {
+ LG << "Skipping running minimizer with zero residuals";
+ return;
+ }
+
+ if (intrinsics->GetDistortionModelType() == DISTORTION_MODEL_DIVISION &&
+ (bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
+ LOG(FATAL) << "Division model doesn't support bundling "
+ "of tangential distortion";
+ }
+
+ BundleIntrinsicsLogMessage(bundle_intrinsics);
+
+ if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
+ // No camera intrinsics are being refined,
+ // set the whole parameter block as constant for best performance.
+ problem.SetParameterBlockConstant(ceres_intrinsics);
+ } else {
+ // Set the camera intrinsics that are not to be bundled as
+ // constant using some macro trickery.
+
+ std::vector<int> constant_intrinsics;
+#define MAYBE_SET_CONSTANT(bundle_enum, offset) \
+ if (!(bundle_intrinsics & bundle_enum)) { \
+ constant_intrinsics.push_back(offset); \
+ }
+ MAYBE_SET_CONSTANT(BUNDLE_FOCAL_LENGTH, OFFSET_FOCAL_LENGTH);
+ MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_X);
+ MAYBE_SET_CONSTANT(BUNDLE_PRINCIPAL_POINT, OFFSET_PRINCIPAL_POINT_Y);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K1, OFFSET_K1);
+ MAYBE_SET_CONSTANT(BUNDLE_RADIAL_K2, OFFSET_K2);
+ MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P1, OFFSET_P1);
+ MAYBE_SET_CONSTANT(BUNDLE_TANGENTIAL_P2, OFFSET_P2);
+#undef MAYBE_SET_CONSTANT
+
+ // Always set K3 constant, it's not used at the moment.
+ constant_intrinsics.push_back(OFFSET_K3);
+
+ ceres::SubsetParameterization *subset_parameterization =
+ new ceres::SubsetParameterization(OFFSET_MAX, constant_intrinsics);
+
+ problem.SetParameterization(ceres_intrinsics, subset_parameterization);
+ }
+
+ // Configure the solver.
+ ceres::Solver::Options options;
+ options.use_nonmonotonic_steps = true;
+ options.preconditioner_type = ceres::SCHUR_JACOBI;
+ options.linear_solver_type = ceres::ITERATIVE_SCHUR;
+ options.use_explicit_schur_complement = true;
+ options.use_inner_iterations = true;
+ options.max_num_iterations = 100;
+
+#ifdef _OPENMP
+ options.num_threads = omp_get_max_threads();
+ options.num_linear_solver_threads = omp_get_max_threads();
+#endif
+
+ // Solve!
+ ceres::Solver::Summary summary;
+ ceres::Solve(options, &problem, &summary);
+
+ LG << "Final report:\n" << summary.FullReport();
+
+ // Copy rotations and translations back.
+ UnpackCamerasRotationAndTranslation(tracks,
+ all_cameras_R_t,
+ reconstruction);
+
+ // Copy intrinsics back.
+ if (bundle_intrinsics != BUNDLE_NO_INTRINSICS)
+ UnpackIntrinsicsFromArray(ceres_intrinsics, intrinsics);
+
+ LG << "Final intrinsics: " << *intrinsics;
+
+ if (evaluation) {
+ EuclideanBundlerPerformEvaluation(tracks, reconstruction, &all_cameras_R_t,
+ &problem, evaluation);
+ }
+
+ // Separate step to adjust positions of tracks which are
+ // constant zero-weighted.
+ vector<Marker> zero_weight_markers;
+ for (int track = 0; track <= tracks.MaxTrack(); ++track) {
+ if (zero_weight_tracks_flags[track]) {
+ vector<Marker> current_markers = tracks.MarkersForTrack(track);
+ zero_weight_markers.reserve(zero_weight_markers.size() +
+ current_markers.size());
+ for (int i = 0; i < current_markers.size(); ++i) {
+ zero_weight_markers.push_back(current_markers[i]);
+ }
+ }
+ }
+
+ if (zero_weight_markers.size()) {
+ LG << "Refining position of constant zero-weighted tracks";
+ EuclideanBundlePointsOnly(intrinsics->GetDistortionModelType(),
+ zero_weight_markers,
+ all_cameras_R_t,
+ ceres_intrinsics,
+ reconstruction);
+ }
+}
+
+void ProjectiveBundle(const Tracks & /*tracks*/,
+ ProjectiveReconstruction * /*reconstruction*/) {
+ // TODO(keir): Implement this! This can't work until we have a better bundler
+ // than SSBA, since SSBA has no support for projective bundling.
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.h b/intern/libmv/libmv/simple_pipeline/bundle.h
new file mode 100644
index 00000000000..781bd8476fe
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/bundle.h
@@ -0,0 +1,148 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_BUNDLE_H
+#define LIBMV_SIMPLE_PIPELINE_BUNDLE_H
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+class CameraIntrinsics;
+class EuclideanReconstruction;
+class ProjectiveReconstruction;
+class Tracks;
+
+struct BundleEvaluation {
+ BundleEvaluation() :
+ num_cameras(0),
+ num_points(0),
+ evaluate_jacobian(false) {
+ }
+
+ // Number of cameras that appeared in the bundle adjustment problem.
+ int num_cameras;
+
+ // Number of points that appeared in the bundle adjustment problem.
+ int num_points;
+
+ // When set to true, the Jacobian of the problem after optimization
+ // will be evaluated and stored in \a jacobian.
+ bool evaluate_jacobian;
+
+ // Contains evaluated jacobian of the problem.
+ // Parameters are ordered in the following way:
+ // - Intrinsics block
+ // - Cameras (for each camera rotation goes first, then translation)
+ // - Points
+ Mat jacobian;
+};
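+
+// A minimal usage sketch (illustrative only; tracks, reconstruction and
+// intrinsics are assumed to be provided by the caller): request the
+// Jacobian from EuclideanBundleCommonIntrinsics(), declared below.
+//
+//   BundleEvaluation evaluation;
+//   evaluation.evaluate_jacobian = true;
+//   EuclideanBundleCommonIntrinsics(tracks,
+//                                   BUNDLE_NO_INTRINSICS,
+//                                   BUNDLE_NO_CONSTRAINTS,
+//                                   &reconstruction,
+//                                   &intrinsics,
+//                                   &evaluation);
+//   // evaluation.jacobian now holds the dense Jacobian described above.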
+
+/*!
+ Refine camera poses and 3D coordinates using bundle adjustment.
+
+ This routine adjusts all cameras and points in \a *reconstruction. This
+ assumes a full observation for reconstructed tracks; this implies that if
+ there is a reconstructed 3D point (a bundle) for a track, then all markers
+ for that track will be included in the minimization. \a tracks should
+ contain markers used in the initial reconstruction.
+
+ The cameras and bundles (3D points) are refined in-place.
+
+ \note This assumes an outlier-free set of markers.
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanReconstructTwoFrames
+*/
+void EuclideanBundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction);
+
+/*!
+ Refine camera poses and 3D coordinates using bundle adjustment.
+
+ This routine adjusts all camera positions, points, and the camera
+ intrinsics (assumed common across all images) in \a *reconstruction. This
+ assumes a full observation for reconstructed tracks; this implies that if
+ there is a reconstructed 3D point (a bundle) for a track, then all markers
+ for that track will be included in the minimization. \a tracks should
+ contain markers used in the initial reconstruction.
+
+ The cameras, bundles, and intrinsics are refined in-place.
+
+ Constraints denote which blocks to keep constant during bundling.
+ For example, it is useful to keep camera translations constant
+ when bundling tripod motions.
+
+ If \a evaluation is not null, various evaluation statistics are filled in
+ there, and all the requested additional information (such as the Jacobian)
+ is also calculated there. Also see the comments for BundleEvaluation.
+ A usage sketch follows the declaration below.
+
+ \note This assumes an outlier-free set of markers.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanReconstructTwoFrames
+*/
+enum BundleIntrinsics {
+ BUNDLE_NO_INTRINSICS = 0,
+ BUNDLE_FOCAL_LENGTH = 1,
+ BUNDLE_PRINCIPAL_POINT = 2,
+ BUNDLE_RADIAL_K1 = 4,
+ BUNDLE_RADIAL_K2 = 8,
+ BUNDLE_RADIAL = 12,
+ BUNDLE_TANGENTIAL_P1 = 16,
+ BUNDLE_TANGENTIAL_P2 = 32,
+ BUNDLE_TANGENTIAL = 48,
+};
+enum BundleConstraints {
+ BUNDLE_NO_CONSTRAINTS = 0,
+ BUNDLE_NO_TRANSLATION = 1,
+};
+void EuclideanBundleCommonIntrinsics(
+ const Tracks &tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction *reconstruction,
+ CameraIntrinsics *intrinsics,
+ BundleEvaluation *evaluation = NULL);
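+
+// A minimal usage sketch (the helper name is hypothetical and not part of
+// libmv): refine focal length and both radial coefficients while keeping
+// camera translations constant, as one might do for tripod footage.
+inline void EuclideanBundleFocalAndRadialForTripod(
+    const Tracks &tracks,
+    EuclideanReconstruction *reconstruction,
+    CameraIntrinsics *intrinsics) {
+  EuclideanBundleCommonIntrinsics(tracks,
+                                  BUNDLE_FOCAL_LENGTH | BUNDLE_RADIAL,
+                                  BUNDLE_NO_TRANSLATION,
+                                  reconstruction,
+                                  intrinsics);
+}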
+
+/*!
+ Refine camera poses and 3D coordinates using bundle adjustment.
+
+ This routine adjusts all cameras and points in \a *reconstruction. This
+ assumes a full observation for reconstructed tracks; this implies that if
+ there is a reconstructed 3D point (a bundle) for a track, then all markers
+ for that track will be included in the minimization. \a tracks should
+ contain markers used in the initial reconstruction.
+
+ The cameras and bundles (homogeneous 3D points) are refined in-place.
+
+ \note This assumes an outlier-free set of markers.
+ \note This assumes that radial distortion is already corrected for, but
+ does not assume that other intrinsics are.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveReconstructTwoFrames
+*/
+void ProjectiveBundle(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_BUNDLE_H
+
diff --git a/intern/libmv/libmv/simple_pipeline/callbacks.h b/intern/libmv/libmv/simple_pipeline/callbacks.h
new file mode 100644
index 00000000000..58f7b0d3cc9
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/callbacks.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_CALLBACKS_H_
+#define LIBMV_SIMPLE_PIPELINE_CALLBACKS_H_
+
+namespace libmv {
+
+class ProgressUpdateCallback {
+ public:
+ virtual ~ProgressUpdateCallback() {}
+ virtual void invoke(double progress, const char *message) = 0;
+};
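+
+// A minimal sketch of a concrete callback (illustrative only; the class name
+// is hypothetical and not part of libmv):
+//
+//   class StderrProgressCallback : public ProgressUpdateCallback {
+//    public:
+//     void invoke(double progress, const char *message) {
+//       fprintf(stderr, "%f: %s\n", progress, message);
+//     }
+//   };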
+
+} // namespace libmv
+
+#endif  // LIBMV_SIMPLE_PIPELINE_CALLBACKS_H_
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
new file mode 100644
index 00000000000..5e4e07b3c4c
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -0,0 +1,293 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/distortion_models.h"
+
+namespace libmv {
+
+namespace internal {
+
+LookupWarpGrid::LookupWarpGrid()
+ : offset_(NULL),
+ width_(0),
+ height_(0),
+ overscan_(0.0),
+ threads_(1) {}
+
+LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid &from)
+ : offset_(NULL),
+ width_(from.width_),
+ height_(from.height_),
+ overscan_(from.overscan_),
+ threads_(from.threads_) {
+ if (from.offset_) {
+ offset_ = new Offset[width_ * height_];
+ memcpy(offset_, from.offset_, sizeof(Offset) * width_ * height_);
+ }
+}
+
+LookupWarpGrid::~LookupWarpGrid() {
+ delete [] offset_;
+}
+
+void LookupWarpGrid::Reset() {
+ delete [] offset_;
+ offset_ = NULL;
+}
+
+// Set number of threads used for threaded buffer distortion/undistortion.
+void LookupWarpGrid::SetThreads(int threads) {
+ threads_ = threads;
+}
+
+} // namespace internal
+
+CameraIntrinsics::CameraIntrinsics()
+ : image_width_(0),
+ image_height_(0),
+ K_(Mat3::Identity()) {}
+
+CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics &from)
+ : image_width_(from.image_width_),
+ image_height_(from.image_height_),
+ K_(from.K_),
+ distort_(from.distort_),
+ undistort_(from.undistort_) {}
+
+// Set the image size in pixels.
+void CameraIntrinsics::SetImageSize(int width, int height) {
+ image_width_ = width;
+ image_height_ = height;
+ ResetLookupGrids();
+}
+
+// Set the entire calibration matrix at once.
+void CameraIntrinsics::SetK(const Mat3 new_k) {
+ K_ = new_k;
+ ResetLookupGrids();
+}
+
+// Set both x and y focal length in pixels.
+void CameraIntrinsics::SetFocalLength(double focal_x,
+ double focal_y) {
+ K_(0, 0) = focal_x;
+ K_(1, 1) = focal_y;
+ ResetLookupGrids();
+}
+
+// Set principal point in pixels.
+void CameraIntrinsics::SetPrincipalPoint(double cx,
+ double cy) {
+ K_(0, 2) = cx;
+ K_(1, 2) = cy;
+ ResetLookupGrids();
+}
+
+// Set number of threads used for threaded buffer distortion/undistortion.
+void CameraIntrinsics::SetThreads(int threads) {
+ distort_.SetThreads(threads);
+ undistort_.SetThreads(threads);
+}
+
+void CameraIntrinsics::ImageSpaceToNormalized(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ *normalized_x = (image_x - principal_point_x()) / focal_length_x();
+ *normalized_y = (image_y - principal_point_y()) / focal_length_y();
+}
+
+void CameraIntrinsics::NormalizedToImageSpace(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ *image_x = normalized_x * focal_length_x() + principal_point_x();
+ *image_y = normalized_y * focal_length_y() + principal_point_y();
+}
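+
+// As a worked example (hypothetical intrinsics): with focal length 1000.0
+// and principal point (960.0, 540.0), ImageSpaceToNormalized() maps the
+// image point (1160.0, 740.0) to normalized coordinates (0.2, 0.2), and
+// NormalizedToImageSpace() maps (0.2, 0.2) back to (1160.0, 740.0).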
+
+// Reset lookup grids after changing the distortion model.
+void CameraIntrinsics::ResetLookupGrids() {
+ distort_.Reset();
+ undistort_.Reset();
+}
+
+PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetRadialDistortion(0.0, 0.0, 0.0);
+ SetTangentialDistortion(0.0, 0.0);
+}
+
+PolynomialCameraIntrinsics::PolynomialCameraIntrinsics(
+ const PolynomialCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+ SetRadialDistortion(from.k1(), from.k2(), from.k3());
+ SetTangentialDistortion(from.p1(), from.p2());
+}
+
+void PolynomialCameraIntrinsics::SetRadialDistortion(double k1,
+ double k2,
+ double k3) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ parameters_[OFFSET_K3] = k3;
+ ResetLookupGrids();
+}
+
+void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1,
+ double p2) {
+ parameters_[OFFSET_P1] = p1;
+ parameters_[OFFSET_P2] = p2;
+ ResetLookupGrids();
+}
+
+void PolynomialCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyPolynomialDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(), k3(),
+ p1(), p2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
+}
+
+void PolynomialCameraIntrinsics::InvertIntrinsics(
+ double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertPolynomialDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(), k3(),
+ p1(), p2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
+}
+
+DivisionCameraIntrinsics::DivisionCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetDistortion(0.0, 0.0);
+}
+
+DivisionCameraIntrinsics::DivisionCameraIntrinsics(
+ const DivisionCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+ SetDistortion(from.k1(), from.k2());
+}
+
+void DivisionCameraIntrinsics::SetDistortion(double k1,
+ double k2) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ ResetLookupGrids();
+}
+
+void DivisionCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyDivisionDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
+}
+
+void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertDivisionDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
+}
+
+std::ostream& operator <<(std::ostream &os,
+ const CameraIntrinsics &intrinsics) {
+ if (intrinsics.focal_length_x() == intrinsics.focal_length_y()) {
+ os << "f=" << intrinsics.focal_length();
+ } else {
+ os << "fx=" << intrinsics.focal_length_x()
+ << " fy=" << intrinsics.focal_length_y();
+ }
+ os << " cx=" << intrinsics.principal_point_x()
+ << " cy=" << intrinsics.principal_point_y()
+ << " w=" << intrinsics.image_width()
+ << " h=" << intrinsics.image_height();
+
+#define PRINT_NONZERO_COEFFICIENT(intrinsics, coeff) \
+ { \
+ if (intrinsics->coeff() != 0.0) { \
+ os << " " #coeff "=" << intrinsics->coeff(); \
+ } \
+ } (void) 0
+
+ switch (intrinsics.GetDistortionModelType()) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k2);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k3);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p2);
+ break;
+ }
+ case DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown distortion model.";
+ }
+
+#undef PRINT_NONZERO_COEFFICIENT
+
+ return os;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
new file mode 100644
index 00000000000..6a3ade81089
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -0,0 +1,406 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
+#define LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
+
+#include <iostream>
+#include <string>
+
+#include <Eigen/Core>
+
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/distortion_models.h"
+
+namespace libmv {
+
+class CameraIntrinsics;
+
+namespace internal {
+
+// This class is responsible for storing a lookup grid and performing
+// image warping using it. Such machinery is needed to make
+// (un)distortion as fast as possible in cases where multiple
+// images are to be processed.
+class LookupWarpGrid {
+ public:
+ LookupWarpGrid();
+ LookupWarpGrid(const LookupWarpGrid &from);
+ ~LookupWarpGrid();
+
+ // Width and height of the image, measured in pixels.
+ int width() const { return width_; }
+ int height() const { return height_; }
+
+ // Overscan factor of the image, so that
+ // - 0.0 is an overscan of 0 pixels,
+ // - 1.0 is an overscan of image width pixels in the horizontal direction
+ // and image height pixels in the vertical direction.
+ double overscan() const { return overscan_; }
+
+ // Update the lookup grid to be sure it is calculated for the
+ // given image width, height and overscan.
+ //
+ // See comment for CameraIntrinsics::DistortBuffer to get more
+ // details about what overscan is.
+ template<typename WarpFunction>
+ void Update(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan);
+
+ // Apply the coordinate lookup grid to a given input buffer.
+ //
+ // See comment for CameraIntrinsics::DistortBuffer to get more
+ // details about template type.
+ template<typename PixelType>
+ void Apply(const PixelType *input_buffer,
+ int width,
+ int height,
+ int channels,
+ PixelType *output_buffer);
+
+ // Reset lookup grids.
+ // This will tag the grid for update without re-computing it.
+ void Reset();
+
+ // Set number of threads used for threaded buffer distortion/undistortion.
+ void SetThreads(int threads);
+
+ private:
+ // This structure contains an offset in both x,y directions
+ // in an optimized way, saving some bytes per pixel in memory.
+ //
+ // TODO(sergey): This is a rather questionable optimization; memory
+ // is cheap nowadays and storing the offset in such a way implies many
+ // more operations compared to using bare floats.
+ struct Offset {
+ // Integer part of the offset.
+ short ix, iy;
+
+ // Fractional part of the offset; to get the real float value divide
+ // this value by 256 (matching the encoding in LookupWarpGrid::Compute()).
+ unsigned char fx, fy;
+ };
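+
+  // For example (illustrative only), a warp coordinate of 10.75 is stored
+  // as ix = 10, fx = 192 (0.75 * 256), matching the encoding produced by
+  // LookupWarpGrid::Compute().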
+
+ // Compute the coordinate lookup grid using a given warp functor.
+ //
+ // width and height correspond to the size of the buffer which will
+ // be warped later.
+ template<typename WarpFunction>
+ void Compute(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan);
+
+ // This is a buffer which contains the per-pixel offset of the
+ // pixels from the input buffer according to the warping function.
+ Offset *offset_;
+
+ // Dimensions of the image this lookup grid processes.
+ int width_, height_;
+
+ // Overscan of the image being processed by this grid.
+ double overscan_;
+
+ // Number of threads which will be used for buffer distortion/undistortion.
+ int threads_;
+};
+
+} // namespace internal
+
+class CameraIntrinsics {
+ public:
+ CameraIntrinsics();
+ CameraIntrinsics(const CameraIntrinsics &from);
+ virtual ~CameraIntrinsics() {}
+
+ virtual DistortionModelType GetDistortionModelType() const = 0;
+
+ int image_width() const { return image_width_; }
+ int image_height() const { return image_height_; }
+
+ const Mat3 &K() const { return K_; }
+
+ double focal_length() const { return K_(0, 0); }
+ double focal_length_x() const { return K_(0, 0); }
+ double focal_length_y() const { return K_(1, 1); }
+
+ double principal_point_x() const { return K_(0, 2); }
+ double principal_point_y() const { return K_(1, 2); }
+
+ virtual int num_distortion_parameters() const = 0;
+ virtual double *distortion_parameters() = 0;
+ virtual const double *distortion_parameters() const = 0;
+
+ // Set the image size in pixels.
+ // This is the size of the image the camera intrinsics were calibrated with.
+ void SetImageSize(int width, int height);
+
+ // Set the entire calibration matrix at once.
+ void SetK(const Mat3 new_k);
+
+ // Set both x and y focal length in pixels.
+ void SetFocalLength(double focal_x, double focal_y);
+
+ // Set principal point in pixels.
+ void SetPrincipalPoint(double cx, double cy);
+
+ // Set number of threads used for threaded buffer distortion/undistortion.
+ void SetThreads(int threads);
+
+ // Convert image space coordinates to normalized.
+ void ImageSpaceToNormalized(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ // Convert normalized coordinates to image space.
+ void NormalizedToImageSpace(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ virtual void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const = 0;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ virtual void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const = 0;
+
+ // Distort an image using the current camera intrinsics.
+ //
+ // The distorted image is computed in output_buffer using samples from
+ // input_buffer. Both buffers should be width x height x channels sized.
+ //
+ // Overscan is a fractional value describing how much overscan the image
+ // has. For example, an overscan value of 0.2 means 20% of overscan in
+ // the buffers.
+ //
+ // Overscan is usually used in cases when one needs to distort an image
+ // without having a visible barrel in the distorted buffer. For example,
+ // to render a properly distorted FullHD frame without the barrel being
+ // visible, renderers usually render bigger images and crop them after
+ // the distortion.
+ //
+ // This method is templated so that it can distort byte and float buffers
+ // without having separate methods for these two types.
+ //
+ // In fact PixelType might be any type for which multiplication by
+ // a scalar and addition are implemented. For example PixelType might be
+ // Vec3 as well.
+ template<typename PixelType>
+ void DistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer);
+
+ // Undistort an image using the current camera intrinsics.
+ //
+ // The undistorted image is computed in output_buffer using samples from
+ // input_buffer. Both buffers should be width x height x channels sized.
+ //
+ // Overscan is a fractional value describing how much overscan the image
+ // has. For example, an overscan value of 0.2 means 20% of overscan in
+ // the buffers.
+ //
+ // Overscan is usually used in cases when one needs to distort an image
+ // without having a visible barrel in the distorted buffer. For example,
+ // to render a properly distorted FullHD frame without the barrel being
+ // visible, renderers usually render bigger images and crop them after
+ // the distortion.
+ //
+ // This method is templated so that it can undistort byte and float
+ // buffers without having separate methods for these two types.
+ //
+ // In fact PixelType might be any type for which multiplication by
+ // a scalar and addition are implemented. For example PixelType might be
+ // Vec3 as well.
+ template<typename PixelType>
+ void UndistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer);
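+
+  // As a worked overscan example (hypothetical numbers): for intrinsics
+  // calibrated at 1920x1080 and an overscan of 0.2, the caller passes
+  // 2304x1296 buffers (both dimensions scaled by 1.2); the lookup grid
+  // maps them onto the calibrated 1920x1080 frame plus the extra border.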
+
+ private:
+ // This is the size of the image. This is necessary to, for example, handle
+ // the case of processing a scaled image.
+ int image_width_;
+ int image_height_;
+
+ // The traditional intrinsics matrix from x = K[R|t]X.
+ Mat3 K_;
+
+ // Coordinate lookup grids for distortion and undistortion.
+ internal::LookupWarpGrid distort_;
+ internal::LookupWarpGrid undistort_;
+
+ protected:
+ // Reset lookup grids after changing the distortion model.
+ void ResetLookupGrids();
+};
+
+class PolynomialCameraIntrinsics : public CameraIntrinsics {
+ public:
+ // These constants define the offsets of the corresponding coefficients
+ // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+ OFFSET_K3,
+ OFFSET_P1,
+ OFFSET_P2,
+
+ // This defines the size of the array which we need in order
+ // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ PolynomialCameraIntrinsics();
+ PolynomialCameraIntrinsics(const PolynomialCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_POLYNOMIAL;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; };
+ const double *distortion_parameters() const { return parameters_; };
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+ double k3() const { return parameters_[OFFSET_K3]; }
+ double p1() const { return parameters_[OFFSET_P1]; }
+ double p2() const { return parameters_[OFFSET_P2]; }
+
+ // Set radial distortion coefficients.
+ void SetRadialDistortion(double k1, double k2, double k3);
+
+ // Set tangential distortion coefficients.
+ void SetTangentialDistortion(double p1, double p2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ private:
+ // OpenCV's distortion model with third order polynomial radial distortion
+ // terms and second order tangential distortion. The distortion is applied to
+ // the normalized coordinates before the focal length, which makes them
+ // independent of image size.
+ double parameters_[NUM_PARAMETERS];
+};
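+
+// A minimal usage sketch (the helper name and values are hypothetical and
+// not part of libmv): configure polynomial intrinsics and undistort a
+// packed float buffer of width * height * channels samples.
+inline void ExampleUndistortFloatBuffer(const float *input_buffer,
+                                        int width,
+                                        int height,
+                                        int channels,
+                                        float *output_buffer) {
+  PolynomialCameraIntrinsics intrinsics;
+  intrinsics.SetImageSize(width, height);
+  intrinsics.SetFocalLength(1000.0, 1000.0);
+  intrinsics.SetPrincipalPoint(width * 0.5, height * 0.5);
+  intrinsics.SetRadialDistortion(-0.1, 0.0, 0.0);
+  // No overscan: the buffers cover exactly the calibrated frame.
+  intrinsics.UndistortBuffer(input_buffer,
+                             width, height,
+                             0.0,  // overscan
+                             channels,
+                             output_buffer);
+}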
+
+class DivisionCameraIntrinsics : public CameraIntrinsics {
+ public:
+ // These constants define the offsets of the corresponding coefficients
+ // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+
+ // This defines the size of the array which we need in order
+ // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ DivisionCameraIntrinsics();
+ DivisionCameraIntrinsics(const DivisionCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_DIVISION;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; };
+ const double *distortion_parameters() const { return parameters_; };
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+
+ // Set radial distortion coefficients.
+ void SetDistortion(double k1, double k2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ private:
+ // Double-parameter division distortion model.
+ double parameters_[NUM_PARAMETERS];
+};
+
+/// A human-readable representation of the camera intrinsic parameters.
+std::ostream& operator <<(std::ostream &os,
+ const CameraIntrinsics &intrinsics);
+
+} // namespace libmv
+
+// Include implementation of all templated methods here,
+// so they're visible to the compiler.
+#include "libmv/simple_pipeline/camera_intrinsics_impl.h"
+
+#endif // LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
new file mode 100644
index 00000000000..97abee7ab01
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
@@ -0,0 +1,192 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+namespace libmv {
+
+namespace {
+
+// FIXME: C++ template limitations make things more complicated than they
+// should be here, but maybe there is a simpler method.
+struct ApplyIntrinsicsFunction {
+ ApplyIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ double x,
+ double y,
+ double *warp_x,
+ double *warp_y) {
+ double normalized_x, normalized_y;
+ intrinsics.ImageSpaceToNormalized(x, y, &normalized_x, &normalized_y);
+ intrinsics.ApplyIntrinsics(normalized_x, normalized_y, warp_x, warp_y);
+ }
+};
+
+struct InvertIntrinsicsFunction {
+ InvertIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ double x,
+ double y,
+ double *warp_x,
+ double *warp_y) {
+ double normalized_x, normalized_y;
+ intrinsics.InvertIntrinsics(x, y, &normalized_x, &normalized_y);
+ intrinsics.NormalizedToImageSpace(normalized_x, normalized_y, warp_x, warp_y);
+ }
+};
+
+} // namespace
+
+namespace internal {
+
+// TODO(MatthiasF): downsample lookup
+template<typename WarpFunction>
+void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan) {
+ double w = (double) width / (1.0 + overscan);
+ double h = (double) height / (1.0 + overscan);
+ double aspx = (double) w / intrinsics.image_width();
+ double aspy = (double) h / intrinsics.image_height();
+#if defined(_OPENMP)
+# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+ if (threads_ > 1 && height > 100)
+#endif
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ double src_x = (x - 0.5 * overscan * w) / aspx,
+ src_y = (y - 0.5 * overscan * h) / aspy;
+ double warp_x, warp_y;
+ WarpFunction(intrinsics, src_x, src_y, &warp_x, &warp_y);
+ warp_x = warp_x * aspx + 0.5 * overscan * w;
+ warp_y = warp_y * aspy + 0.5 * overscan * h;
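+      // Quantize the warp target into an integer pixel plus an 8-bit
+      // (1/256 step) fractional part, which Apply() later uses for
+      // fixed-point bilinear interpolation.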
+ int ix = int(warp_x), iy = int(warp_y);
+ int fx = round((warp_x - ix) * 256), fy = round((warp_y - iy) * 256);
+ if (fx == 256) { fx = 0; ix++; } // NOLINT
+ if (fy == 256) { fy = 0; iy++; } // NOLINT
+ // Use nearest border pixel
+ if (ix < 0) { ix = 0, fx = 0; } // NOLINT
+ if (iy < 0) { iy = 0, fy = 0; } // NOLINT
+ if (ix >= width - 2) ix = width - 2;
+ if (iy >= height - 2) iy = height - 2;
+
+ Offset offset = { (short) (ix - x),
+ (short) (iy - y),
+ (unsigned char) fx,
+ (unsigned char) fy };
+ offset_[y * width + x] = offset;
+ }
+ }
+}
+
+template<typename WarpFunction>
+void LookupWarpGrid::Update(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan) {
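+  // Lazily (re)build the lookup grid: reset it when the dimensions or the
+  // overscan change, and only compute it when it does not exist yet.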
+ if (width_ != width ||
+ height_ != height ||
+ overscan_ != overscan) {
+ Reset();
+ }
+
+ if (offset_ == NULL) {
+ offset_ = new Offset[width * height];
+ Compute<WarpFunction>(intrinsics,
+ width,
+ height,
+ overscan);
+ }
+
+ width_ = width;
+ height_ = height;
+ overscan_ = overscan;
+}
+
+// TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
+template<typename PixelType>
+void LookupWarpGrid::Apply(const PixelType *input_buffer,
+ int width,
+ int height,
+ int channels,
+ PixelType *output_buffer) {
+#if defined(_OPENMP)
+# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+ if (threads_ > 1 && height > 100)
+#endif
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ Offset offset = offset_[y * width + x];
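+      // Blend the 2x2 neighborhood around the warped source position using
+      // the 8-bit fractional weights stored in the offset grid (fixed-point
+      // bilinear interpolation, normalized by 256 * 256).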
+ const int pixel_index = ((y + offset.iy) * width +
+ (x + offset.ix)) * channels;
+ const PixelType *s = &input_buffer[pixel_index];
+ for (int i = 0; i < channels; i++) {
+ output_buffer[(y * width + x) * channels + i] =
+ ((s[i] * (256 - offset.fx) +
+ s[channels + i] * offset.fx) * (256 - offset.fy) +
+ (s[width * channels + i] * (256 - offset.fx) +
+ s[width * channels + channels + i] * offset.fx) * offset.fy)
+ / (256 * 256);
+ }
+ }
+ }
+}
+
+} // namespace internal
+
+template<typename PixelType>
+void CameraIntrinsics::DistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer) {
+ assert(channels >= 1);
+ assert(channels <= 4);
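+  // Note: producing a distorted output is implemented as an inverse mapping:
+  // every output pixel looks up its source at the corresponding undistorted
+  // position, which is why the lookup grid is built from
+  // InvertIntrinsicsFunction here (and from ApplyIntrinsicsFunction in
+  // UndistortBuffer() below).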
+ distort_.Update<InvertIntrinsicsFunction>(*this,
+ width,
+ height,
+ overscan);
+ distort_.Apply<PixelType>(input_buffer,
+ width,
+ height,
+ channels,
+ output_buffer);
+}
+
+template<typename PixelType>
+void CameraIntrinsics::UndistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer) {
+ assert(channels >= 1);
+ assert(channels <= 4);
+ undistort_.Update<ApplyIntrinsicsFunction>(*this,
+ width,
+ height,
+ overscan);
+
+ undistort_.Apply<PixelType>(input_buffer,
+ width,
+ height,
+ channels,
+ output_buffer);
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc
new file mode 100644
index 00000000000..96d35a29ef8
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_test.cc
@@ -0,0 +1,239 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#include <iostream>
+
+#include "testing/testing.h"
+#include "libmv/image/image.h"
+#include "libmv/image/image_drawing.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+TEST(PolynomialCameraIntrinsics2, ApplyOnFocalCenter) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1300.0, 1300.0);
+ intrinsics.SetPrincipalPoint(640.0, 540.0);
+ intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
+
+ double distorted_x, distorted_y;
+ intrinsics.ApplyIntrinsics(0.0, 0.0, &distorted_x, &distorted_y);
+
+ EXPECT_NEAR(640.0, distorted_x, 1e-8);
+ EXPECT_NEAR(540.0, distorted_y, 1e-8);
+}
+
+TEST(PolynomialCameraIntrinsics, InvertOnFocalCenter) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1300.0, 1300.0);
+ intrinsics.SetPrincipalPoint(640.0, 540.0);
+ intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
+
+ double normalized_x, normalized_y;
+ intrinsics.InvertIntrinsics(640.0, 540.0, &normalized_x, &normalized_y);
+
+ EXPECT_NEAR(0.0, normalized_x, 1e-8);
+ EXPECT_NEAR(0.0, normalized_y, 1e-8);
+}
+
+TEST(PolynomialCameraIntrinsics, ApplyIntrinsics) {
+ const int N = 5;
+
+ double expected[N][N][2] = {
+ { {75.312500, -24.687500}, {338.982239, -62.035522},
+ {640.000000, -72.929688}, {941.017761, -62.035522},
+ {1204.687500, -24.687500}},
+
+ { {37.964478, 238.982239}, {323.664551, 223.664551},
+ {640.000000, 219.193420}, {956.335449, 223.664551},
+ {1242.035522, 238.982239}},
+
+ { {27.070312, 540.000000}, {319.193420, 540.000000},
+ {640.000000, 540.000000}, {960.806580, 540.000000},
+ {1252.929688, 540.000000}},
+
+ { {37.964478, 841.017761}, {323.664551, 856.335449},
+ {640.000000, 860.806580}, {956.335449, 856.335449},
+ {1242.035522, 841.017761}},
+
+ { {75.312500, 1104.687500}, {338.982239, 1142.035522},
+ {640.000000, 1152.929688}, {941.017761, 1142.035522},
+ {1204.687500, 1104.687500}}
+ };
+
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1300.0, 1300.0);
+ intrinsics.SetPrincipalPoint(640.0, 540.0);
+ intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
+
+ double step = 1.0 / (N - 1);
+
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ double normalized_x = j * step - 0.5,
+ normalized_y = i * step - 0.5;
+
+ double distorted_x, distorted_y;
+ intrinsics.ApplyIntrinsics(normalized_x, normalized_y,
+ &distorted_x, &distorted_y);
+
+ EXPECT_NEAR(expected[i][j][0], distorted_x, 1e-6);
+ EXPECT_NEAR(expected[i][j][1], distorted_y, 1e-6);
+ }
+ }
+}
+
+TEST(PolynomialCameraIntrinsics, InvertIntrinsics) {
+ const int N = 5;
+
+ double expected[N][N][2] = {
+ { {-0.524482, -0.437069}, {-0.226237, -0.403994},
+ { 0.031876, -0.398446}, { 0.293917, -0.408218},
+ { 0.632438, -0.465028}},
+
+ { {-0.493496, -0.189173}, {-0.219052, -0.179936},
+ { 0.030975, -0.178107}, { 0.283742, -0.181280},
+ { 0.574557, -0.194335}},
+
+ { {-0.488013, 0.032534}, {-0.217537, 0.031077},
+ { 0.030781, 0.030781}, { 0.281635, 0.031293},
+ { 0.566344, 0.033314}},
+
+ { {-0.498696, 0.257660}, {-0.220424, 0.244041},
+ { 0.031150, 0.241409}, { 0.285660, 0.245985},
+ { 0.582670, 0.265629}},
+
+ { {-0.550617, 0.532263}, {-0.230399, 0.477255},
+ { 0.032380, 0.469510}, { 0.299986, 0.483311},
+ { 0.684740, 0.584043}}
+ };
+
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1300.0, 1300.0);
+ intrinsics.SetPrincipalPoint(600.0, 500.0);
+ intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
+
+ double step_x = 1280.0 / (N - 1),
+ step_y = 1080.0 / (N - 1);
+
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ double distorted_x = j * step_x,
+ distorted_y = i * step_y;
+
+ double normalized_x, normalized_y;
+ intrinsics.InvertIntrinsics(distorted_x, distorted_y,
+ &normalized_x, &normalized_y);
+
+ EXPECT_NEAR(expected[i][j][0], normalized_x, 1e-6);
+ EXPECT_NEAR(expected[i][j][1], normalized_y, 1e-6);
+ }
+ }
+}
+
+TEST(PolynomialCameraIntrinsics, ApplyIsInvertibleSimple) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1300.0, 1300.0);
+ intrinsics.SetPrincipalPoint(640.0, 540.0);
+ intrinsics.SetRadialDistortion(-0.2, -0.1, -0.05);
+
+ // Scan over image coordinates, invert the intrinsics, then re-apply them to
+ // make sure the cycle gets back where it started.
+ for (double y = 0; y < 1000; y += 100) {
+ for (double x = 0; x < 1000; x += 100) {
+ double normalized_x, normalized_y;
+ intrinsics.InvertIntrinsics(x, y, &normalized_x, &normalized_y);
+
+ double xp, yp;
+ intrinsics.ApplyIntrinsics(normalized_x, normalized_y, &xp, &yp);
+
+ EXPECT_NEAR(x, xp, 1e-8) << "y: " << y;
+ EXPECT_NEAR(y, yp, 1e-8) << "x: " << x;
+ LG << "Error x: " << (x - xp);
+ LG << "Error y: " << (y - yp);
+ }
+ }
+}
+
+TEST(PolynomialCameraIntrinsics, IdentityDistortBuffer) {
+ const int w = 101, h = 101;
+ FloatImage image(h, w);
+ image.Fill(0);
+
+ DrawLine(0.0, h / 2.0, w - 1, h / 2.0, 1.0, &image);
+ DrawLine(0.0, h / 4.0, w - 1, h / 4.0, 1.0, &image);
+ DrawLine(0.0, h / 4.0 * 3.0, w - 1.0, h / 4.0 * 3.0, 1.0, &image);
+ DrawLine(w / 2.0, 0.0, w / 2.0, h - 1.0, 1.0, &image);
+ DrawLine(w / 4.0, 0.0, w / 4.0, h - 1.0, 1.0, &image);
+ DrawLine(w / 4.0 * 3.0, 0.0, w / 4.0 * 3.0, h - 1.0, 1.0, &image);
+
+ PolynomialCameraIntrinsics intrinsics;
+ FloatImage distorted_image(h, w);
+ intrinsics.SetImageSize(w, h);
+ intrinsics.SetFocalLength(10.0, 10.0);
+ intrinsics.SetPrincipalPoint((double) w / 2.0, (double) h / 2.0);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+ intrinsics.DistortBuffer(image.Data(),
+ image.Width(), image.Height(),
+ 0.0,
+ image.Depth(),
+ distorted_image.Data());
+
+ for (int x = 0; x < image.Width(); ++x) {
+ for (int y = 0; y < image.Height(); ++y) {
+ EXPECT_EQ(image(y, x), distorted_image(y, x));
+ }
+ }
+}
+
+TEST(PolynomialCameraIntrinsics, IdentityUndistortBuffer) {
+ const int w = 101, h = 101;
+ FloatImage image(h, w);
+ image.Fill(0);
+
+ DrawLine(0.0, h / 2.0, w - 1, h / 2.0, 1.0, &image);
+ DrawLine(0.0, h / 4.0, w - 1, h / 4.0, 1.0, &image);
+ DrawLine(0.0, h / 4.0 * 3.0, w - 1.0, h / 4.0 * 3.0, 1.0, &image);
+ DrawLine(w / 2.0, 0.0, w / 2.0, h - 1.0, 1.0, &image);
+ DrawLine(w / 4.0, 0.0, w / 4.0, h - 1.0, 1.0, &image);
+ DrawLine(w / 4.0 * 3.0, 0.0, w / 4.0 * 3.0, h - 1.0, 1.0, &image);
+
+ PolynomialCameraIntrinsics intrinsics;
+ FloatImage distorted_image(h, w);
+ intrinsics.SetImageSize(w, h);
+ intrinsics.SetFocalLength(10.0, 10.0);
+ intrinsics.SetPrincipalPoint((double) w / 2.0, (double) h / 2.0);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+ intrinsics.UndistortBuffer(image.Data(),
+ image.Width(), image.Height(),
+ 0.0,
+ image.Depth(),
+ distorted_image.Data());
+
+ for (int x = 0; x < image.Width(); ++x) {
+ for (int y = 0; y < image.Height(); ++y) {
+ EXPECT_EQ(image(y, x), distorted_image(y, x));
+ }
+ }
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/detect.cc b/intern/libmv/libmv/simple_pipeline/detect.cc
new file mode 100644
index 00000000000..46599a4c49e
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/detect.cc
@@ -0,0 +1,361 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#include <stdlib.h>
+#include <memory.h>
+#include <queue>
+
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/image/array_nd.h"
+#include "libmv/image/image_converter.h"
+#include "libmv/image/convolve.h"
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/detect.h"
+
+#ifndef LIBMV_NO_FAST_DETECTOR
+# include <third_party/fast/fast.h>
+#endif
+
+#ifdef __SSE2__
+# include <emmintrin.h>
+#endif
+
+namespace libmv {
+
+namespace {
+
+// Default value for FAST minimal trackness in the DetectOptions structure.
+// TODO(sergey): Think of a better default value here.
+int kDefaultFastMinTrackness = 128;
+
+// Default value for Harris threshold in the DetectOptions structure.
+// TODO(sergey): Think of a better default value here.
+double kDefaultHarrisThreshold = 1e-5;
+
+class FeatureComparison {
+ public:
+ bool operator() (const Feature &left, const Feature &right) const {
+ return right.score > left.score;
+ }
+};
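+
+// With this comparison std::priority_queue pops the feature with the highest
+// score first, so FilterFeaturesByDistance() below keeps the strongest
+// feature within every min_distance neighborhood.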
+
+// Filter the features so that no two features are closer to each other
+// than the minimal distance.
+// This is a naive implementation with O(n^2) asymptotic complexity.
+void FilterFeaturesByDistance(const vector<Feature> &all_features,
+ int min_distance,
+ vector<Feature> *detected_features) {
+ const int min_distance_squared = min_distance * min_distance;
+
+  // Use a priority queue to sort the features by their score.
+  //
+  // This works on copies of the input features, so the caller's
+  // vector is left untouched.
+ std::priority_queue<Feature,
+ std::vector<Feature>,
+ FeatureComparison> priority_features;
+
+ for (int i = 0; i < all_features.size(); i++) {
+ priority_features.push(all_features.at(i));
+ }
+
+ while (!priority_features.empty()) {
+ bool ok = true;
+ Feature a = priority_features.top();
+
+ for (int i = 0; i < detected_features->size(); i++) {
+ Feature &b = detected_features->at(i);
+ if (Square(a.x - b.x) + Square(a.y - b.y) < min_distance_squared) {
+ ok = false;
+ break;
+ }
+ }
+
+ if (ok) {
+ detected_features->push_back(a);
+ }
+
+ priority_features.pop();
+ }
+}
+
+void DetectFAST(const FloatImage &grayscale_image,
+ const DetectOptions &options,
+ vector<Feature> *detected_features) {
+#ifndef LIBMV_NO_FAST_DETECTOR
+ const int min_distance = options.min_distance;
+ const int min_trackness = options.fast_min_trackness;
+ const int margin = options.margin;
+ const int width = grayscale_image.Width() - 2 * margin;
+  const int height = grayscale_image.Height() - 2 * margin;
+ const int stride = grayscale_image.Width();
+
+ scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+ const int byte_image_offset = margin * stride + margin;
+
+  // TODO(MatthiasF): Support targeting a feature count (binary search on trackness)
+ int num_features;
+ xy *all = fast9_detect(byte_image.get() + byte_image_offset,
+ width,
+ height,
+ stride,
+ min_trackness,
+ &num_features);
+ if (num_features == 0) {
+ free(all);
+ return;
+ }
+ int *scores = fast9_score(byte_image.get() + byte_image_offset,
+ stride,
+ all,
+ num_features,
+ min_trackness);
+ // TODO(MatthiasF): merge with close feature suppression
+ xy *nonmax = nonmax_suppression(all, scores, num_features, &num_features);
+ free(all);
+  // Remove features which are too close to each other.
+  // TODO(MatthiasF): A resolution-independent parameter would be better than
+  // a distance, e.g. a coefficient going from 0 (no minimal distance) to 1
+  // (optimal circle packing).
+ // FIXME(MatthiasF): this method will not necessarily give all maximum markers
+ if (num_features) {
+ vector<Feature> all_features;
+ for (int i = 0; i < num_features; ++i) {
+ all_features.push_back(Feature((float)nonmax[i].x + margin,
+ (float)nonmax[i].y + margin,
+ (float)scores[i],
+ 9.0));
+ }
+ FilterFeaturesByDistance(all_features, min_distance, detected_features);
+ }
+ free(scores);
+ free(nonmax);
+#else
+ (void) grayscale_image; // Ignored.
+ (void) options; // Ignored.
+ (void) detected_features; // Ignored.
+ LOG(FATAL) << "FAST detector is disabled in this build.";
+#endif
+}
+
+#ifdef __SSE2__
+static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
+ int strideA, int strideB) {
+ __m128i a = _mm_setzero_si128();
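+  // _mm_sad_epu8 produces two partial sums of absolute byte differences (one
+  // per 8-byte half of the row); accumulate them over the 16 rows, then add
+  // the two 16-bit partial sums (lanes 0 and 4) below to get the full
+  // 16x16 SAD.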
+ for (int i = 0; i < 16; i++) {
+ a = _mm_adds_epu16(a,
+ _mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA+i*strideA)),
+ _mm_loadu_si128((__m128i*)(imageB+i*strideB))));
+ }
+ return _mm_extract_epi16(a, 0) + _mm_extract_epi16(a, 4);
+}
+#else
+static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
+ int strideA, int strideB) {
+ unsigned int sad = 0;
+ for (int i = 0; i < 16; i++) {
+ for (int j = 0; j < 16; j++) {
+ sad += abs((int)imageA[i*strideA+j] - imageB[i*strideB+j]);
+ }
+ }
+ return sad;
+}
+#endif
+
+void DetectMORAVEC(const FloatImage &grayscale_image,
+ const DetectOptions &options,
+ vector<Feature> *detected_features) {
+ const int distance = options.min_distance;
+ const int margin = options.margin;
+ const unsigned char *pattern = options.moravec_pattern;
+ const int count = options.moravec_max_count;
+ const int width = grayscale_image.Width() - 2 * margin;
+  const int height = grayscale_image.Height() - 2 * margin;
+ const int stride = grayscale_image.Width();
+
+ scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+
+ unsigned short histogram[256];
+ memset(histogram, 0, sizeof(histogram));
+ scoped_array<ubyte> scores(new ubyte[width*height]);
+ memset(scores.get(), 0, width*height);
+ const int r = 1; // radius for self similarity comparison
+ for (int y = distance; y < height-distance; y++) {
+ for (int x = distance; x < width-distance; x++) {
+ const ubyte* s = &byte_image[y*stride+x];
+ int score = // low self-similarity with overlapping patterns
+ // OPTI: load pattern once
+ SAD(s, s-r*stride-r, stride, stride)+SAD(s, s-r*stride, stride, stride)+SAD(s, s-r*stride+r, stride, stride)+
+ SAD(s, s -r, stride, stride)+ SAD(s, s +r, stride, stride)+
+ SAD(s, s+r*stride-r, stride, stride)+SAD(s, s+r*stride, stride, stride)+SAD(s, s+r*stride+r, stride, stride);
+ score /= 256; // normalize
+ if (pattern) // find only features similar to pattern
+ score -= SAD(s, pattern, stride, 16);
+ if (score <= 16) continue; // filter very self-similar features
+ score -= 16; // translate to score/histogram values
+ if (score>255) score=255; // clip
+ ubyte* c = &scores[y*width+x];
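+      // Scan previously visited neighbors within 'distance': if any of them
+      // already has an equal or higher score this candidate is suppressed,
+      // otherwise the weaker neighbors are cleared from the score map and
+      // from the histogram.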
+ for (int i = -distance; i < 0; i++) {
+ for (int j = -distance; j < distance; j++) {
+ int s = c[i*width+j];
+ if (s == 0) continue;
+ if (s >= score) goto nonmax;
+ c[i*width+j] = 0;
+ histogram[s]--;
+ }
+ }
+ for (int i = 0, j = -distance; j < 0; j++) {
+ int s = c[i*width+j];
+ if (s == 0) continue;
+ if (s >= score) goto nonmax;
+ c[i*width+j] = 0;
+ histogram[s]--;
+ }
+ c[0] = score, histogram[score]++;
+ nonmax:
+ { } // Do nothing.
+ }
+ }
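+  // Walk the histogram from the highest score down to find the threshold
+  // 'min' such that at most 'count' features score above it.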
+ int min = 255, total = 0;
+ for (; min > 0; min--) {
+ int h = histogram[min];
+ if (total + h > count) {
+ break;
+ }
+ total += h;
+ }
+ for (int y = 16; y < height - 16; y++) {
+ for (int x = 16; x < width - 16; x++) {
+ int s = scores[y * width + x];
+ if (s > min) {
+        // Currently SAD works with patterns of 16x16 pixels.
+        //
+        // The score calculation above uses the top left corner of the
+        // patch as the origin; here we need to convert this value
+        // to the pattern center by adding 8 pixels.
+ detected_features->push_back(Feature((float) x + 8.0f,
+ (float) y + 8.0f,
+ (float) s,
+ 16.0f));
+ }
+ }
+ }
+}
+
+void DetectHarris(const FloatImage &grayscale_image,
+ const DetectOptions &options,
+ vector<Feature> *detected_features) {
+ const double alpha = 0.06;
+ const double sigma = 0.9;
+
+ const int min_distance = options.min_distance;
+ const int margin = options.margin;
+ const double threshold = options.harris_threshold;
+
+ FloatImage gradient_x, gradient_y;
+ ImageDerivatives(grayscale_image, sigma, &gradient_x, &gradient_y);
+
+ FloatImage gradient_xx, gradient_yy, gradient_xy;
+ MultiplyElements(gradient_x, gradient_x, &gradient_xx);
+ MultiplyElements(gradient_y, gradient_y, &gradient_yy);
+ MultiplyElements(gradient_x, gradient_y, &gradient_xy);
+
+ FloatImage gradient_xx_blurred,
+ gradient_yy_blurred,
+ gradient_xy_blurred;
+ ConvolveGaussian(gradient_xx, sigma, &gradient_xx_blurred);
+ ConvolveGaussian(gradient_yy, sigma, &gradient_yy_blurred);
+ ConvolveGaussian(gradient_xy, sigma, &gradient_xy_blurred);
+
+ vector<Feature> all_features;
+ for (int y = margin; y < gradient_xx_blurred.Height() - margin; ++y) {
+ for (int x = margin; x < gradient_xx_blurred.Width() - margin; ++x) {
+ // Construct matrix
+ //
+ // A = [ Ix^2 Ix*Iy ]
+ // [ Ix*Iy Iy^2 ]
+ Mat2 A;
+ A(0, 0) = gradient_xx_blurred(y, x);
+ A(1, 1) = gradient_yy_blurred(y, x);
+ A(0, 1) = A(1, 0) = gradient_xy_blurred(y, x);
+
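+      // Harris corner response: det(A) - alpha * trace(A)^2. Windows where
+      // this measure exceeds the threshold are treated as corner candidates.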
+ double detA = A.determinant();
+ double traceA = A.trace();
+ double harris_function = detA - alpha * traceA * traceA;
+ if (harris_function > threshold) {
+ all_features.push_back(Feature((float) x,
+ (float) y,
+ (float) harris_function,
+ 5.0f));
+ }
+ }
+ }
+
+ FilterFeaturesByDistance(all_features, min_distance, detected_features);
+}
+
+} // namespace
+
+DetectOptions::DetectOptions()
+ : type(DetectOptions::HARRIS),
+ margin(0),
+ min_distance(120),
+ fast_min_trackness(kDefaultFastMinTrackness),
+ moravec_max_count(0),
+ moravec_pattern(NULL),
+ harris_threshold(kDefaultHarrisThreshold) {}
+
+void Detect(const FloatImage &image,
+ const DetectOptions &options,
+ vector<Feature> *detected_features) {
+  // Currently all the detectors require the image to be grayscale.
+  // Convert it here to avoid code duplication.
+ FloatImage grayscale_image;
+ if (image.Depth() != 1) {
+ Rgb2Gray(image, &grayscale_image);
+ } else {
+    // TODO(sergey): Find a way to avoid such image duplication.
+ grayscale_image = image;
+ }
+
+ if (options.type == DetectOptions::FAST) {
+ DetectFAST(grayscale_image, options, detected_features);
+ } else if (options.type == DetectOptions::MORAVEC) {
+ DetectMORAVEC(grayscale_image, options, detected_features);
+ } else if (options.type == DetectOptions::HARRIS) {
+ DetectHarris(grayscale_image, options, detected_features);
+ } else {
+    LOG(FATAL) << "Unknown detector type has been passed to feature detection";
+ }
+}
+
+std::ostream& operator <<(std::ostream &os,
+ const Feature &feature) {
+ os << "x: " << feature.x << ", y: " << feature.y;
+ os << ", score: " << feature.score;
+ os << ", size: " << feature.size;
+ return os;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/detect.h b/intern/libmv/libmv/simple_pipeline/detect.h
new file mode 100644
index 00000000000..1035287bcf2
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/detect.h
@@ -0,0 +1,113 @@
+/****************************************************************************
+**
+** Copyright (c) 2011 libmv authors.
+**
+** Permission is hereby granted, free of charge, to any person obtaining a copy
+** of this software and associated documentation files (the "Software"), to
+** deal in the Software without restriction, including without limitation the
+** rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+** sell copies of the Software, and to permit persons to whom the Software is
+** furnished to do so, subject to the following conditions:
+**
+** The above copyright notice and this permission notice shall be included in
+** all copies or substantial portions of the Software.
+**
+** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+** IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+** FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+** AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+** LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+** FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+** IN THE SOFTWARE.
+**
+****************************************************************************/
+
+#ifndef LIBMV_SIMPLE_PIPELINE_DETECT_H_
+#define LIBMV_SIMPLE_PIPELINE_DETECT_H_
+
+#include <iostream>
+#include <vector>
+
+#include "libmv/base/vector.h"
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+typedef unsigned char ubyte;
+
+// A Feature is the 2D location of a detected feature in an image.
+struct Feature {
+ Feature(float x, float y) : x(x), y(y) {}
+ Feature(float x, float y, float score, float size)
+ : x(x), y(y), score(score), size(size) {}
+
+ // Position of the feature in pixels from top-left corner.
+ // Note: Libmv detector might eventually support subpixel precision.
+ float x, y;
+
+ // An estimate of how well the feature will be tracked.
+ //
+  // The absolute value depends entirely on the particular detector type
+  // used for detection. It is only guaranteed that features with a higher
+  // score from the same Detect() result will be tracked better.
+ float score;
+
+ // An approximate feature size in pixels.
+ //
+ // If the feature is approximately a 5x5 square region, then size will be 5.
+ // It can be used as an initial pattern size to track the feature.
+ float size;
+};
+
+struct DetectOptions {
+ DetectOptions();
+
+  // TODO(sergey): Write descriptions for each of the algorithms.
+ enum DetectorType {
+ FAST,
+ MORAVEC,
+ HARRIS,
+ };
+ DetectorType type;
+
+ // Margin in pixels from the image boundary.
+ // No features will be detected within the margin.
+ int margin;
+
+ // Minimal distance between detected features.
+ int min_distance;
+
+ // Minimum score to add a feature. Only used by FAST detector.
+ //
+ // TODO(sergey): Write more detailed documentation about which
+ // units this value is measured in and so on.
+ int fast_min_trackness;
+
+ // Maximum count to detect. Only used by MORAVEC detector.
+ int moravec_max_count;
+
+ // Find only features similar to this pattern. Only used by MORAVEC detector.
+ //
+  // This is an image patch stored in a byte array with dimensions of 16px by
+  // 16px, used to filter features by their similarity to the patch.
+ unsigned char *moravec_pattern;
+
+  // Threshold value of the Harris function above which a new feature is
+  // added to the result.
+ double harris_threshold;
+};
+
+// Detect features on a given image using given detector options.
+//
+// The image may have 1-4 channels; it will be converted to grayscale by the
+// detector function if needed.
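+//
+// A minimal usage sketch (mirroring detect_test.cc):
+//
+//   DetectOptions options;
+//   options.type = DetectOptions::HARRIS;
+//   options.margin = 3;
+//   options.min_distance = 3;
+//
+//   vector<Feature> detected_features;
+//   Detect(image, options, &detected_features);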
+void Detect(const FloatImage &image,
+ const DetectOptions &options,
+ vector<Feature> *detected_features);
+
+std::ostream& operator <<(std::ostream &os,
+ const Feature &feature);
+
+} // namespace libmv
+
+#endif
diff --git a/intern/libmv/libmv/simple_pipeline/detect_test.cc b/intern/libmv/libmv/simple_pipeline/detect_test.cc
new file mode 100644
index 00000000000..fe57e3d04a2
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/detect_test.cc
@@ -0,0 +1,230 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/detect.h"
+
+#include "testing/testing.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+namespace {
+
+void PreformSinglePointTest(const DetectOptions &options) {
+ // Prepare the image.
+ FloatImage image(15, 15);
+ image.fill(1.0);
+ image(7, 7) = 0.0;
+
+ // Run the detector.
+ vector<Feature> detected_features;
+ Detect(image, options, &detected_features);
+
+  // Check that the detected features match our expectations.
+ EXPECT_EQ(1, detected_features.size());
+ if (detected_features.size() == 1) {
+ Feature &feature = detected_features[0];
+ EXPECT_EQ(7, feature.x);
+ EXPECT_EQ(7, feature.y);
+ }
+}
+
+void PreformCheckerBoardTest(const DetectOptions &options) {
+ // Prepare the image.
+ FloatImage image(30, 30);
+ for (int y = 0; y < image.Height(); ++y) {
+ for (int x = 0; x < image.Width(); ++x) {
+ image(y, x) = (x / 10 + y / 10) % 2 ? 1.0 : 0.0;
+ }
+ }
+
+ // Run the detector.
+ vector<Feature> detected_features;
+ Detect(image, options, &detected_features);
+
+  // Check that the detected features match our expectations.
+
+  // We expect only the corners of the center square to be
+  // considered feature points.
+
+  // We don't know which side of the corner the detector will choose, so
+  // what we're checking here is that the detected feature lies on some
+  // side of the corner.
+  //
+  // This doesn't check whether there are multiple features placed on
+  // different sides of the same corner. The way we deal with this is by
+  // requiring min_distance to be greater than 2px.
+ for (int i = 0; i < detected_features.size(); ++i) {
+ Feature &feature = detected_features[i];
+ int rounded_x = ((feature.x + 1) / 10) * 10,
+ rounded_y = ((feature.y + 1) / 10) * 10;
+ EXPECT_LE(1, std::abs(feature.x - rounded_x));
+ EXPECT_LE(1, std::abs(feature.y - rounded_y));
+ }
+}
+
+void CheckExpectedFeatures(const vector<Feature> &detected_features,
+ const vector<Feature> &expected_features) {
+ EXPECT_EQ(expected_features.size(), detected_features.size());
+
+  // It is unsafe to iterate over the vectors when their lengths don't
+  // match, and there is no point in doing so anyway since the test is
+  // already considered failed at this point.
+ if (expected_features.size() != detected_features.size()) {
+ return;
+ }
+
+ for (int i = 0; i < expected_features.size(); ++i) {
+    const Feature &expected_feature = expected_features[i];
+    bool found = false;
+    for (int j = 0; j < detected_features.size(); ++j) {
+      const Feature &detected_feature = detected_features[j];
+      if (expected_feature.x == detected_feature.x &&
+          expected_feature.y == detected_feature.y) {
+ found = true;
+ break;
+ }
+ }
+ EXPECT_TRUE(found);
+ }
+}
+
+void PreformSingleTriangleTest(const DetectOptions &options) {
+ // Prepare the image.
+ FloatImage image(15, 21);
+ image.fill(1.0);
+
+ int vertex_x = 10, vertex_y = 5;
+ for (int i = 0; i < 6; ++i) {
+ int current_x = vertex_x - i,
+ current_y = vertex_y + i;
+ for (int j = 0; j < i * 2 + 1; ++j, ++current_x) {
+ image(current_y, current_x) = 0.0;
+ }
+ }
+
+ // Run the detector.
+ vector<Feature> detected_features;
+ Detect(image, options, &detected_features);
+
+  // Check that the detected features match our expectations.
+ vector<Feature> expected_features;
+ expected_features.push_back(Feature(6, 10));
+ expected_features.push_back(Feature(14, 10));
+ expected_features.push_back(Feature(10, 6));
+
+ CheckExpectedFeatures(detected_features, expected_features);
+}
+
+} // namespace
+
+#ifndef LIBMV_NO_FAST_DETECTOR
+TEST(Detect, FASTSinglePointTest) {
+ DetectOptions options;
+ options.type = DetectOptions::FAST;
+ options.min_distance = 0;
+ options.fast_min_trackness = 1;
+
+ PreformSinglePointTest(options);
+}
+#endif // LIBMV_NO_FAST_DETECTOR
+
+#if 0
+// TODO(sergey): FAST doesn't detect checker board corners, but should it?
+TEST(Detect, FASTCheckerBoardTest) {
+ DetectOptions options;
+ options.type = DetectOptions::FAST;
+ options.min_distance = 0;
+ options.fast_min_trackness = 1;
+
+ PreformCheckerBoardTest(options);
+}
+#endif
+
+#if 0
+// TODO(sergey): FAST doesn't detect triangle corners!
+TEST(Detect, FASTSingleTriangleTest) {
+ DetectOptions options;
+ options.type = DetectOptions::FAST;
+ options.margin = 3;
+ options.min_distance = 0;
+ options.fast_min_trackness = 2;
+
+ PreformSingleTriangleTest(options);
+}
+#endif
+
+#if 0
+// TODO(sergey): This doesn't actually detect a single point. Should it, or
+// is it expected that Moravec wouldn't consider a single point a feature?
+//
+// Uncomment this, or remove it, as soon as we know the answer to that
+// question.
+TEST(Detect, MoravecSinglePointTest) {
+ DetectOptions options;
+ options.type = DetectOptions::MORAVEC;
+ options.min_distance = 0;
+ options.moravec_max_count = 10;
+
+ PreformSinglePointTest(options);
+}
+
+// TODO(sergey): Moravec doesn't detect checker board corners, but should it?
+TEST(Detect, MoravecCheckerBoardTest) {
+ DetectOptions options;
+ options.type = DetectOptions::MORAVEC;
+ options.min_distance = 0;
+ options.moravec_max_count = 10;
+
+ PreformCheckerBoardTest(options);
+}
+#endif
+
+TEST(Detect, HarrisSinglePointTest) {
+ DetectOptions options;
+ options.type = DetectOptions::HARRIS;
+
+  // Set these to non-zero values so that image corners are not considered
+  // feature points, and so that the center point's neighbors are not
+  // considered features either.
+ options.margin = 3;
+ options.min_distance = 3;
+
+ PreformSinglePointTest(options);
+}
+
+TEST(Detect, HarrisSingleTriangleTest) {
+ DetectOptions options;
+ options.type = DetectOptions::HARRIS;
+
+ options.margin = 3;
+ options.min_distance = 2;
+ options.harris_threshold = 1e-3;
+
+ PreformSingleTriangleTest(options);
+}
+
+// TODO(sergey): Add tests for margin option.
+
+// TODO(sergey): Add tests for min_distance option.
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.cc b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
new file mode 100644
index 00000000000..9b6dca2678a
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
@@ -0,0 +1,197 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/distortion_models.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+
+namespace libmv {
+
+namespace {
+
+struct InvertPolynomialIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ InvertPolynomialIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1), k2_(k2), k3_(k3),
+ p1_(p1), p2_(p2),
+ x_(image_x), y_(image_y) {}
+
+ Vec2 operator()(const Vec2 &u) const {
+ double xx, yy;
+
+ ApplyPolynomialDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ k1_, k2_, k3_,
+ p1_, p2_,
+ u(0), u(1),
+ &xx, &yy);
+
+ Vec2 fx;
+ fx << (xx - x_), (yy - y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ double k1_, k2_, k3_;
+ double p1_, p2_;
+ double x_, y_;
+};
+
+struct InvertDivisionIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ InvertDivisionIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1), k2_(k2),
+ x_(image_x), y_(image_y) {}
+
+ Vec2 operator()(const Vec2 &u) const {
+ double xx, yy;
+
+ ApplyDivisionDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ k1_, k2_,
+ u(0), u(1),
+ &xx, &yy);
+
+ Vec2 fx;
+ fx << (xx - x_), (yy - y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ double k1_, k2_;
+ double x_, y_;
+};
+
+} // namespace
+
+void InvertPolynomialDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 normalized;
+ normalized(0) = (image_x - principal_point_x) / focal_length_x;
+ normalized(1) = (image_y - principal_point_y) / focal_length_y;
+
+ typedef LevenbergMarquardt<InvertPolynomialIntrinsicsCostFunction> Solver;
+
+ InvertPolynomialIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ k1, k2, k3,
+ p1, p2,
+ image_x, image_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &normalized);
+
+ // TODO(keir): Better error handling.
+
+ *normalized_x = normalized(0);
+ *normalized_y = normalized(1);
+}
+
+void InvertDivisionDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 normalized;
+ normalized(0) = (image_x - principal_point_x) / focal_length_x;
+ normalized(1) = (image_y - principal_point_y) / focal_length_y;
+
+ // TODO(sergey): Use Ceres minimizer instead.
+ typedef LevenbergMarquardt<InvertDivisionIntrinsicsCostFunction> Solver;
+
+ InvertDivisionIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ k1, k2,
+ image_x, image_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &normalized);
+
+ // TODO(keir): Better error handling.
+
+ *normalized_x = normalized(0);
+ *normalized_y = normalized(1);
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.h b/intern/libmv/libmv/simple_pipeline/distortion_models.h
new file mode 100644
index 00000000000..4f8e2295a0e
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+#define LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+
+namespace libmv {
+
+enum DistortionModelType {
+ DISTORTION_MODEL_POLYNOMIAL,
+ DISTORTION_MODEL_DIVISION
+};
+
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the polynomial (radial and tangential) lens distortion for a
+// point which is in image pixel coordinates to get normalized coordinates.
+void InvertPolynomialDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y);
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the polynomial (radial and tangential) lens distortion to a
+// point which is in normalized camera coordinates (i.e. the principal point
+// is at (0, 0)) to get image coordinates in pixels. Templated for use with
+// autodifferentiation.
+template <typename T>
+inline void ApplyPolynomialDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const T &k1,
+ const T &k2,
+ const T &k3,
+ const T &p1,
+ const T &p2,
+ const T &normalized_x,
+ const T &normalized_y,
+ T *image_x,
+ T *image_y) {
+ T x = normalized_x;
+ T y = normalized_y;
+
+ // Apply distortion to the normalized points to get (xd, yd).
+ T r2 = x*x + y*y;
+ T r4 = r2 * r2;
+ T r6 = r4 * r2;
+ T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6);
+ T xd = x * r_coeff + T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
+ T yd = y * r_coeff + T(2)*p2*x*y + p1*(r2 + T(2)*y*y);
+
+ // Apply focal length and principal point to get the final image coordinates.
+ *image_x = focal_length_x * xd + principal_point_x;
+ *image_y = focal_length_y * yd + principal_point_y;
+}
+
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the radial lens distortion to a point which is in image pixel
+// coordinates to get normalized coordinates.
+//
+// Uses division distortion model.
+void InvertDivisionDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y);
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the radial lens distortion to a point which is in normalized
+// camera coordinates (i.e. the principal point is at (0, 0)) to get image
+// coordinates in pixels. Templated for use with autodifferentiation.
+//
+// Uses division distortion model.
+template <typename T>
+inline void ApplyDivisionDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const T &k1,
+ const T &k2,
+ const T &normalized_x,
+ const T &normalized_y,
+ T *image_x,
+ T *image_y) {
+
+ T x = normalized_x;
+ T y = normalized_y;
+ T r2 = x*x + y*y;
+ T r4 = r2 * r2;
+
+ T xd = x / (T(1) + k1 * r2 + k2 * r4);
+ T yd = y / (T(1) + k1 * r2 + k2 * r4);
+
+ // Apply focal length and principal point to get the final image coordinates.
+ *image_x = focal_length_x * xd + principal_point_x;
+ *image_y = focal_length_y * yd + principal_point_y;
+}
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
diff --git a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
new file mode 100644
index 00000000000..7a086c375d5
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.cc
@@ -0,0 +1,195 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/initialize_reconstruction.h"
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+namespace {
+
+void GetImagesInMarkers(const vector<Marker> &markers,
+ int *image1, int *image2) {
+ if (markers.size() < 2) {
+ return;
+ }
+ *image1 = markers[0].image;
+ for (int i = 1; i < markers.size(); ++i) {
+ if (markers[i].image != *image1) {
+ *image2 = markers[i].image;
+ return;
+ }
+ }
+ *image2 = -1;
+ LOG(FATAL) << "Only one image in the markers.";
+}
+
+} // namespace
+
+bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
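+  // Each track contributes one marker per frame, so 16 markers correspond to
+  // the minimum of 8 point correspondences required by the eight-point
+  // algorithm used below.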
+ if (markers.size() < 16) {
+ LG << "Not enough markers to initialize from two frames: " << markers.size();
+ return false;
+ }
+
+ int image1, image2;
+ GetImagesInMarkers(markers, &image1, &image2);
+
+ Mat x1, x2;
+ CoordinatesForMarkersInImage(markers, image1, &x1);
+ CoordinatesForMarkersInImage(markers, image2, &x2);
+
+ Mat3 F;
+ NormalizedEightPointSolver(x1, x2, &F);
+
+ // The F matrix should be an E matrix, but squash it just to be sure.
+ Mat3 E;
+ FundamentalToEssential(F, &E);
+
+ // Recover motion between the two images. Since this function assumes a
+ // calibrated camera, use the identity for K.
+ Mat3 R;
+ Vec3 t;
+ Mat3 K = Mat3::Identity();
+ if (!MotionFromEssentialAndCorrespondence(E,
+ K, x1.col(0),
+ K, x2.col(0),
+ &R, &t)) {
+ LG << "Failed to compute R and t from E and K.";
+ return false;
+ }
+
+ // Image 1 gets the reference frame, image 2 gets the relative motion.
+ reconstruction->InsertCamera(image1, Mat3::Identity(), Vec3::Zero());
+ reconstruction->InsertCamera(image2, R, t);
+
+ LG << "From two frame reconstruction got:\nR:\n" << R
+ << "\nt:" << t.transpose();
+ return true;
+}
+
+namespace {
+
+Mat3 DecodeF(const Vec9 &encoded_F) {
+ // Decode F and force it to be rank 2.
+ Map<const Mat3> full_rank_F(encoded_F.data(), 3, 3);
+ Eigen::JacobiSVD<Mat3> svd(full_rank_F,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ Vec3 diagonal = svd.singularValues();
+ diagonal(2) = 0;
+ Mat3 F = svd.matrixU() * diagonal.asDiagonal() * svd.matrixV().transpose();
+ return F;
+}
+
+// This is the stupidest way to refine F known to mankind, since it requires
+// doing a full SVD of F at each iteration. This uses the Sampson error.
+struct FundamentalSampsonCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec9 XMatrixType;
+
+ // Assumes markers are ordered by track.
+ explicit FundamentalSampsonCostFunction(const vector<Marker> &markers)
+ : markers(markers) {}
+
+ Vec operator()(const Vec9 &encoded_F) const {
+ // Decode F and force it to be rank 2.
+ Mat3 F = DecodeF(encoded_F);
+
+ Vec residuals(markers.size() / 2);
+ residuals.setZero();
+ for (int i = 0; i < markers.size() / 2; ++i) {
+ const Marker &marker1 = markers[2*i + 0];
+ const Marker &marker2 = markers[2*i + 1];
+ CHECK_EQ(marker1.track, marker2.track);
+ Vec2 x1(marker1.x, marker1.y);
+ Vec2 x2(marker2.x, marker2.y);
+
+ residuals[i] = SampsonDistance(F, x1, x2);
+ }
+ return residuals;
+ }
+ const vector<Marker> &markers;
+};
+
+} // namespace
+
+bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 16) {
+ return false;
+ }
+
+ int image1, image2;
+ GetImagesInMarkers(markers, &image1, &image2);
+
+ Mat x1, x2;
+ CoordinatesForMarkersInImage(markers, image1, &x1);
+ CoordinatesForMarkersInImage(markers, image2, &x2);
+
+ Mat3 F;
+ NormalizedEightPointSolver(x1, x2, &F);
+
+ // XXX Verify sampson distance.
+#if 0
+ // Refine the resulting projection fundamental matrix using Sampson's
+ // approximation of geometric error. This avoids having to do a full bundle
+ // at the cost of some accuracy.
+ //
+ // TODO(keir): After switching to a better bundling library, use a proper
+ // full bundle adjust here instead of this lame bundle adjustment.
+ typedef LevenbergMarquardt<FundamentalSampsonCostFunction> Solver;
+
+ FundamentalSampsonCostFunction fundamental_cost(markers);
+
+  // Pack the initial F matrix into a size-9 vector.
+ Vec9 encoded_F = Map<Vec9>(F.data(), 3, 3);
+
+ Solver solver(fundamental_cost);
+
+ Solver::SolverParameters params;
+ Solver::Results results = solver.minimize(params, &encoded_F);
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Recover F from the minimization.
+ F = DecodeF(encoded_F);
+#endif
+
+ // Image 1 gets P = [I|0], image 2 gets arbitrary P.
+ Mat34 P1 = Mat34::Zero();
+ P1.block<3, 3>(0, 0) = Mat3::Identity();
+ Mat34 P2;
+ ProjectionsFromFundamental(F, &P1, &P2);
+
+ reconstruction->InsertCamera(image1, P1);
+ reconstruction->InsertCamera(image2, P2);
+
+ LG << "From two frame reconstruction got P2:\n" << P2;
+ return true;
+}
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
new file mode 100644
index 00000000000..744436246b0
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
@@ -0,0 +1,74 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
+#define LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
+
+#include "libmv/base/vector.h"
+
+namespace libmv {
+
+struct Marker;
+class EuclideanReconstruction;
+class ProjectiveReconstruction;
+
+/*!
+ Initialize the \link EuclideanReconstruction reconstruction \endlink using
+ two frames.
+
+  \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in both frames. The pose estimation of the camera for
+ these frames will be inserted into \a *reconstruction.
+
+ \note The two frames need to have both enough parallax and enough common tracks
+ for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of
+ the first keyframe.
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanBundle
+*/
+bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction);
+
+/*!
+ Initialize the \link ProjectiveReconstruction reconstruction \endlink using
+ two frames.
+
+  \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in both frames. An estimate of the projection matrices for
+ the two frames will get added to the reconstruction.
+
+ \note The two frames need to have both enough parallax and enough common tracks
+ for accurate reconstruction. At least 8 tracks are suggested.
+ \note The origin of the coordinate system is defined to be the camera of
+ the first keyframe.
+ \note This assumes the markers are already corrected for radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
+*/
+bool ProjectiveReconstructTwoFrames(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_INITIALIZE_RECONSTRUCTION_H
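A minimal calling sketch for the two-frame initializer declared above, assuming tracks is an already populated Tracks instance and keyframe1/keyframe2 are the chosen frames (the marker query is the same one the keyframe selection code uses):

    EuclideanReconstruction reconstruction;
    vector<Marker> markers =
        tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
    if (!EuclideanReconstructTwoFrames(markers, &reconstruction)) {
      // Not enough markers or a degenerate pair; try other keyframes.
    }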
diff --git a/intern/libmv/libmv/simple_pipeline/intersect.cc b/intern/libmv/libmv/simple_pipeline/intersect.cc
new file mode 100644
index 00000000000..ddb713684a4
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/intersect.cc
@@ -0,0 +1,254 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/intersect.h"
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/multiview/triangulation.h"
+#include "libmv/multiview/nviewtriangulation.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+#include "ceres/ceres.h"
+
+namespace libmv {
+
+namespace {
+
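+// Cost functor for the Ceres refinement below: projects a candidate 3D
+// point X into a single camera (x = R * X + t, divided by depth) and
+// returns the weighted difference to the observed marker position.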
+class EuclideanIntersectCostFunctor {
+ public:
+ EuclideanIntersectCostFunctor(const Marker &marker,
+ const EuclideanCamera &camera)
+ : marker_(marker), camera_(camera) {}
+
+ template<typename T>
+ bool operator()(const T *X, T *residuals) const {
+ typedef Eigen::Matrix<T, 3, 3> Mat3;
+ typedef Eigen::Matrix<T, 3, 1> Vec3;
+
+ Vec3 x(X);
+ Mat3 R(camera_.R.cast<T>());
+ Vec3 t(camera_.t.cast<T>());
+
+ Vec3 projected = R * x + t;
+ projected /= projected(2);
+
+ residuals[0] = (projected(0) - T(marker_.x)) * marker_.weight;
+ residuals[1] = (projected(1) - T(marker_.y)) * marker_.weight;
+
+ return true;
+ }
+
+ const Marker &marker_;
+ const EuclideanCamera &camera_;
+};
+
+} // namespace
+
+bool EuclideanIntersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
+ if (markers.size() < 2) {
+ return false;
+ }
+
+ // Compute projective camera matrices for the cameras the intersection is
+ // going to use.
+ Mat3 K = Mat3::Identity();
+ vector<Mat34> cameras;
+ Mat34 P;
+ for (int i = 0; i < markers.size(); ++i) {
+ EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ P_From_KRt(K, camera->R, camera->t, &P);
+ cameras.push_back(P);
+ }
+
+ // Stack the 2D coordinates together as required by NViewTriangulate.
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+
+ Vec4 Xp;
+ LG << "Intersecting with " << markers.size() << " markers.";
+ NViewTriangulateAlgebraic(points, cameras, &Xp);
+
+  // Get the Euclidean version of the homogeneous point.
+ Xp /= Xp(3);
+ Vec3 X = Xp.head<3>();
+
+ ceres::Problem problem;
+
+ // Add residual blocks to the problem.
+ int num_residuals = 0;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ if (marker.weight != 0.0) {
+ const EuclideanCamera &camera =
+ *reconstruction->CameraForImage(marker.image);
+
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<
+ EuclideanIntersectCostFunctor,
+ 2, /* num_residuals */
+ 3>(new EuclideanIntersectCostFunctor(marker, camera)),
+ NULL,
+ &X(0));
+ num_residuals++;
+ }
+ }
+
+  // TODO(sergey): Once we update Ceres to the next version
+  // we won't need this check anymore -- Ceres will deal with
+  // zero-sized problems nicely.
+ LG << "Number of residuals: " << num_residuals;
+ if (!num_residuals) {
+ LG << "Skipping running minimizer with zero residuals";
+
+    // We still add a 3D point for the track regardless of whether it was
+    // optimized or not. If the track has constant zero weight, the
+    // algebraic intersection result is used as its 3D coordinate.
+
+ Vec3 point = X.head<3>();
+ reconstruction->InsertPoint(markers[0].track, point);
+
+ return true;
+ }
+
+ // Configure the solve.
+ ceres::Solver::Options solver_options;
+ solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.max_num_iterations = 50;
+ solver_options.update_state_every_iteration = true;
+ solver_options.parameter_tolerance = 1e-16;
+ solver_options.function_tolerance = 1e-16;
+
+ // Run the solve.
+ ceres::Solver::Summary summary;
+ ceres::Solve(solver_options, &problem, &summary);
+
+ VLOG(1) << "Summary:\n" << summary.FullReport();
+
+ // Try projecting the point; make sure it's in front of everyone.
+ for (int i = 0; i < cameras.size(); ++i) {
+ const EuclideanCamera &camera =
+ *reconstruction->CameraForImage(markers[i].image);
+ Vec3 x = camera.R * X + camera.t;
+ if (x(2) < 0) {
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
+ << ": " << x.transpose();
+ return false;
+ }
+ }
+
+ Vec3 point = X.head<3>();
+ reconstruction->InsertPoint(markers[0].track, point);
+
+ // TODO(keir): Add proper error checking.
+ return true;
+}
+
+namespace {
+
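+// Cost function for the Levenberg-Marquardt refinement below: stacks the
+// 2D reprojection errors of a homogeneous point X in every camera that
+// observes the track.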
+struct ProjectiveIntersectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec4 XMatrixType;
+
+ ProjectiveIntersectCostFunction(
+ const vector<Marker> &markers,
+ const ProjectiveReconstruction &reconstruction)
+ : markers(markers), reconstruction(reconstruction) {}
+
+ Vec operator()(const Vec4 &X) const {
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const ProjectiveCamera &camera =
+ *reconstruction.CameraForImage(markers[i].image);
+ Vec3 projected = camera.P * X;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+ const vector<Marker> &markers;
+ const ProjectiveReconstruction &reconstruction;
+};
+
+} // namespace
+
+bool ProjectiveIntersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 2) {
+ return false;
+ }
+
+ // Get the cameras to use for the intersection.
+ vector<Mat34> cameras;
+ for (int i = 0; i < markers.size(); ++i) {
+ ProjectiveCamera *camera = reconstruction->CameraForImage(markers[i].image);
+ cameras.push_back(camera->P);
+ }
+
+ // Stack the 2D coordinates together as required by NViewTriangulate.
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+
+ Vec4 X;
+ LG << "Intersecting with " << markers.size() << " markers.";
+ NViewTriangulateAlgebraic(points, cameras, &X);
+ X /= X(3);
+
+ typedef LevenbergMarquardt<ProjectiveIntersectCostFunction> Solver;
+
+ ProjectiveIntersectCostFunction triangulate_cost(markers, *reconstruction);
+ Solver::SolverParameters params;
+ Solver solver(triangulate_cost);
+
+ Solver::Results results = solver.minimize(params, &X);
+ (void) results; // TODO(keir): Ensure results are good.
+
+ // Try projecting the point; make sure it's in front of everyone.
+ for (int i = 0; i < cameras.size(); ++i) {
+ const ProjectiveCamera &camera =
+ *reconstruction->CameraForImage(markers[i].image);
+ Vec3 x = camera.P * X;
+ if (x(2) < 0) {
+ LOG(ERROR) << "POINT BEHIND CAMERA " << markers[i].image
+ << ": " << x.transpose();
+ }
+ }
+
+ reconstruction->InsertPoint(markers[0].track, X);
+
+ // TODO(keir): Add proper error checking.
+ return true;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/intersect.h b/intern/libmv/libmv/simple_pipeline/intersect.h
new file mode 100644
index 00000000000..3a0ffa7418b
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/intersect.h
@@ -0,0 +1,77 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_INTERSECT_H
+#define LIBMV_SIMPLE_PIPELINE_INTERSECT_H
+
+#include "libmv/base/vector.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate the 3D coordinates of a track by intersecting rays from images.
+
+ This takes a set of markers, where each marker is for the same track but
+ different images, and reconstructs the 3D position of that track. Each of
+ the frames for which there is a marker for that track must have a
+ corresponding reconstructed camera in \a *reconstruction.
+
+  \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in all frames.
+ \a reconstruction should contain the cameras for all frames.
+  The new \link Point points \endlink will be inserted in \a reconstruction.
+
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \sa EuclideanResect
+*/
+bool EuclideanIntersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction);
+
+/*!
+ Estimate the homogeneous coordinates of a track by intersecting rays.
+
+ This takes a set of markers, where each marker is for the same track but
+ different images, and reconstructs the homogeneous 3D position of that
+ track. Each of the frames for which there is a marker for that track must
+ have a corresponding reconstructed camera in \a *reconstruction.
+
+  \a markers should contain all \link Marker markers \endlink belonging to
+ tracks visible in all frames.
+ \a reconstruction should contain the cameras for all frames.
+  The new \link Point points \endlink will be inserted in \a reconstruction.
+
+ \note This assumes that radial distortion is already corrected for, but
+ does not assume that e.g. focal length and principal point are
+ accounted for.
+ \note This assumes an outlier-free set of markers.
+
+ \sa Resect
+*/
+bool ProjectiveIntersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_INTERSECT_H
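A minimal calling sketch for EuclideanIntersect, assuming reconstruction already holds a camera for every image the track appears in, and marker_a/marker_b are placeholders for that track's (already normalized) markers in two such images:

    vector<Marker> markers;
    markers.push_back(marker_a);
    markers.push_back(marker_b);
    if (EuclideanIntersect(markers, &reconstruction)) {
      Vec3 X = reconstruction.PointForTrack(markers[0].track)->X;
    }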
diff --git a/intern/libmv/libmv/simple_pipeline/intersect_test.cc b/intern/libmv/libmv/simple_pipeline/intersect_test.cc
new file mode 100644
index 00000000000..dd4fdc789af
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/intersect_test.cc
@@ -0,0 +1,81 @@
+// Copyright (c) 2007, 2008 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/intersect.h"
+
+#include <iostream>
+
+#include "testing/testing.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+TEST(Intersect, EuclideanIntersect) {
+ Mat3 K1 = Mat3::Identity();
+ // K1 << 320, 0, 160,
+ // 0, 320, 120,
+ // 0, 0, 1;
+ Mat3 K2 = Mat3::Identity();
+ // K2 << 360, 0, 170,
+ // 0, 360, 110,
+ // 0, 0, 1;
+ Mat3 R1 = RotationAroundZ(-0.1);
+ Mat3 R2 = RotationAroundX(-0.1);
+ Vec3 t1; t1 << 1, 1, 10;
+ Vec3 t2; t2 << -2, -1, 10;
+ Mat34 P1, P2;
+ P_From_KRt(K1, R1, t1, &P1);
+ P_From_KRt(K2, R2, t2, &P2);
+
+ //Mat3 F; FundamentalFromProjections(P1, P2, &F);
+
+ Mat3X X;
+ X.resize(3, 30);
+ X.setRandom();
+
+ Mat2X X1, X2;
+ Project(P1, X, &X1);
+ Project(P2, X, &X2);
+
+ for (int i = 0; i < X.cols(); ++i) {
+ Vec2 x1, x2;
+ MatrixColumn(X1, i, &x1);
+ MatrixColumn(X2, i, &x2);
+ Vec3 expected;
+ MatrixColumn(X, i, &expected);
+
+ EuclideanReconstruction reconstruction;
+ reconstruction.InsertCamera(1, R1, t1);
+ reconstruction.InsertCamera(2, R2, t2);
+
+ vector<Marker> markers;
+ Marker a = { 1, 0, x1.x(), x1.y(), 1.0 };
+ markers.push_back(a);
+ Marker b = { 2, 0, x2.x(), x2.y(), 1.0 };
+ markers.push_back(b);
+
+ EuclideanIntersect(markers, &reconstruction);
+ Vec3 estimated = reconstruction.PointForTrack(0)->X;
+ EXPECT_NEAR(0, DistanceLInfinity(estimated, expected), 1e-8);
+ }
+}
+}  // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc b/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc
new file mode 100644
index 00000000000..241b5600505
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection.cc
@@ -0,0 +1,450 @@
+// Copyright (c) 2012 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/keyframe_selection.h"
+
+#include "libmv/numeric/numeric.h"
+#include "ceres/ceres.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/homography.h"
+#include "libmv/multiview/fundamental.h"
+#include "libmv/simple_pipeline/intersect.h"
+#include "libmv/simple_pipeline/bundle.h"
+
+#include <Eigen/Eigenvalues>
+
+namespace libmv {
+namespace {
+
+Mat3 IntrinsicsNormalizationMatrix(const CameraIntrinsics &intrinsics) {
+ Mat3 T = Mat3::Identity(), S = Mat3::Identity();
+
+ T(0, 2) = -intrinsics.principal_point_x();
+ T(1, 2) = -intrinsics.principal_point_y();
+
+ S(0, 0) /= intrinsics.focal_length_x();
+ S(1, 1) /= intrinsics.focal_length_y();
+
+ return S * T;
+}
+
+// P.H.S. Torr
+// Geometric Motion Segmentation and Model Selection
+//
+// http://reference.kfupm.edu.sa/content/g/e/geometric_motion_segmentation_and_model__126445.pdf
+//
+// d is the number of dimensions modeled
+// (d = 3 for a fundamental matrix or 2 for a homography)
+// k is the number of degrees of freedom in the model
+// (k = 7 for a fundamental matrix or 8 for a homography)
+// r is the dimension of the data
+// (r = 4 for 2D correspondences between two frames)
+double GRIC(const Vec &e, int d, int k, int r) {
+ int n = e.rows();
+ double lambda1 = log(static_cast<double>(r));
+ double lambda2 = log(static_cast<double>(r * n));
+
+  // lambda3 limits the residual error; this paper
+  // http://elvera.nue.tu-berlin.de/files/0990Knorr2006.pdf
+  // suggests using lambda3 = 2. The same value is used in Torr's
+  // "Problem of degeneracy in structure and motion recovery from
+  // uncalibrated image sequences":
+  // http://www.robots.ox.ac.uk/~vgg/publications/papers/torr99.ps.gz
+ double lambda3 = 2.0;
+
+ // Variance of tracker position. Physically, this is typically about 0.1px,
+ // and when squared becomes 0.01 px^2.
+ double sigma2 = 0.01;
+
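+  // The score computed below is:
+  //   GRIC = sum_i min(e_i^2 / sigma2, lambda3 * (r - d))
+  //          + lambda1 * d * n + lambda2 * k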
+ // Finally, calculate the GRIC score.
+ double gric = 0.0;
+ for (int i = 0; i < n; i++) {
+ gric += std::min(e(i) * e(i) / sigma2, lambda3 * (r - d));
+ }
+ gric += lambda1 * d * n;
+ gric += lambda2 * k;
+ return gric;
+}
+
+// Compute a generalized inverse using eigenvalue decomposition, clamping the
+// smallest eigenvalues if requested. This is needed to compute the variance of
+// reconstructed 3D points.
+//
+// TODO(keir): Consider moving this into the numeric code, since this is not
+// related to keyframe selection.
+Mat PseudoInverseWithClampedEigenvalues(const Mat &matrix,
+ int num_eigenvalues_to_clamp) {
+ Eigen::EigenSolver<Mat> eigen_solver(matrix);
+ Mat D = eigen_solver.pseudoEigenvalueMatrix();
+ Mat V = eigen_solver.pseudoEigenvectors();
+
+  // Invert the eigenvalues, clamping too-small ones to zero to prevent
+  // numeric blowup.
+ double epsilon = std::numeric_limits<double>::epsilon();
+ for (int i = 0; i < D.cols(); ++i) {
+ if (D(i, i) > epsilon) {
+ D(i, i) = 1.0 / D(i, i);
+ } else {
+ D(i, i) = 0.0;
+ }
+ }
+
+ // Apply the clamp.
+ for (int i = D.cols() - num_eigenvalues_to_clamp; i < D.cols(); ++i) {
+ D(i, i) = 0.0;
+ }
+ return V * D * V.inverse();
+}
+
+void FilterZeroWeightMarkersFromTracks(const Tracks &tracks,
+ Tracks *filtered_tracks) {
+ vector<Marker> all_markers = tracks.AllMarkers();
+
+ for (int i = 0; i < all_markers.size(); ++i) {
+ Marker &marker = all_markers[i];
+ if (marker.weight != 0.0) {
+ filtered_tracks->Insert(marker.image,
+ marker.track,
+ marker.x,
+ marker.y,
+ marker.weight);
+ }
+ }
+}
+
+} // namespace
+
+void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
+ const CameraIntrinsics &intrinsics,
+ vector<int> &keyframes) {
+ // Mirza Tahir Ahmed, Matthew N. Dailey
+ // Robust key frame extraction for 3D reconstruction from video streams
+ //
+ // http://www.cs.ait.ac.th/~mdailey/papers/Tahir-KeyFrame.pdf
+
+ Tracks filtered_tracks;
+ FilterZeroWeightMarkersFromTracks(_tracks, &filtered_tracks);
+
+ int max_image = filtered_tracks.MaxImage();
+ int next_keyframe = 1;
+ int number_keyframes = 0;
+
+  // Limit the correspondence ratio from both sides.
+  // On the one hand, if the number of corresponding features is too low,
+  // triangulation will suffer.
+  // On the other hand, a high correspondence ratio likely means a short
+  // baseline, which will also hurt accuracy.
+ const double Tmin = 0.8;
+ const double Tmax = 1.0;
+
+ Mat3 N = IntrinsicsNormalizationMatrix(intrinsics);
+ Mat3 N_inverse = N.inverse();
+
+ double Sc_best = std::numeric_limits<double>::max();
+ double success_intersects_factor_best = 0.0f;
+
+ while (next_keyframe != -1) {
+ int current_keyframe = next_keyframe;
+ double Sc_best_candidate = std::numeric_limits<double>::max();
+
+ LG << "Found keyframe " << next_keyframe;
+
+ number_keyframes++;
+ next_keyframe = -1;
+
+ for (int candidate_image = current_keyframe + 1;
+ candidate_image <= max_image;
+ candidate_image++) {
+      // All markers from both keyframes.
+ vector<Marker> all_markers =
+ filtered_tracks.MarkersInBothImages(current_keyframe,
+ candidate_image);
+
+ // Match keypoints between frames current_keyframe and candidate_image
+ vector<Marker> tracked_markers =
+ filtered_tracks.MarkersForTracksInBothImages(current_keyframe,
+ candidate_image);
+
+ // Correspondences in normalized space
+ Mat x1, x2;
+ CoordinatesForMarkersInImage(tracked_markers, current_keyframe, &x1);
+ CoordinatesForMarkersInImage(tracked_markers, candidate_image, &x2);
+
+ LG << "Found " << x1.cols()
+ << " correspondences between " << current_keyframe
+ << " and " << candidate_image;
+
+ // Not enough points to construct fundamental matrix
+ if (x1.cols() < 8 || x2.cols() < 8)
+ continue;
+
+ // STEP 1: Correspondence ratio constraint
+ int Tc = tracked_markers.size();
+ int Tf = all_markers.size();
+ double Rc = static_cast<double>(Tc) / Tf;
+
+ LG << "Correspondence between " << current_keyframe
+ << " and " << candidate_image
+ << ": " << Rc;
+
+ if (Rc < Tmin || Rc > Tmax)
+ continue;
+
+ Mat3 H, F;
+
+ // Estimate homography using default options.
+ EstimateHomographyOptions estimate_homography_options;
+ EstimateHomography2DFromCorrespondences(x1,
+ x2,
+ estimate_homography_options,
+ &H);
+
+ // Convert homography to original pixel space.
+ H = N_inverse * H * N;
+
+ EstimateFundamentalOptions estimate_fundamental_options;
+ EstimateFundamentalFromCorrespondences(x1,
+ x2,
+ estimate_fundamental_options,
+ &F);
+
+ // Convert fundamental to original pixel space.
+ F = N_inverse * F * N;
+
+ // TODO(sergey): STEP 2: Discard outlier matches
+
+ // STEP 3: Geometric Robust Information Criteria
+
+ // Compute error values for homography and fundamental matrices
+ Vec H_e, F_e;
+ H_e.resize(x1.cols());
+ F_e.resize(x1.cols());
+ for (int i = 0; i < x1.cols(); i++) {
+ Vec2 current_x1, current_x2;
+
+ intrinsics.NormalizedToImageSpace(x1(0, i), x1(1, i),
+ &current_x1(0), &current_x1(1));
+
+ intrinsics.NormalizedToImageSpace(x2(0, i), x2(1, i),
+ &current_x2(0), &current_x2(1));
+
+ H_e(i) = SymmetricGeometricDistance(H, current_x1, current_x2);
+ F_e(i) = SymmetricEpipolarDistance(F, current_x1, current_x2);
+ }
+
+ LG << "H_e: " << H_e.transpose();
+ LG << "F_e: " << F_e.transpose();
+
+ // Degeneracy constraint
+ double GRIC_H = GRIC(H_e, 2, 8, 4);
+ double GRIC_F = GRIC(F_e, 3, 7, 4);
+
+ LG << "GRIC values for frames " << current_keyframe
+ << " and " << candidate_image
+ << ", H-GRIC: " << GRIC_H
+ << ", F-GRIC: " << GRIC_F;
+
+ if (GRIC_H <= GRIC_F)
+ continue;
+
+ // TODO(sergey): STEP 4: PELC criterion
+
+ // STEP 5: Estimation of reconstruction error
+ //
+      // Uses the paper "Keyframe Selection for Camera Motion and Structure
+      // Estimation from Multiple Views",
+      // ftp://ftp.tnt.uni-hannover.de/pub/papers/2004/ECCV2004-TTHBAW.pdf
+      // Basically, equation (15).
+      //
+      // TODO(sergey): Separate all the constraints into functions;
+      // this one is getting too cluttered already.
+
+ // Definitions in equation (15):
+ // - I is the number of 3D feature points
+ // - A is the number of essential parameters of one camera
+
+ EuclideanReconstruction reconstruction;
+
+ // The F matrix should be an E matrix, but squash it just to be sure
+
+ // Reconstruction should happen using normalized fundamental matrix
+ Mat3 F_normal = N * F * N_inverse;
+
+ Mat3 E;
+ FundamentalToEssential(F_normal, &E);
+
+ // Recover motion between the two images. Since this function assumes a
+ // calibrated camera, use the identity for K
+ Mat3 R;
+ Vec3 t;
+ Mat3 K = Mat3::Identity();
+
+ if (!MotionFromEssentialAndCorrespondence(E,
+ K, x1.col(0),
+ K, x2.col(0),
+ &R, &t)) {
+ LG << "Failed to compute R and t from E and K";
+ continue;
+ }
+
+ LG << "Camera transform between frames " << current_keyframe
+ << " and " << candidate_image
+ << ":\nR:\n" << R
+ << "\nt:" << t.transpose();
+
+ // First camera is identity, second one is relative to it
+ reconstruction.InsertCamera(current_keyframe,
+ Mat3::Identity(),
+ Vec3::Zero());
+ reconstruction.InsertCamera(candidate_image, R, t);
+
+ // Reconstruct 3D points
+ int intersects_total = 0, intersects_success = 0;
+ for (int i = 0; i < tracked_markers.size(); i++) {
+ if (!reconstruction.PointForTrack(tracked_markers[i].track)) {
+ vector<Marker> reconstructed_markers;
+
+ int track = tracked_markers[i].track;
+
+ reconstructed_markers.push_back(tracked_markers[i]);
+
+          // We know there are always only two markers for a track here.
+          // A brute-force search is used because we don't know the layout
+          // of the markers in the list, but at this point the loop will
+          // run just once, which is not a big deal.
+
+ for (int j = i + 1; j < tracked_markers.size(); j++) {
+ if (tracked_markers[j].track == track) {
+ reconstructed_markers.push_back(tracked_markers[j]);
+ break;
+ }
+ }
+
+ intersects_total++;
+
+ if (EuclideanIntersect(reconstructed_markers, &reconstruction)) {
+ LG << "Ran Intersect() for track " << track;
+ intersects_success++;
+ } else {
+ LG << "Filed to intersect track " << track;
+ }
+ }
+ }
+
+ double success_intersects_factor =
+ (double) intersects_success / intersects_total;
+
+ if (success_intersects_factor < success_intersects_factor_best) {
+ LG << "Skip keyframe candidate because of "
+ "lower successful intersections ratio";
+
+ continue;
+ }
+
+ success_intersects_factor_best = success_intersects_factor;
+
+ Tracks two_frames_tracks(tracked_markers);
+ PolynomialCameraIntrinsics empty_intrinsics;
+ BundleEvaluation evaluation;
+ evaluation.evaluate_jacobian = true;
+
+ EuclideanBundleCommonIntrinsics(two_frames_tracks,
+ BUNDLE_NO_INTRINSICS,
+ BUNDLE_NO_CONSTRAINTS,
+ &reconstruction,
+ &empty_intrinsics,
+ &evaluation);
+
+ Mat &jacobian = evaluation.jacobian;
+
+ Mat JT_J = jacobian.transpose() * jacobian;
+ // There are 7 degrees of freedom, so clamp them out.
+ Mat JT_J_inv = PseudoInverseWithClampedEigenvalues(JT_J, 7);
+
+ Mat temp_derived = JT_J * JT_J_inv * JT_J;
+ bool is_inversed = (temp_derived - JT_J).cwiseAbs2().sum() <
+ 1e-4 * std::min(temp_derived.cwiseAbs2().sum(),
+ JT_J.cwiseAbs2().sum());
+
+ LG << "Check on inversed: " << (is_inversed ? "true" : "false" )
+ << ", det(JT_J): " << JT_J.determinant();
+
+ if (!is_inversed) {
+ LG << "Ignoring candidature due to poor jacobian stability";
+ continue;
+ }
+
+ Mat Sigma_P;
+ Sigma_P = JT_J_inv.bottomRightCorner(evaluation.num_points * 3,
+ evaluation.num_points * 3);
+
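+      // Equation (15) restated: Sc = (I + A) / (3 * I)^2 * trace(Sigma_P),
+      // where I is the number of reconstructed 3D points and A is the
+      // number of essential parameters of a single camera (12 here).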
+ int I = evaluation.num_points;
+ int A = 12;
+
+ double Sc = static_cast<double>(I + A) / Square(3 * I) * Sigma_P.trace();
+
+ LG << "Expected estimation error between "
+ << current_keyframe << " and "
+ << candidate_image << ": " << Sc;
+
+ // Pairing with a lower Sc indicates a better choice
+ if (Sc > Sc_best_candidate)
+ continue;
+
+ Sc_best_candidate = Sc;
+
+ next_keyframe = candidate_image;
+ }
+
+    // This is a bit arbitrary; the main reason for having it is to deal
+    // better with situations when no keyframes were found for the current
+    // keyframe. This could happen when there is not much parallax at the
+    // beginning of the image sequence and most of the features then get
+    // occluded. In this case there could still be a good keyframe pair in
+    // the middle of the sequence.
+    //
+    // However, it's just a quick hack and a smarter way to do this would
+    // be nice.
+ if (next_keyframe == -1) {
+ next_keyframe = current_keyframe + 10;
+ number_keyframes = 0;
+
+ if (next_keyframe >= max_image)
+ break;
+
+ LG << "Starting searching for keyframes starting from " << next_keyframe;
+ } else {
+      // The new pair's expected reconstruction error is lower
+      // than the existing pair's.
+      //
+      // For now let's store just one candidate; it would be easy to
+      // store more candidates, but that needs some thought on how to
+      // choose the best one automatically (or allow the user to choose
+      // a pair manually).
+ if (Sc_best > Sc_best_candidate) {
+ keyframes.clear();
+ keyframes.push_back(current_keyframe);
+ keyframes.push_back(next_keyframe);
+ Sc_best = Sc_best_candidate;
+ }
+ }
+ }
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection.h b/intern/libmv/libmv/simple_pipeline/keyframe_selection.h
new file mode 100644
index 00000000000..aa3eeaf193d
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection.h
@@ -0,0 +1,53 @@
+// Copyright (c) 2010, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_KEYFRAME_SELECTION_H_
+#define LIBMV_SIMPLE_PIPELINE_KEYFRAME_SELECTION_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+namespace libmv {
+
+// Get a list of all images which are good enough to be used as keyframes
+// for camera reconstruction. Based on the GRIC criterion, and uses
+// Pollefeys' approach for the correspondence ratio constraint.
+//
+// In addition, a criterion based on reconstruction variance is used:
+// if the correspondence and GRIC criteria are passed, a two-frame
+// reconstruction using the candidate keyframes is run. After the
+// reconstruction, the variance of the 3D points is calculated, and if
+// the expected error estimate is too large, the keyframe candidate is
+// rejected.
+//
+// \param tracks contains all tracked correspondences between frames,
+//        expected to be undistorted and normalized
+// \param intrinsics is the camera intrinsics
+// \param keyframes will contain the numbers of all images which are
+//        considered good to be used for reconstruction
+void SelectKeyframesBasedOnGRICAndVariance(
+ const Tracks &tracks,
+ const CameraIntrinsics &intrinsics,
+ vector<int> &keyframes);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_KEYFRAME_SELECTION_H_
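A minimal calling sketch, assuming the markers have already been converted to normalized coordinates (e.g. via InvertIntrinsics, as the tests in the next file do) before being inserted into tracks:

    PolynomialCameraIntrinsics intrinsics;
    intrinsics.SetFocalLength(1380.0, 1380.0);
    intrinsics.SetPrincipalPoint(960.0, 540.0);
    intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);

    vector<int> keyframes;
    SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
    // keyframes now holds the best pair found, or stays empty.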
diff --git a/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc b/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc
new file mode 100644
index 00000000000..9d88362cc88
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/keyframe_selection_test.cc
@@ -0,0 +1,307 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/keyframe_selection.h"
+
+#include "testing/testing.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+// Synthetic test, N markers with the same translation.
+// Should not produce a keyframe pair.
+TEST(KeyframeSelection, SyntheticNeighborFrame) {
+ PolynomialCameraIntrinsics intrinsics;
+  intrinsics.SetFocalLength(900.0, 900.0);
+ intrinsics.SetPrincipalPoint(640.0, 540.0);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+
+ Tracks tracks;
+ const int markers_per_size = 15;
+
+ // Fill in tracks for homography estimation
+ for (int x = 0; x < markers_per_size; x++) {
+ for (int y = 0; y < markers_per_size; y++) {
+ double current_x = 10 + x * 40, current_y = 10 + y * 40;
+ double next_x = current_x + 10, next_y = current_y + 10;
+
+ intrinsics.InvertIntrinsics(current_x, current_y, &current_x, &current_y);
+ intrinsics.InvertIntrinsics(next_x, next_y, &next_x, &next_y);
+
+ tracks.Insert(1, y * markers_per_size + x, current_x, current_y);
+ tracks.Insert(2, y * markers_per_size + x, next_x, next_y);
+ }
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ // Synthetic second frame shouldn't be considered a keyframe
+ EXPECT_EQ(0, keyframes.size());
+}
+
+// Frames 1 and 2 of FabrikEingang footage
+// Only one wall is tracked, should not be keyframes
+TEST(KeyframeSelection, FabrikEingangNeighborFrames) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1605.797, 1605.797);
+ intrinsics.SetPrincipalPoint(960.000, 544.000);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 0, 737.599983, 646.397594, 1.0}, {2, 0, 737.906628, 648.113327, 1.0}, {1, 1, 863.045425, 646.081905, 1.0},
+ {2, 1, 863.339767, 647.650040, 1.0}, {1, 2, 736.959972, 574.080151, 1.0}, {2, 2, 737.217350, 575.604900, 1.0},
+ {1, 3, 864.097424, 573.374908, 1.0}, {2, 3, 864.383469, 574.900307, 1.0}, {1, 4, 789.429073, 631.677521, 1.0},
+ {2, 4, 789.893131, 633.124451, 1.0}, {1, 5, 791.051960, 573.442028, 1.0}, {2, 5, 791.336575, 575.088890, 1.0},
+ {1, 6, 738.973961, 485.130308, 1.0}, {2, 6, 739.435501, 486.734207, 1.0}, {1, 7, 862.403240, 514.866074, 1.0},
+ {2, 7, 862.660618, 516.413261, 1.0}, {1, 8, 802.240162, 485.759838, 1.0}, {2, 8, 802.602253, 487.432899, 1.0},
+ {1, 9, 754.340630, 500.624559, 1.0}, {2, 9, 754.559956, 502.079920, 1.0}, {1, 10, 849.398689, 484.480545, 1.0},
+ {2, 10, 849.599934, 486.079937, 1.0}, {1, 11, 788.803768, 515.924391, 1.0}, {2, 11, 789.119911, 517.439932, 1.0},
+ {1, 12, 838.733940, 558.212688, 1.0}, {2, 12, 839.039898, 559.679916, 1.0}, {1, 13, 760.014782, 575.194466, 1.0},
+ {2, 13, 760.319881, 576.639904, 1.0}, {1, 14, 765.321636, 616.015957, 1.0}, {2, 14, 765.759945, 617.599915, 1.0},
+ {1, 15, 800.963230, 660.032082, 1.0}, {2, 15, 801.279945, 661.759876, 1.0}, {1, 16, 846.321087, 602.313053, 1.0},
+ {2, 16, 846.719913, 603.839878, 1.0}, {1, 17, 864.288311, 616.790524, 1.0}, {2, 17, 864.639931, 618.239918, 1.0},
+ {1, 18, 800.006790, 602.573425, 1.0}, {2, 18, 800.319958, 604.159912, 1.0}, {1, 19, 739.026890, 617.944138, 1.0},
+ {2, 19, 739.199924, 619.519924, 1.0}, {1, 20, 801.987419, 544.134888, 1.0}, {2, 20, 802.239933, 545.599911, 1.0},
+ {1, 21, 753.619823, 542.961300, 1.0}, {2, 21, 753.919945, 544.639874, 1.0}, {1, 22, 787.921257, 499.910206, 1.0},
+ {2, 22, 788.159924, 501.439917, 1.0}, {1, 23, 839.095459, 529.287903, 1.0}, {2, 23, 839.359932, 530.879934, 1.0},
+ {1, 24, 811.760330, 630.732269, 1.0}, {2, 24, 812.159901, 632.319859, 1.0}
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ EXPECT_EQ(0, keyframes.size());
+}
+
+// Frames 120 and 200 from FabrikEingang footage.
+// Should have enough parallax for keyframing.
+TEST(KeyframeSelection, FabrikEingangFarFrames) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1605.797, 1605.797);
+ intrinsics.SetPrincipalPoint(960.000, 544.000);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 0, 369.459200, 619.315258, 1.0}, {2, 0, 279.677496, 722.086842, 1.0}, {1, 1, 376.831970, 370.278397, 1.0},
+ {2, 1, 221.695247, 460.065418, 1.0}, {1, 2, 1209.139023, 567.705605, 1.0}, {2, 2, 1080.760117, 659.230083, 1.0},
+ {1, 3, 1643.495750, 903.620453, 1.0}, {2, 3, 1618.405037, 1015.374908, 1.0}, {1, 4, 1494.849815, 425.302460, 1.0},
+ {2, 4, 1457.467575, 514.727587, 1.0}, {1, 5, 1794.637299, 328.728609, 1.0}, {2, 5, 1742.161446, 408.988636, 1.0},
+ {1, 6, 1672.822723, 102.240358, 1.0}, {2, 6, 1539.287224, 153.536892, 1.0}, {1, 7, 1550.843925, 53.424943, 1.0},
+ {2, 7, 1385.579109, 96.450085, 1.0}, {1, 8, 852.953281, 465.399578, 1.0}, {2, 8, 779.404564, 560.091843, 1.0},
+ {1, 9, 906.853752, 299.827040, 1.0}, {2, 9, 786.923218, 385.570770, 1.0}, {1, 10, 406.322966, 87.556041, 1.0},
+ {2, 10, 140.339413, 150.877481, 1.0}, {1, 11, 254.811573, 851.296478, 1.0}, {2, 11, 94.478302, 969.350189, 1.0},
+ {1, 12, 729.087868, 806.092758, 1.0}, {2, 12, 606.212139, 919.876560, 1.0}, {1, 13, 1525.719452, 920.398083, 1.0},
+ {2, 13, 1495.579720, 1031.971218, 1.0}
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ EXPECT_EQ(2, keyframes.size());
+}
+
+// Manually selected keyframes from copter footage from Sebastian
+// Keyframes were 167 and 237
+TEST(KeyframeSelection, CopterManualKeyFrames) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1155.043, 1155.043);
+ intrinsics.SetPrincipalPoint(640.000, 360.000);
+ intrinsics.SetRadialDistortion(-0.08590, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 0, 645.792694, 403.115931, 1.0}, {2, 0, 630.641174, 307.996409, 1.0}, {1, 1, 783.469086, 403.904328, 1.0},
+ {2, 1, 766.001129, 308.998225, 1.0}, {1, 2, 650.000000, 160.000001, 1.0}, {1, 3, 785.225906, 158.619039, 1.0},
+ {2, 3, 767.526474, 70.449695, 1.0}, {1, 4, 290.640526, 382.335634, 1.0}, {2, 4, 273.001728, 86.993319, 1.0},
+ {1, 5, 291.162739, 410.602684, 1.0}, {2, 5, 273.287849, 111.937487, 1.0}, {1, 6, 136.919317, 349.895797, 1.0},
+ {1, 7, 490.844345, 47.572222, 1.0}, {1, 8, 454.406433, 488.935761, 1.0}, {1, 9, 378.655815, 618.522248, 1.0},
+ {2, 9, 357.061806, 372.265077, 1.0}, {1, 10, 496.011391, 372.668824, 1.0}, {2, 10, 477.979164, 222.986112, 1.0},
+ {1, 11, 680.060272, 256.103625, 1.0}, {2, 11, 670.587540, 204.830453, 1.0}, {1, 12, 1070.817108, 218.775322, 1.0},
+ {2, 12, 1046.129913, 128.969783, 1.0}, {1, 14, 242.516403, 596.048512, 1.0}, {2, 14, 224.182606, 248.272154, 1.0},
+ {1, 15, 613.936272, 287.519073, 1.0}, {2, 15, 600.467644, 196.085722, 1.0}, {1, 31, 844.637451, 256.354315, 1.0},
+ {2, 31, 823.200150, 165.714952, 1.0},
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ EXPECT_EQ(2, keyframes.size());
+}
+
+// Used old friend elevator scene MMI_2366 with automatic feature selection
+// and manual outlier elimination and manual keyframe selection
+// Selected keyframes were 29 and 41
+TEST(KeyframeSelection, ElevatorManualKeyframesFrames) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1380.000, 1380.000);
+ intrinsics.SetPrincipalPoint(960.000, 540.000);
+ intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 2, 1139.861412, 1034.634984, 1.0}, {2, 2, 1143.512192, 1065.355718, 1.0}, {1, 3, 1760.821953, 644.658036, 1.0},
+ {2, 3, 1770.901108, 697.899928, 1.0}, {1, 4, 858.071823, 1068.520746, 1.0}, {1, 6, 1633.952408, 797.050145, 1.0},
+ {2, 6, 1642.508469, 849.157140, 1.0}, {1, 8, 1716.695824, 451.805491, 1.0}, {2, 8, 1726.513939, 502.095687, 1.0},
+ {1, 9, 269.577627, 724.986935, 1.0}, {2, 9, 269.424820, 764.154246, 1.0}, {1, 10, 1891.321907, 706.948843, 1.0},
+ {2, 10, 1903.338547, 766.068377, 1.0}, {1, 12, 1806.227074, 956.089604, 1.0}, {2, 12, 1816.619568, 1013.767376, 1.0},
+ {1, 14, 269.544153, 1002.333570, 1.0}, {2, 14, 269.367542, 1043.509254, 1.0}, {1, 15, 1402.772141, 281.392962, 1.0},
+ {2, 15, 1409.089165, 318.731629, 1.0}, {1, 16, 195.877233, 919.454341, 1.0}, {2, 16, 192.531109, 997.367899, 1.0},
+ {1, 17, 1789.584045, 120.036661, 1.0}, {2, 17, 1800.391846, 167.822964, 1.0}, {1, 18, 999.363213, 765.004807, 1.0},
+ {2, 18, 1002.345772, 790.560122, 1.0}, {1, 19, 647.342491, 1044.805727, 1.0}, {2, 19, 649.328041, 1058.682940, 1.0},
+ {1, 20, 1365.486832, 440.901829, 1.0}, {2, 20, 1371.413040, 477.888730, 1.0}, {1, 21, 1787.125282, 301.431606, 1.0},
+ {2, 21, 1798.527260, 355.224531, 1.0}, {1, 22, 1257.805824, 932.797258, 1.0}, {2, 22, 1263.017578, 969.376774, 1.0},
+ {1, 23, 961.969528, 843.148112, 1.0}, {2, 23, 964.869461, 868.587620, 1.0}, {1, 24, 158.076110, 1052.643592, 1.0},
+ {1, 25, 1072.884521, 1005.296981, 1.0}, {2, 25, 1076.091156, 1032.776856, 1.0}, {1, 26, 1107.656937, 526.577228, 1.0},
+ {2, 26, 1111.618423, 555.524454, 1.0}, {1, 27, 1416.410751, 529.857581, 1.0}, {2, 27, 1422.663574, 570.025957, 1.0},
+ {1, 28, 1498.673630, 1005.453086, 1.0}, {2, 28, 1505.381813, 1051.827149, 1.0}, {1, 29, 1428.647804, 652.473629, 1.0},
+ {2, 29, 1434.898224, 692.715390, 1.0}, {1, 30, 1332.318764, 503.673599, 1.0}, {2, 30, 1338.000069, 540.507967, 1.0},
+ {1, 32, 1358.642693, 709.837904, 1.0}, {2, 32, 1364.231529, 748.678265, 1.0}, {1, 33, 1850.911560, 460.475668, 1.0},
+ {2, 33, 1862.221413, 512.797347, 1.0}, {1, 34, 1226.117821, 607.053959, 1.0}, {2, 34, 1230.736084, 641.091449, 1.0},
+ {1, 35, 619.598236, 523.341744, 1.0}, {2, 35, 621.601124, 554.453287, 1.0}, {1, 36, 956.591492, 958.223183, 1.0},
+ {2, 36, 959.289265, 983.289263, 1.0}, {1, 37, 1249.922218, 419.095856, 1.0}, {2, 37, 1255.005913, 452.556177, 1.0},
+ {1, 39, 1300.528450, 386.251166, 1.0}, {2, 39, 1305.957413, 420.185595, 1.0}, {1, 40, 1128.689919, 972.558346, 1.0},
+ {2, 40, 1132.413712, 1003.984737, 1.0}, {1, 41, 503.304749, 1053.504388, 1.0}, {2, 41, 505.019703, 1069.175613, 1.0},
+ {1, 42, 1197.352982, 472.681564, 1.0}, {2, 42, 1201.910706, 503.459880, 1.0}, {1, 43, 1794.391022, 383.911400, 1.0},
+ {2, 43, 1805.324135, 436.116468, 1.0}, {1, 44, 789.641418, 1058.045647, 1.0}, {1, 45, 1376.575241, 928.714979, 1.0},
+ {2, 45, 1381.995850, 969.511957, 1.0}, {1, 46, 1598.023567, 93.975592, 1.0}, {2, 46, 1606.937141, 136.827035, 1.0},
+ {1, 47, 1455.550232, 762.128685, 1.0}, {2, 47, 1462.014313, 805.164878, 1.0}, {1, 48, 1357.123489, 354.460326, 1.0},
+ {2, 48, 1363.071899, 390.363121, 1.0}, {1, 49, 939.792652, 781.549895, 1.0}, {2, 49, 942.802620, 806.164012, 1.0},
+ {1, 50, 1380.251083, 805.948620, 1.0}, {2, 50, 1385.637932, 845.592098, 1.0}, {1, 51, 1021.769943, 1049.802361, 1.0},
+ {1, 52, 1065.634918, 608.099055, 1.0}, {2, 52, 1069.142189, 635.361736, 1.0}, {1, 53, 624.324188, 463.202863, 1.0},
+ {2, 53, 626.395454, 494.994088, 1.0}, {1, 54, 1451.459885, 881.557624, 1.0}, {2, 54, 1457.679634, 924.345531, 1.0},
+ {1, 55, 1201.885986, 1057.079022, 1.0}, {1, 56, 581.157532, 947.661438, 1.0}, {2, 56, 583.242359, 960.831449, 1.0},
+ {1, 58, 513.593102, 954.175858, 1.0}, {2, 58, 515.470047, 971.309574, 1.0}, {1, 59, 928.069038, 901.774421, 1.0},
+ {2, 59, 930.847950, 925.613744, 1.0}, {1, 60, 1065.860023, 740.395389, 1.0}, {2, 60, 1069.484253, 768.971086, 1.0},
+ {1, 61, 990.479393, 906.264632, 1.0}, {2, 61, 993.217506, 933.088803, 1.0}, {1, 62, 1776.196747, 776.278453, 1.0},
+ {2, 62, 1786.292496, 831.136880, 1.0}, {1, 63, 834.454365, 1012.449725, 1.0}, {2, 63, 836.868324, 1033.451807, 1.0},
+ {1, 64, 1355.190697, 869.184809, 1.0}, {2, 64, 1360.736618, 909.773347, 1.0}, {1, 65, 702.072487, 897.519686, 1.0},
+ {2, 65, 704.203377, 911.931131, 1.0}, {1, 66, 1214.022903, 856.199934, 1.0}, {2, 66, 1218.109016, 890.753052, 1.0},
+ {1, 67, 327.676048, 236.814036, 1.0}, {2, 67, 328.335285, 277.251878, 1.0}, {1, 68, 289.064083, 454.793912, 1.0},
+ {2, 68, 288.651924, 498.882444, 1.0}, {1, 69, 1626.240692, 278.374350, 1.0}, {2, 69, 1634.131508, 315.853672, 1.0},
+ {1, 70, 1245.375710, 734.862142, 1.0}, {2, 70, 1250.047417, 769.670885, 1.0}, {1, 71, 497.015305, 510.718904, 1.0},
+ {2, 71, 498.682308, 541.070201, 1.0}, {1, 72, 1280.542030, 153.939185, 1.0}, {2, 72, 1286.993637, 198.436196, 1.0},
+ {1, 73, 1534.748840, 138.601043, 1.0}, {2, 73, 1542.961349, 180.170819, 1.0}, {1, 74, 1477.412682, 200.608061, 1.0},
+ {2, 74, 1484.683914, 240.413260, 1.0}, {1, 76, 450.637321, 407.279642, 1.0}, {2, 76, 451.695642, 441.666291, 1.0},
+ {1, 78, 246.981239, 220.786298, 1.0}, {2, 78, 244.524879, 290.016564, 1.0}, {1, 79, 36.696489, 420.023407, 1.0},
+ {2, 79, 21.364746, 591.245492, 1.0},
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ EXPECT_EQ(2, keyframes.size());
+}
+
+// Elevator scene MMI_2366 with manual tracks, frames 1, 2, 3, 5 and 27
+TEST(KeyframeSelection, ElevatorReconstructionVarianceTest) {
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(1380.000, 1380.000);
+ intrinsics.SetPrincipalPoint(960.000, 540.000);
+ intrinsics.SetRadialDistortion(-0.034, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 0, 182.999997, 1047.000010, 1.0}, {2, 0, 181.475730, 1052.091079, 1.0}, {3, 0, 181.741562, 1057.893341, 1.0},
+ {4, 0, 183.190498, 1068.310440, 1.0}, {1, 1, 271.000013, 666.000009, 1.0}, {2, 1, 270.596180, 668.665760, 1.0},
+ {3, 1, 270.523510, 671.559069, 1.0}, {4, 1, 271.856518, 676.818151, 1.0}, {5, 1, 268.989000, 727.051570, 1.0},
+ {1, 2, 264.999990, 1018.000031, 1.0}, {2, 2, 264.020061, 1021.157591, 1.0}, {3, 2, 264.606056, 1024.823506, 1.0},
+ {4, 2, 266.200933, 1031.168690, 1.0}, {1, 3, 270.000000, 938.000014, 1.0}, {2, 3, 269.022617, 941.153390, 1.0},
+ {3, 3, 269.605579, 944.454954, 1.0}, {4, 3, 271.281366, 949.452167, 1.0}, {5, 3, 268.963480, 1004.417453, 1.0},
+ {1, 4, 200.999994, 799.000003, 1.0}, {2, 4, 199.841366, 803.891838, 1.0}, {3, 4, 200.262208, 809.323246, 1.0},
+ {4, 4, 201.456513, 819.271195, 1.0}, {5, 4, 195.026493, 924.363234, 1.0}, {1, 5, 1775.000038, 49.999998, 1.0},
+ {2, 5, 1775.255127, 53.718264, 1.0}, {3, 5, 1776.449890, 55.951670, 1.0}, {4, 5, 1778.815727, 61.923309, 1.0},
+ {5, 5, 1790.274124, 123.074923, 1.0}, {1, 6, 164.000001, 927.999988, 1.0}, {2, 6, 162.665462, 933.169527, 1.0},
+ {3, 6, 163.067923, 938.577182, 1.0}, {4, 6, 164.370360, 948.840945, 1.0}, {5, 6, 157.199407, 1057.762341, 1.0},
+ {1, 7, 618.000011, 477.999998, 1.0}, {2, 7, 617.583504, 480.124243, 1.0}, {3, 7, 618.356495, 482.441897, 1.0},
+ {4, 7, 619.792500, 486.428132, 1.0}, {5, 7, 619.546051, 525.222627, 1.0}, {1, 8, 499.999981, 1036.999984, 1.0},
+ {2, 8, 499.080162, 1038.720160, 1.0}, {3, 8, 499.949398, 1039.014344, 1.0}, {4, 8, 501.828003, 1041.286647, 1.0},
+ {5, 8, 502.777576, 1055.196369, 1.0}, {1, 9, 1587.000046, 31.999999, 1.0}, {2, 9, 1586.988373, 34.635853, 1.0},
+ {3, 9, 1588.155899, 37.444186, 1.0}, {4, 9, 1589.973106, 42.492081, 1.0}, {5, 9, 1598.683205, 96.526332, 1.0},
+ {1, 10, 622.999992, 416.999999, 1.0}, {2, 10, 622.449017, 419.233485, 1.0}, {3, 10, 623.283234, 421.500703, 1.0},
+ {4, 10, 624.620132, 425.537406, 1.0}, {5, 10, 624.290829, 465.078338, 1.0}, {1, 11, 577.999992, 931.999998, 1.0},
+ {2, 11, 577.042294, 932.872703, 1.0}, {3, 11, 577.832451, 934.045451, 1.0}, {4, 11, 579.729137, 935.735435, 1.0},
+ {5, 11, 580.691242, 948.396256, 1.0}, {1, 12, 510.999985, 931.999998, 1.0}, {2, 12, 510.111237, 933.152146, 1.0},
+ {3, 12, 510.797081, 934.454219, 1.0}, {4, 12, 512.647362, 936.595910, 1.0}, {5, 12, 513.247204, 955.144157, 1.0},
+ {1, 13, 330.459995, 177.059993, 1.0}, {2, 13, 329.876347, 179.615586, 1.0}, {3, 13, 330.681696, 182.757810, 1.0},
+ {4, 13, 331.345053, 187.903853, 1.0}, {5, 13, 327.824135, 239.611639, 1.0}, {1, 14, 291.813097, 388.516195, 1.0},
+ {2, 14, 290.984058, 391.382725, 1.0}, {3, 14, 291.526737, 394.778595, 1.0}, {4, 14, 292.763815, 400.310973, 1.0},
+ {5, 14, 288.714552, 457.548015, 1.0}, {1, 15, 496.491680, 466.534005, 1.0}, {2, 15, 495.909519, 468.518561, 1.0},
+ {3, 15, 496.588383, 470.853596, 1.0}, {4, 15, 497.976780, 474.731458, 1.0}, {5, 15, 496.998882, 512.568694, 1.0},
+ {1, 16, 1273.000031, 89.000000, 1.0}, {2, 16, 1272.951965, 92.003637, 1.0}, {3, 16, 1273.934784, 94.972191, 1.0},
+ {4, 16, 1275.493584, 100.139952, 1.0}, {5, 16, 1281.003571, 156.880163, 1.0}, {1, 17, 1524.713173, 78.852922, 1.0},
+ {2, 17, 1524.782066, 81.427142, 1.0}, {3, 17, 1525.759048, 84.057939, 1.0}, {4, 17, 1527.579689, 88.966550, 1.0},
+ {5, 17, 1535.262451, 141.186054, 1.0}, {1, 18, 1509.425011, 94.371824, 1.0}, {1, 19, 451.000013, 357.000003, 1.0},
+ {2, 19, 450.354881, 359.312410, 1.0}, {3, 19, 451.107473, 361.837088, 1.0}, {4, 19, 452.186537, 366.318061, 1.0},
+ {5, 19, 450.507660, 409.257599, 1.0}, {1, 20, 254.004936, 114.784185, 1.0}, {2, 20, 253.291512, 119.288486, 1.0},
+ {3, 20, 253.745584, 124.114957, 1.0}, {4, 20, 254.453287, 132.795120, 1.0}, {5, 20, 246.772242, 225.165337, 1.0},
+ {1, 21, 65.262880, 147.889409, 1.0}, {2, 21, 63.634465, 157.656807, 1.0}, {3, 21, 63.306799, 169.067053, 1.0},
+ {4, 21, 62.462311, 189.724241, 1.0}, {5, 21, 35.396615, 430.308380, 1.0},
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ vector<int> keyframes;
+ SelectKeyframesBasedOnGRICAndVariance(tracks, intrinsics, keyframes);
+
+ EXPECT_EQ(2, keyframes.size());
+ if (keyframes.size() == 2) {
+ EXPECT_EQ(1, keyframes[0]);
+ EXPECT_EQ(5, keyframes[1]);
+ }
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver.cc b/intern/libmv/libmv/simple_pipeline/modal_solver.cc
new file mode 100644
index 00000000000..15e185eeda7
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver.cc
@@ -0,0 +1,251 @@
+// Copyright (c) 2015 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/modal_solver.h"
+
+#include <cstdio>
+
+#include "ceres/ceres.h"
+#include "ceres/rotation.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/panography.h"
+
+#ifdef _MSC_VER
+# define snprintf _snprintf
+#endif
+
+namespace libmv {
+
+namespace {
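+// Place a marker's 2D position on a sphere of radius 5 around the camera
+// center; for the modal (tripod) solver only the bearing matters, so the
+// radius just fixes an arbitrary scene scale.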
+void ProjectMarkerOnSphere(const Marker &marker, Vec3 &X) {
+ X(0) = marker.x;
+ X(1) = marker.y;
+ X(2) = 1.0;
+
+ X *= 5.0 / X.norm();
+}
+
+void ModalSolverLogProress(ProgressUpdateCallback *update_callback,
+ double progress) {
+ if (update_callback) {
+ char message[256];
+
+ snprintf(message, sizeof(message), "Solving progress %d%%",
+ (int)(progress * 100));
+
+ update_callback->invoke(progress, message);
+ }
+}
+
+struct ModalReprojectionError {
+ ModalReprojectionError(double observed_x,
+ double observed_y,
+ const double weight,
+ const Vec3 &bundle)
+ : observed_x_(observed_x), observed_y_(observed_y),
+ weight_(weight), bundle_(bundle) { }
+
+ // TODO(keir): This should support bundling focal length as well.
+ template <typename T>
+ bool operator()(const T* quaternion, T* residuals) const {
+ // Convert bundle position from double to T.
+ T X[3] = { T(bundle_(0)), T(bundle_(1)), T(bundle_(2)) };
+
+ // Compute the point position in camera coordinates: x = RX.
+ T x[3];
+
+ // This flips the sense of the quaternion, to adhere to Blender conventions.
+ T quaternion_inverse[4] = {
+ quaternion[0],
+ -quaternion[1],
+ -quaternion[2],
+ -quaternion[3],
+ };
+ ceres::QuaternionRotatePoint(quaternion_inverse, X, x);
+
+ // Compute normalized coordinates by dividing out the depth.
+ T xn = x[0] / x[2];
+ T yn = x[1] / x[2];
+
+ // The error is the difference between reprojected and observed marker
+ // positions, weighted by the passed in weight.
+ residuals[0] = T(weight_) * (xn - T(observed_x_));
+ residuals[1] = T(weight_) * (yn - T(observed_y_));
+
+ return true;
+ }
+
+ double observed_x_;
+ double observed_y_;
+ double weight_;
+ Vec3 bundle_;
+};
+} // namespace
+
+void ModalSolver(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction,
+ ProgressUpdateCallback *update_callback) {
+ int max_image = tracks.MaxImage();
+ int max_track = tracks.MaxTrack();
+
+ LG << "Max image: " << max_image;
+ LG << "Max track: " << max_track;
+
+ // For minimization we're using quaternions.
+ Vec3 zero_rotation = Vec3::Zero();
+ Vec4 quaternion;
+ ceres::AngleAxisToQuaternion(&zero_rotation(0), &quaternion(0));
+
+ for (int image = 0; image <= max_image; ++image) {
+ vector<Marker> all_markers = tracks.MarkersInImage(image);
+
+    ModalSolverLogProgress(update_callback, (float) image / max_image);
+
+ // Skip empty images without doing anything.
+ if (all_markers.size() == 0) {
+ LG << "Skipping image: " << image;
+ continue;
+ }
+
+ // STEP 1: Estimate rotation analytically.
+ Mat3 current_R;
+ ceres::QuaternionToRotation(&quaternion(0), &current_R(0, 0));
+
+    // Construct point clouds for the current and previous images,
+    // using markers that appear in the current image and for which
+    // we know the 3D positions.
+ Mat x1, x2;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ Marker &marker = all_markers[i];
+ EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+ if (point) {
+ Vec3 X;
+ ProjectMarkerOnSphere(marker, X);
+
+ int last_column = x1.cols();
+ x1.conservativeResize(3, last_column + 1);
+ x2.conservativeResize(3, last_column + 1);
+
+ x1.col(last_column) = current_R * point->X;
+ x2.col(last_column) = X;
+ }
+ }
+
+ if (x1.cols() >= 2) {
+ Mat3 delta_R;
+
+      // Compute the delta rotation matrix between the two point clouds.
+      // The argument order may look swapped at first glance, but it is
+      // intentional.
+ GetR_FixedCameraCenter(x2, x1, 1.0, &delta_R);
+
+      // Convert the delta rotation from a matrix to the final image
+      // rotation, stored as a quaternion.
+ Vec3 delta_angle_axis;
+ ceres::RotationMatrixToAngleAxis(&delta_R(0, 0), &delta_angle_axis(0));
+
+ Vec3 current_angle_axis;
+ ceres::QuaternionToAngleAxis(&quaternion(0), &current_angle_axis(0));
+
+ Vec3 angle_axis = current_angle_axis + delta_angle_axis;
+
+ ceres::AngleAxisToQuaternion(&angle_axis(0), &quaternion(0));
+
+ LG << "Analytically computed quaternion "
+ << quaternion.transpose();
+ }
+
+ // STEP 2: Refine rotation with Ceres.
+ ceres::Problem problem;
+
+ ceres::LocalParameterization* quaternion_parameterization =
+ new ceres::QuaternionParameterization;
+
+ int num_residuals = 0;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ Marker &marker = all_markers[i];
+ EuclideanPoint *point = reconstruction->PointForTrack(marker.track);
+
+ if (point && marker.weight != 0.0) {
+ problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
+ ModalReprojectionError,
+ 2, /* num_residuals */
+ 4>(new ModalReprojectionError(marker.x,
+ marker.y,
+ marker.weight,
+ point->X)),
+ NULL,
+ &quaternion(0));
+ num_residuals++;
+
+ problem.SetParameterization(&quaternion(0),
+ quaternion_parameterization);
+ }
+ }
+
+ LG << "Number of residuals: " << num_residuals;
+
+ if (num_residuals) {
+ // Configure the solve.
+ ceres::Solver::Options solver_options;
+ solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.max_num_iterations = 50;
+ solver_options.update_state_every_iteration = true;
+ solver_options.gradient_tolerance = 1e-36;
+ solver_options.parameter_tolerance = 1e-36;
+ solver_options.function_tolerance = 1e-36;
+
+ // Run the solve.
+ ceres::Solver::Summary summary;
+ ceres::Solve(solver_options, &problem, &summary);
+
+ LG << "Summary:\n" << summary.FullReport();
+ LG << "Refined quaternion " << quaternion.transpose();
+ }
+
+ // Convert quaternion to rotation matrix.
+ Mat3 R;
+ ceres::QuaternionToRotation(&quaternion(0), &R(0, 0));
+ reconstruction->InsertCamera(image, R, Vec3::Zero());
+
+    // STEP 3: Project all new markers that appeared in this image.
+
+    // Check whether new markers appeared in the current image and
+    // project them onto the sphere to obtain their 3D positions.
+ for (int track = 0; track <= max_track; ++track) {
+ if (!reconstruction->PointForTrack(track)) {
+ Marker marker = tracks.MarkerInImageForTrack(image, track);
+
+ if (marker.image == image) {
+          // A new track appeared in this image;
+          // project its position onto the sphere.
+
+ LG << "Projecting track " << track << " at image " << image;
+
+ Vec3 X;
+ ProjectMarkerOnSphere(marker, X);
+ reconstruction->InsertPoint(track, R.inverse() * X);
+ }
+ }
+ }
+ }
+}
+
+} // namespace libmv
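
The step-3 projection above relies on a simple invariant: a bundle stored as R.inverse() * X reprojects back onto the original normalized marker once the per-frame rotation R is applied. The following standalone sketch checks that invariant; it assumes only Eigen (which backs libmv's Mat3/Vec3 types), and the main() program is illustrative rather than part of libmv.

// Sketch only: verifies the sphere-projection invariant used by ModalSolver.
#include <Eigen/Dense>
#include <cassert>
#include <cmath>

int main() {
  // A marker in normalized coordinates, pushed onto the radius-5 sphere as in
  // ProjectMarkerOnSphere().
  Eigen::Vector3d X(0.1, -0.2, 1.0);
  X *= 5.0 / X.norm();

  // An arbitrary per-frame camera rotation.
  Eigen::Matrix3d R =
      Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitY()).toRotationMatrix();

  // What InsertPoint() stores for a new track, and how it reprojects.
  Eigen::Vector3d bundle = R.inverse() * X;
  Eigen::Vector3d x = R * bundle;

  // The reprojection matches the original normalized marker position.
  assert(std::abs(x.x() / x.z() - 0.1) < 1e-12);
  assert(std::abs(x.y() / x.z() + 0.2) < 1e-12);
  return 0;
}
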
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver.h b/intern/libmv/libmv/simple_pipeline/modal_solver.h
new file mode 100644
index 00000000000..9801fd21d81
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2012 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
+#define LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
+
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/callbacks.h"
+
+namespace libmv {
+
+/*!
+  This solver handles camera motion such as tripod rotation, reconstructing
+  only the camera motion itself. Bundles are not reconstructed properly;
+  they are simply projected onto a sphere.
+
+  Markers from the tracks object are used for the reconstruction, and the
+  algorithm assumes their positions are already undistorted and expressed
+  in normalized space.
+
+  Reconstructed cameras and projected bundles are added to the
+  reconstruction object.
+*/
+void ModalSolver(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction,
+ ProgressUpdateCallback *update_callback = NULL);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_MODAL_SOLVER_H_
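
A short usage sketch for the solver declared above. It assumes ProgressUpdateCallback (from callbacks.h) exposes the virtual invoke(double progress, const char *message) method used by the solver's progress reporting; the PrintProgress class and SolveTripodShot helper are illustrative names, not part of libmv.

// Sketch only: feeding ModalSolver a progress callback.
#include <cstdio>

#include "libmv/simple_pipeline/modal_solver.h"

namespace {

class PrintProgress : public libmv::ProgressUpdateCallback {
 public:
  // Assumed signature, matching the invoke(progress, message) calls above.
  void invoke(double progress, const char *message) {
    std::printf("%3d%% %s\n", static_cast<int>(progress * 100), message);
  }
};

}  // namespace

void SolveTripodShot(const libmv::Tracks &normalized_tracks,
                     libmv::EuclideanReconstruction *reconstruction) {
  PrintProgress progress_printer;
  // Markers in normalized_tracks are assumed undistorted and in normalized
  // space, as documented above.
  libmv::ModalSolver(normalized_tracks, reconstruction, &progress_printer);
}
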
diff --git a/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc b/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc
new file mode 100644
index 00000000000..8b87acd95bb
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/modal_solver_test.cc
@@ -0,0 +1,79 @@
+// Copyright (c) 2013 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/modal_solver.h"
+
+#include "testing/testing.h"
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#include <stdio.h>
+
+namespace libmv {
+
+TEST(ModalSolver, SyntheticCubeSceneMotion) {
+ double kTolerance = 1e-8;
+
+ PolynomialCameraIntrinsics intrinsics;
+ intrinsics.SetFocalLength(658.286, 658.286);
+ intrinsics.SetPrincipalPoint(480.0, 270.0);
+ intrinsics.SetRadialDistortion(0.0, 0.0, 0.0);
+
+ Marker markers[] = {
+ {1, 0, 212.172775, 354.713538, 1.0}, {2, 0, 773.468399, 358.735306, 1.0},
+ {1, 1, 62.415197, 287.905354, 1.0}, {2, 1, 619.103336, 324.402537, 1.0},
+ {1, 2, 206.847939, 237.567925, 1.0}, {2, 2, 737.496986, 247.881383, 1.0},
+ {1, 3, 351.743889, 316.415906, 1.0}, {2, 3, 908.779621, 290.703617, 1.0},
+ {1, 4, 232.941413, 54.265443, 1.0}, {2, 4, 719.444847, 63.062531, 1.0},
+ {1, 5, 96.391611, 119.283537, 1.0}, {2, 5, 611.413136, 160.890715, 1.0},
+ {1, 6, 363.444958, 150.838144, 1.0}, {2, 6, 876.374531, 114.916206, 1.0},
+ };
+ int num_markers = sizeof(markers) / sizeof(Marker);
+
+ Tracks tracks;
+ for (int i = 0; i < num_markers; i++) {
+ double x = markers[i].x, y = markers[i].y;
+ intrinsics.InvertIntrinsics(x, y, &x, &y);
+ tracks.Insert(markers[i].image, markers[i].track, x, y);
+ }
+
+ EuclideanReconstruction reconstruction;
+ ModalSolver(tracks, &reconstruction);
+ EuclideanBundleCommonIntrinsics(tracks,
+ BUNDLE_NO_INTRINSICS,
+ BUNDLE_NO_TRANSLATION,
+ &reconstruction,
+ &intrinsics,
+ NULL);
+
+ Mat3 expected_rotation;
+ expected_rotation << 0.98215101299251, 0.17798357184544, 0.06083778292258,
+ -0.16875286001759, 0.97665299913606, -0.13293378620359,
+ -0.08307743323957, 0.12029450291547, 0.98925596922871;
+
+ Mat3 &first_camera_R = reconstruction.CameraForImage(1)->R;
+ Mat3 &second_camera_R = reconstruction.CameraForImage(2)->R;
+
+ EXPECT_TRUE(Mat3::Identity().isApprox(first_camera_R, kTolerance));
+ EXPECT_TRUE(expected_rotation.isApprox(second_camera_R, kTolerance));
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/pipeline.cc b/intern/libmv/libmv/simple_pipeline/pipeline.cc
new file mode 100644
index 00000000000..6c8592baa00
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/pipeline.cc
@@ -0,0 +1,368 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/pipeline.h"
+
+#include <cstdio>
+
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/bundle.h"
+#include "libmv/simple_pipeline/intersect.h"
+#include "libmv/simple_pipeline/resect.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+
+#ifdef _MSC_VER
+# define snprintf _snprintf
+#endif
+
+namespace libmv {
+namespace {
+
+// These are "strategy" classes which make it possible to use the same code
+// for both projective and Euclidean reconstruction.
+// FIXME(MatthiasF): OOP would achieve the same goal while avoiding
+// template bloat and making interface changes much easier.
+struct EuclideanPipelineRoutines {
+ typedef EuclideanReconstruction Reconstruction;
+ typedef EuclideanCamera Camera;
+ typedef EuclideanPoint Point;
+
+ static void Bundle(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction) {
+ EuclideanBundle(tracks, reconstruction);
+ }
+
+ static bool Resect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass) {
+ return EuclideanResect(markers, reconstruction, final_pass);
+ }
+
+ static bool Intersect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction) {
+ return EuclideanIntersect(markers, reconstruction);
+ }
+
+ static Marker ProjectMarker(const EuclideanPoint &point,
+ const EuclideanCamera &camera,
+ const CameraIntrinsics &intrinsics) {
+ Vec3 projected = camera.R * point.X + camera.t;
+ projected /= projected(2);
+
+ Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0),
+ projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+ return reprojected_marker;
+ }
+};
+
+struct ProjectivePipelineRoutines {
+ typedef ProjectiveReconstruction Reconstruction;
+ typedef ProjectiveCamera Camera;
+ typedef ProjectivePoint Point;
+
+ static void Bundle(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction) {
+ ProjectiveBundle(tracks, reconstruction);
+ }
+
+ static bool Resect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction, bool final_pass) {
+ (void) final_pass; // Ignored.
+
+ return ProjectiveResect(markers, reconstruction);
+ }
+
+ static bool Intersect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ return ProjectiveIntersect(markers, reconstruction);
+ }
+
+ static Marker ProjectMarker(const ProjectivePoint &point,
+ const ProjectiveCamera &camera,
+ const CameraIntrinsics &intrinsics) {
+ Vec3 projected = camera.P * point.X;
+ projected /= projected(2);
+
+ Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0),
+ projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+ return reprojected_marker;
+ }
+};
+
+} // namespace
+
+static void CompleteReconstructionLogProgress(
+ ProgressUpdateCallback *update_callback,
+ double progress,
+ const char *step = NULL) {
+ if (update_callback) {
+ char message[256];
+
+ if (step)
+ snprintf(message, sizeof(message), "Completing solution %d%% | %s",
+ (int)(progress*100), step);
+ else
+ snprintf(message, sizeof(message), "Completing solution %d%%",
+ (int)(progress*100));
+
+ update_callback->invoke(progress, message);
+ }
+}
+
+template<typename PipelineRoutines>
+void InternalCompleteReconstruction(
+ const Tracks &tracks,
+ typename PipelineRoutines::Reconstruction *reconstruction,
+ ProgressUpdateCallback *update_callback = NULL) {
+ int max_track = tracks.MaxTrack();
+ int max_image = tracks.MaxImage();
+ int num_resects = -1;
+ int num_intersects = -1;
+ int tot_resects = 0;
+ LG << "Max track: " << max_track;
+ LG << "Max image: " << max_image;
+ LG << "Number of markers: " << tracks.NumMarkers();
+ while (num_resects != 0 || num_intersects != 0) {
+ // Do all possible intersections.
+ num_intersects = 0;
+ for (int track = 0; track <= max_track; ++track) {
+ if (reconstruction->PointForTrack(track)) {
+ LG << "Skipping point: " << track;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersForTrack(track);
+ LG << "Got " << all_markers.size() << " markers for track " << track;
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->CameraForImage(all_markers[i].image)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ LG << "Got " << reconstructed_markers.size()
+ << " reconstructed markers for track " << track;
+ if (reconstructed_markers.size() >= 2) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image));
+ if (PipelineRoutines::Intersect(reconstructed_markers,
+ reconstruction)) {
+ num_intersects++;
+ LG << "Ran Intersect() for track " << track;
+ } else {
+ LG << "Failed Intersect() for track " << track;
+ }
+ }
+ }
+ if (num_intersects) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image),
+ "Bundling...");
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ LG << "Ran Bundle() after intersections.";
+ }
+ LG << "Did " << num_intersects << " intersects.";
+
+ // Do all possible resections.
+ num_resects = 0;
+ for (int image = 0; image <= max_image; ++image) {
+ if (reconstruction->CameraForImage(image)) {
+ LG << "Skipping frame: " << image;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersInImage(image);
+ LG << "Got " << all_markers.size() << " markers for image " << image;
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->PointForTrack(all_markers[i].track)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ LG << "Got " << reconstructed_markers.size()
+ << " reconstructed markers for image " << image;
+ if (reconstructed_markers.size() >= 5) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image));
+ if (PipelineRoutines::Resect(reconstructed_markers,
+ reconstruction, false)) {
+ num_resects++;
+ tot_resects++;
+ LG << "Ran Resect() for image " << image;
+ } else {
+ LG << "Failed Resect() for image " << image;
+ }
+ }
+ }
+ if (num_resects) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image),
+ "Bundling...");
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ }
+ LG << "Did " << num_resects << " resects.";
+ }
+
+ // One last pass...
+ num_resects = 0;
+ for (int image = 0; image <= max_image; ++image) {
+ if (reconstruction->CameraForImage(image)) {
+ LG << "Skipping frame: " << image;
+ continue;
+ }
+ vector<Marker> all_markers = tracks.MarkersInImage(image);
+
+ vector<Marker> reconstructed_markers;
+ for (int i = 0; i < all_markers.size(); ++i) {
+ if (reconstruction->PointForTrack(all_markers[i].track)) {
+ reconstructed_markers.push_back(all_markers[i]);
+ }
+ }
+ if (reconstructed_markers.size() >= 5) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image));
+ if (PipelineRoutines::Resect(reconstructed_markers,
+ reconstruction, true)) {
+ num_resects++;
+ LG << "Ran final Resect() for image " << image;
+ } else {
+ LG << "Failed final Resect() for image " << image;
+ }
+ }
+ }
+ if (num_resects) {
+ CompleteReconstructionLogProgress(update_callback,
+ (double)tot_resects/(max_image),
+ "Bundling...");
+ PipelineRoutines::Bundle(tracks, reconstruction);
+ }
+}
+
+template<typename PipelineRoutines>
+double InternalReprojectionError(
+ const Tracks &image_tracks,
+ const typename PipelineRoutines::Reconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ int num_skipped = 0;
+ int num_reprojected = 0;
+ double total_error = 0.0;
+ vector<Marker> markers = image_tracks.AllMarkers();
+ for (int i = 0; i < markers.size(); ++i) {
+ double weight = markers[i].weight;
+ const typename PipelineRoutines::Camera *camera =
+ reconstruction.CameraForImage(markers[i].image);
+ const typename PipelineRoutines::Point *point =
+ reconstruction.PointForTrack(markers[i].track);
+ if (!camera || !point || weight == 0.0) {
+ num_skipped++;
+ continue;
+ }
+ num_reprojected++;
+
+ Marker reprojected_marker =
+ PipelineRoutines::ProjectMarker(*point, *camera, intrinsics);
+ double ex = (reprojected_marker.x - markers[i].x) * weight;
+ double ey = (reprojected_marker.y - markers[i].y) * weight;
+
+ const int N = 100;
+ char line[N];
+ snprintf(line, N,
+ "image %-3d track %-3d "
+ "x %7.1f y %7.1f "
+ "rx %7.1f ry %7.1f "
+ "ex %7.1f ey %7.1f"
+ " e %7.1f",
+ markers[i].image,
+ markers[i].track,
+ markers[i].x,
+ markers[i].y,
+ reprojected_marker.x,
+ reprojected_marker.y,
+ ex,
+ ey,
+ sqrt(ex*ex + ey*ey));
+ VLOG(1) << line;
+
+ total_error += sqrt(ex*ex + ey*ey);
+ }
+ LG << "Skipped " << num_skipped << " markers.";
+ LG << "Reprojected " << num_reprojected << " markers.";
+ LG << "Total error: " << total_error;
+ LG << "Average error: " << (total_error / num_reprojected) << " [pixels].";
+ return total_error / num_reprojected;
+}
+
+double EuclideanReprojectionError(const Tracks &image_tracks,
+ const EuclideanReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ return InternalReprojectionError<EuclideanPipelineRoutines>(image_tracks,
+ reconstruction,
+ intrinsics);
+}
+
+double ProjectiveReprojectionError(
+ const Tracks &image_tracks,
+ const ProjectiveReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics) {
+ return InternalReprojectionError<ProjectivePipelineRoutines>(image_tracks,
+ reconstruction,
+ intrinsics);
+}
+
+void EuclideanCompleteReconstruction(const Tracks &tracks,
+ EuclideanReconstruction *reconstruction,
+ ProgressUpdateCallback *update_callback) {
+ InternalCompleteReconstruction<EuclideanPipelineRoutines>(tracks,
+ reconstruction,
+ update_callback);
+}
+
+void ProjectiveCompleteReconstruction(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction) {
+ InternalCompleteReconstruction<ProjectivePipelineRoutines>(tracks,
+ reconstruction);
+}
+
+void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *calibrated_tracks) {
+ vector<Marker> markers = raw_tracks.AllMarkers();
+ for (int i = 0; i < markers.size(); ++i) {
+ camera_intrinsics.InvertIntrinsics(markers[i].x,
+ markers[i].y,
+ &(markers[i].x),
+ &(markers[i].y));
+ }
+ *calibrated_tracks = Tracks(markers);
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/pipeline.h b/intern/libmv/libmv/simple_pipeline/pipeline.h
new file mode 100644
index 00000000000..4d1bd00c51f
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/pipeline.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
+#define LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
+
+#include "libmv/simple_pipeline/callbacks.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate camera poses and scene 3D coordinates for all frames and tracks.
+
+ This method should be used once there is an initial reconstruction in
+ place, for example by reconstructing from two frames that have a sufficient
+  baseline and number of tracks in common. This function iteratively
+  triangulates points that are visible to cameras whose poses have been
+  estimated, then resections (i.e. estimates the pose of) cameras that are
+  not yet estimated but can see triangulated points. This process is
+ repeated until all points and cameras are estimated. Periodically, bundle
+ adjustment is run to ensure a quality reconstruction.
+
+ \a tracks should contain markers used in the reconstruction.
+ \a reconstruction should contain at least some 3D points or some estimated
+ cameras. The minimum number of cameras is two (with no 3D points) and the
+ minimum number of 3D points (with no estimated cameras) is 5.
+
+ \sa EuclideanResect, EuclideanIntersect, EuclideanBundle
+*/
+void EuclideanCompleteReconstruction(
+ const Tracks &tracks,
+ EuclideanReconstruction *reconstruction,
+ ProgressUpdateCallback *update_callback = NULL);
+
+/*!
+ Estimate camera matrices and homogeneous 3D coordinates for all frames and
+ tracks.
+
+ This method should be used once there is an initial reconstruction in
+ place, for example by reconstructing from two frames that have a sufficient
+  baseline and number of tracks in common. This function iteratively
+  triangulates points that are visible to cameras whose poses have been
+  estimated, then resections (i.e. estimates the pose of) cameras that are
+  not yet estimated but can see triangulated points. This process is
+ repeated until all points and cameras are estimated. Periodically, bundle
+ adjustment is run to ensure a quality reconstruction.
+
+ \a tracks should contain markers used in the reconstruction.
+ \a reconstruction should contain at least some 3D points or some estimated
+ cameras. The minimum number of cameras is two (with no 3D points) and the
+ minimum number of 3D points (with no estimated cameras) is 5.
+
+ \sa ProjectiveResect, ProjectiveIntersect, ProjectiveBundle
+*/
+void ProjectiveCompleteReconstruction(const Tracks &tracks,
+ ProjectiveReconstruction *reconstruction);
+
+
+class CameraIntrinsics;
+
+// TODO(keir): Decide if we want these in the public API, and if so, what the
+// appropriate include file is.
+
+double EuclideanReprojectionError(const Tracks &image_tracks,
+ const EuclideanReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics);
+
+double ProjectiveReprojectionError(
+ const Tracks &image_tracks,
+ const ProjectiveReconstruction &reconstruction,
+ const CameraIntrinsics &intrinsics);
+
+void InvertIntrinsicsForTracks(const Tracks &raw_tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *calibrated_tracks);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_PIPELINE_H_
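
A usage sketch for the Euclidean entry point documented above, assuming the reconstruction has already been seeded with an initial two-frame solution (keyframe selection and the initial solve live elsewhere). The CompleteAndReport helper is an illustrative name; it also shows InvertIntrinsicsForTracks() satisfying the normalized-space requirement, and it assumes EuclideanReprojectionError() is fed the original pixel-space tracks so the reported error is in pixels.

// Sketch only: completing a seeded Euclidean reconstruction and reporting the
// average reprojection error.
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/pipeline.h"

double CompleteAndReport(const libmv::Tracks &raw_tracks,
                         const libmv::CameraIntrinsics &intrinsics,
                         libmv::EuclideanReconstruction *reconstruction) {
  // The solvers expect markers in normalized space, so undo the intrinsics
  // first.
  libmv::Tracks normalized_tracks;
  libmv::InvertIntrinsicsForTracks(raw_tracks, intrinsics, &normalized_tracks);

  // Precondition (see the documentation above): *reconstruction already holds
  // an initial solution, e.g. two camera poses from a two-frame solve.
  libmv::EuclideanCompleteReconstruction(normalized_tracks, reconstruction);

  // ProjectMarker() applies the intrinsics to the predicted positions, so the
  // error is measured against the original pixel-space tracks.
  return libmv::EuclideanReprojectionError(raw_tracks, *reconstruction,
                                           intrinsics);
}
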
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.cc b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
new file mode 100644
index 00000000000..65e5dd27d5d
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+EuclideanReconstruction::EuclideanReconstruction() {}
+EuclideanReconstruction::EuclideanReconstruction(
+ const EuclideanReconstruction &other) {
+ cameras_ = other.cameras_;
+ points_ = other.points_;
+}
+
+EuclideanReconstruction &EuclideanReconstruction::operator=(
+ const EuclideanReconstruction &other) {
+ if (&other != this) {
+ cameras_ = other.cameras_;
+ points_ = other.points_;
+ }
+ return *this;
+}
+
+void EuclideanReconstruction::InsertCamera(int image,
+ const Mat3 &R,
+ const Vec3 &t) {
+ LG << "InsertCamera " << image << ":\nR:\n"<< R << "\nt:\n" << t;
+ if (image >= cameras_.size()) {
+ cameras_.resize(image + 1);
+ }
+ cameras_[image].image = image;
+ cameras_[image].R = R;
+ cameras_[image].t = t;
+}
+
+void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
+ LG << "InsertPoint " << track << ":\n" << X;
+ if (track >= points_.size()) {
+ points_.resize(track + 1);
+ }
+ points_[track].track = track;
+ points_[track].X = X;
+}
+
+EuclideanCamera *EuclideanReconstruction::CameraForImage(int image) {
+ return const_cast<EuclideanCamera *>(
+ static_cast<const EuclideanReconstruction *>(
+ this)->CameraForImage(image));
+}
+
+const EuclideanCamera *EuclideanReconstruction::CameraForImage(
+ int image) const {
+ if (image < 0 || image >= cameras_.size()) {
+ return NULL;
+ }
+ const EuclideanCamera *camera = &cameras_[image];
+ if (camera->image == -1) {
+ return NULL;
+ }
+ return camera;
+}
+
+vector<EuclideanCamera> EuclideanReconstruction::AllCameras() const {
+ vector<EuclideanCamera> cameras;
+ for (int i = 0; i < cameras_.size(); ++i) {
+ if (cameras_[i].image != -1) {
+ cameras.push_back(cameras_[i]);
+ }
+ }
+ return cameras;
+}
+
+EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) {
+ return const_cast<EuclideanPoint *>(
+ static_cast<const EuclideanReconstruction *>(this)->PointForTrack(track));
+}
+
+const EuclideanPoint *EuclideanReconstruction::PointForTrack(int track) const {
+ if (track < 0 || track >= points_.size()) {
+ return NULL;
+ }
+ const EuclideanPoint *point = &points_[track];
+ if (point->track == -1) {
+ return NULL;
+ }
+ return point;
+}
+
+vector<EuclideanPoint> EuclideanReconstruction::AllPoints() const {
+ vector<EuclideanPoint> points;
+ for (int i = 0; i < points_.size(); ++i) {
+ if (points_[i].track != -1) {
+ points.push_back(points_[i]);
+ }
+ }
+ return points;
+}
+
+void ProjectiveReconstruction::InsertCamera(int image,
+ const Mat34 &P) {
+ LG << "InsertCamera " << image << ":\nP:\n"<< P;
+ if (image >= cameras_.size()) {
+ cameras_.resize(image + 1);
+ }
+ cameras_[image].image = image;
+ cameras_[image].P = P;
+}
+
+void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
+ LG << "InsertPoint " << track << ":\n" << X;
+ if (track >= points_.size()) {
+ points_.resize(track + 1);
+ }
+ points_[track].track = track;
+ points_[track].X = X;
+}
+
+ProjectiveCamera *ProjectiveReconstruction::CameraForImage(int image) {
+ return const_cast<ProjectiveCamera *>(
+ static_cast<const ProjectiveReconstruction *>(
+ this)->CameraForImage(image));
+}
+
+const ProjectiveCamera *ProjectiveReconstruction::CameraForImage(
+ int image) const {
+ if (image < 0 || image >= cameras_.size()) {
+ return NULL;
+ }
+ const ProjectiveCamera *camera = &cameras_[image];
+ if (camera->image == -1) {
+ return NULL;
+ }
+ return camera;
+}
+
+vector<ProjectiveCamera> ProjectiveReconstruction::AllCameras() const {
+ vector<ProjectiveCamera> cameras;
+ for (int i = 0; i < cameras_.size(); ++i) {
+ if (cameras_[i].image != -1) {
+ cameras.push_back(cameras_[i]);
+ }
+ }
+ return cameras;
+}
+
+ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) {
+ return const_cast<ProjectivePoint *>(
+ static_cast<const ProjectiveReconstruction *>(this)->PointForTrack(track));
+}
+
+const ProjectivePoint *ProjectiveReconstruction::PointForTrack(int track) const {
+ if (track < 0 || track >= points_.size()) {
+ return NULL;
+ }
+ const ProjectivePoint *point = &points_[track];
+ if (point->track == -1) {
+ return NULL;
+ }
+ return point;
+}
+
+vector<ProjectivePoint> ProjectiveReconstruction::AllPoints() const {
+ vector<ProjectivePoint> points;
+ for (int i = 0; i < points_.size(); ++i) {
+ if (points_[i].track != -1) {
+ points.push_back(points_[i]);
+ }
+ }
+ return points;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.h b/intern/libmv/libmv/simple_pipeline/reconstruction.h
new file mode 100644
index 00000000000..947a0636476
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.h
@@ -0,0 +1,217 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
+#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+/*!
+ A EuclideanCamera is the location and rotation of the camera viewing \a image.
+
+  \a image identifies which image from \l Tracks this camera represents.
+  \a R is a 3x3 matrix representing the rotation of the camera.
+  \a t is a translation vector representing its position.
+
+ \sa Reconstruction
+*/
+struct EuclideanCamera {
+ EuclideanCamera() : image(-1) {}
+ EuclideanCamera(const EuclideanCamera &c) : image(c.image), R(c.R), t(c.t) {}
+
+ int image;
+ Mat3 R;
+ Vec3 t;
+};
+
+/*!
+ A Point is the 3D location of a track.
+
+  \a track identifies which track from \l Tracks this point corresponds to.
+ \a X represents the 3D position of the track.
+
+ \sa Reconstruction
+*/
+struct EuclideanPoint {
+ EuclideanPoint() : track(-1) {}
+ EuclideanPoint(const EuclideanPoint &p) : track(p.track), X(p.X) {}
+ int track;
+ Vec3 X;
+};
+
+/*!
+ The EuclideanReconstruction class stores \link EuclideanCamera cameras
+ \endlink and \link EuclideanPoint points \endlink.
+
+ The EuclideanReconstruction container is intended as the store of 3D
+ reconstruction data to be used with the MultiView API.
+
+ The container has lookups to query a \a EuclideanCamera from an \a image or
+ a \a EuclideanPoint from a \a track.
+
+ \sa Camera, Point
+*/
+class EuclideanReconstruction {
+ public:
+ // Default constructor starts with no cameras.
+ EuclideanReconstruction();
+
+ /// Copy constructor.
+ EuclideanReconstruction(const EuclideanReconstruction &other);
+
+ EuclideanReconstruction &operator=(const EuclideanReconstruction &other);
+
+ /*!
+ Insert a camera into the set. If there is already a camera for the given
+ \a image, the existing camera is replaced. If there is no camera for the
+ given \a image, a new one is added.
+
+ \a image is the key used to retrieve the cameras with the other methods
+ in this class.
+
+ \note You should use the same \a image identifier as in \l Tracks.
+ */
+ void InsertCamera(int image, const Mat3 &R, const Vec3 &t);
+
+ /*!
+ Insert a point into the reconstruction. If there is already a point for
+ the given \a track, the existing point is replaced. If there is no point
+ for the given \a track, a new one is added.
+
+ \a track is the key used to retrieve the points with the
+ other methods in this class.
+
+ \note You should use the same \a track identifier as in \l Tracks.
+ */
+ void InsertPoint(int track, const Vec3 &X);
+
+ /// Returns a pointer to the camera corresponding to \a image.
+ EuclideanCamera *CameraForImage(int image);
+ const EuclideanCamera *CameraForImage(int image) const;
+
+ /// Returns all cameras.
+ vector<EuclideanCamera> AllCameras() const;
+
+ /// Returns a pointer to the point corresponding to \a track.
+ EuclideanPoint *PointForTrack(int track);
+ const EuclideanPoint *PointForTrack(int track) const;
+
+ /// Returns all points.
+ vector<EuclideanPoint> AllPoints() const;
+
+ private:
+ vector<EuclideanCamera> cameras_;
+ vector<EuclideanPoint> points_;
+};
+
+/*!
+ A ProjectiveCamera is the projection matrix for the camera of \a image.
+
+  \a image identifies which image from \l Tracks this camera represents.
+ \a P is the 3x4 projection matrix.
+
+ \sa ProjectiveReconstruction
+*/
+struct ProjectiveCamera {
+ ProjectiveCamera() : image(-1) {}
+ ProjectiveCamera(const ProjectiveCamera &c) : image(c.image), P(c.P) {}
+
+ int image;
+ Mat34 P;
+};
+
+/*!
+ A Point is the 3D location of a track.
+
+ \a track identifies which track from \l Tracks this point corresponds to.
+ \a X is the homogeneous 3D position of the track.
+
+ \sa Reconstruction
+*/
+struct ProjectivePoint {
+ ProjectivePoint() : track(-1) {}
+ ProjectivePoint(const ProjectivePoint &p) : track(p.track), X(p.X) {}
+ int track;
+ Vec4 X;
+};
+
+/*!
+ The ProjectiveReconstruction class stores \link ProjectiveCamera cameras
+ \endlink and \link ProjectivePoint points \endlink.
+
+ The ProjectiveReconstruction container is intended as the store of 3D
+ reconstruction data to be used with the MultiView API.
+
+ The container has lookups to query a \a ProjectiveCamera from an \a image or
+ a \a ProjectivePoint from a \a track.
+
+ \sa Camera, Point
+*/
+class ProjectiveReconstruction {
+ public:
+ /*!
+ Insert a camera into the set. If there is already a camera for the given
+ \a image, the existing camera is replaced. If there is no camera for the
+ given \a image, a new one is added.
+
+ \a image is the key used to retrieve the cameras with the other methods
+ in this class.
+
+ \note You should use the same \a image identifier as in \l Tracks.
+ */
+ void InsertCamera(int image, const Mat34 &P);
+
+ /*!
+ Insert a point into the reconstruction. If there is already a point for
+ the given \a track, the existing point is replaced. If there is no point
+ for the given \a track, a new one is added.
+
+ \a track is the key used to retrieve the points with the
+ other methods in this class.
+
+ \note You should use the same \a track identifier as in \l Tracks.
+ */
+ void InsertPoint(int track, const Vec4 &X);
+
+ /// Returns a pointer to the camera corresponding to \a image.
+ ProjectiveCamera *CameraForImage(int image);
+ const ProjectiveCamera *CameraForImage(int image) const;
+
+ /// Returns all cameras.
+ vector<ProjectiveCamera> AllCameras() const;
+
+ /// Returns a pointer to the point corresponding to \a track.
+ ProjectivePoint *PointForTrack(int track);
+ const ProjectivePoint *PointForTrack(int track) const;
+
+ /// Returns all points.
+ vector<ProjectivePoint> AllPoints() const;
+
+ private:
+ vector<ProjectiveCamera> cameras_;
+ vector<ProjectivePoint> points_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
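
A small sketch of the container semantics documented above: inserting under an existing image/track key replaces the entry, and lookups for unknown keys return NULL. The values in main() are arbitrary illustrations.

// Sketch only: insert/replace and lookup behaviour of EuclideanReconstruction.
#include <cassert>
#include <cstddef>

#include "libmv/simple_pipeline/reconstruction.h"

int main() {
  libmv::EuclideanReconstruction reconstruction;

  // Insert a camera for image 2 and a point for track 7.
  reconstruction.InsertCamera(2, libmv::Mat3::Identity(), libmv::Vec3::Zero());
  reconstruction.InsertPoint(7, libmv::Vec3(0.0, 0.0, 1.0));

  // Lookups use the same image/track identifiers as in Tracks.
  assert(reconstruction.CameraForImage(2) != NULL);
  assert(reconstruction.PointForTrack(7) != NULL);

  // Unknown keys return NULL rather than inserting anything.
  assert(reconstruction.CameraForImage(3) == NULL);
  assert(reconstruction.PointForTrack(100) == NULL);

  // Re-inserting with the same key replaces the existing entry.
  reconstruction.InsertPoint(7, libmv::Vec3(1.0, 2.0, 3.0));
  assert(reconstruction.PointForTrack(7)->X.isApprox(
      libmv::Vec3(1.0, 2.0, 3.0)));
  return 0;
}
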
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
new file mode 100644
index 00000000000..40ac23be7a2
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.cc
@@ -0,0 +1,68 @@
+// Copyright (c) 2013 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/reconstruction_scale.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+void EuclideanScaleToUnity(EuclideanReconstruction *reconstruction) {
+ vector<EuclideanCamera> all_cameras = reconstruction->AllCameras();
+ vector<EuclideanPoint> all_points = reconstruction->AllPoints();
+
+ // Calculate center of the mass of all cameras.
+ Vec3 cameras_mass_center = Vec3::Zero();
+ for (int i = 0; i < all_cameras.size(); ++i) {
+ cameras_mass_center += all_cameras[i].t;
+ }
+ cameras_mass_center /= all_cameras.size();
+
+ // Find the most distant camera from the mass center.
+ double max_distance = 0.0;
+ for (int i = 0; i < all_cameras.size(); ++i) {
+ double distance = (all_cameras[i].t - cameras_mass_center).squaredNorm();
+ if (distance > max_distance) {
+ max_distance = distance;
+ }
+ }
+
+ if (max_distance == 0.0) {
+ LG << "Cameras position variance is too small, can not rescale";
+ return;
+ }
+
+ double scale_factor = 1.0 / sqrt(max_distance);
+
+ // Rescale cameras positions.
+ for (int i = 0; i < all_cameras.size(); ++i) {
+ int image = all_cameras[i].image;
+ EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ camera->t = camera->t * scale_factor;
+ }
+
+ // Rescale points positions.
+ for (int i = 0; i < all_points.size(); ++i) {
+ int track = all_points[i].track;
+ EuclideanPoint *point = reconstruction->PointForTrack(track);
+ point->X = point->X * scale_factor;
+ }
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h
new file mode 100644
index 00000000000..f2349ff5146
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction_scale.h
@@ -0,0 +1,36 @@
+// Copyright (c) 2013 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_SCALE_H_
+#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_SCALE_H_
+
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+  Scale a Euclidean reconstruction so that the camera which is farthest
+  from the center of mass of all camera centers lies at unit distance.
+ */
+void EuclideanScaleToUnity(EuclideanReconstruction *reconstruction);
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_SCALE_H_
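
A brief sketch of where this rescaling step fits, assuming a completed reconstruction; the CompleteAndNormalize helper is an illustrative name. Since the overall scale of a Euclidean reconstruction is arbitrary, the rescale only changes units, not geometry.

// Sketch only: normalize the scene scale after the reconstruction is complete,
// so that the camera farthest from the camera center of mass sits at unit
// distance (see reconstruction_scale.cc above).
#include "libmv/simple_pipeline/pipeline.h"
#include "libmv/simple_pipeline/reconstruction_scale.h"

void CompleteAndNormalize(const libmv::Tracks &normalized_tracks,
                          libmv::EuclideanReconstruction *reconstruction) {
  libmv::EuclideanCompleteReconstruction(normalized_tracks, reconstruction);
  libmv::EuclideanScaleToUnity(reconstruction);
}
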
diff --git a/intern/libmv/libmv/simple_pipeline/resect.cc b/intern/libmv/libmv/simple_pipeline/resect.cc
new file mode 100644
index 00000000000..e73fc44df2a
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/resect.cc
@@ -0,0 +1,270 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/resect.h"
+
+#include <cstdio>
+
+#include "libmv/base/vector.h"
+#include "libmv/logging/logging.h"
+#include "libmv/multiview/euclidean_resection.h"
+#include "libmv/multiview/resection.h"
+#include "libmv/multiview/projection.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+#include "libmv/simple_pipeline/tracks.h"
+
+namespace libmv {
+namespace {
+
+Mat2X PointMatrixFromMarkers(const vector<Marker> &markers) {
+ Mat2X points(2, markers.size());
+ for (int i = 0; i < markers.size(); ++i) {
+ points(0, i) = markers[i].x;
+ points(1, i) = markers[i].y;
+ }
+ return points;
+}
+
+// Uses an incremental rotation:
+//
+// x = R' * R * X + t;
+//
+// to avoid issues with the rotation representation. R' is derived from an
+// Euler vector encoding the rotation in 3 parameters; the direction is the
+// axis to rotate around and the magnitude is the amount of the rotation.
+struct EuclideanResectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec6 XMatrixType;
+
+ EuclideanResectCostFunction(const vector<Marker> &markers,
+ const EuclideanReconstruction &reconstruction,
+ const Mat3 &initial_R)
+ : markers(markers),
+ reconstruction(reconstruction),
+ initial_R(initial_R) {}
+
+  // dRt has dR (delta R) encoded as an Euler vector in the first 3 parameters,
+ // followed by t in the next 3 parameters.
+ Vec operator()(const Vec6 &dRt) const {
+ // Unpack R, t from dRt.
+ Mat3 R = RotationFromEulerVector(dRt.head<3>()) * initial_R;
+ Vec3 t = dRt.tail<3>();
+
+ // Compute the reprojection error for each coordinate.
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const EuclideanPoint &point =
+ *reconstruction.PointForTrack(markers[i].track);
+ Vec3 projected = R * point.X + t;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+
+ const vector<Marker> &markers;
+ const EuclideanReconstruction &reconstruction;
+ const Mat3 &initial_R;
+};
+
+} // namespace
+
+bool EuclideanResect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass) {
+ if (markers.size() < 5) {
+ return false;
+ }
+ Mat2X points_2d = PointMatrixFromMarkers(markers);
+ Mat3X points_3d(3, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d.col(i) = reconstruction->PointForTrack(markers[i].track)->X;
+ }
+ LG << "Points for resect:\n" << points_2d;
+
+ Mat3 R;
+ Vec3 t;
+
+ if (0 || !euclidean_resection::EuclideanResection(
+ points_2d, points_3d, &R, &t,
+ euclidean_resection::RESECTION_EPNP)) {
+ // printf("Resection for image %d failed\n", markers[0].image);
+ LG << "Resection for image " << markers[0].image << " failed;"
+ << " trying fallback projective resection.";
+
+ LG << "No fallback; failing resection for " << markers[0].image;
+ return false;
+
+ if (!final_pass) return false;
+ // Euclidean resection failed. Fall back to projective resection, which is
+ // less reliable but better conditioned when there are many points.
+ Mat34 P;
+ Mat4X points_3d_homogeneous(4, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d_homogeneous.col(i).head<3>() = points_3d.col(i);
+ points_3d_homogeneous(3, i) = 1.0;
+ }
+ resection::Resection(points_2d, points_3d_homogeneous, &P);
+ if ((P * points_3d_homogeneous.col(0))(2) < 0) {
+ LG << "Point behind camera; switch sign.";
+ P = -P;
+ }
+
+ Mat3 ignored;
+ KRt_From_P(P, &ignored, &R, &t);
+
+ // The R matrix should be a rotation, but don't rely on it.
+ Eigen::JacobiSVD<Mat3> svd(R, Eigen::ComputeFullU | Eigen::ComputeFullV);
+
+ LG << "Resection rotation is: " << svd.singularValues().transpose();
+ LG << "Determinant is: " << R.determinant();
+
+ // Correct to make R a rotation.
+ R = svd.matrixU() * svd.matrixV().transpose();
+
+ Vec3 xx = R * points_3d.col(0) + t;
+ if (xx(2) < 0.0) {
+ LG << "Final point is still behind camera...";
+ }
+ // XXX Need to check if error is horrible and fail here too in that case.
+ }
+
+ // Refine the result.
+ typedef LevenbergMarquardt<EuclideanResectCostFunction> Solver;
+
+ // Give the cost our initial guess for R.
+ EuclideanResectCostFunction resect_cost(markers, *reconstruction, R);
+
+ // Encode the initial parameters: start with zero delta rotation, and the
+ // guess for t obtained from resection.
+ Vec6 dRt = Vec6::Zero();
+ dRt.tail<3>() = t;
+
+ Solver solver(resect_cost);
+
+ Solver::SolverParameters params;
+ /* Solver::Results results = */ solver.minimize(params, &dRt);
+ LG << "LM found incremental rotation: " << dRt.head<3>().transpose();
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Unpack the rotation and translation.
+ R = RotationFromEulerVector(dRt.head<3>()) * R;
+ t = dRt.tail<3>();
+
+ LG << "Resection for image " << markers[0].image << " got:\n"
+ << "R:\n" << R << "\nt:\n" << t;
+ reconstruction->InsertCamera(markers[0].image, R, t);
+ return true;
+}
+
+namespace {
+
+// Directly parameterize the projection matrix, P, which is a homogeneous
+// 12-parameter entity. In theory P should be parameterized with only 11
+// parameters, but in practice it works fine to let the extra degree of
+// freedom drift.
+struct ProjectiveResectCostFunction {
+ public:
+ typedef Vec FMatrixType;
+ typedef Vec12 XMatrixType;
+
+ ProjectiveResectCostFunction(const vector<Marker> &markers,
+ const ProjectiveReconstruction &reconstruction)
+ : markers(markers),
+ reconstruction(reconstruction) {}
+
+ Vec operator()(const Vec12 &vector_P) const {
+ // Unpack P from vector_P.
+ Map<const Mat34> P(vector_P.data(), 3, 4);
+
+ // Compute the reprojection error for each coordinate.
+ Vec residuals(2 * markers.size());
+ residuals.setZero();
+ for (int i = 0; i < markers.size(); ++i) {
+ const ProjectivePoint &point =
+ *reconstruction.PointForTrack(markers[i].track);
+ Vec3 projected = P * point.X;
+ projected /= projected(2);
+ residuals[2*i + 0] = projected(0) - markers[i].x;
+ residuals[2*i + 1] = projected(1) - markers[i].y;
+ }
+ return residuals;
+ }
+
+ const vector<Marker> &markers;
+ const ProjectiveReconstruction &reconstruction;
+};
+
+} // namespace
+
+bool ProjectiveResect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction) {
+ if (markers.size() < 5) {
+ return false;
+ }
+
+ // Stack the homogeneous 3D points as the columns of a matrix.
+ Mat2X points_2d = PointMatrixFromMarkers(markers);
+ Mat4X points_3d_homogeneous(4, markers.size());
+ for (int i = 0; i < markers.size(); i++) {
+ points_3d_homogeneous.col(i) =
+ reconstruction->PointForTrack(markers[i].track)->X;
+ }
+ LG << "Points for resect:\n" << points_2d;
+
+  // Resection the camera from the 2D-3D correspondences.
+ Mat34 P;
+ resection::Resection(points_2d, points_3d_homogeneous, &P);
+
+ // Flip the sign of P if necessary to keep the point in front of the camera.
+ if ((P * points_3d_homogeneous.col(0))(2) < 0) {
+ LG << "Point behind camera; switch sign.";
+ P = -P;
+ }
+
+ // TODO(keir): Check if error is horrible and fail in that case.
+
+ // Refine the resulting projection matrix using geometric error.
+ typedef LevenbergMarquardt<ProjectiveResectCostFunction> Solver;
+
+ ProjectiveResectCostFunction resect_cost(markers, *reconstruction);
+
+  // Pack the initial P matrix into a size-12 vector.
+ Vec12 vector_P = Map<Vec12>(P.data());
+
+ Solver solver(resect_cost);
+
+ Solver::SolverParameters params;
+ /* Solver::Results results = */ solver.minimize(params, &vector_P);
+ // TODO(keir): Check results to ensure clean termination.
+
+ // Unpack the projection matrix.
+ P = Map<Mat34>(vector_P.data(), 3, 4);
+
+ LG << "Resection for image " << markers[0].image << " got:\n"
+ << "P:\n" << P;
+ reconstruction->InsertCamera(markers[0].image, P);
+ return true;
+}
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/resect.h b/intern/libmv/libmv/simple_pipeline/resect.h
new file mode 100644
index 00000000000..7ca3237437e
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/resect.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_RESECT_H
+#define LIBMV_SIMPLE_PIPELINE_RESECT_H
+
+#include "libmv/base/vector.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+namespace libmv {
+
+/*!
+ Estimate the Euclidean pose of a camera from 2D to 3D correspondences.
+
+ This takes a set of markers visible in one frame (which is the one to
+ resection), such that the markers are also reconstructed in 3D in the
+ reconstruction object, and solves for the pose and orientation of the
+ camera for that frame.
+
+  \a markers should contain \link Marker markers \endlink belonging to tracks
+ visible in the one frame to be resectioned. Each of the tracks associated
+ with the markers must have a corresponding reconstructed 3D position in the
+ \a *reconstruction object.
+
+ \a *reconstruction should contain the 3D points associated with the tracks
+ for the markers present in \a markers.
+
+ \note This assumes a calibrated reconstruction, e.g. the markers are
+ already corrected for camera intrinsics and radial distortion.
+ \note This assumes an outlier-free set of markers.
+
+ \return True if the resection was successful, false otherwise.
+
+ \sa EuclideanIntersect, EuclideanReconstructTwoFrames
+*/
+bool EuclideanResect(const vector<Marker> &markers,
+ EuclideanReconstruction *reconstruction, bool final_pass);
+
+/*!
+ Estimate the projective pose of a camera from 2D to 3D correspondences.
+
+ This takes a set of markers visible in one frame (which is the one to
+ resection), such that the markers are also reconstructed in a projective
+ frame in the reconstruction object, and solves for the projective matrix of
+ the camera for that frame.
+
+  \a markers should contain \link Marker markers \endlink belonging to tracks
+ visible in the one frame to be resectioned. Each of the tracks associated
+ with the markers must have a corresponding reconstructed homogeneous 3D
+ position in the \a *reconstruction object.
+
+ \a *reconstruction should contain the homogeneous 3D points associated with
+ the tracks for the markers present in \a markers.
+
+ \note This assumes radial distortion has already been corrected, but
+ otherwise works for uncalibrated sequences.
+ \note This assumes an outlier-free set of markers.
+
+ \return True if the resection was successful, false otherwise.
+
+ \sa ProjectiveIntersect, ProjectiveReconstructTwoFrames
+*/
+bool ProjectiveResect(const vector<Marker> &markers,
+ ProjectiveReconstruction *reconstruction);
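+
+// Illustrative usage sketch (same assumed caller-side names as the
+// EuclideanResect example above, but with a projective reconstruction):
+//
+//   vector<Marker> markers = tracks.MarkersInImage(frame);
+//   ProjectiveResect(markers, &projective_reconstruction);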
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_RESECT_H
diff --git a/intern/libmv/libmv/simple_pipeline/resect_test.cc b/intern/libmv/libmv/simple_pipeline/resect_test.cc
new file mode 100644
index 00000000000..811edd282d8
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/resect_test.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2009 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/resect.h"
+#include "libmv/logging/logging.h"
+#include "testing/testing.h"
+
+#if 0
+// Generates all necessary inputs and expected outputs for EuclideanResection.
+void CreateCameraSystem(const Mat3& KK,
+ const Mat3X& x_image,
+ const Vec& X_distances,
+ const Mat3& R_input,
+ const Vec3& T_input,
+ Mat2X *x_camera,
+ Mat3X *X_world,
+ Mat3 *R_expected,
+ Vec3 *T_expected) {
+ int num_points = x_image.cols();
+
+ Mat3X x_unit_cam(3, num_points);
+ x_unit_cam = KK.inverse() * x_image;
+
+ // Create normalized camera coordinates to be used as an input to the PnP
+ // function, instead of using NormalizeColumnVectors(&x_unit_cam).
+ *x_camera = x_unit_cam.block(0, 0, 2, num_points);
+ for (int i = 0; i < num_points; ++i) {
+ x_unit_cam.col(i).normalize();
+ }
+
+ // Create the 3D points in the camera system.
+ Mat X_camera(3, num_points);
+ for (int i = 0; i < num_points; ++i) {
+ X_camera.col(i) = X_distances(i) * x_unit_cam.col(i);
+ }
+
+ // Apply the transformation to the camera 3D points
+ Mat translation_matrix(3, num_points);
+ translation_matrix.row(0).setConstant(T_input(0));
+ translation_matrix.row(1).setConstant(T_input(1));
+ translation_matrix.row(2).setConstant(T_input(2));
+
+ *X_world = R_input * X_camera + translation_matrix;
+
+ // Create the expected result for comparison.
+ *R_expected = R_input.transpose();
+ *T_expected = *R_expected * (-T_input);
+}
+
+TEST(AbsoluteOrientation, QuaternionSolution) {
+ int num_points = 4;
+ Mat X;
+ Mat Xp;
+ X = 100 * Mat::Random(3, num_points);
+
+ // Create a random translation and rotation.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 t_input;
+ t_input.setRandom();
+ t_input = 100 * t_input;
+
+ Mat translation_matrix(3, num_points);
+ translation_matrix.row(0).setConstant(t_input(0));
+ translation_matrix.row(1).setConstant(t_input(1));
+ translation_matrix.row(2).setConstant(t_input(2));
+
+ // Create the transformed 3D points Xp as Xp = R * X + t.
+ Xp = R_input * X + translation_matrix;
+
+ // Output variables.
+ Mat3 R;
+ Vec3 t;
+
+ AbsoluteOrientation(X, Xp, &R, &t);
+
+ EXPECT_MATRIX_NEAR(t, t_input, 1e-6);
+ EXPECT_MATRIX_NEAR(R, R_input, 1e-8);
+}
+
+TEST(EuclideanResection, Points4KnownImagePointsRandomTranslationRotation) {
+ // In this test only the translation and rotation are random. The image
+ // points are selected from a real case and are well conditioned.
+ Vec2i image_dimensions;
+ image_dimensions << 1600, 1200;
+
+ Mat3 KK;
+ KK << 2796, 0, 804,
+ 0 , 2796, 641,
+ 0, 0, 1;
+
+ // The real image points.
+ int num_points = 4;
+ Mat3X x_image(3, num_points);
+ x_image << 1164.06, 734.948, 749.599, 430.727,
+ 681.386, 844.59, 496.315, 580.775,
+ 1, 1, 1, 1;
+
+
+ // A vector of the 4 distances to the 3D points.
+ Vec X_distances = 100 * Vec::Random(num_points).array().abs();
+
+ // Create the random camera motion R and t that resection should recover.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 T_input;
+ T_input.setRandom();
+ T_input = 100 * T_input;
+
+ // Create the camera system, also getting the expected result of the
+ // transformation.
+ Mat3 R_expected;
+ Vec3 T_expected;
+ Mat3X X_world;
+ Mat2X x_camera;
+ CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
+ &x_camera, &X_world, &R_expected, &T_expected);
+
+ // Finally, run the code under test.
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_ANSAR_DANIILIDIS);
+
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+
+ // For now, the EPnP doesn't have a non-linear optimization step and so is
+ // not precise enough with only 4 points.
+ //
+ // TODO(jmichot): Reenable this test when there is nonlinear refinement.
+#if 0
+ R_output.setIdentity();
+ T_output.setZero();
+
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_EPNP);
+
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+  EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+#endif
+}
+
+// TODO(jmichot): Reduce the code duplication here with the code above.
+TEST(EuclideanResection, Points6AllRandomInput) {
+ Mat3 KK;
+ KK << 2796, 0, 804,
+ 0 , 2796, 641,
+ 0, 0, 1;
+
+ // Create random image points for a 1600x1200 image.
+ int w = 1600;
+ int h = 1200;
+ int num_points = 6;
+ Mat3X x_image(3, num_points);
+ x_image.row(0) = w * Vec::Random(num_points).array().abs();
+ x_image.row(1) = h * Vec::Random(num_points).array().abs();
+ x_image.row(2).setOnes();
+
+ // Normalized camera coordinates to be used as an input to the PnP function.
+ Mat2X x_camera;
+ Vec X_distances = 100 * Vec::Random(num_points).array().abs();
+
+ // Create the random camera motion R and t that resection should recover.
+ Mat3 R_input;
+ R_input = Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitY())
+ * Eigen::AngleAxisd(rand(), Eigen::Vector3d::UnitZ());
+
+ Vec3 T_input;
+ T_input.setRandom();
+ T_input = 100 * T_input;
+
+ // Create the camera system.
+ Mat3 R_expected;
+ Vec3 T_expected;
+ Mat3X X_world;
+ CreateCameraSystem(KK, x_image, X_distances, R_input, T_input,
+ &x_camera, &X_world, &R_expected, &T_expected);
+
+ // Test each of the resection methods.
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_ANSAR_DANIILIDIS);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_camera, X_world,
+ &R_output, &T_output,
+ RESECTION_EPNP);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+ {
+ Mat3 R_output;
+ Vec3 T_output;
+ EuclideanResection(x_image, X_world, KK,
+ &R_output, &T_output);
+ EXPECT_MATRIX_NEAR(T_output, T_expected, 1e-5);
+ EXPECT_MATRIX_NEAR(R_output, R_expected, 1e-7);
+ }
+}
+#endif
diff --git a/intern/libmv/libmv/simple_pipeline/tracks.cc b/intern/libmv/libmv/simple_pipeline/tracks.cc
new file mode 100644
index 00000000000..d5d009708ba
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/tracks.cc
@@ -0,0 +1,187 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/tracks.h"
+
+#include <algorithm>
+#include <vector>
+#include <iterator>
+
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+Tracks::Tracks(const Tracks &other) {
+ markers_ = other.markers_;
+}
+
+Tracks::Tracks(const vector<Marker> &markers) : markers_(markers) {}
+
+void Tracks::Insert(int image, int track, double x, double y, double weight) {
+ // TODO(keir): Wow, this is quadratic for repeated insertions. Fix this by
+ // adding a smarter data structure like a set<>.
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image == image &&
+ markers_[i].track == track) {
+      markers_[i].x = x;
+      markers_[i].y = y;
+      markers_[i].weight = weight;
+      return;
+ }
+ }
+ Marker marker = { image, track, x, y, weight };
+ markers_.push_back(marker);
+}
+
+vector<Marker> Tracks::AllMarkers() const {
+ return markers_;
+}
+
+vector<Marker> Tracks::MarkersInImage(int image) const {
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (image == markers_[i].image) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+vector<Marker> Tracks::MarkersForTrack(int track) const {
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (track == markers_[i].track) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+vector<Marker> Tracks::MarkersInBothImages(int image1, int image2) const {
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ int image = markers_[i].image;
+ if (image == image1 || image == image2)
+ markers.push_back(markers_[i]);
+ }
+ return markers;
+}
+
+vector<Marker> Tracks::MarkersForTracksInBothImages(int image1,
+ int image2) const {
+ std::vector<int> image1_tracks;
+ std::vector<int> image2_tracks;
+
+ for (int i = 0; i < markers_.size(); ++i) {
+ int image = markers_[i].image;
+ if (image == image1) {
+ image1_tracks.push_back(markers_[i].track);
+ } else if (image == image2) {
+ image2_tracks.push_back(markers_[i].track);
+ }
+ }
+
+ std::sort(image1_tracks.begin(), image1_tracks.end());
+ std::sort(image2_tracks.begin(), image2_tracks.end());
+
+ std::vector<int> intersection;
+ std::set_intersection(image1_tracks.begin(), image1_tracks.end(),
+ image2_tracks.begin(), image2_tracks.end(),
+ std::back_inserter(intersection));
+
+ vector<Marker> markers;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if ((markers_[i].image == image1 || markers_[i].image == image2) &&
+ std::binary_search(intersection.begin(), intersection.end(),
+ markers_[i].track)) {
+ markers.push_back(markers_[i]);
+ }
+ }
+ return markers;
+}
+
+Marker Tracks::MarkerInImageForTrack(int image, int track) const {
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image == image && markers_[i].track == track) {
+ return markers_[i];
+ }
+ }
+ Marker null = { -1, -1, -1, -1, 0.0 };
+ return null;
+}
+
+void Tracks::RemoveMarkersForTrack(int track) {
+ int size = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].track != track) {
+ markers_[size++] = markers_[i];
+ }
+ }
+ markers_.resize(size);
+}
+
+void Tracks::RemoveMarker(int image, int track) {
+ int size = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ if (markers_[i].image != image || markers_[i].track != track) {
+ markers_[size++] = markers_[i];
+ }
+ }
+ markers_.resize(size);
+}
+
+int Tracks::MaxImage() const {
+ // TODO(MatthiasF): maintain a max_image_ member (updated on Insert)
+ int max_image = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_image = std::max(markers_[i].image, max_image);
+ }
+ return max_image;
+}
+
+int Tracks::MaxTrack() const {
+ // TODO(MatthiasF): maintain a max_track_ member (updated on Insert)
+ int max_track = 0;
+ for (int i = 0; i < markers_.size(); ++i) {
+ max_track = std::max(markers_[i].track, max_track);
+ }
+ return max_track;
+}
+
+int Tracks::NumMarkers() const {
+ return markers_.size();
+}
+
+void CoordinatesForMarkersInImage(const vector<Marker> &markers,
+ int image,
+ Mat *coordinates) {
+ vector<Vec2> coords;
+ for (int i = 0; i < markers.size(); ++i) {
+ const Marker &marker = markers[i];
+ if (markers[i].image == image) {
+ coords.push_back(Vec2(marker.x, marker.y));
+ }
+ }
+ coordinates->resize(2, coords.size());
+ for (int i = 0; i < coords.size(); i++) {
+ coordinates->col(i) = coords[i];
+ }
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/simple_pipeline/tracks.h b/intern/libmv/libmv/simple_pipeline/tracks.h
new file mode 100644
index 00000000000..a54a43659b7
--- /dev/null
+++ b/intern/libmv/libmv/simple_pipeline/tracks.h
@@ -0,0 +1,138 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_TRACKS_H_
+#define LIBMV_SIMPLE_PIPELINE_TRACKS_H_
+
+#include "libmv/base/vector.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+/*!
+ A Marker is the 2D location of a tracked point in an image.
+
+ \a x, \a y is the position of the marker in pixels from the top left corner
+  in the image identified by \a image. All markers corresponding to the same
+  target form a track identified by a common \a track number.
+
+  \a weight is used by bundle adjustment and denotes how much the track
+  affects the final solution.
+
+ \note Markers are typically aggregated with the help of the \l Tracks class.
+
+ \sa Tracks
+*/
+// TODO(sergey): Consider using a comment for every member separately
+// instead of having one gigantic comment block.
+struct Marker {
+ int image;
+ int track;
+ double x, y;
+ double weight;
+};
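+
+// Illustrative aggregate initialization (the values are made-up example data):
+//
+//   Marker marker = { /*image=*/0, /*track=*/7, /*x=*/640.0, /*y=*/360.0,
+//                     /*weight=*/1.0 };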
+
+/*!
+ The Tracks class stores \link Marker reconstruction markers \endlink.
+
+  The Tracks container stores the correspondences between images, which must
+  be created before any 3D reconstruction can take place.
+
+ The container has several fast lookups for queries typically needed for
+ structure from motion algorithms, such as \l MarkersForTracksInBothImages().
+
+ \sa Marker
+*/
+class Tracks {
+ public:
+ Tracks() { }
+
+ // Copy constructor for a tracks object.
+ Tracks(const Tracks &other);
+
+ /// Construct a new tracks object using the given markers to start.
+ explicit Tracks(const vector<Marker> &markers);
+
+ /*!
+ Inserts a marker into the set. If there is already a marker for the given
+ \a image and \a track, the existing marker is replaced. If there is no
+ marker for the given \a image and \a track, a new one is added.
+
+ \a image and \a track are the keys used to retrieve the markers with the
+ other methods in this class.
+
+    \a weight is used by bundle adjustment and denotes how much the track
+    affects the final solution.
+
+ \note To get an identifier for a new track, use \l MaxTrack() + 1.
+ */
+  // TODO(sergey): Consider using InsertWeightedMarker instead of using
+ // stupid default value?
+ void Insert(int image, int track, double x, double y, double weight = 1.0);
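+
+  // Illustrative sketch of adding a marker for a brand-new track, following
+  // the MaxTrack() + 1 convention noted above (frame, x and y are assumed
+  // caller-side values):
+  //
+  //   int new_track = tracks.MaxTrack() + 1;
+  //   tracks.Insert(frame, new_track, x, y);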
+
+ /// Returns all the markers.
+ vector<Marker> AllMarkers() const;
+
+ /// Returns all the markers belonging to a track.
+ vector<Marker> MarkersForTrack(int track) const;
+
+ /// Returns all the markers visible in \a image.
+ vector<Marker> MarkersInImage(int image) const;
+
+ /// Returns all the markers visible in \a image1 and \a image2.
+ vector<Marker> MarkersInBothImages(int image1, int image2) const;
+
+ /*!
+ Returns the markers in \a image1 and \a image2 which have a common track.
+
+ This is not the same as the union of the markers in \a image1 and \a
+ image2; each marker is for a track that appears in both images.
+ */
+ vector<Marker> MarkersForTracksInBothImages(int image1, int image2) const;
+
+ /// Returns the marker in \a image belonging to \a track.
+ Marker MarkerInImageForTrack(int image, int track) const;
+
+ /// Removes all the markers belonging to \a track.
+ void RemoveMarkersForTrack(int track);
+
+ /// Removes the marker in \a image belonging to \a track.
+ void RemoveMarker(int image, int track);
+
+ /// Returns the maximum image identifier used.
+ int MaxImage() const;
+
+ /// Returns the maximum track identifier used.
+ int MaxTrack() const;
+
+ /// Returns the number of markers.
+ int NumMarkers() const;
+
+ private:
+ vector<Marker> markers_;
+};
+
+void CoordinatesForMarkersInImage(const vector<Marker> &markers,
+ int image,
+ Mat *coordinates);
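+
+// Illustrative sketch of a typical two-view query (tracks, frame1 and frame2
+// are assumed caller-side values): gather the markers whose tracks appear in
+// both frames, then pull out the 2D coordinates observed in one of them.
+//
+//   vector<Marker> shared = tracks.MarkersForTracksInBothImages(frame1, frame2);
+//   Mat coordinates1;
+//   CoordinatesForMarkersInImage(shared, frame1, &coordinates1);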
+
+} // namespace libmv
+
+#endif  // LIBMV_SIMPLE_PIPELINE_TRACKS_H_
diff --git a/intern/libmv/libmv/tracking/brute_region_tracker.cc b/intern/libmv/libmv/tracking/brute_region_tracker.cc
new file mode 100644
index 00000000000..4a2aef63a96
--- /dev/null
+++ b/intern/libmv/libmv/tracking/brute_region_tracker.cc
@@ -0,0 +1,331 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/brute_region_tracker.h"
+
+#ifdef __SSE2__
+# include <emmintrin.h>
+#endif
+
+#include <climits>  // For INT_MAX, used in Track() below.
+
+#include "libmv/base/aligned_malloc.h"
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/correlation.h"
+#include "libmv/image/sample.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+namespace {
+
+bool RegionIsInBounds(const FloatImage &image1,
+ double x, double y,
+ int half_window_size) {
+ // Check the minimum coordinates.
+ int min_x = floor(x) - half_window_size - 1;
+ int min_y = floor(y) - half_window_size - 1;
+ if (min_x < 0.0 ||
+ min_y < 0.0) {
+ return false;
+ }
+
+ // Check the maximum coordinates.
+ int max_x = ceil(x) + half_window_size + 1;
+ int max_y = ceil(y) + half_window_size + 1;
+ if (max_x > image1.cols() ||
+ max_y > image1.rows()) {
+ return false;
+ }
+
+ // Ok, we're good.
+ return true;
+}
+
+#ifdef __SSE2__
+
+// Compute the sum of absolute differences between the arrays "a" and "b".
+// The array "a" is assumed to be 16-byte aligned, while "b" is not. The
+// result is returned as the first and third elements of __m128i if
+// interpreted as a 4-element 32-bit integer array. The SAD is the sum of the
+// elements.
+//
+// The function requires readable padding past the end of both "a" and "b"
+// (up to 16 - size % 16 extra elements), since the SSE load instructions pull
+// in full 16-byte blocks even when size is not a multiple of 16.
+inline static __m128i SumOfAbsoluteDifferencesContiguousSSE(
+ const unsigned char *a, // aligned
+ const unsigned char *b, // not aligned
+ unsigned int size,
+ __m128i sad) {
+ // Do the bulk of the work as 16-way integer operations.
+ for (unsigned int j = 0; j < size / 16; j++) {
+ sad = _mm_add_epi32(sad, _mm_sad_epu8( _mm_load_si128 ((__m128i*)(a + 16 * j)),
+ _mm_loadu_si128((__m128i*)(b + 16 * j))));
+ }
+ // Handle the trailing end.
+ // TODO(keir): Benchmark to verify that the below SSE is a win compared to a
+ // hand-rolled loop. It's not clear that the hand rolled loop would be slower
+ // than the potential cache miss when loading the immediate table below.
+ //
+ // An alternative to this version is to take a packet of all 1's then do a
+ // 128-bit shift. The issue is that the shift instruction needs an immediate
+ // amount rather than a variable amount, so the branch instruction here must
+ // remain. See _mm_srli_si128 and _mm_slli_si128.
+ unsigned int remainder = size % 16u;
+ if (remainder) {
+ unsigned int j = size / 16;
+ __m128i a_trail = _mm_load_si128 ((__m128i*)(a + 16 * j));
+ __m128i b_trail = _mm_loadu_si128((__m128i*)(b + 16 * j));
+ __m128i mask;
+ switch (remainder) {
+#define X 0xff
+ case 1: mask = _mm_setr_epi8(X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 2: mask = _mm_setr_epi8(X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 3: mask = _mm_setr_epi8(X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 4: mask = _mm_setr_epi8(X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 5: mask = _mm_setr_epi8(X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 6: mask = _mm_setr_epi8(X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 7: mask = _mm_setr_epi8(X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 8: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0, 0); break;
+ case 9: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0, 0); break;
+ case 10: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0, 0); break;
+ case 11: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0, 0); break;
+ case 12: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0, 0); break;
+ case 13: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0, 0); break;
+ case 14: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0, 0); break;
+ case 15: mask = _mm_setr_epi8(X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, 0); break;
+#undef X
+ }
+ sad = _mm_add_epi32(sad, _mm_sad_epu8(_mm_and_si128(mask, a_trail),
+ _mm_and_si128(mask, b_trail)));
+ }
+ return sad;
+}
+#endif
+
+// Computes the sum of absolute differences between pattern and image. Pattern
+// must be 16-byte aligned, and the stride must be a multiple of 16. The image
+// pointer does not have to be aligned.
+int SumOfAbsoluteDifferencesContiguousImage(
+ const unsigned char *pattern,
+ unsigned int pattern_width,
+ unsigned int pattern_height,
+ unsigned int pattern_stride,
+ const unsigned char *image,
+ unsigned int image_stride) {
+#ifdef __SSE2__
+ // TODO(keir): Add interleaved accumulation, where accumulation is done into
+ // two or more SSE registers that then get combined at the end. This reduces
+ // instruction dependency; in Eigen's squared norm code, splitting the
+ // accumulation produces a ~2x speedup. It's not clear it will help here,
+ // where the number of SSE instructions in the inner loop is smaller.
+ __m128i sad = _mm_setzero_si128();
+ for (int r = 0; r < pattern_height; ++r) {
+ sad = SumOfAbsoluteDifferencesContiguousSSE(&pattern[pattern_stride * r],
+ &image[image_stride * r],
+ pattern_width,
+ sad);
+ }
+ return _mm_cvtsi128_si32(
+ _mm_add_epi32(sad,
+ _mm_shuffle_epi32(sad, _MM_SHUFFLE(3, 0, 1, 2))));
+#else
+ int sad = 0;
+ for (int r = 0; r < pattern_height; ++r) {
+ for (int c = 0; c < pattern_width; ++c) {
+ sad += abs(pattern[pattern_stride * r + c] - image[image_stride * r + c]);
+ }
+ }
+ return sad;
+#endif
+}
+
+// Sample a region of size width, height centered at x,y in image, converting
+// from float to byte in the process. Samples from the first channel. Puts
+// result into *pattern.
+void SampleRectangularPattern(const FloatImage &image,
+ double x, double y,
+ int width,
+ int height,
+ int pattern_stride,
+ unsigned char *pattern) {
+  // There are two cases for width and height: even or odd. If it's odd, then
+  // the bounds [-width / 2, width / 2] work as expected. However, for even
+  // sizes this results in one extra access past the end. So use < instead of
+  // <= in the loops below, but increase the end limit by one in the odd case.
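+  // For example: width = 5 gives end_width = 2 + 1 = 3, so c runs over
+  // [-2, 2] (five samples); width = 4 gives end_width = 2 + 0 = 2, so c runs
+  // over [-2, 1] (four samples).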
+ int end_width = (width / 2) + (width % 2);
+ int end_height = (height / 2) + (height % 2);
+ for (int r = -height / 2; r < end_height; ++r) {
+ for (int c = -width / 2; c < end_width; ++c) {
+ pattern[pattern_stride * (r + height / 2) + c + width / 2] =
+ SampleLinear(image, y + r, x + c, 0) * 255.0;
+ }
+ }
+}
+
+// Returns x rounded up to the nearest multiple of alignment.
+inline int PadToAlignment(int x, int alignment) {
+ if (x % alignment != 0) {
+ x += alignment - (x % alignment);
+ }
+ return x;
+}
+
+// Sample a region centered at x,y in image with size extending by half_width
+// from x. Samples from the first channel. The resulting array is placed in
+// *pattern, and the stride, which will be a multiple of 16 if SSE is enabled,
+// is returned in *pattern_stride.
+//
+// NOTE: Caller must free *pattern with aligned_free().
+void SampleSquarePattern(const FloatImage &image,
+ double x, double y,
+ int half_width,
+ unsigned char **pattern,
+ int *pattern_stride) {
+ int width = 2 * half_width + 1;
+ // Allocate an aligned block with padding on the end so each row of the
+ // pattern starts on a 16-byte boundary.
+ *pattern_stride = PadToAlignment(width, 16);
+ int pattern_size_bytes = *pattern_stride * width;
+ *pattern = static_cast<unsigned char *>(
+ aligned_malloc(pattern_size_bytes, 16));
+ SampleRectangularPattern(image, x, y, width, width,
+ *pattern_stride,
+ *pattern);
+}
+
+// NOTE: Caller must free *image with aligned_free().
+void FloatArrayToByteArrayWithPadding(const FloatImage &float_image,
+ unsigned char **image,
+ int *image_stride) {
+ // Allocate enough so that accessing 16 elements past the end is fine.
+ *image_stride = float_image.Width() + 16;
+ *image = static_cast<unsigned char *>(
+ aligned_malloc(*image_stride * float_image.Height(), 16));
+ for (int i = 0; i < float_image.Height(); ++i) {
+ for (int j = 0; j < float_image.Width(); ++j) {
+ (*image)[*image_stride * i + j] =
+ static_cast<unsigned char>(255.0 * float_image(i, j, 0));
+ }
+ }
+}
+
+} // namespace
+
+// TODO(keir): Compare the "sharpness" of the peak around the best pixel. It's
+// probably worth plotting a few examples to see what the histogram of SAD
+// values for every hypothesis looks like.
+//
+// TODO(keir): Priority queue for multiple hypotheses.
+bool BruteRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
+ LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
+ << ", hw=" << half_window_size << ".";
+ return false;
+ }
+ int pattern_width = 2 * half_window_size + 1;
+
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, 0.9, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, 0.9, &image_and_gradient2);
+
+ // Sample the pattern to get it aligned to an image grid.
+ unsigned char *pattern;
+ int pattern_stride;
+ SampleSquarePattern(image_and_gradient1, x1, y1, half_window_size,
+ &pattern,
+ &pattern_stride);
+
+ // Convert the search area directly to bytes without sampling.
+ unsigned char *search_area;
+ int search_area_stride;
+ FloatArrayToByteArrayWithPadding(image_and_gradient2, &search_area,
+ &search_area_stride);
+
+ // Try all possible locations inside the search area. Yes, everywhere.
+ int best_i = -1, best_j = -1, best_sad = INT_MAX;
+ for (int i = 0; i < image2.Height() - pattern_width; ++i) {
+ for (int j = 0; j < image2.Width() - pattern_width; ++j) {
+ int sad = SumOfAbsoluteDifferencesContiguousImage(pattern,
+ pattern_width,
+ pattern_width,
+ pattern_stride,
+ search_area + search_area_stride * i + j,
+ search_area_stride);
+ if (sad < best_sad) {
+ best_i = i;
+ best_j = j;
+ best_sad = sad;
+ }
+ }
+ }
+
+ CHECK_NE(best_i, -1);
+ CHECK_NE(best_j, -1);
+
+ aligned_free(pattern);
+ aligned_free(search_area);
+
+ if (best_sad == INT_MAX) {
+ LG << "Hit INT_MAX in SAD; failing.";
+ return false;
+ }
+
+ *x2 = best_j + half_window_size;
+ *y2 = best_i + half_window_size;
+
+  // Calculate the shift found by the brute-force search.
+ double dx2 = *x2 - x1;
+ double dy2 = *y2 - y1;
+ double fine_shift = sqrt(dx2 * dx2 + dy2 * dy2);
+ LG << "Brute shift: dx=" << dx2 << " dy=" << dy2 << ", d=" << fine_shift;
+
+ if (minimum_correlation <= 0) {
+ // No correlation checking requested; nothing else to do.
+ LG << "No correlation checking; returning success. best_sad: " << best_sad;
+ return true;
+ }
+
+ Array3Df image_and_gradient1_sampled, image_and_gradient2_sampled;
+ SamplePattern(image_and_gradient1, x1, y1, half_window_size, 3,
+ &image_and_gradient1_sampled);
+ SamplePattern(image_and_gradient2, *x2, *y2, half_window_size, 3,
+ &image_and_gradient2_sampled);
+
+ // Compute the Pearson product-moment correlation coefficient to check
+ // for sanity.
+ double correlation = PearsonProductMomentCorrelation(
+ image_and_gradient1_sampled,
+ image_and_gradient2_sampled);
+
+ LG << "Final correlation: " << correlation;
+
+ if (correlation < minimum_correlation) {
+ LG << "Correlation " << correlation << " greater than "
+ << minimum_correlation << "; bailing.";
+ return false;
+ }
+ return true;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/brute_region_tracker.h b/intern/libmv/libmv/tracking/brute_region_tracker.h
new file mode 100644
index 00000000000..a699c42ee92
--- /dev/null
+++ b/intern/libmv/libmv/tracking/brute_region_tracker.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+struct BruteRegionTracker : public RegionTracker {
+ BruteRegionTracker()
+ : half_window_size(4),
+ minimum_correlation(0.78) {}
+
+ virtual ~BruteRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ // No point in creating getters or setters.
+ int half_window_size;
+ double minimum_correlation;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_BRUTE_REGION_TRACKER_H_
diff --git a/intern/libmv/libmv/tracking/brute_region_tracker_test.cc b/intern/libmv/libmv/tracking/brute_region_tracker_test.cc
new file mode 100644
index 00000000000..9014797c7cf
--- /dev/null
+++ b/intern/libmv/libmv/tracking/brute_region_tracker_test.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/brute_region_tracker.h"
+#include "libmv/image/image.h"
+#include "libmv/logging/logging.h"
+#include "testing/testing.h"
+
+namespace libmv {
+namespace {
+
+TEST(BruteRegionTracker, Track) {
+ Array3Df image1(51, 51);
+ image1.Fill(0);
+
+ Array3Df image2(image1);
+
+ int x0 = 25, y0 = 25;
+ int dx = 3, dy = 2;
+ image1(y0, x0) = 1.0f;
+  image2(y0 + dy, x0 + dx) = 1.0f;
+
+ double x1 = x0;
+ double y1 = y0;
+
+ BruteRegionTracker tracker;
+ EXPECT_TRUE(tracker.Track(image1, image2, x0, y0, &x1, &y1));
+
+ EXPECT_NEAR(x1, x0 + dx, 0.001);
+ EXPECT_NEAR(y1, y0 + dy, 0.001);
+}
+
+} // namespace
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/hybrid_region_tracker.cc b/intern/libmv/libmv/tracking/hybrid_region_tracker.cc
new file mode 100644
index 00000000000..ea3b0f5bfc0
--- /dev/null
+++ b/intern/libmv/libmv/tracking/hybrid_region_tracker.cc
@@ -0,0 +1,67 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/hybrid_region_tracker.h"
+
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/sample.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+bool HybridRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ double x2_coarse = *x2;
+ double y2_coarse = *y2;
+ if (!coarse_tracker_->Track(image1, image2, x1, y1, &x2_coarse, &y2_coarse)) {
+ LG << "Coarse tracker failed.";
+ return false;
+ }
+
+ double x2_fine = x2_coarse;
+ double y2_fine = y2_coarse;
+ if (!fine_tracker_->Track(image1, image2, x1, y1, &x2_fine, &y2_fine)) {
+ LG << "Fine tracker failed.";
+ return false;
+ }
+
+ // Calculate the shift done by the fine tracker.
+ double dx2 = x2_coarse - x2_fine;
+ double dy2 = y2_coarse - y2_fine;
+ double fine_shift = sqrt(dx2 * dx2 + dy2 * dy2);
+
+ LG << "Refinement: dx=" << dx2 << " dy=" << dy2 << ", d=" << fine_shift;
+
+  // If the fine tracker shifted the window by more than a couple of pixels,
+  // then something bad probably happened and we should give up tracking.
+ if (fine_shift < 2.0) {
+ LG << "Refinement small enough; success.";
+ *x2 = x2_fine;
+ *y2 = y2_fine;
+ return true;
+ }
+ LG << "Refinement was too big; failing.";
+ return false;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/hybrid_region_tracker.h b/intern/libmv/libmv/tracking/hybrid_region_tracker.h
new file mode 100644
index 00000000000..967d2afd1e5
--- /dev/null
+++ b/intern/libmv/libmv/tracking/hybrid_region_tracker.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+// TODO(keir): Documentation!
+class HybridRegionTracker : public RegionTracker {
+ public:
+ HybridRegionTracker(RegionTracker *coarse_tracker,
+ RegionTracker *fine_tracker)
+ : coarse_tracker_(coarse_tracker),
+ fine_tracker_(fine_tracker) {}
+
+ virtual ~HybridRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ scoped_ptr<RegionTracker> coarse_tracker_;
+ scoped_ptr<RegionTracker> fine_tracker_;
+};
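+
+// Illustrative construction sketch (the concrete tracker types are an
+// assumption for the example and require their respective headers; the class
+// itself only needs two RegionTracker implementations and takes ownership of
+// both pointers via scoped_ptr):
+//
+//   HybridRegionTracker tracker(new BruteRegionTracker,
+//                               new KltRegionTracker);
+//   double x2 = x1, y2 = y1;  // Track() reads *x2, *y2 as the initial guess.
+//   bool ok = tracker.Track(image1, image2, x1, y1, &x2, &y2);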
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_HYBRID_REGION_TRACKER_H_
diff --git a/intern/libmv/libmv/tracking/kalman_filter.h b/intern/libmv/libmv/tracking/kalman_filter.h
new file mode 100644
index 00000000000..9841f0e912c
--- /dev/null
+++ b/intern/libmv/libmv/tracking/kalman_filter.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_KALMAN_FILTER_H_
+#define LIBMV_TRACKING_KALMAN_FILTER_H_
+
+#include "libmv/numeric/numeric.h"
+
+namespace mv {
+
+// A Kalman filter with order N and observation size K.
+template<typename T, int N, int K>
+class KalmanFilter {
+ public:
+ struct State {
+ Eigen::Matrix<T, N, 1> mean;
+ Eigen::Matrix<T, N, N> covariance;
+ };
+
+ // Initialize from row-major data; convenient for constant initializers.
+ KalmanFilter(const T* state_transition_data,
+ const T* observation_data,
+ const T* process_covariance_data,
+ const T* default_measurement_covariance_data)
+ : state_transition_matrix_(
+ Eigen::Matrix<T, N, N, Eigen::RowMajor>(state_transition_data)),
+ observation_matrix_(
+ Eigen::Matrix<T, K, N, Eigen::RowMajor>(observation_data)),
+ process_covariance_(
+ Eigen::Matrix<T, N, N, Eigen::RowMajor>(process_covariance_data)),
+ default_measurement_covariance_(
+ Eigen::Matrix<T, K, K, Eigen::RowMajor>(
+ default_measurement_covariance_data)) {
+ }
+
+ KalmanFilter(
+ const Eigen::Matrix<T, N, N> &state_transition_matrix,
+ const Eigen::Matrix<T, K, N> &observation_matrix,
+ const Eigen::Matrix<T, N, N> &process_covariance,
+ const Eigen::Matrix<T, K, K> &default_measurement_covariance)
+ : state_transition_matrix_(state_transition_matrix),
+ observation_matrix_(observation_matrix),
+ process_covariance_(process_covariance),
+ default_measurement_covariance_(default_measurement_covariance) {
+ }
+
+ // Advances the system according to the current state estimate.
+ void Step(State *state) const {
+ state->mean = state_transition_matrix_ * state->mean;
+ state->covariance = state_transition_matrix_ *
+ state->covariance *
+ state_transition_matrix_.transpose() +
+ process_covariance_;
+ }
+
+ // Updates a state with a new measurement.
+ void Update(const Eigen::Matrix<T, K, 1> &measurement_mean,
+ const Eigen::Matrix<T, K, K> &measurement_covariance,
+ State *state) const {
+ // Calculate the innovation, which is a distribution over prediction error.
+ Eigen::Matrix<T, K, 1> innovation_mean = measurement_mean -
+ observation_matrix_ *
+ state->mean;
+ Eigen::Matrix<T, K, K> innovation_covariance =
+ observation_matrix_ *
+ state->covariance *
+ observation_matrix_.transpose() +
+ measurement_covariance;
+
+ // Calculate the Kalman gain.
+    Eigen::Matrix<T, N, K> kalman_gain = state->covariance *
+ observation_matrix_.transpose() *
+ innovation_covariance.inverse();
+
+ // Update the state mean and covariance.
+ state->mean += kalman_gain * innovation_mean;
+ state->covariance = (Eigen::Matrix<T, N, N>::Identity() -
+ kalman_gain * observation_matrix_) *
+ state->covariance;
+ }
+
+  // Updates a state with a new measurement, using the default measurement
+  // covariance.
+  void Update(State *state,
+              const Eigen::Matrix<T, K, 1> &measurement_mean) const {
+    Update(measurement_mean, default_measurement_covariance_, state);
+  }
+
+ private:
+ const Eigen::Matrix<T, N, N> state_transition_matrix_;
+ const Eigen::Matrix<T, K, N> observation_matrix_;
+ const Eigen::Matrix<T, N, N> process_covariance_;
+ const Eigen::Matrix<T, K, K> default_measurement_covariance_;
+};
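+
+// Illustrative usage sketch (N = 2, K = 1: a constant-velocity scalar model;
+// every matrix value below is an assumption chosen just for the example):
+//
+//   typedef KalmanFilter<double, 2, 1> Filter;
+//   Eigen::Matrix2d F; F << 1, 1, 0, 1;        // State transition.
+//   Eigen::Matrix<double, 1, 2> H; H << 1, 0;  // Observe position only.
+//   Eigen::Matrix2d Q = 0.01 * Eigen::Matrix2d::Identity();
+//   Eigen::Matrix<double, 1, 1> R; R << 0.1;
+//   Filter filter(F, H, Q, R);
+//
+//   Filter::State state;
+//   state.mean << 0, 0;
+//   state.covariance = Eigen::Matrix2d::Identity();
+//   filter.Step(&state);          // Predict one step ahead.
+//   Eigen::Matrix<double, 1, 1> z; z << 1.2;
+//   filter.Update(z, R, &state);  // Correct with a measurement.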
+
+} // namespace mv
+
+#endif // LIBMV_TRACKING_KALMAN_FILTER_H_
diff --git a/intern/libmv/libmv/tracking/klt_region_tracker.cc b/intern/libmv/libmv/tracking/klt_region_tracker.cc
new file mode 100644
index 00000000000..dbbf9f0b996
--- /dev/null
+++ b/intern/libmv/libmv/tracking/klt_region_tracker.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/klt_region_tracker.h"
+
+#include "libmv/logging/logging.h"
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/sample.h"
+
+namespace libmv {
+
+// Compute the gradient matrix noted by Z and the error vector e. See Good
+// Features to Track.
+//
+// TODO(keir): The calls to SampleLinear() do boundary checking that should
+// instead happen outside the loop. Since this is the innermost loop, the extra
+// bounds checking hurts performance.
+static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
+ const Array3Df &image_and_gradient2,
+ double x1, double y1,
+ double x2, double y2,
+ int half_width,
+ float *gxx,
+ float *gxy,
+ float *gyy,
+ float *ex,
+ float *ey) {
+ *gxx = *gxy = *gyy = 0;
+ *ex = *ey = 0;
+ for (int r = -half_width; r <= half_width; ++r) {
+ for (int c = -half_width; c <= half_width; ++c) {
+ float xx1 = x1 + c;
+ float yy1 = y1 + r;
+ float xx2 = x2 + c;
+ float yy2 = y2 + r;
+ float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
+ float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
+ float gx = SampleLinear(image_and_gradient2, yy2, xx2, 1);
+ float gy = SampleLinear(image_and_gradient2, yy2, xx2, 2);
+ *gxx += gx * gx;
+ *gxy += gx * gy;
+ *gyy += gy * gy;
+ *ex += (I - J) * gx;
+ *ey += (I - J) * gy;
+ }
+ }
+}
+
+static bool RegionIsInBounds(const FloatImage &image1,
+ double x, double y,
+ int half_window_size) {
+ // Check the minimum coordinates.
+ int min_x = floor(x) - half_window_size - 1;
+ int min_y = floor(y) - half_window_size - 1;
+ if (min_x < 0.0 ||
+ min_y < 0.0) {
+ return false;
+ }
+
+ // Check the maximum coordinates.
+ int max_x = ceil(x) + half_window_size + 1;
+ int max_y = ceil(y) + half_window_size + 1;
+ if (max_x > image1.cols() ||
+ max_y > image1.rows()) {
+ return false;
+ }
+
+ // Ok, we're good.
+ return true;
+}
+
+bool KltRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
+ LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
+ << ", hw=" << half_window_size << ".";
+ return false;
+ }
+
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, sigma, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, sigma, &image_and_gradient2);
+
+ int i;
+ float dx = 0, dy = 0;
+ for (i = 0; i < max_iterations; ++i) {
+ // Check that the entire image patch is within the bounds of the images.
+ if (!RegionIsInBounds(image2, *x2, *y2, half_window_size)) {
+ LG << "Fell out of image2's window with x2=" << *x2 << ", y2=" << *y2
+ << ", hw=" << half_window_size << ".";
+ return false;
+ }
+
+ // Compute gradient matrix and error vector.
+ float gxx, gxy, gyy, ex, ey;
+ ComputeTrackingEquation(image_and_gradient1,
+ image_and_gradient2,
+ x1, y1,
+ *x2, *y2,
+ half_window_size,
+ &gxx, &gxy, &gyy, &ex, &ey);
+
+ // Solve the tracking equation
+ //
+ // [gxx gxy] [dx] = [ex]
+ // [gxy gyy] [dy] = [ey]
+ //
+ // for dx and dy. Borrowed from Stan Birchfield's KLT implementation.
+ float determinant = gxx * gyy - gxy * gxy;
+ dx = (gyy * ex - gxy * ey) / determinant;
+ dy = (gxx * ey - gxy * ex) / determinant;
+
+ // Update the position with the solved displacement.
+ *x2 += dx;
+ *y2 += dy;
+
+ // Check for the quality of the solution, but not until having already
+ // updated the position with our best estimate. The reason to do the update
+ // anyway is that the user already knows the position is bad, so we may as
+ // well try our best.
+ if (determinant < min_determinant) {
+ // The determinant, which indicates the trackiness of the point, is too
+ // small, so fail out.
+ LG << "Determinant " << determinant << " is too small; failing tracking.";
+ return false;
+ }
+ LG << "x=" << *x2 << ", y=" << *y2 << ", dx=" << dx << ", dy=" << dy
+ << ", det=" << determinant;
+
+ // If the update is small, then we probably found the target.
+ if (dx * dx + dy * dy < min_update_squared_distance) {
+ LG << "Successful track in " << i << " iterations.";
+ return true;
+ }
+ }
+ // Getting here means we hit max iterations, so tracking failed.
+ LG << "Too many iterations; max is set to " << max_iterations << ".";
+ return false;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/klt_region_tracker.h b/intern/libmv/libmv/tracking/klt_region_tracker.h
new file mode 100644
index 00000000000..43977757084
--- /dev/null
+++ b/intern/libmv/libmv/tracking/klt_region_tracker.h
@@ -0,0 +1,55 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+struct KltRegionTracker : public RegionTracker {
+ KltRegionTracker()
+ : half_window_size(4),
+ max_iterations(16),
+ min_determinant(1e-6),
+ min_update_squared_distance(1e-6),
+ sigma(0.9) {}
+
+ virtual ~KltRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ // No point in creating getters or setters.
+ int half_window_size;
+ int max_iterations;
+ double min_determinant;
+ double min_update_squared_distance;
+ double sigma;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_KLT_REGION_TRACKER_H_
diff --git a/intern/libmv/libmv/tracking/klt_region_tracker_test.cc b/intern/libmv/libmv/tracking/klt_region_tracker_test.cc
new file mode 100644
index 00000000000..07d5d6500e3
--- /dev/null
+++ b/intern/libmv/libmv/tracking/klt_region_tracker_test.cc
@@ -0,0 +1,51 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/klt_region_tracker.h"
+#include "libmv/image/image.h"
+#include "testing/testing.h"
+
+namespace libmv {
+namespace {
+
+TEST(KltRegionTracker, Track) {
+ Array3Df image1(51, 51);
+ image1.Fill(0);
+
+ Array3Df image2(image1);
+
+ int x0 = 25, y0 = 25;
+ int dx = 3, dy = 2;
+ image1(y0, x0) = 1.0f;
+ image2(y0 + dy, x0 + dx) = 1.0f;
+
+ double x1 = x0;
+ double y1 = y0;
+
+ KltRegionTracker tracker;
+ tracker.half_window_size = 6;
+ EXPECT_TRUE(tracker.Track(image1, image2, x0, y0, &x1, &y1));
+
+ EXPECT_NEAR(x1, x0 + dx, 0.001);
+ EXPECT_NEAR(y1, y0 + dy, 0.001);
+}
+
+} // namespace
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker.cc b/intern/libmv/libmv/tracking/pyramid_region_tracker.cc
new file mode 100644
index 00000000000..4db501050f3
--- /dev/null
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker.cc
@@ -0,0 +1,98 @@
+// Copyright (c) 2007, 2008, 2009, 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/pyramid_region_tracker.h"
+
+#include <vector>
+
+#include "libmv/image/convolve.h"
+#include "libmv/image/image.h"
+#include "libmv/image/sample.h"
+#include "libmv/logging/logging.h"
+
+namespace libmv {
+
+static void MakePyramid(const FloatImage &image, int num_levels,
+ std::vector<FloatImage> *pyramid) {
+ pyramid->resize(num_levels);
+ (*pyramid)[0] = image;
+ for (int i = 1; i < num_levels; ++i) {
+ DownsampleChannelsBy2((*pyramid)[i - 1], &(*pyramid)[i]);
+ }
+}
+
+bool PyramidRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+  // Shrink the guessed x and y location to one level past the coarsest level;
+  // the extra division by two is undone by the first doubling inside the loop.
+ *x2 /= pow(2., num_levels_);
+ *y2 /= pow(2., num_levels_);
+
+ // Create all the levels of the pyramid, since tracking has to happen from
+ // the coarsest to finest levels, which means holding on to all levels of the
+  // pyramid at once.
+ std::vector<FloatImage> pyramid1(num_levels_);
+ std::vector<FloatImage> pyramid2(num_levels_);
+ MakePyramid(image1, num_levels_, &pyramid1);
+ MakePyramid(image2, num_levels_, &pyramid2);
+
+ for (int i = num_levels_ - 1; i >= 0; --i) {
+ // Position in the first image at pyramid level i.
+ double xx = x1 / pow(2., i);
+ double yy = y1 / pow(2., i);
+
+ // Guess the new tracked position is where the last level tracked to.
+ *x2 *= 2;
+ *y2 *= 2;
+
+ // Save the previous best guess for this level, since tracking within this
+ // level might fail.
+ double x2_new = *x2;
+ double y2_new = *y2;
+
+ // Track the point on this level with the base tracker.
+ LG << "Tracking on level " << i;
+ bool succeeded = tracker_->Track(pyramid1[i], pyramid2[i], xx, yy,
+ &x2_new, &y2_new);
+
+ if (!succeeded) {
+ if (i == 0) {
+ // Only fail on the highest-resolution level, because a failure on a
+        // coarse level does not mean failure at a finer level (consider
+ // out-of-bounds conditions).
+ LG << "Finest level of pyramid tracking failed; failing.";
+ return false;
+ }
+
+ LG << "Failed to track at level " << i << "; restoring guess.";
+ } else {
+ // Only save the update if the track for this level succeeded. This is a
+ // bit of a hack; the jury remains out on whether this is better than
+      // re-using the previous failed attempt.
+ *x2 = x2_new;
+ *y2 = y2_new;
+ }
+ }
+ return true;
+}
+
+} // namespace libmv
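
To make the level indexing above easier to follow, here is a minimal standalone sketch of the coarse-to-fine guess bookkeeping, using plain doubles; the helper name ScaleGuessThroughPyramid is hypothetical and only for illustration.

    #include <cmath>

    // Mirrors the guess handling in PyramidRegionTracker::Track: divide the
    // full-resolution guess by 2^num_levels once, then double it per level, so
    // a guess that survives every level returns to full resolution.
    void ScaleGuessThroughPyramid(int num_levels, double *gx, double *gy) {
      *gx /= std::pow(2.0, num_levels);
      *gy /= std::pow(2.0, num_levels);
      for (int i = num_levels - 1; i >= 0; --i) {
        *gx *= 2;  // For num_levels = 3 and gx = 40: 5 -> 10 -> 20 -> 40.
        *gy *= 2;
        // A real tracker refines (*gx, *gy) against pyramid level i here.
      }
    }
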
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker.h b/intern/libmv/libmv/tracking/pyramid_region_tracker.h
new file mode 100644
index 00000000000..1f9675469f4
--- /dev/null
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker.h
@@ -0,0 +1,46 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
+#define LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+class PyramidRegionTracker : public RegionTracker {
+ public:
+ PyramidRegionTracker(RegionTracker *tracker, int num_levels)
+ : tracker_(tracker), num_levels_(num_levels) {}
+
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+ private:
+ scoped_ptr<RegionTracker> tracker_;
+ int num_levels_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_CORRESPONDENCE_PYRAMID_TRACKER_H_
diff --git a/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc b/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc
new file mode 100644
index 00000000000..d90a1012237
--- /dev/null
+++ b/intern/libmv/libmv/tracking/pyramid_region_tracker_test.cc
@@ -0,0 +1,80 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/pyramid_region_tracker.h"
+#include "libmv/tracking/klt_region_tracker.h"
+#include "libmv/image/image.h"
+#include "testing/testing.h"
+
+namespace libmv {
+namespace {
+
+TEST(PyramidKltRegionTracker, Track) {
+ Array3Df image1(100, 100);
+ image1.Fill(0);
+
+ Array3Df image2(image1);
+
+ int x1 = 25, y1 = 25;
+ image1(y1 + 0, x1 + 0) = 1.0f;
+ image1(y1 + 0, x1 + 1) = 1.0f;
+ image1(y1 + 1, x1 + 0) = 1.0f;
+ image1(y1 + 1, x1 + 1) = 1.0f;
+
+ // Make the displacement too large for a single-level KLT.
+ int x2 = x1 + 6, y2 = y1 + 5;
+ image2(y2 + 0, x2 + 0) = 1.0f;
+ image2(y2 + 0, x2 + 1) = 1.0f;
+ image2(y2 + 1, x2 + 0) = 1.0f;
+ image2(y2 + 1, x2 + 1) = 1.0f;
+
+ // Use a small 5x5 tracking region.
+ int half_window_size = 3;
+
+ // Ensure that the track doesn't work with one level of KLT.
+ {
+ double x2_actual = x1;
+ double y2_actual = y1;
+
+ KltRegionTracker tracker;
+ tracker.half_window_size = half_window_size;
+ EXPECT_FALSE(tracker.Track(image1, image2, x1, y1,
+ &x2_actual, &y2_actual));
+ }
+
+ // Verify that it works with the pyramid tracker.
+ {
+ double x2_actual = x1;
+ double y2_actual = y1;
+
+ KltRegionTracker *klt_tracker = new KltRegionTracker;
+ klt_tracker->half_window_size = half_window_size;
+
+ PyramidRegionTracker tracker(klt_tracker, 3);
+ EXPECT_TRUE(tracker.Track(image1, image2, x1, y1,
+ &x2_actual, &y2_actual));
+
+ EXPECT_NEAR(x2_actual, x2, 0.001);
+ EXPECT_NEAR(y2_actual, y2, 0.001);
+ }
+}
+
+} // namespace
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/region_tracker.h b/intern/libmv/libmv/tracking/region_tracker.h
new file mode 100644
index 00000000000..4f7574df1a3
--- /dev/null
+++ b/intern/libmv/libmv/tracking/region_tracker.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_TRACKER_H_
+#define LIBMV_TRACKING_TRACKER_H_
+
+#include "libmv/image/image.h"
+
+namespace libmv {
+
+class RegionTracker {
+ public:
+ RegionTracker() {}
+ virtual ~RegionTracker() {}
+
+ /*!
+ Track a point from \a image1 to \a image2.
+
+ \a x2, \a y2 should start out as a best guess for the position in \a
+ image2. If no guess is available, (\a x1, \a y1) is a good start. Returns
+    true on success, false otherwise.
+ */
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const = 0;
+};
+
+} // namespace libmv
+
+#endif  // LIBMV_TRACKING_TRACKER_H_
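
For readers new to this interface, a minimal sketch of a conforming implementation follows; StationaryTracker is a hypothetical name used only for illustration, the real implementations being the KLT, pyramid, and retrack trackers in this directory.

    #include "libmv/tracking/region_tracker.h"

    namespace libmv {

    // A trivial tracker that assumes the point did not move. Real trackers
    // refine (*x2, *y2) starting from the caller's guess instead.
    class StationaryTracker : public RegionTracker {
     public:
      virtual bool Track(const FloatImage & /*image1*/,
                         const FloatImage & /*image2*/,
                         double x1, double y1,
                         double *x2, double *y2) const {
        *x2 = x1;
        *y2 = y1;
        return true;
      }
    };

    }  // namespace libmv
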
diff --git a/intern/libmv/libmv/tracking/retrack_region_tracker.cc b/intern/libmv/libmv/tracking/retrack_region_tracker.cc
new file mode 100644
index 00000000000..4d230086d28
--- /dev/null
+++ b/intern/libmv/libmv/tracking/retrack_region_tracker.cc
@@ -0,0 +1,47 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/retrack_region_tracker.h"
+
+#include <cmath>
+#include <vector>
+
+namespace libmv {
+
+bool RetrackRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ // Track forward, getting x2 and y2.
+ if (!tracker_->Track(image1, image2, x1, y1, x2, y2)) {
+ return false;
+ }
+ // Now track x2 and y2 backward, to get xx1 and yy1 which, if the track is
+ // good, should match x1 and y1 (but may not if the track is bad).
+  double xx1 = *x2, yy1 = *y2;
+ if (!tracker_->Track(image2, image1, *x2, *y2, &xx1, &yy1)) {
+ return false;
+ }
+ double dx = xx1 - x1;
+ double dy = yy1 - y1;
+ return sqrt(dx * dx + dy * dy) < tolerance_;
+}
+
+} // namespace libmv
diff --git a/intern/libmv/libmv/tracking/retrack_region_tracker.h b/intern/libmv/libmv/tracking/retrack_region_tracker.h
new file mode 100644
index 00000000000..ab05f320834
--- /dev/null
+++ b/intern/libmv/libmv/tracking/retrack_region_tracker.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
+#define LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/base/scoped_ptr.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+// A region tracker that tries tracking backwards and forwards, rejecting a
+// track that doesn't track backwards to the starting point.
+class RetrackRegionTracker : public RegionTracker {
+ public:
+ RetrackRegionTracker(RegionTracker *tracker, double tolerance)
+ : tracker_(tracker), tolerance_(tolerance) {}
+
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+ private:
+ scoped_ptr<RegionTracker> tracker_;
+ double tolerance_;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_TRACKING_RETRACK_REGION_TRACKER_H_
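
Taken together, the trackers above are designed to compose: the pyramid tracker wraps a base KLT tracker to handle large motions, and the retrack tracker wraps that to reject tracks that do not survive a backward pass. A minimal usage sketch, assuming the constructors shown above (each wrapper takes ownership via scoped_ptr) and arbitrary example parameter values:

    #include "libmv/tracking/klt_region_tracker.h"
    #include "libmv/tracking/pyramid_region_tracker.h"
    #include "libmv/tracking/retrack_region_tracker.h"

    bool TrackPoint(const libmv::FloatImage &image1,
                    const libmv::FloatImage &image2,
                    double x1, double y1, double *x2, double *y2) {
      libmv::KltRegionTracker *klt = new libmv::KltRegionTracker;
      klt->half_window_size = 5;  // Arbitrary example value.

      // The pyramid tracker owns the KLT tracker via scoped_ptr; the retrack
      // tracker owns the pyramid tracker the same way.
      libmv::RetrackRegionTracker tracker(
          new libmv::PyramidRegionTracker(klt, 3), /*tolerance=*/0.2);
      return tracker.Track(image1, image2, x1, y1, x2, y2);
    }
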
diff --git a/intern/libmv/libmv/tracking/track_region.cc b/intern/libmv/libmv/tracking/track_region.cc
new file mode 100644
index 00000000000..ef6dac65236
--- /dev/null
+++ b/intern/libmv/libmv/tracking/track_region.cc
@@ -0,0 +1,1601 @@
+// Copyright (c) 2012 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+//
+// Author: mierle@gmail.com (Keir Mierle)
+//
+// TODO(keir): While this tracking code works rather well, it has some
+// outrageous inefficiencies. There is probably a 5-10x speedup to be had if a
+// smart coder went through the TODO's and made the suggested performance
+// enhancements.
+
+// Necessary for M_E when building with MSVC.
+#define _USE_MATH_DEFINES
+
+#include "libmv/tracking/track_region.h"
+
+#include <Eigen/SVD>
+#include <Eigen/QR>
+#include <iostream>
+#include "ceres/ceres.h"
+#include "libmv/logging/logging.h"
+#include "libmv/image/image.h"
+#include "libmv/image/sample.h"
+#include "libmv/image/convolve.h"
+#include "libmv/multiview/homography.h"
+#include "libmv/numeric/numeric.h"
+
+// Expand the Jet functionality of Ceres to allow mixed numeric/autodiff.
+//
+// TODO(keir): Push this (or something similar) into upstream Ceres.
+namespace ceres {
+
+// A jet traits class to make it easier to work with mixed auto / numeric diff.
+template<typename T>
+struct JetOps {
+ static bool IsScalar() {
+ return true;
+ }
+ static T GetScalar(const T& t) {
+ return t;
+ }
+ static void SetScalar(const T& scalar, T* t) {
+ *t = scalar;
+ }
+ static void ScaleDerivative(double scale_by, T *value) {
+ // For double, there is no derivative to scale.
+ (void) scale_by; // Ignored.
+ (void) value; // Ignored.
+ }
+};
+
+template<typename T, int N>
+struct JetOps<Jet<T, N> > {
+ static bool IsScalar() {
+ return false;
+ }
+ static T GetScalar(const Jet<T, N>& t) {
+ return t.a;
+ }
+ static void SetScalar(const T& scalar, Jet<T, N>* t) {
+ t->a = scalar;
+ }
+ static void ScaleDerivative(double scale_by, Jet<T, N> *value) {
+ value->v *= scale_by;
+ }
+};
+
+template<typename FunctionType, int kNumArgs, typename ArgumentType>
+struct Chain {
+ static ArgumentType Rule(const FunctionType &f,
+ const FunctionType dfdx[kNumArgs],
+ const ArgumentType x[kNumArgs]) {
+ // In the default case of scalars, there's nothing to do since there are no
+ // derivatives to propagate.
+ (void) dfdx; // Ignored.
+ (void) x; // Ignored.
+ return f;
+ }
+};
+
+// Specialization of Chain for Jet arguments: given a numerically computed
+// function value f and its gradient dfdx with respect to the arguments x
+// (which are jets carrying derivatives with respect to some other variable),
+// apply the chain rule to return f as a jet with those derivatives attached.
+template<typename FunctionType, int kNumArgs, typename T, int N>
+struct Chain<FunctionType, kNumArgs, Jet<T, N> > {
+ static Jet<T, N> Rule(const FunctionType &f,
+ const FunctionType dfdx[kNumArgs],
+ const Jet<T, N> x[kNumArgs]) {
+ // x is itself a function of another variable ("z"); what this function
+ // needs to return is "f", but with the derivative with respect to z
+ // attached to the jet. So combine the derivative part of x's jets to form
+ // a Jacobian matrix between x and z (i.e. dx/dz).
+ Eigen::Matrix<T, kNumArgs, N> dxdz;
+ for (int i = 0; i < kNumArgs; ++i) {
+ dxdz.row(i) = x[i].v.transpose();
+ }
+
+ // Map the input gradient dfdx into an Eigen row vector.
+ Eigen::Map<const Eigen::Matrix<FunctionType, 1, kNumArgs> >
+ vector_dfdx(dfdx, 1, kNumArgs);
+
+ // Now apply the chain rule to obtain df/dz. Combine the derivative with
+ // the scalar part to obtain f with full derivative information.
+ Jet<T, N> jet_f;
+ jet_f.a = f;
+ jet_f.v = vector_dfdx.template cast<T>() * dxdz; // Also known as dfdz.
+ return jet_f;
+ }
+};
+
+} // namespace ceres
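
The point of JetOps and Chain is to let a numerically sampled quantity (an image intensity and its finite-difference gradient) participate in Ceres autodiff. A minimal sketch of the idea, assuming ceres::Jet and the Chain helper above are visible; the values for g and dg/dx stand in for whatever is measured numerically:

    #include "ceres/jet.h"

    // g(x) = x^2 is "measured" numerically here (value and gradient), while x
    // is a jet carrying d/dz information; Chain::Rule attaches
    // dg/dz = dg/dx * dx/dz to the result.
    double MixedDiffSketch() {
      typedef ceres::Jet<double, 1> JetT;

      JetT x(3.0, 0);            // x = 3, with dx/dz = 1.
      double g = 9.0;            // Numerically evaluated g(x).
      double dgdx[1] = { 6.0 };  // Numerically evaluated dg/dx = 2x.
      JetT xs[1] = { x };

      JetT g_jet = ceres::Chain<double, 1, JetT>::Rule(g, dgdx, xs);
      return g_jet.a + g_jet.v[0];  // 9 + 6: the value plus dg/dz.
    }
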
+
+namespace libmv {
+
+using ceres::Jet;
+using ceres::JetOps;
+using ceres::Chain;
+
+TrackRegionOptions::TrackRegionOptions()
+ : mode(TRANSLATION),
+ minimum_correlation(0),
+ max_iterations(20),
+ use_esm(true),
+ use_brute_initialization(true),
+ use_normalized_intensities(false),
+ sigma(0.9),
+ num_extra_points(0),
+ regularization_coefficient(0.0),
+ minimum_corner_shift_tolerance_pixels(0.005),
+ image1_mask(NULL) {
+}
+
+namespace {
+
+// TODO(keir): Consider adding padding.
+template<typename T>
+bool InBounds(const FloatImage &image,
+ const T &x,
+ const T &y) {
+ return 0.0 <= x && x < image.Width() &&
+ 0.0 <= y && y < image.Height();
+}
+
+bool AllInBounds(const FloatImage &image,
+ const double *x,
+ const double *y) {
+ for (int i = 0; i < 4; ++i) {
+ if (!InBounds(image, x[i], y[i])) {
+ return false;
+ }
+ }
+ return true;
+}
+
+// Sample the image at position (x, y) but use the gradient, if present, to
+// propagate derivatives from x and y. This is needed to integrate the numeric
+// image gradients with Ceres's autodiff framework.
+template<typename T>
+static T SampleWithDerivative(const FloatImage &image_and_gradient,
+ const T &x,
+ const T &y) {
+ float scalar_x = JetOps<T>::GetScalar(x);
+ float scalar_y = JetOps<T>::GetScalar(y);
+
+ // Note that sample[1] and sample[2] will be uninitialized in the scalar
+ // case, but that is not an issue because the Chain::Rule below will not read
+ // the uninitialized values.
+ float sample[3];
+ if (JetOps<T>::IsScalar()) {
+ // For the scalar case, only sample the image.
+ sample[0] = SampleLinear(image_and_gradient, scalar_y, scalar_x, 0);
+ } else {
+ // For the derivative case, sample the gradient as well.
+ SampleLinear(image_and_gradient, scalar_y, scalar_x, sample);
+ }
+ T xy[2] = { x, y };
+ return Chain<float, 2, T>::Rule(sample[0], sample + 1, xy);
+}
+
+template<typename Warp>
+class TerminationCheckingCallback : public ceres::IterationCallback {
+ public:
+ TerminationCheckingCallback(const TrackRegionOptions &options,
+ const FloatImage& image2,
+ const Warp &warp,
+ const double *x1, const double *y1)
+ : options_(options), image2_(image2), warp_(warp), x1_(x1), y1_(y1),
+ have_last_successful_step_(false) {}
+
+ virtual ceres::CallbackReturnType operator()(
+ const ceres::IterationSummary& summary) {
+ // If the step wasn't successful, there's nothing to do.
+ if (!summary.step_is_successful) {
+ return ceres::SOLVER_CONTINUE;
+ }
+ // Warp the original 4 points with the current warp into image2.
+ double x2[4];
+ double y2[4];
+ for (int i = 0; i < 4; ++i) {
+ warp_.Forward(warp_.parameters, x1_[i], y1_[i], x2 + i, y2 + i);
+ }
+ // Ensure the corners are all in bounds.
+ if (!AllInBounds(image2_, x2, y2)) {
+ LG << "Successful step fell outside of the pattern bounds; aborting.";
+ return ceres::SOLVER_ABORT;
+ }
+
+ // Ensure the minimizer is making large enough shifts to bother continuing.
+ // Ideally, this check would happen on the parameters themselves which
+ // Ceres supports directly; however, the mapping from parameter change
+ // magnitude to corner movement in pixels is not a simple norm. Hence, the
+ // need for a stateful callback which tracks the last successful set of
+ // parameters (and the position of the projected patch corners).
+ if (have_last_successful_step_) {
+ // Compute the maximum shift of any corner in pixels since the last
+ // successful iteration.
+ double max_change_pixels = 0;
+ for (int i = 0; i < 4; ++i) {
+ double dx = x2[i] - x2_last_successful_[i];
+ double dy = y2[i] - y2_last_successful_[i];
+ double change_pixels = dx*dx + dy*dy;
+ if (change_pixels > max_change_pixels) {
+ max_change_pixels = change_pixels;
+ }
+ }
+ max_change_pixels = sqrt(max_change_pixels);
+ LG << "Max patch corner shift is " << max_change_pixels;
+
+ // Bail if the shift is too small.
+ if (max_change_pixels < options_.minimum_corner_shift_tolerance_pixels) {
+ LG << "Max patch corner shift is " << max_change_pixels
+ << " from the last iteration; returning success.";
+ return ceres::SOLVER_TERMINATE_SUCCESSFULLY;
+ }
+ }
+
+ // Save the projected corners for checking on the next successful iteration.
+ for (int i = 0; i < 4; ++i) {
+ x2_last_successful_[i] = x2[i];
+ y2_last_successful_[i] = y2[i];
+ }
+ have_last_successful_step_ = true;
+ return ceres::SOLVER_CONTINUE;
+ }
+
+ private:
+ const TrackRegionOptions &options_;
+ const FloatImage &image2_;
+ const Warp &warp_;
+ const double *x1_;
+ const double *y1_;
+
+ bool have_last_successful_step_;
+ double x2_last_successful_[4];
+ double y2_last_successful_[4];
+};
+
+template<typename Warp>
+class PixelDifferenceCostFunctor {
+ public:
+ PixelDifferenceCostFunctor(const TrackRegionOptions &options,
+ const FloatImage &image_and_gradient1,
+ const FloatImage &image_and_gradient2,
+ const Mat3 &canonical_to_image1,
+ int num_samples_x,
+ int num_samples_y,
+ const Warp &warp)
+ : options_(options),
+ image_and_gradient1_(image_and_gradient1),
+ image_and_gradient2_(image_and_gradient2),
+ canonical_to_image1_(canonical_to_image1),
+ num_samples_x_(num_samples_x),
+ num_samples_y_(num_samples_y),
+ warp_(warp),
+ pattern_and_gradient_(num_samples_y_, num_samples_x_, 3),
+ pattern_positions_(num_samples_y_, num_samples_x_, 2),
+ pattern_mask_(num_samples_y_, num_samples_x_, 1) {
+ ComputeCanonicalPatchAndNormalizer();
+ }
+
+ void ComputeCanonicalPatchAndNormalizer() {
+ src_mean_ = 0.0;
+ double num_samples = 0.0;
+ for (int r = 0; r < num_samples_y_; ++r) {
+ for (int c = 0; c < num_samples_x_; ++c) {
+ // Compute the position; cache it.
+ Vec3 image_position = canonical_to_image1_ * Vec3(c, r, 1);
+ image_position /= image_position(2);
+ pattern_positions_(r, c, 0) = image_position(0);
+ pattern_positions_(r, c, 1) = image_position(1);
+
+ // Sample the pattern and gradients.
+ SampleLinear(image_and_gradient1_,
+ image_position(1), // SampleLinear is r, c.
+ image_position(0),
+ &pattern_and_gradient_(r, c, 0));
+
+        // Sample the mask.
+ double mask_value = 1.0;
+ if (options_.image1_mask != NULL) {
+ SampleLinear(*options_.image1_mask,
+ image_position(1), // SampleLinear is r, c.
+ image_position(0),
+ &pattern_mask_(r, c, 0));
+ mask_value = pattern_mask_(r, c);
+ }
+ src_mean_ += pattern_and_gradient_(r, c, 0) * mask_value;
+ num_samples += mask_value;
+ }
+ }
+ src_mean_ /= num_samples;
+ }
+
+ template<typename T>
+ bool operator()(const T *warp_parameters, T *residuals) const {
+ if (options_.image1_mask != NULL) {
+ VLOG(2) << "Using a mask.";
+ }
+ for (int i = 0; i < Warp::NUM_PARAMETERS; ++i) {
+ VLOG(2) << "warp_parameters[" << i << "]: " << warp_parameters[i];
+ }
+
+ T dst_mean = T(1.0);
+ if (options_.use_normalized_intensities) {
+ ComputeNormalizingCoefficient(warp_parameters,
+ &dst_mean);
+ }
+
+ int cursor = 0;
+ for (int r = 0; r < num_samples_y_; ++r) {
+ for (int c = 0; c < num_samples_x_; ++c) {
+ // Use the pre-computed image1 position.
+ Vec2 image1_position(pattern_positions_(r, c, 0),
+ pattern_positions_(r, c, 1));
+
+ // Sample the mask early; if it's zero, this pixel has no effect. This
+ // allows early bailout from the expensive sampling that happens below.
+ //
+ // Note that partial masks are not short circuited. To see why short
+ // circuiting produces bitwise-exact same results, consider that the
+ // residual for each pixel is
+ //
+ // residual = mask * (src - dst) ,
+ //
+ // and for jets, multiplying by a scalar multiplies the derivative
+ // components by the scalar as well. Therefore, if the mask is exactly
+ // zero, then so too will the final residual and derivatives.
+ double mask_value = 1.0;
+ if (options_.image1_mask != NULL) {
+ mask_value = pattern_mask_(r, c);
+ if (mask_value == 0.0) {
+ residuals[cursor++] = T(0.0);
+ continue;
+ }
+ }
+
+ // Compute the location of the destination pixel.
+ T image2_position[2];
+ warp_.Forward(warp_parameters,
+ T(image1_position[0]),
+ T(image1_position[1]),
+ &image2_position[0],
+ &image2_position[1]);
+
+ // Sample the destination, propagating derivatives.
+ T dst_sample = SampleWithDerivative(image_and_gradient2_,
+ image2_position[0],
+ image2_position[1]);
+
+ // Sample the source. This is made complicated by ESM mode.
+ T src_sample;
+ if (options_.use_esm && !JetOps<T>::IsScalar()) {
+ // In ESM mode, the derivative of the source is also taken into
+ // account. This changes the linearization in a way that causes
+ // better convergence. Copy the derivative of the warp parameters
+ // onto the jets for the image1 position. This is the ESM hack.
+ T image1_position_jet[2] = {
+ image2_position[0], // Order is x, y. This matches the
+ image2_position[1] // derivative order in the patch.
+ };
+ JetOps<T>::SetScalar(image1_position[0], image1_position_jet + 0);
+ JetOps<T>::SetScalar(image1_position[1], image1_position_jet + 1);
+
+ // Now that the image1 positions have the jets applied from the
+ // image2 position (the ESM hack), chain the image gradients to
+ // obtain a sample with the derivative with respect to the warp
+ // parameters attached.
+ src_sample = Chain<float, 2, T>::Rule(pattern_and_gradient_(r, c),
+ &pattern_and_gradient_(r, c, 1),
+ image1_position_jet);
+
+ // The jacobians for these should be averaged. Due to the subtraction
+ // below, flip the sign of the src derivative so that the effect
+ // after subtraction of the jets is that they are averaged.
+ JetOps<T>::ScaleDerivative(-0.5, &src_sample);
+ JetOps<T>::ScaleDerivative(0.5, &dst_sample);
+ } else {
+ // This is the traditional, forward-mode KLT solution.
+ src_sample = T(pattern_and_gradient_(r, c));
+ }
+
+ // Normalize the samples by the mean values of each signal. The typical
+ // light model assumes multiplicative intensity changes with changing
+ // light, so this is a reasonable choice. Note that dst_mean has
+ // derivative information attached thanks to autodiff.
+ if (options_.use_normalized_intensities) {
+ src_sample /= T(src_mean_);
+ dst_sample /= dst_mean;
+ }
+
+ // The difference is the error.
+ T error = src_sample - dst_sample;
+
+ // Weight the error by the mask, if one is present.
+ if (options_.image1_mask != NULL) {
+ error *= T(mask_value);
+ }
+ residuals[cursor++] = error;
+ }
+ }
+ return true;
+ }
+
+  // For normalized matching, compute the average intensity of the destination
+  // patch; autodiff carries its derivative information along in *dst_mean.
+ template<typename T>
+ void ComputeNormalizingCoefficient(const T *warp_parameters,
+ T *dst_mean) const {
+ *dst_mean = T(0.0);
+ double num_samples = 0.0;
+ for (int r = 0; r < num_samples_y_; ++r) {
+ for (int c = 0; c < num_samples_x_; ++c) {
+ // Use the pre-computed image1 position.
+ Vec2 image1_position(pattern_positions_(r, c, 0),
+ pattern_positions_(r, c, 1));
+
+ // Sample the mask early; if it's zero, this pixel has no effect. This
+ // allows early bailout from the expensive sampling that happens below.
+ double mask_value = 1.0;
+ if (options_.image1_mask != NULL) {
+ mask_value = pattern_mask_(r, c);
+ if (mask_value == 0.0) {
+ continue;
+ }
+ }
+
+ // Compute the location of the destination pixel.
+ T image2_position[2];
+ warp_.Forward(warp_parameters,
+ T(image1_position[0]),
+ T(image1_position[1]),
+ &image2_position[0],
+ &image2_position[1]);
+
+
+ // Sample the destination, propagating derivatives.
+ // TODO(keir): This accumulation can, surprisingly, be done as a
+ // pre-pass by using integral images. This is complicated by the need
+ // to store the jets in the integral image, but it is possible.
+ T dst_sample = SampleWithDerivative(image_and_gradient2_,
+ image2_position[0],
+ image2_position[1]);
+
+ // Weight the sample by the mask, if one is present.
+ if (options_.image1_mask != NULL) {
+ dst_sample *= T(mask_value);
+ }
+
+ *dst_mean += dst_sample;
+ num_samples += mask_value;
+ }
+ }
+ *dst_mean /= T(num_samples);
+ LG << "Normalization for dst:" << *dst_mean;
+ }
+
+ // TODO(keir): Consider also computing the cost here.
+ double PearsonProductMomentCorrelationCoefficient(
+ const double *warp_parameters) const {
+ for (int i = 0; i < Warp::NUM_PARAMETERS; ++i) {
+ VLOG(2) << "Correlation warp_parameters[" << i << "]: "
+ << warp_parameters[i];
+ }
+
+ // The single-pass PMCC computation is somewhat numerically unstable, but
+ // it's sufficient for the tracker.
+ double sX = 0, sY = 0, sXX = 0, sYY = 0, sXY = 0;
+
+ // Due to masking, it's important to account for fractional samples.
+ // For example, samples with a 50% mask are counted as a half sample.
+ double num_samples = 0;
+
+ for (int r = 0; r < num_samples_y_; ++r) {
+ for (int c = 0; c < num_samples_x_; ++c) {
+ // Use the pre-computed image1 position.
+ Vec2 image1_position(pattern_positions_(r, c, 0),
+ pattern_positions_(r, c, 1));
+
+ double mask_value = 1.0;
+ if (options_.image1_mask != NULL) {
+ mask_value = pattern_mask_(r, c);
+ if (mask_value == 0.0) {
+ continue;
+ }
+ }
+
+ // Compute the location of the destination pixel.
+ double image2_position[2];
+ warp_.Forward(warp_parameters,
+ image1_position[0],
+ image1_position[1],
+ &image2_position[0],
+ &image2_position[1]);
+
+ double x = pattern_and_gradient_(r, c);
+ double y = SampleLinear(image_and_gradient2_,
+ image2_position[1], // SampleLinear is r, c.
+ image2_position[0]);
+
+ // Weight the signals by the mask, if one is present.
+ if (options_.image1_mask != NULL) {
+ x *= mask_value;
+ y *= mask_value;
+ num_samples += mask_value;
+ } else {
+ num_samples++;
+ }
+ sX += x;
+ sY += y;
+ sXX += x*x;
+ sYY += y*y;
+ sXY += x*y;
+ }
+ }
+ // Normalize.
+ sX /= num_samples;
+ sY /= num_samples;
+ sXX /= num_samples;
+ sYY /= num_samples;
+ sXY /= num_samples;
+
+ double var_x = sXX - sX*sX;
+ double var_y = sYY - sY*sY;
+ double covariance_xy = sXY - sX*sY;
+
+ double correlation = covariance_xy / sqrt(var_x * var_y);
+ LG << "Covariance xy: " << covariance_xy
+ << ", var 1: " << var_x << ", var 2: " << var_y
+ << ", correlation: " << correlation;
+ return correlation;
+ }
+
+ private:
+ const TrackRegionOptions &options_;
+ const FloatImage &image_and_gradient1_;
+ const FloatImage &image_and_gradient2_;
+ const Mat3 &canonical_to_image1_;
+ int num_samples_x_;
+ int num_samples_y_;
+ const Warp &warp_;
+ double src_mean_;
+ FloatImage pattern_and_gradient_;
+
+ // This contains the position from where the cached pattern samples were
+ // taken from. This is also used to warp from src to dest without going from
+ // canonical pixels to src first.
+ FloatImage pattern_positions_;
+
+ FloatImage pattern_mask_;
+};
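
The single-pass correlation above reduces to the textbook accumulators when there is no mask and every pixel counts as a full sample. A standalone sketch of the same formula on two plain arrays (PearsonCorrelation is a hypothetical name, for illustration only); as noted above, the single-pass form is not the most numerically stable, but it is adequate for scoring a track:

    #include <cmath>

    double PearsonCorrelation(const double *x, const double *y, int n) {
      double sX = 0, sY = 0, sXX = 0, sYY = 0, sXY = 0;
      for (int i = 0; i < n; ++i) {
        sX += x[i];
        sY += y[i];
        sXX += x[i] * x[i];
        sYY += y[i] * y[i];
        sXY += x[i] * y[i];
      }
      sX /= n; sY /= n; sXX /= n; sYY /= n; sXY /= n;
      // correlation = cov(x, y) / (sigma_x * sigma_y).
      return (sXY - sX * sY) / std::sqrt((sXX - sX * sX) * (sYY - sY * sY));
    }
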
+
+template<typename Warp>
+class WarpRegularizingCostFunctor {
+ public:
+ WarpRegularizingCostFunctor(const TrackRegionOptions &options,
+ const double *x1,
+ const double *y1,
+ const double *x2_original,
+ const double *y2_original,
+ const Warp &warp)
+ : options_(options),
+ x1_(x1),
+ y1_(y1),
+ x2_original_(x2_original),
+ y2_original_(y2_original),
+ warp_(warp) {
+ // Compute the centroid of the first guess quad.
+ // TODO(keir): Use Quad class here.
+ original_centroid_[0] = 0.0;
+ original_centroid_[1] = 0.0;
+ for (int i = 0; i < 4; ++i) {
+ original_centroid_[0] += x2_original[i];
+ original_centroid_[1] += y2_original[i];
+ }
+ original_centroid_[0] /= 4;
+ original_centroid_[1] /= 4;
+ }
+
+ template<typename T>
+ bool operator()(const T *warp_parameters, T *residuals) const {
+ T dst_centroid[2] = { T(0.0), T(0.0) };
+ for (int i = 0; i < 4; ++i) {
+ T image1_position[2] = { T(x1_[i]), T(y1_[i]) };
+ T image2_position[2];
+ warp_.Forward(warp_parameters,
+ T(x1_[i]),
+ T(y1_[i]),
+ &image2_position[0],
+ &image2_position[1]);
+
+ // Subtract the positions. Note that this ignores the centroids.
+ residuals[2 * i + 0] = image2_position[0] - image1_position[0];
+ residuals[2 * i + 1] = image2_position[1] - image1_position[1];
+
+ // Accumulate the dst centroid.
+ dst_centroid[0] += image2_position[0];
+ dst_centroid[1] += image2_position[1];
+ }
+ dst_centroid[0] /= T(4.0);
+ dst_centroid[1] /= T(4.0);
+
+ // Adjust for the centroids.
+ for (int i = 0; i < 4; ++i) {
+ residuals[2 * i + 0] += T(original_centroid_[0]) - dst_centroid[0];
+ residuals[2 * i + 1] += T(original_centroid_[1]) - dst_centroid[1];
+ }
+
+ // Reweight the residuals.
+ for (int i = 0; i < 8; ++i) {
+ residuals[i] *= T(options_.regularization_coefficient);
+ }
+
+ return true;
+ }
+
+ const TrackRegionOptions &options_;
+ const double *x1_;
+ const double *y1_;
+ const double *x2_original_;
+ const double *y2_original_;
+ double original_centroid_[2];
+ const Warp &warp_;
+};
+
+// Compute the warp from rectangular coordinates, where one corner is the
+// origin, and the opposite corner is at (num_samples_x, num_samples_y).
+Mat3 ComputeCanonicalHomography(const double *x1,
+ const double *y1,
+ int num_samples_x,
+ int num_samples_y) {
+ Mat canonical(2, 4);
+ canonical << 0, num_samples_x, num_samples_x, 0,
+ 0, 0, num_samples_y, num_samples_y;
+
+ Mat xy1(2, 4);
+ xy1 << x1[0], x1[1], x1[2], x1[3],
+ y1[0], y1[1], y1[2], y1[3];
+
+ Mat3 H;
+ if (!Homography2DFromCorrespondencesLinear(canonical, xy1, &H, 1e-12)) {
+ LG << "Couldn't construct homography.";
+ }
+ return H;
+}
+
+class Quad {
+ public:
+ Quad(const double *x, const double *y) : x_(x), y_(y) {
+ // Compute the centroid and store it.
+ centroid_ = Vec2(0.0, 0.0);
+ for (int i = 0; i < 4; ++i) {
+ centroid_ += Vec2(x_[i], y_[i]);
+ }
+ centroid_ /= 4.0;
+ }
+
+ // The centroid of the four points representing the quad.
+ const Vec2& Centroid() const {
+ return centroid_;
+ }
+
+ // The average magnitude of the four points relative to the centroid.
+ double Scale() const {
+ double scale = 0.0;
+ for (int i = 0; i < 4; ++i) {
+ scale += (Vec2(x_[i], y_[i]) - Centroid()).norm();
+ }
+ return scale / 4.0;
+ }
+
+ Vec2 CornerRelativeToCentroid(int i) const {
+ return Vec2(x_[i], y_[i]) - centroid_;
+ }
+
+ private:
+ const double *x_;
+ const double *y_;
+ Vec2 centroid_;
+};
+
+struct TranslationWarp {
+ TranslationWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2) {
+ Vec2 t = Quad(x2, y2).Centroid() - Quad(x1, y1).Centroid();
+ parameters[0] = t[0];
+ parameters[1] = t[1];
+ }
+
+ template<typename T>
+ void Forward(const T *warp_parameters,
+ const T &x1, const T& y1, T *x2, T* y2) const {
+ *x2 = x1 + warp_parameters[0];
+ *y2 = y1 + warp_parameters[1];
+ }
+
+ // Translation x, translation y.
+ enum { NUM_PARAMETERS = 2 };
+ double parameters[NUM_PARAMETERS];
+};
+
+struct TranslationScaleWarp {
+ TranslationScaleWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2)
+ : q1(x1, y1) {
+ Quad q2(x2, y2);
+
+ // The difference in centroids is the best guess for translation.
+ Vec2 t = q2.Centroid() - q1.Centroid();
+ parameters[0] = t[0];
+ parameters[1] = t[1];
+
+ // The difference in scales is the estimate for the scale.
+ parameters[2] = 1.0 - q2.Scale() / q1.Scale();
+ }
+
+  // The translation and scaling are parameterized in this slightly strange
+  // way to make the knobs that the optimizer sees easy to adjust. This is
+  // less important for the scaling case than for the rotation case.
+ template<typename T>
+ void Forward(const T *warp_parameters,
+ const T &x1, const T& y1, T *x2, T* y2) const {
+ // Make the centroid of Q1 the origin.
+ const T x1_origin = x1 - q1.Centroid()(0);
+ const T y1_origin = y1 - q1.Centroid()(1);
+
+ // Scale uniformly about the origin.
+ const T scale = 1.0 + warp_parameters[2];
+ const T x1_origin_scaled = scale * x1_origin;
+ const T y1_origin_scaled = scale * y1_origin;
+
+ // Translate back into the space of Q1 (but scaled).
+ const T x1_scaled = x1_origin_scaled + q1.Centroid()(0);
+ const T y1_scaled = y1_origin_scaled + q1.Centroid()(1);
+
+ // Translate into the space of Q2.
+ *x2 = x1_scaled + warp_parameters[0];
+ *y2 = y1_scaled + warp_parameters[1];
+ }
+
+ // Translation x, translation y, scale.
+ enum { NUM_PARAMETERS = 3 };
+ double parameters[NUM_PARAMETERS];
+
+ Quad q1;
+};
+
+// Assumes the given points are already zero-centroid and the same size.
+Mat2 OrthogonalProcrustes(const Mat2 &correlation_matrix) {
+ Eigen::JacobiSVD<Mat2> svd(correlation_matrix,
+ Eigen::ComputeFullU | Eigen::ComputeFullV);
+ return svd.matrixV() * svd.matrixU().transpose();
+}
+
+struct TranslationRotationWarp {
+ TranslationRotationWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2)
+ : q1(x1, y1) {
+ Quad q2(x2, y2);
+
+ // The difference in centroids is the best guess for translation.
+ Vec2 t = q2.Centroid() - q1.Centroid();
+ parameters[0] = t[0];
+ parameters[1] = t[1];
+
+    // Obtain the rotation via orthogonal Procrustes.
+ Mat2 correlation_matrix = Mat2::Zero();
+ for (int i = 0; i < 4; ++i) {
+ correlation_matrix += q1.CornerRelativeToCentroid(i) *
+ q2.CornerRelativeToCentroid(i).transpose();
+ }
+ Mat2 R = OrthogonalProcrustes(correlation_matrix);
+ parameters[2] = atan2(R(1, 0), R(0, 0));
+
+ LG << "Correlation_matrix:\n" << correlation_matrix;
+ LG << "R:\n" << R;
+ LG << "Theta:" << parameters[2];
+ }
+
+  // The translation and rotation are parameterized in this slightly strange
+  // way to make the knobs that the optimizer sees easy to adjust. While
+  // composed rotations and translations can always be expressed as a single
+  // translation and rotation, the numerical values needed for the composition
+  // are often large in magnitude. This is enough to throw off any minimizer,
+  // since it must effectively compose rotations and translations.
+  //
+  // Instead, the parameterization below exposes the degrees of freedom in a
+  // way amenable to optimization.
+ template<typename T>
+ void Forward(const T *warp_parameters,
+ const T &x1, const T& y1, T *x2, T* y2) const {
+ // Make the centroid of Q1 the origin.
+ const T x1_origin = x1 - q1.Centroid()(0);
+ const T y1_origin = y1 - q1.Centroid()(1);
+
+ // Rotate about the origin (i.e. centroid of Q1).
+ const T theta = warp_parameters[2];
+ const T costheta = cos(theta);
+ const T sintheta = sin(theta);
+ const T x1_origin_rotated = costheta * x1_origin - sintheta * y1_origin;
+ const T y1_origin_rotated = sintheta * x1_origin + costheta * y1_origin;
+
+ // Translate back into the space of Q1 (but scaled).
+ const T x1_rotated = x1_origin_rotated + q1.Centroid()(0);
+ const T y1_rotated = y1_origin_rotated + q1.Centroid()(1);
+
+ // Translate into the space of Q2.
+ *x2 = x1_rotated + warp_parameters[0];
+ *y2 = y1_rotated + warp_parameters[1];
+ }
+
+  // Translation x, translation y, rotation about the center of Q1 in
+  // radians.
+ enum { NUM_PARAMETERS = 3 };
+ double parameters[NUM_PARAMETERS];
+
+ Quad q1;
+};
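
As a sanity check on the Procrustes-based seeding above, a small sketch showing that a correlation matrix accumulated from corners related by a pure rotation recovers that rotation, and that atan2(R(1, 0), R(0, 0)) recovers the angle in radians. It assumes the Eigen-backed Mat2/Vec2 typedefs from libmv/numeric and the OrthogonalProcrustes helper above; the angle 0.3 is an arbitrary example.

    double RecoverRotationAngleSketch() {
      const double theta = 0.3;  // Hypothetical ground-truth angle in radians.
      Mat2 R_true;
      R_true << cos(theta), -sin(theta),
                sin(theta),  cos(theta);

      // Zero-centroid corners and their rotated counterparts, accumulated the
      // same way TranslationRotationWarp builds its correlation matrix.
      Vec2 corners[4] = { Vec2(1, 1), Vec2(-1, 1), Vec2(-1, -1), Vec2(1, -1) };
      Mat2 correlation = Mat2::Zero();
      for (int i = 0; i < 4; ++i) {
        correlation += corners[i] * (R_true * corners[i]).transpose();
      }
      Mat2 R = OrthogonalProcrustes(correlation);
      return atan2(R(1, 0), R(0, 0));  // Approximately 0.3.
    }
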
+
+struct TranslationRotationScaleWarp {
+ TranslationRotationScaleWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2)
+ : q1(x1, y1) {
+ Quad q2(x2, y2);
+
+ // The difference in centroids is the best guess for translation.
+ Vec2 t = q2.Centroid() - q1.Centroid();
+ parameters[0] = t[0];
+ parameters[1] = t[1];
+
+ // The difference in scales is the estimate for the scale.
+ parameters[2] = 1.0 - q2.Scale() / q1.Scale();
+
+    // Obtain the rotation via orthogonal Procrustes.
+ Mat2 correlation_matrix = Mat2::Zero();
+ for (int i = 0; i < 4; ++i) {
+ correlation_matrix += q1.CornerRelativeToCentroid(i) *
+ q2.CornerRelativeToCentroid(i).transpose();
+ }
+ Mat2 R = OrthogonalProcrustes(correlation_matrix);
+ parameters[3] = atan2(R(1, 0), R(0, 0));
+
+ LG << "Correlation_matrix:\n" << correlation_matrix;
+ LG << "R:\n" << R;
+ LG << "Theta:" << parameters[3];
+ }
+
+  // The translation and rotation are parameterized in this slightly strange
+  // way to make the knobs that the optimizer sees easy to adjust. While
+  // composed rotations and translations can always be expressed as a single
+  // translation and rotation, the numerical values needed for the composition
+  // are often large in magnitude. This is enough to throw off any minimizer,
+  // since it must effectively compose rotations and translations.
+  //
+  // Instead, the parameterization below exposes the degrees of freedom in a
+  // way amenable to optimization.
+ template<typename T>
+ void Forward(const T *warp_parameters,
+ const T &x1, const T& y1, T *x2, T* y2) const {
+ // Make the centroid of Q1 the origin.
+ const T x1_origin = x1 - q1.Centroid()(0);
+ const T y1_origin = y1 - q1.Centroid()(1);
+
+ // Rotate about the origin (i.e. centroid of Q1).
+ const T theta = warp_parameters[3];
+ const T costheta = cos(theta);
+ const T sintheta = sin(theta);
+ const T x1_origin_rotated = costheta * x1_origin - sintheta * y1_origin;
+ const T y1_origin_rotated = sintheta * x1_origin + costheta * y1_origin;
+
+ // Scale uniformly about the origin.
+ const T scale = 1.0 + warp_parameters[2];
+ const T x1_origin_rotated_scaled = scale * x1_origin_rotated;
+ const T y1_origin_rotated_scaled = scale * y1_origin_rotated;
+
+ // Translate back into the space of Q1 (but scaled and rotated).
+ const T x1_rotated_scaled = x1_origin_rotated_scaled + q1.Centroid()(0);
+ const T y1_rotated_scaled = y1_origin_rotated_scaled + q1.Centroid()(1);
+
+ // Translate into the space of Q2.
+ *x2 = x1_rotated_scaled + warp_parameters[0];
+ *y2 = y1_rotated_scaled + warp_parameters[1];
+ }
+
+  // Translation x, translation y, rotation about the center of Q1 in radians,
+  // scale.
+ enum { NUM_PARAMETERS = 4 };
+ double parameters[NUM_PARAMETERS];
+
+ Quad q1;
+};
+
+struct AffineWarp {
+ AffineWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2)
+ : q1(x1, y1) {
+ Quad q2(x2, y2);
+
+ // The difference in centroids is the best guess for translation.
+ Vec2 t = q2.Centroid() - q1.Centroid();
+ parameters[0] = t[0];
+ parameters[1] = t[1];
+
+ // Estimate the four affine parameters with the usual least squares.
+ Mat Q1(8, 4);
+ Vec Q2(8);
+ for (int i = 0; i < 4; ++i) {
+ Vec2 v1 = q1.CornerRelativeToCentroid(i);
+ Vec2 v2 = q2.CornerRelativeToCentroid(i);
+
+ Q1.row(2 * i + 0) << v1[0], v1[1], 0, 0;
+ Q1.row(2 * i + 1) << 0, 0, v1[0], v1[1];
+
+ Q2(2 * i + 0) = v2[0];
+ Q2(2 * i + 1) = v2[1];
+ }
+
+ // TODO(keir): Check solution quality.
+ Vec4 a = Q1.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(Q2);
+ parameters[2] = a[0];
+ parameters[3] = a[1];
+ parameters[4] = a[2];
+ parameters[5] = a[3];
+
+ LG << "a:" << a.transpose();
+ LG << "t:" << t.transpose();
+ }
+
+ // See comments in other parameterizations about why the centroid is used.
+ template<typename T>
+ void Forward(const T *p, const T &x1, const T& y1, T *x2, T* y2) const {
+ // Make the centroid of Q1 the origin.
+ const T x1_origin = x1 - q1.Centroid()(0);
+ const T y1_origin = y1 - q1.Centroid()(1);
+
+ // Apply the affine transformation.
+ const T x1_origin_affine = p[2] * x1_origin + p[3] * y1_origin;
+ const T y1_origin_affine = p[4] * x1_origin + p[5] * y1_origin;
+
+ // Translate back into the space of Q1 (but affine transformed).
+ const T x1_affine = x1_origin_affine + q1.Centroid()(0);
+ const T y1_affine = y1_origin_affine + q1.Centroid()(1);
+
+ // Translate into the space of Q2.
+ *x2 = x1_affine + p[0];
+ *y2 = y1_affine + p[1];
+ }
+
+  // Translation x, translation y, followed by the four entries of the 2x2
+  // affine transformation applied about the center of Q1.
+ enum { NUM_PARAMETERS = 6 };
+ double parameters[NUM_PARAMETERS];
+
+ Quad q1;
+};
+
+struct HomographyWarp {
+ HomographyWarp(const double *x1, const double *y1,
+ const double *x2, const double *y2) {
+ Mat quad1(2, 4);
+ quad1 << x1[0], x1[1], x1[2], x1[3],
+ y1[0], y1[1], y1[2], y1[3];
+
+ Mat quad2(2, 4);
+ quad2 << x2[0], x2[1], x2[2], x2[3],
+ y2[0], y2[1], y2[2], y2[3];
+
+ Mat3 H;
+ if (!Homography2DFromCorrespondencesLinear(quad1, quad2, &H, 1e-12)) {
+ LG << "Couldn't construct homography.";
+ }
+
+ // Assume H(2, 2) != 0, and fix scale at H(2, 2) == 1.0.
+ H /= H(2, 2);
+
+ // Assume H is close to identity, so subtract out the diagonal.
+ H(0, 0) -= 1.0;
+ H(1, 1) -= 1.0;
+
+ CHECK_NE(H(2, 2), 0.0) << H;
+ for (int i = 0; i < 8; ++i) {
+ parameters[i] = H(i / 3, i % 3);
+ LG << "Parameters[" << i << "]: " << parameters[i];
+ }
+ }
+
+ template<typename T>
+ static void Forward(const T *p,
+ const T &x1, const T& y1, T *x2, T* y2) {
+ // Homography warp with manual 3x3 matrix multiply.
+ const T xx2 = (1.0 + p[0]) * x1 + p[1] * y1 + p[2];
+ const T yy2 = p[3] * x1 + (1.0 + p[4]) * y1 + p[5];
+ const T zz2 = p[6] * x1 + p[7] * y1 + 1.0;
+ *x2 = xx2 / zz2;
+ *y2 = yy2 / zz2;
+ }
+
+ enum { NUM_PARAMETERS = 8 };
+ double parameters[NUM_PARAMETERS];
+};
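
Because the diagonal is subtracted out and H(2, 2) is pinned to one, the zero parameter vector corresponds exactly to the identity homography. A tiny sketch of Forward() under that parameterization (illustration only; it assumes the HomographyWarp struct above):

    double HomographyIdentitySketch() {
      const double p[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
      double x2, y2;
      HomographyWarp::Forward(p, 10.0, 20.0, &x2, &y2);
      // With all-zero parameters, (x2, y2) == (10.0, 20.0).
      return x2 + y2;
    }
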
+
+// Determine the number of samples to use for x and y. Quad winding goes:
+//
+// 0 1
+// 3 2
+//
+// The idea is to take the maximum x or y distance. This may be oversampling.
+// TODO(keir): Investigate the various choices; perhaps average is better?
+void PickSampling(const double *x1, const double *y1,
+ const double *x2, const double *y2,
+ int *num_samples_x, int *num_samples_y) {
+ (void) x2; // Ignored.
+ (void) y2; // Ignored.
+
+ Vec2 a0(x1[0], y1[0]);
+ Vec2 a1(x1[1], y1[1]);
+ Vec2 a2(x1[2], y1[2]);
+ Vec2 a3(x1[3], y1[3]);
+
+ Vec2 b0(x1[0], y1[0]);
+ Vec2 b1(x1[1], y1[1]);
+ Vec2 b2(x1[2], y1[2]);
+ Vec2 b3(x1[3], y1[3]);
+
+ double x_dimensions[4] = {
+ (a1 - a0).norm(),
+ (a3 - a2).norm(),
+ (b1 - b0).norm(),
+ (b3 - b2).norm()
+ };
+
+ double y_dimensions[4] = {
+ (a3 - a0).norm(),
+ (a1 - a2).norm(),
+ (b3 - b0).norm(),
+ (b1 - b2).norm()
+ };
+ const double kScaleFactor = 1.0;
+ *num_samples_x = static_cast<int>(
+ kScaleFactor * *std::max_element(x_dimensions, x_dimensions + 4));
+ *num_samples_y = static_cast<int>(
+ kScaleFactor * *std::max_element(y_dimensions, y_dimensions + 4));
+ LG << "Automatic num_samples_x: " << *num_samples_x
+ << ", num_samples_y: " << *num_samples_y;
+}
+
+bool SearchAreaTooBigForDescent(const FloatImage &image2,
+ const double *x2, const double *y2) {
+ // TODO(keir): Check the bounds and enable only when it makes sense.
+ (void) image2; // Ignored.
+ (void) x2; // Ignored.
+ (void) y2; // Ignored.
+
+ return true;
+}
+
+bool PointOnRightHalfPlane(const Vec2 &a, const Vec2 &b, double x, double y) {
+ Vec2 ba = b - a;
+ return ((Vec2(x, y) - b).transpose() * Vec2(-ba.y(), ba.x())) > 0;
+}
+
+// Determine if a point is in a quad. The quad is arranged as:
+//
+// +-------> x
+// |
+// | a0------a1
+// | | |
+// | | |
+// | | |
+// | a3------a2
+// v
+// y
+//
+// The implementation does up to four half-plane comparisons.
+bool PointInQuad(const double *xs, const double *ys, double x, double y) {
+ Vec2 a0(xs[0], ys[0]);
+ Vec2 a1(xs[1], ys[1]);
+ Vec2 a2(xs[2], ys[2]);
+ Vec2 a3(xs[3], ys[3]);
+
+ return PointOnRightHalfPlane(a0, a1, x, y) &&
+ PointOnRightHalfPlane(a1, a2, x, y) &&
+ PointOnRightHalfPlane(a2, a3, x, y) &&
+ PointOnRightHalfPlane(a3, a0, x, y);
+}
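
A small usage sketch of the containment test, using an axis-aligned square laid out with the winding shown in the diagram above (the function name is hypothetical, for illustration only):

    bool PointInQuadSketch() {
      double xs[4] = { 0, 2, 2, 0 };  // a0, a1, a2, a3 per the diagram.
      double ys[4] = { 0, 0, 2, 2 };
      // (1, 1) lies inside the square; (5, 5) lies outside.
      return PointInQuad(xs, ys, 1.0, 1.0) && !PointInQuad(xs, ys, 5.0, 5.0);
    }
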
+
+// This makes it possible to map between Eigen float arrays and FloatImage
+// without using comparisons.
+typedef Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> FloatArray;
+
+// This creates a pattern in the frame of image2, from the pixels in image1,
+// based on the initial guess represented by the two quads x1, y1, and x2, y2.
+template<typename Warp>
+void CreateBrutePattern(const double *x1, const double *y1,
+ const double *x2, const double *y2,
+ const FloatImage &image1,
+ const FloatImage *image1_mask,
+ FloatArray *pattern,
+ FloatArray *mask,
+ int *origin_x,
+ int *origin_y) {
+ // Get integer bounding box of quad2 in image2.
+ int min_x = static_cast<int>(floor(*std::min_element(x2, x2 + 4)));
+ int min_y = static_cast<int>(floor(*std::min_element(y2, y2 + 4)));
+ int max_x = static_cast<int>(ceil (*std::max_element(x2, x2 + 4)));
+ int max_y = static_cast<int>(ceil (*std::max_element(y2, y2 + 4)));
+
+ int w = max_x - min_x;
+ int h = max_y - min_y;
+
+ pattern->resize(h, w);
+ mask->resize(h, w);
+
+ Warp inverse_warp(x2, y2, x1, y1);
+
+ // r,c are in the coordinate frame of image2.
+ for (int r = min_y; r < max_y; ++r) {
+ for (int c = min_x; c < max_x; ++c) {
+ // i and j are in the coordinate frame of the pattern in image2.
+ int i = r - min_y;
+ int j = c - min_x;
+
+ double dst_x = c;
+ double dst_y = r;
+ double src_x;
+ double src_y;
+ inverse_warp.Forward(inverse_warp.parameters,
+ dst_x, dst_y,
+ &src_x, &src_y);
+
+ if (PointInQuad(x1, y1, src_x, src_y)) {
+ (*pattern)(i, j) = SampleLinear(image1, src_y, src_x);
+ (*mask)(i, j) = 1.0;
+ if (image1_mask) {
+ (*mask)(i, j) = SampleLinear(*image1_mask, src_y, src_x);
+ }
+ } else {
+ (*pattern)(i, j) = 0.0;
+ (*mask)(i, j) = 0.0;
+ }
+ }
+ }
+ *origin_x = min_x;
+ *origin_y = min_y;
+}
+
+// Compute a translation-only estimate of the warp, using brute force search. A
+// smarter implementation would use the FFT to compute the normalized cross
+// correlation. Instead, this is a dumb implementation. Surprisingly, it is
+// fast enough in practice.
+//
+// Returns true if any alignment was found, and false if the projected pattern
+// is zero sized.
+//
+// TODO(keir): The normalization is less effective for the brute force search
+// than it is with the Ceres solver. It's unclear if this is a bug or due to
+// the original frame being too different from the reprojected reference in the
+// destination frame.
+//
+// The likely solution is to use the previous frame, instead of the original
+// pattern, when doing brute initialization. Unfortunately that implies a
+// totally different warping interface, since access to more than the source
+// and current destination frames is necessary.
+template<typename Warp>
+bool BruteTranslationOnlyInitialize(const FloatImage &image1,
+ const FloatImage *image1_mask,
+ const FloatImage &image2,
+ const int num_extra_points,
+ const bool use_normalized_intensities,
+ const double *x1, const double *y1,
+ double *x2, double *y2) {
+  // Create the pattern to match in the space of image2, assuming our initial
+ // guess isn't too far from the template in image1. If there is no image1
+ // mask, then the resulting mask is binary.
+ FloatArray pattern;
+ FloatArray mask;
+ int origin_x = -1, origin_y = -1;
+ CreateBrutePattern<Warp>(x1, y1, x2, y2,
+ image1, image1_mask,
+ &pattern, &mask,
+ &origin_x, &origin_y);
+
+ // For normalization, premultiply the pattern by the inverse pattern mean.
+ double mask_sum = 1.0;
+ if (use_normalized_intensities) {
+ mask_sum = mask.sum();
+ double inverse_pattern_mean = mask_sum / ((mask * pattern).sum());
+ pattern *= inverse_pattern_mean;
+ }
+
+ // Use Eigen on the images via maps for strong vectorization.
+ Map<const FloatArray> search(image2.Data(), image2.Height(), image2.Width());
+
+ // Try all possible locations inside the search area. Yes, everywhere.
+ //
+ // TODO(keir): There are a number of possible optimizations here. One choice
+ // is to make a grid and only try one out of every N possible samples.
+ //
+ // Another, slightly more clever idea, is to compute some sort of spatial
+ // frequency distribution of the pattern patch. If the spatial resolution is
+ // high (e.g. a grating pattern or fine lines) then checking every possible
+ // translation is necessary, since a 1-pixel shift may induce a massive
+ // change in the cost function. If the image is a blob or splotch with blurry
+ // edges, then fewer samples are necessary since a few pixels offset won't
+ // change the cost function much.
+ double best_sad = std::numeric_limits<double>::max();
+ int best_r = -1;
+ int best_c = -1;
+ int w = pattern.cols();
+ int h = pattern.rows();
+
+ for (int r = 0; r < (image2.Height() - h); ++r) {
+ for (int c = 0; c < (image2.Width() - w); ++c) {
+ // Compute the weighted sum of absolute differences, Eigen style. Note
+ // that the block from the search image is never stored in a variable, to
+ // avoid copying overhead and permit inlining.
+ double sad;
+ if (use_normalized_intensities) {
+ // TODO(keir): It's really dumb to recompute the search mean for every
+ // shift. A smarter implementation would use summed area tables
+ // instead, reducing the mean calculation to an O(1) operation.
+ double inverse_search_mean =
+ mask_sum / ((mask * search.block(r, c, h, w)).sum());
+ sad = (mask * (pattern - (search.block(r, c, h, w) *
+ inverse_search_mean))).abs().sum();
+ } else {
+ sad = (mask * (pattern - search.block(r, c, h, w))).abs().sum();
+ }
+ if (sad < best_sad) {
+ best_r = r;
+ best_c = c;
+ best_sad = sad;
+ }
+ }
+ }
+
+  // This means the effective pattern area is zero. This check could go earlier,
+ // but this is less code.
+ if (best_r == -1 || best_c == -1) {
+ return false;
+ }
+
+ LG << "Brute force translation found a shift. "
+ << "best_c: " << best_c << ", best_r: " << best_r << ", "
+ << "origin_x: " << origin_x << ", origin_y: " << origin_y << ", "
+ << "dc: " << (best_c - origin_x) << ", "
+ << "dr: " << (best_r - origin_y)
+ << ", tried " << ((image2.Height() - h) * (image2.Width() - w))
+ << " shifts.";
+
+ // Apply the shift.
+ for (int i = 0; i < 4 + num_extra_points; ++i) {
+ x2[i] += best_c - origin_x;
+ y2[i] += best_r - origin_y;
+ }
+ return true;
+}
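
The heart of the brute initialization is the exhaustive sum-of-absolute-differences (SAD) scan over Eigen blocks. The following standalone sketch finds a single bright pixel the same way; the data and the function name are hypothetical, and the typedef simply restates the FloatArray used above:

    #include <Eigen/Core>
    #include <limits>

    typedef Eigen::Array<float, Eigen::Dynamic, Eigen::Dynamic,
                         Eigen::RowMajor> FloatArray;

    int BruteSadSketch() {
      FloatArray search(16, 16);
      search.setZero();
      search(9, 11) = 1.0f;    // The feature to find.

      FloatArray pattern(3, 3);
      pattern.setZero();
      pattern(1, 1) = 1.0f;    // The same feature, centered in the pattern.

      double best_sad = std::numeric_limits<double>::max();
      int best_r = -1, best_c = -1;
      for (int r = 0; r < search.rows() - pattern.rows(); ++r) {
        for (int c = 0; c < search.cols() - pattern.cols(); ++c) {
          double sad = (pattern - search.block(r, c, 3, 3)).abs().sum();
          if (sad < best_sad) {
            best_sad = sad;
            best_r = r;
            best_c = c;
          }
        }
      }
      // best_r == 8 and best_c == 10, aligning the pattern center with (9, 11).
      return best_r * 16 + best_c;
    }
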
+
+void CopyQuad(double *src_x, double *src_y,
+ double *dst_x, double *dst_y,
+ int num_extra_points) {
+ for (int i = 0; i < 4 + num_extra_points; ++i) {
+ dst_x[i] = src_x[i];
+ dst_y[i] = src_y[i];
+ }
+}
+
+} // namespace
+
+template<typename Warp>
+void TemplatedTrackRegion(const FloatImage &image1,
+ const FloatImage &image2,
+ const double *x1, const double *y1,
+ const TrackRegionOptions &options,
+ double *x2, double *y2,
+ TrackRegionResult *result) {
+ for (int i = 0; i < 4 + options.num_extra_points; ++i) {
+ LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); guess ("
+ << x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i]) << ", "
+ << (y2[i] - y1[i]) << ").";
+ }
+
+ // Since (x2, y2) contains a prediction for where the tracked point should
+ // go, try a refinement immediately in the hope that the prediction is close
+ // enough.
+ if (options.attempt_refine_before_brute) {
+ TrackRegionOptions modified_options = options;
+ modified_options.use_brute_initialization = false;
+ modified_options.attempt_refine_before_brute = false;
+
+ double x2_first_try[5];
+ double y2_first_try[5];
+ CopyQuad(x2, y2, x2_first_try, y2_first_try, options.num_extra_points);
+
+ TemplatedTrackRegion<Warp>(image1, image2,
+ x1, y1, modified_options,
+ x2_first_try, y2_first_try, result);
+
+    // If the first pass terminated with one of the results below, skip the
+    // brute pass (and the second refinement attempt).
+ if (result->termination == TrackRegionResult::CONVERGENCE ||
+ result->termination == TrackRegionResult::SOURCE_OUT_OF_BOUNDS ||
+ result->termination == TrackRegionResult::DESTINATION_OUT_OF_BOUNDS ||
+ result->termination == TrackRegionResult::INSUFFICIENT_PATTERN_AREA) {
+ LG << "Terminated with first try at refinement; no brute needed.";
+ // TODO(keir): Also check correlation?
+ CopyQuad(x2_first_try, y2_first_try, x2, y2, options.num_extra_points);
+ LG << "Early termination correlation: " << result->correlation;
+ return;
+ } else {
+ LG << "Initial eager-refinement failed; retrying normally.";
+ }
+ }
+
+ if (options.use_normalized_intensities) {
+ LG << "Using normalized intensities.";
+ }
+
+ // Bail early if the points are already outside.
+ if (!AllInBounds(image1, x1, y1)) {
+ result->termination = TrackRegionResult::SOURCE_OUT_OF_BOUNDS;
+ return;
+ }
+ if (!AllInBounds(image2, x2, y2)) {
+ result->termination = TrackRegionResult::DESTINATION_OUT_OF_BOUNDS;
+ return;
+ }
+ // TODO(keir): Check quads to ensure there is some area.
+
+ // Keep a copy of the "original" guess for regularization.
+ double x2_original[4];
+ double y2_original[4];
+ for (int i = 0; i < 4; ++i) {
+ x2_original[i] = x2[i];
+ y2_original[i] = y2[i];
+ }
+
+ // Prepare the image and gradient.
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, options.sigma,
+ &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, options.sigma,
+ &image_and_gradient2);
+
+ // Possibly do a brute-force translation-only initialization.
+ if (SearchAreaTooBigForDescent(image2, x2, y2) &&
+ options.use_brute_initialization) {
+ LG << "Running brute initialization...";
+ bool found_any_alignment = BruteTranslationOnlyInitialize<Warp>(
+ image_and_gradient1,
+ options.image1_mask,
+ image2,
+ options.num_extra_points,
+ options.use_normalized_intensities,
+ x1, y1, x2, y2);
+ if (!found_any_alignment) {
+ LG << "Brute failed to find an alignment; pattern too small. "
+ << "Failing entire track operation.";
+ result->termination = TrackRegionResult::INSUFFICIENT_PATTERN_AREA;
+ return;
+ }
+ for (int i = 0; i < 4; ++i) {
+ LG << "P" << i << ": (" << x1[i] << ", " << y1[i] << "); brute ("
+ << x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i])
+ << ", " << (y2[i] - y1[i]) << ").";
+ }
+ }
+
+ // Prepare the initial warp parameters from the four correspondences.
+ // Note: This must happen after the brute initialization runs, since the
+ // brute initialization mutates x2 and y2 in place.
+ Warp warp(x1, y1, x2, y2);
+
+ // Decide how many samples to use in the x and y dimensions.
+ int num_samples_x;
+ int num_samples_y;
+ PickSampling(x1, y1, x2, y2, &num_samples_x, &num_samples_y);
+
+ // Compute the warp from rectangular coordinates.
+ Mat3 canonical_homography = ComputeCanonicalHomography(x1, y1,
+ num_samples_x,
+ num_samples_y);
+
+ ceres::Problem problem;
+
+ // Construct the warp cost function. AutoDiffCostFunction takes ownership.
+ PixelDifferenceCostFunctor<Warp> *pixel_difference_cost_function =
+ new PixelDifferenceCostFunctor<Warp>(options,
+ image_and_gradient1,
+ image_and_gradient2,
+ canonical_homography,
+ num_samples_x,
+ num_samples_y,
+ warp);
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<
+ PixelDifferenceCostFunctor<Warp>,
+ ceres::DYNAMIC,
+ Warp::NUM_PARAMETERS>(pixel_difference_cost_function,
+ num_samples_x * num_samples_y),
+ NULL,
+ warp.parameters);
+
+ // Construct the regularizing cost function
+ if (options.regularization_coefficient != 0.0) {
+ WarpRegularizingCostFunctor<Warp> *regularizing_warp_cost_function =
+        new WarpRegularizingCostFunctor<Warp>(options,
+                                              x1, y1,
+ x2_original,
+ y2_original,
+ warp);
+
+ problem.AddResidualBlock(
+ new ceres::AutoDiffCostFunction<
+ WarpRegularizingCostFunctor<Warp>,
+ 8 /* num_residuals */,
+ Warp::NUM_PARAMETERS>(regularizing_warp_cost_function),
+ NULL,
+ warp.parameters);
+ }
+
+ // Configure the solve.
+ ceres::Solver::Options solver_options;
+ solver_options.linear_solver_type = ceres::DENSE_QR;
+ solver_options.max_num_iterations = options.max_iterations;
+ solver_options.update_state_every_iteration = true;
+ solver_options.parameter_tolerance = 1e-16;
+ solver_options.function_tolerance = 1e-16;
+
+ // Prevent the corners from going outside the destination image and
+ // terminate if the optimizer is making tiny moves (converged).
+ TerminationCheckingCallback<Warp> callback(options, image2, warp, x1, y1);
+ solver_options.callbacks.push_back(&callback);
+
+ // Run the solve.
+ ceres::Solver::Summary summary;
+ ceres::Solve(solver_options, &problem, &summary);
+
+ LG << "Summary:\n" << summary.FullReport();
+
+ // Update the four points with the found solution; if the solver failed, then
+ // the warp parameters are the identity (so ignore failure).
+ //
+ // Also warp any extra points on the end of the array.
+ for (int i = 0; i < 4 + options.num_extra_points; ++i) {
+ warp.Forward(warp.parameters, x1[i], y1[i], x2 + i, y2 + i);
+ LG << "Warped point " << i << ": (" << x1[i] << ", " << y1[i] << ") -> ("
+ << x2[i] << ", " << y2[i] << "); (dx, dy): (" << (x2[i] - x1[i]) << ", "
+ << (y2[i] - y1[i]) << ").";
+ }
+
+ // TODO(keir): Update the result statistics.
+ // TODO(keir): Add a normalize-cross-correlation variant.
+
+ if (summary.termination_type == ceres::USER_FAILURE) {
+ result->termination = TrackRegionResult::FELL_OUT_OF_BOUNDS;
+ return;
+ }
+
+#define HANDLE_TERMINATION(termination_enum) \
+ if (summary.termination_type == ceres::termination_enum) { \
+ result->termination = TrackRegionResult::termination_enum; \
+ return; \
+ }
+
+ // Avoid computing correlation for tracking failures.
+ HANDLE_TERMINATION(FAILURE);
+
+ // Otherwise, run a final correlation check.
+ if (options.minimum_correlation > 0.0) {
+ result->correlation = pixel_difference_cost_function->
+ PearsonProductMomentCorrelationCoefficient(warp.parameters);
+ if (result->correlation < options.minimum_correlation) {
+ LG << "Failing with insufficient correlation.";
+ result->termination = TrackRegionResult::INSUFFICIENT_CORRELATION;
+ return;
+ }
+ }
+
+  // This happens when the minimum corner-shift tolerance is reached. The
+  // tolerance is computed from the pixel shifts of the patch corners, which
+  // Ceres cannot do itself, so return the same termination enum as Ceres even
+  // though this is slightly different from Ceres's parameter tolerance, which
+  // operates on the raw parameter values rather than the corner shifts.
+ if (summary.termination_type == ceres::USER_SUCCESS) {
+ result->termination = TrackRegionResult::CONVERGENCE;
+ return;
+ }
+
+ HANDLE_TERMINATION(CONVERGENCE);
+ HANDLE_TERMINATION(NO_CONVERGENCE);
+#undef HANDLE_TERMINATION
+}
+
+void TrackRegion(const FloatImage &image1,
+ const FloatImage &image2,
+ const double *x1, const double *y1,
+ const TrackRegionOptions &options,
+ double *x2, double *y2,
+ TrackRegionResult *result) {
+  // The enum dispatch is needed because autodiff requires the warp type to be
+  // known at compile time.
+#define HANDLE_MODE(mode_enum, mode_type) \
+ if (options.mode == TrackRegionOptions::mode_enum) { \
+ TemplatedTrackRegion<mode_type>(image1, image2, \
+ x1, y1, \
+ options, \
+ x2, y2, \
+ result); \
+ return; \
+ }
+ HANDLE_MODE(TRANSLATION, TranslationWarp);
+ HANDLE_MODE(TRANSLATION_SCALE, TranslationScaleWarp);
+ HANDLE_MODE(TRANSLATION_ROTATION, TranslationRotationWarp);
+ HANDLE_MODE(TRANSLATION_ROTATION_SCALE, TranslationRotationScaleWarp);
+ HANDLE_MODE(AFFINE, AffineWarp);
+ HANDLE_MODE(HOMOGRAPHY, HomographyWarp);
+#undef HANDLE_MODE
+}
+
+bool SamplePlanarPatch(const FloatImage &image,
+ const double *xs, const double *ys,
+ int num_samples_x, int num_samples_y,
+ FloatImage *mask, FloatImage *patch,
+ double *warped_position_x, double *warped_position_y) {
+ // Bail early if the points are outside the image.
+ if (!AllInBounds(image, xs, ys)) {
+ LG << "Can't sample patch: out of bounds.";
+ return false;
+ }
+
+ // Make the patch have the appropriate size, and match the depth of image.
+ patch->Resize(num_samples_y, num_samples_x, image.Depth());
+
+ // Compute the warp from rectangular coordinates.
+ Mat3 canonical_homography = ComputeCanonicalHomography(xs, ys,
+ num_samples_x,
+ num_samples_y);
+
+ // Walk over the coordinates in the canonical space, sampling from the image
+ // in the original space and copying the result into the patch.
+ for (int r = 0; r < num_samples_y; ++r) {
+ for (int c = 0; c < num_samples_x; ++c) {
+ Vec3 image_position = canonical_homography * Vec3(c, r, 1);
+ image_position /= image_position(2);
+ SampleLinear(image, image_position(1),
+ image_position(0),
+ &(*patch)(r, c, 0));
+ if (mask) {
+ float mask_value = SampleLinear(*mask, image_position(1),
+ image_position(0), 0);
+
+ for (int d = 0; d < image.Depth(); d++)
+ (*patch)(r, c, d) *= mask_value;
+ }
+ }
+ }
+
+ Vec3 warped_position = canonical_homography.inverse() * Vec3(xs[4], ys[4], 1);
+ warped_position /= warped_position(2);
+
+ *warped_position_x = warped_position(0);
+ *warped_position_y = warped_position(1);
+
+ return true;
+}
+
+} // namespace libmv
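Editorial note on the TODO inside BruteTranslationOnlyInitialize above: for the unmasked case (mask identically 1.0), a summed area table ("integral image") would indeed reduce the per-shift mean computation to O(1). The sketch below is illustrative only and not part of the libmv sources; BuildIntegralTable and BlockSum are made-up names.

    // Illustrative sketch (not libmv code): integral image for O(1) block sums.
    #include <Eigen/Dense>

    // integral(r, c) stores the sum of image(0..r-1, 0..c-1); the table is
    // (rows + 1) x (cols + 1) with a zero first row and first column.
    static Eigen::ArrayXXd BuildIntegralTable(const Eigen::ArrayXXf &image) {
      const int rows = image.rows(), cols = image.cols();
      Eigen::ArrayXXd integral = Eigen::ArrayXXd::Zero(rows + 1, cols + 1);
      for (int r = 0; r < rows; ++r) {
        double row_sum = 0.0;
        for (int c = 0; c < cols; ++c) {
          row_sum += image(r, c);
          integral(r + 1, c + 1) = integral(r, c + 1) + row_sum;
        }
      }
      return integral;
    }

    // Sum of image(r..r+h-1, c..c+w-1) in O(1). Dividing by w * h gives the
    // block mean needed to normalize intensities for a candidate shift,
    // without re-summing the block for every (r, c).
    static double BlockSum(const Eigen::ArrayXXd &integral,
                           int r, int c, int h, int w) {
      return integral(r + h, c + w) - integral(r, c + w) -
             integral(r + h, c) + integral(r, c);
    }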
diff --git a/intern/libmv/libmv/tracking/track_region.h b/intern/libmv/libmv/tracking/track_region.h
new file mode 100644
index 00000000000..be1d8ef3e03
--- /dev/null
+++ b/intern/libmv/libmv/tracking/track_region.h
@@ -0,0 +1,170 @@
+// Copyright (c) 2012 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_TRACKING_TRACK_REGION_H_
+#define LIBMV_TRACKING_TRACK_REGION_H_
+
+// Necessary for M_E when building with MSVC.
+#define _USE_MATH_DEFINES
+
+#include "libmv/image/image.h"
+#include "libmv/image/sample.h"
+#include "libmv/numeric/numeric.h"
+
+namespace libmv {
+
+struct TrackRegionOptions {
+ TrackRegionOptions();
+
+ enum Mode {
+ TRANSLATION,
+ TRANSLATION_ROTATION,
+ TRANSLATION_SCALE,
+ TRANSLATION_ROTATION_SCALE,
+ AFFINE,
+ HOMOGRAPHY,
+ };
+ Mode mode;
+
+  // Minimum normalized cross-correlation between the final tracked position
+  // of the patch on the destination image and the reference patch that is
+  // required to declare tracking success. If the minimum correlation is not
+  // met, then TrackRegionResult::termination is INSUFFICIENT_CORRELATION.
+ double minimum_correlation;
+
+ // Maximum number of Ceres iterations to run for the inner minimization.
+ int max_iterations;
+
+ // Use the "Efficient Second-order Minimization" scheme. This increases
+ // convergence speed at the cost of more per-iteration work.
+ bool use_esm;
+
+  // If true, apply a brute-force translation-only search before attempting
+  // the full search. This is not enabled if the destination image ("image2")
+  // is too small; in that case either the basin of attraction is close enough
+  // that the nearby minimum is correct, or the search area is too small.
+ bool use_brute_initialization;
+
+ // If true and brute initialization is enabled, first try refining with the
+ // initial guess instead of starting with the brute initialization. If the
+ // initial refinement fails, then a normal brute search followed by
+ // refinement is attempted. If the initial refinement succeeds, then the
+ // result is returned as is (skipping a costly brute search).
+ bool attempt_refine_before_brute;
+
+ // If true, normalize the image patches by their mean before doing the sum of
+ // squared error calculation. This is reasonable since the effect of
+ // increasing light intensity is multiplicative on the pixel intensities.
+ //
+ // Note: This does nearly double the solving time, so it is not advised to
+ // turn this on all the time.
+ bool use_normalized_intensities;
+
+ // The size in pixels of the blur kernel used to both smooth the image and
+ // take the image derivative.
+ double sigma;
+
+ // Extra points that should get transformed by the warp. These points are
+ // appended to the x and y arrays. This is useful because the actual warp
+ // parameters are not exposed.
+ int num_extra_points;
+
+ // For motion models other than translation, the optimizer sometimes has
+ // trouble deciding what to do around flat areas in the cost function. This
+ // leads to the optimizer picking poor solutions near the minimum. Visually,
+ // the effect is that the quad corners jiggle around, even though the center
+ // of the patch is well estimated. regularization_coefficient controls a term
+ // in the sum of squared error cost that makes it expensive for the optimizer
+ // to pick a warp that changes the shape of the patch dramatically (e.g.
+ // rotating, scaling, skewing, etc).
+ //
+ // In particular it adds an 8-residual cost function to the optimization,
+  // where each corner induces 2 residuals: the difference between the warped
+  // corner position and the initial guess. However, the patch centroids are
+  // subtracted so that only patch distortions are penalized.
+ //
+ // If zero, no regularization is used.
+ double regularization_coefficient;
+
+  // If the maximum shift of any patch corner between successful iterations of
+  // the solver is less than this amount, then the tracking is declared
+  // successful and TrackRegionResult::termination is set to CONVERGENCE.
+ double minimum_corner_shift_tolerance_pixels;
+
+  // If non-null, this is used as the pattern mask. It should match the size of
+  // image1, even though only values inside the image1 quad are examined. The
+  // values must be in the range 0.0 to 1.0.
+ FloatImage *image1_mask;
+};
+
+struct TrackRegionResult {
+ enum Termination {
+    // Ceres termination types, duplicated here (though the int values differ).
+ CONVERGENCE,
+ NO_CONVERGENCE,
+ FAILURE,
+
+ // Libmv specific errors.
+ SOURCE_OUT_OF_BOUNDS,
+ DESTINATION_OUT_OF_BOUNDS,
+ FELL_OUT_OF_BOUNDS,
+ INSUFFICIENT_CORRELATION,
+ INSUFFICIENT_PATTERN_AREA,
+ CONFIGURATION_ERROR,
+ };
+ Termination termination;
+
+ bool is_usable() {
+ return termination == CONVERGENCE ||
+ termination == NO_CONVERGENCE;
+ }
+
+ int num_iterations;
+ double correlation;
+
+ // Final parameters?
+ bool used_brute_translation_initialization;
+};
+
+// Always needs 4 correspondences.
+void TrackRegion(const FloatImage &image1,
+ const FloatImage &image2,
+ const double *x1, const double *y1,
+ const TrackRegionOptions &options,
+ double *x2, double *y2,
+ TrackRegionResult *result);
+
+// Sample a "canonical" version of the passed planar patch, using bilinear
+// sampling. The passed corners must be within the image and have at least two
+// pixels of border around them (so, e.g., a corner of the patch cannot lie
+// directly on the edge of the image). Four corners are always required. All
+// channels are interpolated.
+// When mask is not null it is used as a pattern mask. It should match the
+// size of image.
+// The warped coordinates of the marker's position are returned in
+// warped_position_x and warped_position_y.
+bool SamplePlanarPatch(const FloatImage &image,
+ const double *xs, const double *ys,
+ int num_samples_x, int num_samples_y,
+ FloatImage *mask, FloatImage *patch,
+ double *warped_position_x, double *warped_position_y);
+
+} // namespace libmv
+
+#endif // LIBMV_TRACKING_TRACK_REGION_H_
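For orientation, here is a hedged usage sketch of the API declared in this header. The option values are arbitrary example choices (TrackRegionOptions' constructor sets the real defaults elsewhere), and the images and corner arrays are assumed to be supplied by the caller.

    #include "libmv/tracking/track_region.h"

    // Illustrative sketch only: track one marker quad from image1 to image2.
    // x1/y1 hold the four pattern corners plus the marker center (5 entries);
    // x2/y2 hold the initial guess on input and the tracked result on output.
    bool TrackOneMarker(const libmv::FloatImage &image1,
                        const libmv::FloatImage &image2,
                        const double *x1, const double *y1,
                        double *x2, double *y2) {
      libmv::TrackRegionOptions options;
      options.mode = libmv::TrackRegionOptions::TRANSLATION;
      options.max_iterations = 50;         // Example value.
      options.sigma = 0.9;                 // Example blur size, in pixels.
      options.minimum_correlation = 0.75;  // Example acceptance threshold.
      options.use_brute_initialization = true;
      options.num_extra_points = 1;        // Also warp the marker center.

      libmv::TrackRegionResult result;
      libmv::TrackRegion(image1, image2, x1, y1, options, x2, y2, &result);
      return result.is_usable();
    }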
diff --git a/intern/libmv/libmv/tracking/trklt_region_tracker.cc b/intern/libmv/libmv/tracking/trklt_region_tracker.cc
new file mode 100644
index 00000000000..05ef3d1d272
--- /dev/null
+++ b/intern/libmv/libmv/tracking/trklt_region_tracker.cc
@@ -0,0 +1,176 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/tracking/trklt_region_tracker.h"
+
+#include "libmv/logging/logging.h"
+#include "libmv/numeric/numeric.h"
+#include "libmv/image/image.h"
+#include "libmv/image/convolve.h"
+#include "libmv/image/sample.h"
+
+namespace libmv {
+
+// TODO(keir): Switch this to use the smarter LM loop like in ESM.
+
+// Computes U and e from the Ud = e equation (number 14) from the paper.
+static void ComputeTrackingEquation(const Array3Df &image_and_gradient1,
+ const Array3Df &image_and_gradient2,
+ double x1, double y1,
+ double x2, double y2,
+ int half_width,
+ double lambda,
+ Mat2f *U,
+ Vec2f *e) {
+ Mat2f A, B, C, D;
+ A = B = C = D = Mat2f::Zero();
+
+ Vec2f R, S, V, W;
+ R = S = V = W = Vec2f::Zero();
+
+ for (int r = -half_width; r <= half_width; ++r) {
+ for (int c = -half_width; c <= half_width; ++c) {
+ float xx1 = x1 + c;
+ float yy1 = y1 + r;
+ float xx2 = x2 + c;
+ float yy2 = y2 + r;
+
+ float I = SampleLinear(image_and_gradient1, yy1, xx1, 0);
+ float J = SampleLinear(image_and_gradient2, yy2, xx2, 0);
+
+ Vec2f gI, gJ;
+ gI << SampleLinear(image_and_gradient1, yy1, xx1, 1),
+ SampleLinear(image_and_gradient1, yy1, xx1, 2);
+ gJ << SampleLinear(image_and_gradient2, yy2, xx2, 1),
+ SampleLinear(image_and_gradient2, yy2, xx2, 2);
+
+ // Equation 15 from the paper.
+ A += gI * gI.transpose();
+ B += gI * gJ.transpose();
+ C += gJ * gJ.transpose();
+ R += I * gI;
+ S += J * gI;
+ V += I * gJ;
+ W += J * gJ;
+ }
+ }
+
+ // In the paper they show a D matrix, but it is just B transpose, so use that
+ // instead of explicitly computing D.
+ Mat2f Di = B.transpose().inverse();
+
+ // Equation 14 from the paper.
+ *U = A*Di*C + lambda*Di*C - 0.5*B;
+ *e = (A + lambda*Mat2f::Identity())*Di*(V - W) + 0.5*(S - R);
+}
+
+static bool RegionIsInBounds(const FloatImage &image1,
+ double x, double y,
+ int half_window_size) {
+ // Check the minimum coordinates.
+ int min_x = floor(x) - half_window_size - 1;
+ int min_y = floor(y) - half_window_size - 1;
+  if (min_x < 0 ||
+      min_y < 0) {
+ return false;
+ }
+
+ // Check the maximum coordinates.
+ int max_x = ceil(x) + half_window_size + 1;
+ int max_y = ceil(y) + half_window_size + 1;
+ if (max_x > image1.cols() ||
+ max_y > image1.rows()) {
+ return false;
+ }
+
+ // Ok, we're good.
+ return true;
+}
+
+bool TrkltRegionTracker::Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const {
+ if (!RegionIsInBounds(image1, x1, y1, half_window_size)) {
+ LG << "Fell out of image1's window with x1=" << x1 << ", y1=" << y1
+ << ", hw=" << half_window_size << ".";
+ return false;
+ }
+
+ Array3Df image_and_gradient1;
+ Array3Df image_and_gradient2;
+ BlurredImageAndDerivativesChannels(image1, sigma, &image_and_gradient1);
+ BlurredImageAndDerivativesChannels(image2, sigma, &image_and_gradient2);
+
+ int i;
+ Vec2f d = Vec2f::Zero();
+ for (i = 0; i < max_iterations; ++i) {
+ // Check that the entire image patch is within the bounds of the images.
+ if (!RegionIsInBounds(image2, *x2, *y2, half_window_size)) {
+ LG << "Fell out of image2's window with x2=" << *x2 << ", y2=" << *y2
+ << ", hw=" << half_window_size << ".";
+ return false;
+ }
+
+ // Compute gradient matrix and error vector.
+ Mat2f U;
+ Vec2f e;
+ ComputeTrackingEquation(image_and_gradient1,
+ image_and_gradient2,
+ x1, y1,
+ *x2, *y2,
+ half_window_size,
+ lambda,
+ &U, &e);
+
+ // Solve the linear system for the best update to x2 and y2.
+ d = U.lu().solve(e);
+
+ // Update the position with the solved displacement.
+ *x2 += d[0];
+ *y2 += d[1];
+
+    // Check the quality of the solution, but only after the position has
+    // already been updated with our best estimate. The update is done anyway
+    // because the caller is told tracking failed either way, so we may as
+    // well report our best effort.
+ float determinant = U.determinant();
+ if (fabs(determinant) < min_determinant) {
+ // The determinant, which indicates the trackiness of the point, is too
+ // small, so fail out.
+ LG << "Determinant " << determinant << " is too small; failing tracking.";
+ return false;
+ }
+ LG << "x=" << *x2 << ", y=" << *y2 << ", dx=" << d[0] << ", dy=" << d[1]
+ << ", det=" << determinant;
+
+
+ // If the update is small, then we probably found the target.
+ if (d.squaredNorm() < min_update_squared_distance) {
+ LG << "Successful track in " << i << " iterations.";
+ return true;
+ }
+ }
+ // Getting here means we hit max iterations, so tracking failed.
+ LG << "Too many iterations; max is set to " << max_iterations << ".";
+ return false;
+}
+
+} // namespace libmv
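For readers cross-checking ComputeTrackingEquation above against equations 14 and 15 of the cited paper, this is a direct transcription of what the code evaluates (sums run over the tracking window; D is taken to be B^T, as the comment notes):

    \[
      A = \sum \nabla I\,\nabla I^{\top},\quad
      B = \sum \nabla I\,\nabla J^{\top},\quad
      C = \sum \nabla J\,\nabla J^{\top},\quad
      D = B^{\top},
    \]
    \[
      R = \sum I\,\nabla I,\quad
      S = \sum J\,\nabla I,\quad
      V = \sum I\,\nabla J,\quad
      W = \sum J\,\nabla J,
    \]
    \[
      U = A D^{-1} C + \lambda D^{-1} C - \tfrac{1}{2} B,\qquad
      e = \bigl(A + \lambda I_{2\times 2}\bigr) D^{-1}(V - W) + \tfrac{1}{2}(S - R),
    \]

and the displacement update d applied each iteration solves U d = e.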
diff --git a/intern/libmv/libmv/tracking/trklt_region_tracker.h b/intern/libmv/libmv/tracking/trklt_region_tracker.h
new file mode 100644
index 00000000000..26d0621aa02
--- /dev/null
+++ b/intern/libmv/libmv/tracking/trklt_region_tracker.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2011 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
+#define LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
+
+#include "libmv/image/image.h"
+#include "libmv/tracking/region_tracker.h"
+
+namespace libmv {
+
+// An improved KLT algorithm that enforces that the tracking is time-reversible
+// [1]. This is not the same as the "symmetric" KLT that is sometimes used.
+// Anecdotally, this tracks much more consistently than vanilla KLT.
+//
+// [1] H. Wu, R. Chellappa, A. Sankaranarayanan, and S. Kevin Zhou. Robust
+// visual tracking using the time-reversibility constraint. International
+// Conference on Computer Vision (ICCV), Rio de Janeiro, October 2007.
+//
+struct TrkltRegionTracker : public RegionTracker {
+ TrkltRegionTracker()
+ : half_window_size(4),
+ max_iterations(100),
+ min_determinant(1e-6),
+ min_update_squared_distance(1e-6),
+ sigma(0.9),
+ lambda(0.05) {}
+
+ virtual ~TrkltRegionTracker() {}
+
+ // Tracker interface.
+ virtual bool Track(const FloatImage &image1,
+ const FloatImage &image2,
+ double x1, double y1,
+ double *x2, double *y2) const;
+
+ // No point in creating getters or setters.
+ int half_window_size;
+ int max_iterations;
+ double min_determinant;
+ double min_update_squared_distance;
+ double sigma;
+ double lambda;
+};
+
+} // namespace libmv
+
+#endif // LIBMV_REGION_TRACKING_TRKLT_REGION_TRACKER_H_
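A hedged usage sketch of the tracker declared above; the explicit field values below are illustrative (the struct's constructor already sets defaults), and the images and starting guess are assumed to come from the caller.

    #include "libmv/tracking/trklt_region_tracker.h"

    // Illustrative sketch only: refine a single point correspondence between
    // two frames. (*x2, *y2) must hold a starting guess (e.g. the previous
    // position) and receives the refined position when Track() returns true.
    bool RefinePoint(const libmv::FloatImage &image1,
                     const libmv::FloatImage &image2,
                     double x1, double y1,
                     double *x2, double *y2) {
      libmv::TrkltRegionTracker tracker;
      tracker.half_window_size = 8;  // Example: a 17x17 pixel window.
      tracker.max_iterations = 100;
      tracker.sigma = 0.9;
      return tracker.Track(image1, image2, x1, y1, x2, y2);
    }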
diff --git a/intern/libmv/mkfiles.sh b/intern/libmv/mkfiles.sh
new file mode 100755
index 00000000000..618070f0a81
--- /dev/null
+++ b/intern/libmv/mkfiles.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+
+find ./libmv/ -type f | sed -r 's/^\.\///' | sort > files.txt
+find ./third_party/ -mindepth 2 -type f | \
+ grep -v third_party/ceres | \
+ sed -r 's/^\.\///' | sort >> files.txt
diff --git a/intern/libmv/third_party/msinttypes/README.libmv b/intern/libmv/third_party/msinttypes/README.libmv
new file mode 100644
index 00000000000..423f599b4ad
--- /dev/null
+++ b/intern/libmv/third_party/msinttypes/README.libmv
@@ -0,0 +1,5 @@
+Project: msinttypes
+URL: http://code.google.com/p/msinttypes/
+License: New BSD License
+Upstream version: r24
+Local modifications: None.
diff --git a/intern/libmv/third_party/msinttypes/inttypes.h b/intern/libmv/third_party/msinttypes/inttypes.h
new file mode 100644
index 00000000000..0e8af69cb07
--- /dev/null
+++ b/intern/libmv/third_party/msinttypes/inttypes.h
@@ -0,0 +1,305 @@
+// ISO C9x compliant inttypes.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_INTTYPES_H_ // [
+#define _MSC_INTTYPES_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <stdint.h>
+
+// 7.8 Format conversion of integer types
+
+typedef struct {
+ intmax_t quot;
+ intmax_t rem;
+} imaxdiv_t;
+
+// 7.8.1 Macros for format specifiers
+
+#if !defined(__cplusplus) || defined(__STDC_FORMAT_MACROS) // [ See footnote 185 at page 198
+
+// The fprintf macros for signed integers are:
+#define PRId8 "d"
+#define PRIi8 "i"
+#define PRIdLEAST8 "d"
+#define PRIiLEAST8 "i"
+#define PRIdFAST8 "d"
+#define PRIiFAST8 "i"
+
+#define PRId16 "hd"
+#define PRIi16 "hi"
+#define PRIdLEAST16 "hd"
+#define PRIiLEAST16 "hi"
+#define PRIdFAST16 "hd"
+#define PRIiFAST16 "hi"
+
+#define PRId32 "I32d"
+#define PRIi32 "I32i"
+#define PRIdLEAST32 "I32d"
+#define PRIiLEAST32 "I32i"
+#define PRIdFAST32 "I32d"
+#define PRIiFAST32 "I32i"
+
+#define PRId64 "I64d"
+#define PRIi64 "I64i"
+#define PRIdLEAST64 "I64d"
+#define PRIiLEAST64 "I64i"
+#define PRIdFAST64 "I64d"
+#define PRIiFAST64 "I64i"
+
+#define PRIdMAX "I64d"
+#define PRIiMAX "I64i"
+
+#define PRIdPTR "Id"
+#define PRIiPTR "Ii"
+
+// The fprintf macros for unsigned integers are:
+#define PRIo8 "o"
+#define PRIu8 "u"
+#define PRIx8 "x"
+#define PRIX8 "X"
+#define PRIoLEAST8 "o"
+#define PRIuLEAST8 "u"
+#define PRIxLEAST8 "x"
+#define PRIXLEAST8 "X"
+#define PRIoFAST8 "o"
+#define PRIuFAST8 "u"
+#define PRIxFAST8 "x"
+#define PRIXFAST8 "X"
+
+#define PRIo16 "ho"
+#define PRIu16 "hu"
+#define PRIx16 "hx"
+#define PRIX16 "hX"
+#define PRIoLEAST16 "ho"
+#define PRIuLEAST16 "hu"
+#define PRIxLEAST16 "hx"
+#define PRIXLEAST16 "hX"
+#define PRIoFAST16 "ho"
+#define PRIuFAST16 "hu"
+#define PRIxFAST16 "hx"
+#define PRIXFAST16 "hX"
+
+#define PRIo32 "I32o"
+#define PRIu32 "I32u"
+#define PRIx32 "I32x"
+#define PRIX32 "I32X"
+#define PRIoLEAST32 "I32o"
+#define PRIuLEAST32 "I32u"
+#define PRIxLEAST32 "I32x"
+#define PRIXLEAST32 "I32X"
+#define PRIoFAST32 "I32o"
+#define PRIuFAST32 "I32u"
+#define PRIxFAST32 "I32x"
+#define PRIXFAST32 "I32X"
+
+#define PRIo64 "I64o"
+#define PRIu64 "I64u"
+#define PRIx64 "I64x"
+#define PRIX64 "I64X"
+#define PRIoLEAST64 "I64o"
+#define PRIuLEAST64 "I64u"
+#define PRIxLEAST64 "I64x"
+#define PRIXLEAST64 "I64X"
+#define PRIoFAST64 "I64o"
+#define PRIuFAST64 "I64u"
+#define PRIxFAST64 "I64x"
+#define PRIXFAST64 "I64X"
+
+#define PRIoMAX "I64o"
+#define PRIuMAX "I64u"
+#define PRIxMAX "I64x"
+#define PRIXMAX "I64X"
+
+#define PRIoPTR "Io"
+#define PRIuPTR "Iu"
+#define PRIxPTR "Ix"
+#define PRIXPTR "IX"
+
+// The fscanf macros for signed integers are:
+#define SCNd8 "d"
+#define SCNi8 "i"
+#define SCNdLEAST8 "d"
+#define SCNiLEAST8 "i"
+#define SCNdFAST8 "d"
+#define SCNiFAST8 "i"
+
+#define SCNd16 "hd"
+#define SCNi16 "hi"
+#define SCNdLEAST16 "hd"
+#define SCNiLEAST16 "hi"
+#define SCNdFAST16 "hd"
+#define SCNiFAST16 "hi"
+
+#define SCNd32 "ld"
+#define SCNi32 "li"
+#define SCNdLEAST32 "ld"
+#define SCNiLEAST32 "li"
+#define SCNdFAST32 "ld"
+#define SCNiFAST32 "li"
+
+#define SCNd64 "I64d"
+#define SCNi64 "I64i"
+#define SCNdLEAST64 "I64d"
+#define SCNiLEAST64 "I64i"
+#define SCNdFAST64 "I64d"
+#define SCNiFAST64 "I64i"
+
+#define SCNdMAX "I64d"
+#define SCNiMAX "I64i"
+
+#ifdef _WIN64 // [
+# define SCNdPTR "I64d"
+# define SCNiPTR "I64i"
+#else // _WIN64 ][
+# define SCNdPTR "ld"
+# define SCNiPTR "li"
+#endif // _WIN64 ]
+
+// The fscanf macros for unsigned integers are:
+#define SCNo8 "o"
+#define SCNu8 "u"
+#define SCNx8 "x"
+#define SCNX8 "X"
+#define SCNoLEAST8 "o"
+#define SCNuLEAST8 "u"
+#define SCNxLEAST8 "x"
+#define SCNXLEAST8 "X"
+#define SCNoFAST8 "o"
+#define SCNuFAST8 "u"
+#define SCNxFAST8 "x"
+#define SCNXFAST8 "X"
+
+#define SCNo16 "ho"
+#define SCNu16 "hu"
+#define SCNx16 "hx"
+#define SCNX16 "hX"
+#define SCNoLEAST16 "ho"
+#define SCNuLEAST16 "hu"
+#define SCNxLEAST16 "hx"
+#define SCNXLEAST16 "hX"
+#define SCNoFAST16 "ho"
+#define SCNuFAST16 "hu"
+#define SCNxFAST16 "hx"
+#define SCNXFAST16 "hX"
+
+#define SCNo32 "lo"
+#define SCNu32 "lu"
+#define SCNx32 "lx"
+#define SCNX32 "lX"
+#define SCNoLEAST32 "lo"
+#define SCNuLEAST32 "lu"
+#define SCNxLEAST32 "lx"
+#define SCNXLEAST32 "lX"
+#define SCNoFAST32 "lo"
+#define SCNuFAST32 "lu"
+#define SCNxFAST32 "lx"
+#define SCNXFAST32 "lX"
+
+#define SCNo64 "I64o"
+#define SCNu64 "I64u"
+#define SCNx64 "I64x"
+#define SCNX64 "I64X"
+#define SCNoLEAST64 "I64o"
+#define SCNuLEAST64 "I64u"
+#define SCNxLEAST64 "I64x"
+#define SCNXLEAST64 "I64X"
+#define SCNoFAST64 "I64o"
+#define SCNuFAST64 "I64u"
+#define SCNxFAST64 "I64x"
+#define SCNXFAST64 "I64X"
+
+#define SCNoMAX "I64o"
+#define SCNuMAX "I64u"
+#define SCNxMAX "I64x"
+#define SCNXMAX "I64X"
+
+#ifdef _WIN64 // [
+# define SCNoPTR "I64o"
+# define SCNuPTR "I64u"
+# define SCNxPTR "I64x"
+# define SCNXPTR "I64X"
+#else // _WIN64 ][
+# define SCNoPTR "lo"
+# define SCNuPTR "lu"
+# define SCNxPTR "lx"
+# define SCNXPTR "lX"
+#endif // _WIN64 ]
+
+#endif // __STDC_FORMAT_MACROS ]
+
+// 7.8.2 Functions for greatest-width integer types
+
+// 7.8.2.1 The imaxabs function
+#define imaxabs _abs64
+
+// 7.8.2.2 The imaxdiv function
+
+// This is modified version of div() function from Microsoft's div.c found
+// in %MSVC.NET%\crt\src\div.c
+#ifdef STATIC_IMAXDIV // [
+static
+#else // STATIC_IMAXDIV ][
+_inline
+#endif // STATIC_IMAXDIV ]
+imaxdiv_t __cdecl imaxdiv(intmax_t numer, intmax_t denom)
+{
+ imaxdiv_t result;
+
+ result.quot = numer / denom;
+ result.rem = numer % denom;
+
+ if (numer < 0 && result.rem > 0) {
+ // did division wrong; must fix up
+ ++result.quot;
+ result.rem -= denom;
+ }
+
+ return result;
+}
+
+// 7.8.2.3 The strtoimax and strtoumax functions
+#define strtoimax _strtoi64
+#define strtoumax _strtoui64
+
+// 7.8.2.4 The wcstoimax and wcstoumax functions
+#define wcstoimax _wcstoi64
+#define wcstoumax _wcstoui64
+
+
+#endif // _MSC_INTTYPES_H_ ]
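A small, hedged example of what the format macros above provide on MSVC; it assumes the file is compiled as C++ with __STDC_FORMAT_MACROS defined before the include, per the guard in the header.

    #define __STDC_FORMAT_MACROS  // Required in C++; see the guard above.
    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
      int64_t frames = 123456789012345LL;
      // PRId64 expands to "I64d" with this header on MSVC, "lld" on most others.
      printf("frames = %" PRId64 "\n", frames);

      int64_t parsed = 0;
      sscanf("987654321", "%" SCNd64, &parsed);
      printf("parsed = %" PRId64 "\n", parsed);
      return 0;
    }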
diff --git a/intern/libmv/third_party/msinttypes/stdint.h b/intern/libmv/third_party/msinttypes/stdint.h
new file mode 100644
index 00000000000..189ee34571c
--- /dev/null
+++ b/intern/libmv/third_party/msinttypes/stdint.h
@@ -0,0 +1,247 @@
+// ISO C9x compliant stdint.h for Microsoft Visual Studio
+// Based on ISO/IEC 9899:TC2 Committee draft (May 6, 2005) WG14/N1124
+//
+// Copyright (c) 2006-2008 Alexander Chemeris
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// 1. Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// 3. The name of the author may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
+// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+// EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
+// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
+// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+///////////////////////////////////////////////////////////////////////////////
+
+#ifndef _MSC_VER // [
+#error "Use this header only with Microsoft Visual C++ compilers!"
+#endif // _MSC_VER ]
+
+#ifndef _MSC_STDINT_H_ // [
+#define _MSC_STDINT_H_
+
+#if _MSC_VER > 1000
+#pragma once
+#endif
+
+#include <limits.h>
+
+// For Visual Studio 6 in C++ mode and for many Visual Studio versions when
+// compiling for ARM we should wrap <wchar.h> include with 'extern "C++" {}'
+// or compiler give many errors like this:
+// error C2733: second C linkage of overloaded function 'wmemchr' not allowed
+#ifdef __cplusplus
+extern "C" {
+#endif
+# include <wchar.h>
+#ifdef __cplusplus
+}
+#endif
+
+// Define _W64 macros to mark types changing their size, like intptr_t.
+#ifndef _W64
+# if !defined(__midl) && (defined(_X86_) || defined(_M_IX86)) && _MSC_VER >= 1300
+# define _W64 __w64
+# else
+# define _W64
+# endif
+#endif
+
+
+// 7.18.1 Integer types
+
+// 7.18.1.1 Exact-width integer types
+
+// Visual Studio 6 and Embedded Visual C++ 4 doesn't
+// realize that, e.g. char has the same size as __int8
+// so we give up on __intX for them.
+#if (_MSC_VER < 1300)
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+#else
+ typedef signed __int8 int8_t;
+ typedef signed __int16 int16_t;
+ typedef signed __int32 int32_t;
+ typedef unsigned __int8 uint8_t;
+ typedef unsigned __int16 uint16_t;
+ typedef unsigned __int32 uint32_t;
+#endif
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+
+
+// 7.18.1.2 Minimum-width integer types
+typedef int8_t int_least8_t;
+typedef int16_t int_least16_t;
+typedef int32_t int_least32_t;
+typedef int64_t int_least64_t;
+typedef uint8_t uint_least8_t;
+typedef uint16_t uint_least16_t;
+typedef uint32_t uint_least32_t;
+typedef uint64_t uint_least64_t;
+
+// 7.18.1.3 Fastest minimum-width integer types
+typedef int8_t int_fast8_t;
+typedef int16_t int_fast16_t;
+typedef int32_t int_fast32_t;
+typedef int64_t int_fast64_t;
+typedef uint8_t uint_fast8_t;
+typedef uint16_t uint_fast16_t;
+typedef uint32_t uint_fast32_t;
+typedef uint64_t uint_fast64_t;
+
+// 7.18.1.4 Integer types capable of holding object pointers
+#ifdef _WIN64 // [
+ typedef __int64 intptr_t;
+ typedef unsigned __int64 uintptr_t;
+#else // _WIN64 ][
+ typedef _W64 int intptr_t;
+ typedef _W64 unsigned int uintptr_t;
+#endif // _WIN64 ]
+
+// 7.18.1.5 Greatest-width integer types
+typedef int64_t intmax_t;
+typedef uint64_t uintmax_t;
+
+
+// 7.18.2 Limits of specified-width integer types
+
+#if !defined(__cplusplus) || defined(__STDC_LIMIT_MACROS) // [ See footnote 220 at page 257 and footnote 221 at page 259
+
+// 7.18.2.1 Limits of exact-width integer types
+#define INT8_MIN ((int8_t)_I8_MIN)
+#define INT8_MAX _I8_MAX
+#define INT16_MIN ((int16_t)_I16_MIN)
+#define INT16_MAX _I16_MAX
+#define INT32_MIN ((int32_t)_I32_MIN)
+#define INT32_MAX _I32_MAX
+#define INT64_MIN ((int64_t)_I64_MIN)
+#define INT64_MAX _I64_MAX
+#define UINT8_MAX _UI8_MAX
+#define UINT16_MAX _UI16_MAX
+#define UINT32_MAX _UI32_MAX
+#define UINT64_MAX _UI64_MAX
+
+// 7.18.2.2 Limits of minimum-width integer types
+#define INT_LEAST8_MIN INT8_MIN
+#define INT_LEAST8_MAX INT8_MAX
+#define INT_LEAST16_MIN INT16_MIN
+#define INT_LEAST16_MAX INT16_MAX
+#define INT_LEAST32_MIN INT32_MIN
+#define INT_LEAST32_MAX INT32_MAX
+#define INT_LEAST64_MIN INT64_MIN
+#define INT_LEAST64_MAX INT64_MAX
+#define UINT_LEAST8_MAX UINT8_MAX
+#define UINT_LEAST16_MAX UINT16_MAX
+#define UINT_LEAST32_MAX UINT32_MAX
+#define UINT_LEAST64_MAX UINT64_MAX
+
+// 7.18.2.3 Limits of fastest minimum-width integer types
+#define INT_FAST8_MIN INT8_MIN
+#define INT_FAST8_MAX INT8_MAX
+#define INT_FAST16_MIN INT16_MIN
+#define INT_FAST16_MAX INT16_MAX
+#define INT_FAST32_MIN INT32_MIN
+#define INT_FAST32_MAX INT32_MAX
+#define INT_FAST64_MIN INT64_MIN
+#define INT_FAST64_MAX INT64_MAX
+#define UINT_FAST8_MAX UINT8_MAX
+#define UINT_FAST16_MAX UINT16_MAX
+#define UINT_FAST32_MAX UINT32_MAX
+#define UINT_FAST64_MAX UINT64_MAX
+
+// 7.18.2.4 Limits of integer types capable of holding object pointers
+#ifdef _WIN64 // [
+# define INTPTR_MIN INT64_MIN
+# define INTPTR_MAX INT64_MAX
+# define UINTPTR_MAX UINT64_MAX
+#else // _WIN64 ][
+# define INTPTR_MIN INT32_MIN
+# define INTPTR_MAX INT32_MAX
+# define UINTPTR_MAX UINT32_MAX
+#endif // _WIN64 ]
+
+// 7.18.2.5 Limits of greatest-width integer types
+#define INTMAX_MIN INT64_MIN
+#define INTMAX_MAX INT64_MAX
+#define UINTMAX_MAX UINT64_MAX
+
+// 7.18.3 Limits of other integer types
+
+#ifdef _WIN64 // [
+# define PTRDIFF_MIN _I64_MIN
+# define PTRDIFF_MAX _I64_MAX
+#else // _WIN64 ][
+# define PTRDIFF_MIN _I32_MIN
+# define PTRDIFF_MAX _I32_MAX
+#endif // _WIN64 ]
+
+#define SIG_ATOMIC_MIN INT_MIN
+#define SIG_ATOMIC_MAX INT_MAX
+
+#ifndef SIZE_MAX // [
+# ifdef _WIN64 // [
+# define SIZE_MAX _UI64_MAX
+# else // _WIN64 ][
+# define SIZE_MAX _UI32_MAX
+# endif // _WIN64 ]
+#endif // SIZE_MAX ]
+
+// WCHAR_MIN and WCHAR_MAX are also defined in <wchar.h>
+#ifndef WCHAR_MIN // [
+# define WCHAR_MIN 0
+#endif // WCHAR_MIN ]
+#ifndef WCHAR_MAX // [
+# define WCHAR_MAX _UI16_MAX
+#endif // WCHAR_MAX ]
+
+#define WINT_MIN 0
+#define WINT_MAX _UI16_MAX
+
+#endif // __STDC_LIMIT_MACROS ]
+
+
+// 7.18.4 Limits of other integer types
+
+#if !defined(__cplusplus) || defined(__STDC_CONSTANT_MACROS) // [ See footnote 224 at page 260
+
+// 7.18.4.1 Macros for minimum-width integer constants
+
+#define INT8_C(val) val##i8
+#define INT16_C(val) val##i16
+#define INT32_C(val) val##i32
+#define INT64_C(val) val##i64
+
+#define UINT8_C(val) val##ui8
+#define UINT16_C(val) val##ui16
+#define UINT32_C(val) val##ui32
+#define UINT64_C(val) val##ui64
+
+// 7.18.4.2 Macros for greatest-width integer constants
+#define INTMAX_C INT64_C
+#define UINTMAX_C UINT64_C
+
+#endif // __STDC_CONSTANT_MACROS ]
+
+
+#endif // _MSC_STDINT_H_ ]