git.blender.org/blender.git
author     Sergey Sharybin <sergey.vfx@gmail.com>   2014-02-20 17:41:05 +0400
committer  Sergey Sharybin <sergey.vfx@gmail.com>   2014-04-17 15:28:41 +0400
commit     ed2ddc9f706b956ea955ac86b3e7ec5e0b41d9cd (patch)
tree       b8023b5604fef55f436c0d1be1a03e113f9ef39d
parent     39bfde674cfe4f6a9140b6d4c48a348de04d715a (diff)
Support multiple distortion models, including a new division model
This commit makes it so CameraIntrinsics is no longer hardcoded to use the
traditional polynomial radial distortion model. Currently the distortion code
has generic logic which is shared between different distortion models, but it
had no other models until now.

This moves everything specific to the polynomial radial distortion to a
subclass PolynomialDistortionCameraIntrinsics(), and adds a new division
distortion model suitable for cameras such as the GoPro, which have much
stronger distortion due to their fisheye lens.

This also cleans up the internal API of CameraIntrinsics to make it easier to
understand and reduces old C-style code.

The new distortion model is available in the Lens panel of the MCE (Movie Clip
Editor):

- Polynomial is the old, well-known model.
- Division is the new one, intended to deal better with huge distortion.

Coefficients of the division model work independently of each other, and for
the division model one will probably want positive values to get a barrel
distortion.
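For reference, below is a minimal C++ sketch of what the two radial distortion
mappings look like on normalized camera coordinates. This is not the libmv
implementation; the direction in which each formula is applied and the exact
coefficient conventions in distortion_models.cc are assumptions of the sketch.

// Polynomial model: scale by (1 + k1*r^2 + k2*r^4 + k3*r^6); the terms
// interact, so tuning one coefficient affects the others.
void ApplyPolynomialDistortionSketch(double k1, double k2, double k3,
                                     double x, double y,
                                     double *distorted_x, double *distorted_y)
{
  double r2 = x * x + y * y;
  double r4 = r2 * r2;
  double r6 = r4 * r2;
  double scale = 1.0 + k1 * r2 + k2 * r4 + k3 * r6;
  *distorted_x = x * scale;
  *distorted_y = y * scale;
}

// Division model: divide by (1 + k1*r^2 + k2*r^4); positive coefficients pull
// points towards the center (a barrel-like distortion), and the two
// coefficients act independently of each other.
void ApplyDivisionDistortionSketch(double k1, double k2,
                                   double x, double y,
                                   double *distorted_x, double *distorted_y)
{
  double r2 = x * x + y * y;
  double r4 = r2 * r2;
  double scale = 1.0 / (1.0 + k1 * r2 + k2 * r4);
  *distorted_x = x * scale;
  *distorted_y = y * scale;
}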
-rw-r--r--  extern/libmv/CMakeLists.txt                                   |    5
-rw-r--r--  extern/libmv/ChangeLog                                        |  207
-rwxr-xr-x  extern/libmv/bundle.sh                                        |    2
-rw-r--r--  extern/libmv/files.txt                                        |    3
-rw-r--r--  extern/libmv/libmv-capi.cc                                    | 1022
-rw-r--r--  extern/libmv/libmv-capi.h                                     |   25
-rw-r--r--  extern/libmv/libmv-capi_stub.cc                               |   20
-rw-r--r--  extern/libmv/libmv-util.cc                                    |  309
-rw-r--r--  extern/libmv/libmv-util.h                                     |   69
-rw-r--r--  extern/libmv/libmv/simple_pipeline/bundle.cc                  |  152
-rw-r--r--  extern/libmv/libmv/simple_pipeline/bundle.h                   |   13
-rw-r--r--  extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc       |  493
-rw-r--r--  extern/libmv/libmv/simple_pipeline/camera_intrinsics.h        |  470
-rw-r--r--  extern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h   |  192
-rw-r--r--  extern/libmv/libmv/simple_pipeline/detect.cc                  |   56
-rw-r--r--  extern/libmv/libmv/simple_pipeline/detect.h                   |   35
-rw-r--r--  extern/libmv/libmv/simple_pipeline/distortion_models.cc       |  197
-rw-r--r--  extern/libmv/libmv/simple_pipeline/distortion_models.h        |  131
-rw-r--r--  extern/libmv/libmv/simple_pipeline/keyframe_selection.cc      |   31
-rw-r--r--  extern/libmv/libmv/simple_pipeline/keyframe_selection.h       |    7
-rw-r--r--  release/scripts/startup/bl_ui/space_clip.py                   |   16
-rw-r--r--  source/blender/blenkernel/BKE_tracking.h                      |    3
-rw-r--r--  source/blender/blenkernel/intern/movieclip.c                  |   22
-rw-r--r--  source/blender/blenkernel/intern/tracking.c                   |   55
-rw-r--r--  source/blender/blenkernel/intern/tracking_solver.c            |   61
-rw-r--r--  source/blender/blenkernel/intern/tracking_util.c              |   67
-rw-r--r--  source/blender/blenkernel/tracking_private.h                  |    9
-rw-r--r--  source/blender/editors/space_clip/clip_ops.c                  |   12
-rw-r--r--  source/blender/makesdna/DNA_tracking_types.h                  |   17
-rw-r--r--  source/blender/makesrna/intern/rna_tracking.c                 |   38
30 files changed, 2479 insertions(+), 1260 deletions(-)
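To show how the new option fields are meant to be consumed, here is a hedged
usage sketch of the updated C-API. The function and field names are taken from
the diff below; the numeric values and the overall flow are illustrative only.

#include <string.h>
#include "libmv-capi.h"

/* Illustrative only: create intrinsics that use the new division model.
 * Release the result with libmv_cameraIntrinsicsDestroy(). */
libmv_CameraIntrinsics *create_division_intrinsics(void)
{
	libmv_CameraIntrinsicsOptions options;
	memset(&options, 0, sizeof(options));

	options.focal_length = 1200.0;
	options.principal_point_x = 960.0;
	options.principal_point_y = 540.0;
	options.image_width = 1920;
	options.image_height = 1080;

	/* Pick the division model instead of the polynomial one. */
	options.distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
	options.division_k1 = 0.1;
	options.division_k2 = 0.01;

	return libmv_cameraIntrinsicsNew(&options);
}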
diff --git a/extern/libmv/CMakeLists.txt b/extern/libmv/CMakeLists.txt
index f7173c3b1e1..f44c78c19aa 100644
--- a/extern/libmv/CMakeLists.txt
+++ b/extern/libmv/CMakeLists.txt
@@ -58,6 +58,7 @@ if(WITH_LIBMV)
list(APPEND SRC
libmv-capi.cc
+ libmv-util.cc
libmv/image/array_nd.cc
libmv/image/convolve.cc
libmv/multiview/conditioning.cc
@@ -72,6 +73,7 @@ if(WITH_LIBMV)
libmv/simple_pipeline/bundle.cc
libmv/simple_pipeline/camera_intrinsics.cc
libmv/simple_pipeline/detect.cc
+ libmv/simple_pipeline/distortion_models.cc
libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/intersect.cc
libmv/simple_pipeline/keyframe_selection.cc
@@ -93,6 +95,7 @@ if(WITH_LIBMV)
third_party/gflags/gflags_completions.cc
third_party/gflags/gflags_reporting.cc
+ libmv-util.h
libmv/base/id_generator.h
libmv/base/scoped_ptr.h
libmv/base/vector.h
@@ -123,7 +126,9 @@ if(WITH_LIBMV)
libmv/simple_pipeline/bundle.h
libmv/simple_pipeline/callbacks.h
libmv/simple_pipeline/camera_intrinsics.h
+ libmv/simple_pipeline/camera_intrinsics_impl.h
libmv/simple_pipeline/detect.h
+ libmv/simple_pipeline/distortion_models.h
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.h
libmv/simple_pipeline/keyframe_selection.h
diff --git a/extern/libmv/ChangeLog b/extern/libmv/ChangeLog
index 6ec5e0aa502..af18b02a4db 100644
--- a/extern/libmv/ChangeLog
+++ b/extern/libmv/ChangeLog
@@ -1,3 +1,127 @@
+commit ee21415a353396df67ef21e82adaffab2a8d2a0a
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Apr 17 16:26:12 2014 +0600
+
+ Support multiple distortion models, including a new division model
+
+ This commit makes it so CameraIntrinsics is no longer hardcoded
+ to use the traditional polynomial radial distortion model. Currently
+ the distortion code has generic logic which is shared between
+ different distortion models, but had no other models until now.
+
+ This moves everything specific to the polynomial radial distortion
+ to a subclass PolynomialDistortionCameraIntrinsics(), and adds a
+ new division distortion model suitable for cameras such as the
+ GoPro which have much stronger distortion due to their fisheye lens.
+
+ This also cleans up the internal API of CameraIntrinsics to make
+ it easier to understand and reduces old C-style code.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ CC: jta
+
+ Differential Revision: https://developer.blender.org/D335
+
+commit 313252083f6dfa69a93c287bed81dec616503c1b
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Tue Apr 15 18:23:38 2014 +0600
+
+ Fix failure of the image transform linear test
+
+ Mainly it was caused by the flakiness of image rotation in cases
+ when the image has an even size. The test was expecting the transform
+ code to rotate the image around pixel corner, which isn't a
+ common behavior in image processing applications. Rotation
+ is usually done around the pixel center.
+
+ So now made it so RotateImage() rotates the image around the
+ pixel center which gives 100% proper result for odd sized images
+ (i.e. center pixel stays untouched).
+
+ Also made the tests use odd image sizes, which are more
+ predictable for humans. We can use even-sized images in the
+ tests as well, but their results wouldn't be as spectacular.
+
+ Another issue with the tests was caused by RescaleImageTranslation
+ test which expected things that are not happening in the
+ function.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D463
+
+commit 80d6945bf5f996b97cd41df0e422afce5e10e7f9
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Mon Apr 14 00:01:32 2014 +0600
+
+ Unit tests for feature detector
+
+ Currently covers only the simplest cases with synthetic images.
+ Also at this point mainly the Harris detector is being tested;
+ other detectors behave a bit unexpectedly on synthetic images
+ and this is to be investigated further.
+
+ Tests will be extended further later.
+
+ Additional change:
+
+ - Added constructor to Feature structure
+ - Added operator << for feature for easier debug dumps.
+
+ TODO: Some tests are not giving the result which I expected
+ them to. This is to be investigated further by finding the reference
+ detector implementation. Until then those tests are kept
+ commented out.
+
+ Reviewers: keir
+
+ Reviewed By: keir
+
+ Differential Revision: https://developer.blender.org/D316
+
+commit 397c3d3ed46eb4967eb285c8369cc125bea4b132
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Fri Apr 4 16:17:57 2014 +0600
+
+ Compilation error fix
+
+ Not totally sure why this is needed, but multiview indeed
+ uses V3D library still, so it needs to be linked against it.
+
+ Patch by Martijn Berger, thanks!
+
+commit 1c36279239cbffe152493106eb04e55df7ebd649
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Fri Apr 4 14:03:43 2014 +0600
+
+ Upgrade Eigen to 3.2.1 version
+
+ Two main reasons for this:
+ - Probably this would solve strict compiler warnings
+ - It brings new stuff like sparse LU decomposition which
+ might be useful in the future.
+
+commit de698f442934f475478463445f78a00ea632e823
+Author: Sergey Sharybin <sergey.vfx@gmail.com>
+Date: Thu Apr 3 15:08:26 2014 +0600
+
+ Fix compilation error when using make from the sources root
+
+ - Don't force flann to be static. It's a general rule on linux
+ to have dynamic libraries for all the bits instead of having
+ statically-linked dynamic libraries.
+
+ - Some weirdo stuff was happening around OpenExif, it was only
+ built on Apple, so don't link targets against this lib on
+ other platforms.
+
+ - Some libraries were missing for qt-tracker.
+
commit 901b146f28825d3e05f4157ca2a34ae00261b91a
Author: Sergey Sharybin <sergey.vfx@gmail.com>
Date: Wed Mar 26 17:44:09 2014 +0600
@@ -550,86 +674,3 @@ Date: Sun May 12 22:34:54 2013 +0600
- Removed note about unsupported camera intrinsics
refining flags. Technically, all combinations
are possible.
-
-commit 4432eb80f27e929f8750229aaada625d4f3ac5ee
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Sun May 12 22:19:31 2013 +0600
-
- Remove check for zero focal length in BA cost functor
-
- This check is actually redundant, because empty intrinsics
- will have focal length of 1.0, which means original comment
- about BundleIntrinsics was not truth.
-
- It is possible that external user will send focal length of
- zero to be refined, but blender prevents this from happening.
-
-commit 34a91c9b8acb0dba3382866fbd29bb9884edb98a
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Sat May 11 20:33:54 2013 +0600
-
- Fix compilation error with msvc2012
-
- Using change from glog's upstream for this.
-
-commit 87be4f030d025e4b29d9243d12bc458b2bb6762a
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Sat May 11 19:50:57 2013 +0600
-
- Style cleanup, mainly pointed by Sameer in Ceres's codereview
-
-commit 7fa9c0b83d5e0fbd331add2952045076c2028d1b
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Fri May 10 18:30:40 2013 +0600
-
- Keyframe selection improvements
-
- Added additional criteria, which ignores
- keyframe pair if success intersection factor
- is lower than current candidate pair factor.
-
- This solves issue with keyframe pair at which
- most of the tracks are intersecting behind the
- camera is accepted (because variance in this
- case is really small),
-
- Also tweaked generalized inverse function,
- which now doesn't scale epsilon by maximal
- matrix element. This gave issues at really bad
- candidates with unstable covariance. In this
- case almost all eigen values getting zeroed
- on inverse leading to small expected error.
-
-commit 0477ef1aa8fc92848f03c45e32539210be583b80
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Fri May 10 17:59:40 2013 +0600
-
- Keyframe selection code cleanup
-
- - Updated comments in code.
- - Removed currently unused functions.
- Actually, they'd better be moved to some generic
- logging module, but we don't have it now so was
- lazy to create one now. Could happen later using
- code from git history
- - Renamed function to match better to what's going
- on in it.
-
-commit fee2d7cc6003942f628c9a24b74008fd491b85b9
-Author: Sergey Sharybin <sergey.vfx@gmail.com>
-Date: Fri May 10 17:44:49 2013 +0600
-
- Optimization for reconstruction variance
-
- Achieved by replacing SVD-based pseudo-inverse with
- an eigen solver pseudo inverse.
-
- New function works in the same way: it decomposes
- matrix to V*D*V^-1, then inverts diagonal of D
- and composes matrix back.
-
- The same way is used to deal with gauges - last
- 7 eigen values are getting zeroed.
-
- In own tests gives approx 2x boost, without
- visible affect on selected keyframe quality.
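As an aside, the removed ChangeLog entry just above describes the
eigendecomposition-based pseudo-inverse used for the reconstruction-variance
criterion. A hedged sketch of that idea, assuming a symmetric (self-adjoint)
input matrix and following the "last 7 eigen values are getting zeroed"
description literally, could look like the following; it is not the libmv
function itself.

#include <Eigen/Dense>

// Sketch only: pseudo-inverse via the eigendecomposition A = V * D * V^T of a
// symmetric matrix, leaving the last 7 eigenvalues (the gauge freedoms, per
// the description above) zeroed instead of inverting them.
Eigen::MatrixXd PseudoInverseSketch(const Eigen::MatrixXd &A)
{
  Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(A);
  const Eigen::VectorXd &eigenvalues = solver.eigenvalues();  // increasing order
  const Eigen::MatrixXd &V = solver.eigenvectors();

  Eigen::VectorXd inverted = Eigen::VectorXd::Zero(eigenvalues.size());
  for (int i = 0; i < int(eigenvalues.size()) - 7; ++i) {
    if (eigenvalues(i) != 0.0) {
      inverted(i) = 1.0 / eigenvalues(i);
    }
  }

  // Recompose V * D^+ * V^T.
  return V * inverted.asDiagonal() * V.transpose();
}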
diff --git a/extern/libmv/bundle.sh b/extern/libmv/bundle.sh
index c98f8a31045..238cd503f57 100755
--- a/extern/libmv/bundle.sh
+++ b/extern/libmv/bundle.sh
@@ -146,10 +146,12 @@ if(WITH_LIBMV)
list(APPEND SRC
libmv-capi.cc
+ libmv-util.cc
${sources}
${third_sources}
+ libmv-util.h
${headers}
${third_headers}
diff --git a/extern/libmv/files.txt b/extern/libmv/files.txt
index cb6546ae8ff..316a94fcccc 100644
--- a/extern/libmv/files.txt
+++ b/extern/libmv/files.txt
@@ -41,8 +41,11 @@ libmv/simple_pipeline/bundle.h
libmv/simple_pipeline/callbacks.h
libmv/simple_pipeline/camera_intrinsics.cc
libmv/simple_pipeline/camera_intrinsics.h
+libmv/simple_pipeline/camera_intrinsics_impl.h
libmv/simple_pipeline/detect.cc
libmv/simple_pipeline/detect.h
+libmv/simple_pipeline/distortion_models.cc
+libmv/simple_pipeline/distortion_models.h
libmv/simple_pipeline/initialize_reconstruction.cc
libmv/simple_pipeline/initialize_reconstruction.h
libmv/simple_pipeline/intersect.cc
diff --git a/extern/libmv/libmv-capi.cc b/extern/libmv/libmv-capi.cc
index 2d3afcd16e7..82143d50fbf 100644
--- a/extern/libmv/libmv-capi.cc
+++ b/extern/libmv/libmv-capi.cc
@@ -35,14 +35,10 @@
#undef DUMP_ALWAYS
#include "libmv-capi.h"
+#include "libmv-util.h"
-#include <cstdlib>
#include <cassert>
-#if defined(DUMP_FAILURE) || defined (DUMP_ALWAYS)
-# include <png.h>
-#endif
-
#include "libmv-capi_intern.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/homography.h"
@@ -58,27 +54,52 @@
#include "libmv/simple_pipeline/reconstruction_scale.h"
#include "libmv/simple_pipeline/keyframe_selection.h"
-#include "libmv/multiview/homography.h"
-
-#include "libmv/image/convolve.h"
-
#ifdef _MSC_VER
# define snprintf _snprintf
#endif
+using libmv::CameraIntrinsics;
+using libmv::DetectOptions;
+using libmv::DivisionCameraIntrinsics;
+using libmv::EuclideanCamera;
+using libmv::EuclideanPoint;
+using libmv::EuclideanReconstruction;
+using libmv::EuclideanScaleToUnity;
+using libmv::Feature;
+using libmv::FloatImage;
+using libmv::Marker;
+using libmv::PolynomialCameraIntrinsics;
+using libmv::ProgressUpdateCallback;
+using libmv::Tracks;
+using libmv::TrackRegionOptions;
+using libmv::TrackRegionResult;
+
+using libmv::Detect;
+using libmv::EuclideanBundle;
+using libmv::EuclideanCompleteReconstruction;
+using libmv::EuclideanReconstructTwoFrames;
+using libmv::EuclideanReprojectionError;
+using libmv::TrackRegion;
+using libmv::SamplePlanarPatch;
+
+typedef struct libmv_Tracks libmv_Tracks;
+typedef struct libmv_Reconstruction libmv_Reconstruction;
+typedef struct libmv_Features libmv_Features;
+typedef struct libmv_CameraIntrinsics libmv_CameraIntrinsics;
+
struct libmv_Reconstruction {
- libmv::EuclideanReconstruction reconstruction;
+ EuclideanReconstruction reconstruction;
/* used for per-track average error calculation after reconstruction */
- libmv::Tracks tracks;
- libmv::CameraIntrinsics intrinsics;
+ Tracks tracks;
+ CameraIntrinsics *intrinsics;
double error;
};
struct libmv_Features {
int count;
- libmv::Feature *features;
+ Feature *features;
};
/* ************ Logging ************ */
@@ -113,190 +134,6 @@ void libmv_setLoggingVerbosity(int verbosity)
google::SetCommandLineOption("v", val);
}
-/* ************ Utility ************ */
-
-static void byteBufToImage(const unsigned char *buf, int width, int height, int channels, libmv::FloatImage *image)
-{
- int x, y, k, a = 0;
-
- image->Resize(height, width, channels);
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- for (k = 0; k < channels; k++) {
- (*image)(y, x, k) = (float)buf[a++] / 255.0f;
- }
- }
- }
-}
-
-static void floatBufToImage(const float *buf, int width, int height, int channels, libmv::FloatImage *image)
-{
- int x, y, k, a = 0;
-
- image->Resize(height, width, channels);
-
- for (y = 0; y < height; y++) {
- for (x = 0; x < width; x++) {
- for (k = 0; k < channels; k++) {
- (*image)(y, x, k) = buf[a++];
- }
- }
- }
-}
-
-static void imageToFloatBuf(const libmv::FloatImage *image, int channels, float *buf)
-{
- int x, y, k, a = 0;
-
- for (y = 0; y < image->Height(); y++) {
- for (x = 0; x < image->Width(); x++) {
- for (k = 0; k < channels; k++) {
- buf[a++] = (*image)(y, x, k);
- }
- }
- }
-}
-
-static void imageToByteBuf(const libmv::FloatImage *image, int channels, unsigned char *buf)
-{
- int x, y, k, a = 0;
-
- for (y = 0; y < image->Height(); y++) {
- for (x = 0; x < image->Width(); x++) {
- for (k = 0; k < channels; k++) {
- buf[a++] = (*image)(y, x, k) * 255.0f;
- }
- }
- }
-}
-
-#if defined(DUMP_FAILURE) || defined (DUMP_ALWAYS)
-static void savePNGImage(png_bytep *row_pointers, int width, int height, int depth, int color_type,
- const char *file_name)
-{
- png_infop info_ptr;
- png_structp png_ptr;
- FILE *fp = fopen(file_name, "wb");
-
- if (!fp)
- return;
-
- /* Initialize stuff */
- png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
- info_ptr = png_create_info_struct(png_ptr);
-
- if (setjmp(png_jmpbuf(png_ptr))) {
- fclose(fp);
- return;
- }
-
- png_init_io(png_ptr, fp);
-
- /* write header */
- if (setjmp(png_jmpbuf(png_ptr))) {
- fclose(fp);
- return;
- }
-
- png_set_IHDR(png_ptr, info_ptr, width, height,
- depth, color_type, PNG_INTERLACE_NONE,
- PNG_COMPRESSION_TYPE_BASE, PNG_FILTER_TYPE_BASE);
-
- png_write_info(png_ptr, info_ptr);
-
- /* write bytes */
- if (setjmp(png_jmpbuf(png_ptr))) {
- fclose(fp);
- return;
- }
-
- png_write_image(png_ptr, row_pointers);
-
- /* end write */
- if (setjmp(png_jmpbuf(png_ptr))) {
- fclose(fp);
- return;
- }
-
- png_write_end(png_ptr, NULL);
-
- fclose(fp);
-}
-
-static void saveImage(const char *prefix, libmv::FloatImage image, int x0, int y0)
-{
- int x, y;
- png_bytep *row_pointers;
-
- row_pointers = (png_bytep *) malloc(sizeof(png_bytep) * image.Height());
-
- for (y = 0; y < image.Height(); y++) {
- row_pointers[y] = (png_bytep) malloc(sizeof(png_byte) * 4 * image.Width());
-
- for (x = 0; x < image.Width(); x++) {
- if (x0 == x && image.Height() - y0 - 1 == y) {
- row_pointers[y][x * 4 + 0] = 255;
- row_pointers[y][x * 4 + 1] = 0;
- row_pointers[y][x * 4 + 2] = 0;
- row_pointers[y][x * 4 + 3] = 255;
- }
- else {
- float pixel = image(image.Height() - y - 1, x, 0);
- row_pointers[y][x * 4 + 0] = pixel * 255;
- row_pointers[y][x * 4 + 1] = pixel * 255;
- row_pointers[y][x * 4 + 2] = pixel * 255;
- row_pointers[y][x * 4 + 3] = 255;
- }
- }
- }
-
- {
- static int a = 0;
- char buf[128];
- snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
- savePNGImage(row_pointers, image.Width(), image.Height(), 8, PNG_COLOR_TYPE_RGBA, buf);
- }
-
- for (y = 0; y < image.Height(); y++) {
- free(row_pointers[y]);
- }
- free(row_pointers);
-}
-
-static void saveBytesImage(const char *prefix, unsigned char *data, int width, int height)
-{
- int x, y;
- png_bytep *row_pointers;
-
- row_pointers = (png_bytep *) malloc(sizeof(png_bytep) * height);
-
- for (y = 0; y < height; y++) {
- row_pointers[y] = (png_bytep) malloc(sizeof(png_byte) * 4 * width);
-
- for (x = 0; x < width; x++) {
- char pixel = data[width * y + x];
- row_pointers[y][x * 4 + 0] = pixel;
- row_pointers[y][x * 4 + 1] = pixel;
- row_pointers[y][x * 4 + 2] = pixel;
- row_pointers[y][x * 4 + 3] = 255;
- }
- }
-
- {
- static int a = 0;
- char buf[128];
- snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
- savePNGImage(row_pointers, width, height, 8, PNG_COLOR_TYPE_RGBA, buf);
- }
-
- for (y = 0; y < height; y++) {
- free(row_pointers[y]);
- }
- free(row_pointers);
-}
-#endif
-
/* ************ Planar tracker ************ */
/* TrackRegion */
@@ -319,13 +156,13 @@ int libmv_trackRegion(const libmv_TrackRegionOptions *options,
yy2[i] = y2[i];
}
- libmv::TrackRegionOptions track_region_options;
- libmv::FloatImage image1_mask;
+ TrackRegionOptions track_region_options;
+ FloatImage image1_mask;
switch (options->motion_model) {
#define LIBMV_CONVERT(the_model) \
- case libmv::TrackRegionOptions::the_model: \
- track_region_options.mode = libmv::TrackRegionOptions::the_model; \
+ case TrackRegionOptions::the_model: \
+ track_region_options.mode = TrackRegionOptions::the_model; \
break;
LIBMV_CONVERT(TRANSLATION)
LIBMV_CONVERT(TRANSLATION_ROTATION)
@@ -355,18 +192,28 @@ int libmv_trackRegion(const libmv_TrackRegionOptions *options,
track_region_options.use_normalized_intensities = options->use_normalization;
if (options->image1_mask) {
- floatBufToImage(options->image1_mask, image1_width, image1_height, 1, &image1_mask);
+ libmv_floatBufferToImage(options->image1_mask,
+ image1_width, image1_height, 1,
+ &image1_mask);
track_region_options.image1_mask = &image1_mask;
}
/* Convert from raw float buffers to libmv's FloatImage. */
- libmv::FloatImage old_patch, new_patch;
- floatBufToImage(image1, image1_width, image1_height, 1, &old_patch);
- floatBufToImage(image2, image2_width, image2_height, 1, &new_patch);
-
- libmv::TrackRegionResult track_region_result;
- libmv::TrackRegion(old_patch, new_patch, xx1, yy1, track_region_options, xx2, yy2, &track_region_result);
+ FloatImage old_patch, new_patch;
+ libmv_floatBufferToImage(image1,
+ image1_width, image1_height, 1,
+ &old_patch);
+ libmv_floatBufferToImage(image2,
+ image2_width, image2_height, 1,
+ &new_patch);
+
+ TrackRegionResult track_region_result;
+ TrackRegion(old_patch, new_patch,
+ xx1, yy1,
+ track_region_options,
+ xx2, yy2,
+ &track_region_result);
/* Convert to floats for the blender api. */
for (int i = 0; i < 5; ++i) {
@@ -375,23 +222,29 @@ int libmv_trackRegion(const libmv_TrackRegionOptions *options,
}
/* TODO(keir): Update the termination string with failure details. */
- if (track_region_result.termination == libmv::TrackRegionResult::CONVERGENCE ||
- track_region_result.termination == libmv::TrackRegionResult::NO_CONVERGENCE)
+ if (track_region_result.termination == TrackRegionResult::CONVERGENCE ||
+ track_region_result.termination == TrackRegionResult::NO_CONVERGENCE)
{
tracking_result = true;
}
+ /* Debug dump of patches. */
#if defined(DUMP_FAILURE) || defined(DUMP_ALWAYS)
-#if defined(DUMP_ALWAYS)
{
-#else
- if (!tracking_result) {
-#endif
- saveImage("old_patch", old_patch, x1[4], y1[4]);
- saveImage("new_patch", new_patch, x2[4], y2[4]);
+ bool need_dump = !tracking_result;
+
+# ifdef DUMP_ALWAYS
+ need_dump = true;
+# endif
- if (options->image1_mask)
- saveImage("mask", image1_mask, x2[4], y2[4]);
+ if (need_dump) {
+ libmv_saveImage(old_patch, "old_patch", x1[4], y1[4]);
+ libmv_saveImage(new_patch, "new_patch", x2[4], y2[4]);
+
+ if (options->image1_mask) {
+ libmv_saveImage(image1_mask, "mask", x2[4], y2[4]);
+ }
+ }
}
#endif
@@ -404,27 +257,29 @@ void libmv_samplePlanarPatch(const float *image,
int num_samples_x, int num_samples_y,
const float *mask,
float *patch,
- double *warped_position_x, double *warped_position_y)
+ double *warped_position_x,
+ double *warped_position_y)
{
- libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
- libmv::FloatImage *libmv_mask_for_sample = NULL;
+ FloatImage libmv_image, libmv_patch, libmv_mask;
+ FloatImage *libmv_mask_for_sample = NULL;
- floatBufToImage(image, width, height, channels, &libmv_image);
+ libmv_floatBufferToImage(image, width, height, channels, &libmv_image);
if (mask) {
- floatBufToImage(mask, width, height, 1, &libmv_mask);
+ libmv_floatBufferToImage(mask, width, height, 1, &libmv_mask);
libmv_mask_for_sample = &libmv_mask;
}
- libmv::SamplePlanarPatch(libmv_image, xs, ys,
- num_samples_x, num_samples_y,
- libmv_mask_for_sample,
- &libmv_patch,
- warped_position_x,
- warped_position_y);
+ SamplePlanarPatch(libmv_image,
+ xs, ys,
+ num_samples_x, num_samples_y,
+ libmv_mask_for_sample,
+ &libmv_patch,
+ warped_position_x,
+ warped_position_y);
- imageToFloatBuf(&libmv_patch, channels, patch);
+ libmv_imageToFloatBuffer(libmv_patch, patch);
}
void libmv_samplePlanarPatchByte(const unsigned char *image,
@@ -438,10 +293,10 @@ void libmv_samplePlanarPatch(const float *image,
libmv::FloatImage libmv_image, libmv_patch, libmv_mask;
libmv::FloatImage *libmv_mask_for_sample = NULL;
- byteBufToImage(image, width, height, channels, &libmv_image);
+ libmv_byteBufferToImage(image, width, height, channels, &libmv_image);
if (mask) {
- floatBufToImage(mask, width, height, 1, &libmv_mask);
+ libmv_floatBufferToImage(mask, width, height, 1, &libmv_mask);
libmv_mask_for_sample = &libmv_mask;
}
@@ -453,34 +308,40 @@ void libmv_samplePlanarPatch(const float *image,
warped_position_x,
warped_position_y);
- imageToByteBuf(&libmv_patch, channels, patch);
+ libmv_imageToByteBuffer(libmv_patch, patch);
}
/* ************ Tracks ************ */
-struct libmv_Tracks *libmv_tracksNew(void)
+libmv_Tracks *libmv_tracksNew(void)
{
- libmv::Tracks *libmv_tracks = LIBMV_OBJECT_NEW(libmv::Tracks);
+ Tracks *libmv_tracks = LIBMV_OBJECT_NEW(Tracks);
- return (struct libmv_Tracks *)libmv_tracks;
+ return (libmv_Tracks *) libmv_tracks;
}
-void libmv_tracksDestroy(struct libmv_Tracks *libmv_tracks)
+void libmv_tracksDestroy(libmv_Tracks *libmv_tracks)
{
- using libmv::Tracks;
LIBMV_OBJECT_DELETE(libmv_tracks, Tracks);
}
-void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track, double x, double y, double weight)
+void libmv_tracksInsert(libmv_Tracks *libmv_tracks,
+ int image, int track,
+ double x, double y,
+ double weight)
{
- ((libmv::Tracks*) libmv_tracks)->Insert(image, track, x, y, weight);
+ ((Tracks *) libmv_tracks)->Insert(image, track, x, y, weight);
}
/* ************ Reconstruction ************ */
-class ReconstructUpdateCallback : public libmv::ProgressUpdateCallback {
+namespace {
+
+class ReconstructUpdateCallback : public ProgressUpdateCallback {
public:
- ReconstructUpdateCallback(reconstruct_progress_update_cb progress_update_callback, void *callback_customdata)
+ ReconstructUpdateCallback(
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata)
{
progress_update_callback_ = progress_update_callback;
callback_customdata_ = callback_customdata;
@@ -497,13 +358,14 @@ protected:
void *callback_customdata_;
};
-static void libmv_solveRefineIntrinsics(const libmv::Tracks &tracks,
- const int refine_intrinsics,
- const int bundle_constraints,
- reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata,
- libmv::EuclideanReconstruction *reconstruction,
- libmv::CameraIntrinsics *intrinsics)
+void libmv_solveRefineIntrinsics(
+ const Tracks &tracks,
+ const int refine_intrinsics,
+ const int bundle_constraints,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata,
+ EuclideanReconstruction *reconstruction,
+ CameraIntrinsics *intrinsics)
{
/* only a few combinations are supported but trust the caller */
int bundle_intrinsics = 0;
@@ -523,61 +385,37 @@ static void libmv_solveRefineIntrinsics(const libmv::Tracks &tracks,
progress_update_callback(callback_customdata, 1.0, "Refining solution");
- libmv::EuclideanBundleCommonIntrinsics(tracks,
- bundle_intrinsics,
- bundle_constraints,
- reconstruction,
- intrinsics);
+ EuclideanBundleCommonIntrinsics(tracks,
+ bundle_intrinsics,
+ bundle_constraints,
+ reconstruction,
+ intrinsics);
}
-static void cameraIntrinsicsFromOptions(const libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
- libmv::CameraIntrinsics *camera_intrinsics)
+void finishReconstruction(
+ const Tracks &tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ libmv_Reconstruction *libmv_reconstruction,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata)
{
- camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
- camera_intrinsics_options->focal_length);
-
- camera_intrinsics->SetPrincipalPoint(camera_intrinsics_options->principal_point_x,
- camera_intrinsics_options->principal_point_y);
-
- camera_intrinsics->SetRadialDistortion(camera_intrinsics_options->k1,
- camera_intrinsics_options->k2,
- camera_intrinsics_options->k3);
-
- camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
- camera_intrinsics_options->image_height);
-}
-
-static libmv::Tracks getNormalizedTracks(const libmv::Tracks &tracks, const libmv::CameraIntrinsics &camera_intrinsics)
-{
- libmv::vector<libmv::Marker> markers = tracks.AllMarkers();
-
- for (int i = 0; i < markers.size(); ++i) {
- camera_intrinsics.InvertIntrinsics(markers[i].x, markers[i].y,
- &(markers[i].x), &(markers[i].y));
- }
-
- return libmv::Tracks(markers);
-}
-
-static void finishReconstruction(const libmv::Tracks &tracks, const libmv::CameraIntrinsics &camera_intrinsics,
- struct libmv_Reconstruction *libmv_reconstruction,
- reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata)
-{
- libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
/* reprojection error calculation */
progress_update_callback(callback_customdata, 1.0, "Finishing solution");
libmv_reconstruction->tracks = tracks;
- libmv_reconstruction->error = libmv::EuclideanReprojectionError(tracks, reconstruction, camera_intrinsics);
+ libmv_reconstruction->error = EuclideanReprojectionError(tracks,
+ reconstruction,
+ camera_intrinsics);
}
-static bool selectTwoKeyframesBasedOnGRICAndVariance(
- libmv::Tracks &tracks,
- libmv::Tracks &normalized_tracks,
- libmv::CameraIntrinsics &camera_intrinsics,
- int &keyframe1,
- int &keyframe2)
+bool selectTwoKeyframesBasedOnGRICAndVariance(
+ Tracks &tracks,
+ Tracks &normalized_tracks,
+ CameraIntrinsics &camera_intrinsics,
+ int &keyframe1,
+ int &keyframe2)
{
libmv::vector<int> keyframes;
@@ -607,25 +445,25 @@ static bool selectTwoKeyframesBasedOnGRICAndVariance(
int previous_keyframe = keyframes[0];
double best_error = std::numeric_limits<double>::max();
for (int i = 1; i < keyframes.size(); i++) {
- libmv::EuclideanReconstruction reconstruction;
+ EuclideanReconstruction reconstruction;
int current_keyframe = keyframes[i];
- libmv::vector<libmv::Marker> keyframe_markers =
+ libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(previous_keyframe,
current_keyframe);
- libmv::Tracks keyframe_tracks(keyframe_markers);
+ Tracks keyframe_tracks(keyframe_markers);
/* get a solution from two keyframes only */
- libmv::EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
- libmv::EuclideanBundle(keyframe_tracks, &reconstruction);
- libmv::EuclideanCompleteReconstruction(keyframe_tracks,
- &reconstruction, NULL);
+ EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
+ EuclideanBundle(keyframe_tracks, &reconstruction);
+ EuclideanCompleteReconstruction(keyframe_tracks,
+ &reconstruction,
+ NULL);
- double current_error =
- libmv::EuclideanReprojectionError(tracks,
- reconstruction,
- camera_intrinsics);
+ double current_error = EuclideanReprojectionError(tracks,
+ reconstruction,
+ camera_intrinsics);
LG << "Error between " << previous_keyframe
<< " and " << current_keyframe
@@ -643,26 +481,35 @@ static bool selectTwoKeyframesBasedOnGRICAndVariance(
return true;
}
-struct libmv_Reconstruction *libmv_solveReconstruction(const struct libmv_Tracks *libmv_tracks,
- const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- libmv_ReconstructionOptions *libmv_reconstruction_options,
- reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata)
+} // namespace
+
+libmv_Reconstruction *libmv_solveReconstruction(
+ const libmv_Tracks *libmv_tracks,
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ libmv_ReconstructionOptions *libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata)
{
- struct libmv_Reconstruction *libmv_reconstruction = LIBMV_OBJECT_NEW(libmv_Reconstruction);
+ libmv_Reconstruction *libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
- libmv::Tracks &tracks = *((libmv::Tracks *) libmv_tracks);
- libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
- libmv::CameraIntrinsics &camera_intrinsics = libmv_reconstruction->intrinsics;
+ Tracks &tracks = *((Tracks *) libmv_tracks);
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
- ReconstructUpdateCallback(progress_update_callback, callback_customdata);
+ ReconstructUpdateCallback(progress_update_callback,
+ callback_customdata);
/* Retrieve reconstruction options from C-API to libmv API */
- cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
+ CameraIntrinsics *camera_intrinsics;
+ camera_intrinsics = libmv_reconstruction->intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(
+ libmv_camera_intrinsics_options);
/* Invert the camera intrinsics */
- libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
+ Tracks normalized_tracks;
+ libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
/* keyframe selection */
int keyframe1 = libmv_reconstruction_options->keyframe1,
@@ -675,7 +522,7 @@ struct libmv_Reconstruction *libmv_solveReconstruction(const struct libmv_Tracks
selectTwoKeyframesBasedOnGRICAndVariance(tracks,
normalized_tracks,
- camera_intrinsics,
+ *camera_intrinsics,
keyframe1,
keyframe2);
@@ -687,95 +534,118 @@ struct libmv_Reconstruction *libmv_solveReconstruction(const struct libmv_Tracks
/* actual reconstruction */
LG << "frames to init from: " << keyframe1 << " " << keyframe2;
- libmv::vector<libmv::Marker> keyframe_markers =
+ libmv::vector<Marker> keyframe_markers =
normalized_tracks.MarkersForTracksInBothImages(keyframe1, keyframe2);
LG << "number of markers for init: " << keyframe_markers.size();
update_callback.invoke(0, "Initial reconstruction");
- libmv::EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
- libmv::EuclideanBundle(normalized_tracks, &reconstruction);
- libmv::EuclideanCompleteReconstruction(normalized_tracks,
- &reconstruction, &update_callback);
+ EuclideanReconstructTwoFrames(keyframe_markers, &reconstruction);
+ EuclideanBundle(normalized_tracks, &reconstruction);
+ EuclideanCompleteReconstruction(normalized_tracks,
+ &reconstruction,
+ &update_callback);
/* refinement */
if (libmv_reconstruction_options->refine_intrinsics) {
- libmv_solveRefineIntrinsics(tracks,
- libmv_reconstruction_options->refine_intrinsics,
- libmv::BUNDLE_NO_CONSTRAINTS,
- progress_update_callback,
- callback_customdata,
- &reconstruction,
- &camera_intrinsics);
+ libmv_solveRefineIntrinsics(
+ tracks,
+ libmv_reconstruction_options->refine_intrinsics,
+ libmv::BUNDLE_NO_CONSTRAINTS,
+ progress_update_callback,
+ callback_customdata,
+ &reconstruction,
+ camera_intrinsics);
}
/* set reconstruction scale to unity */
- libmv::EuclideanScaleToUnity(&reconstruction);
+ EuclideanScaleToUnity(&reconstruction);
/* finish reconstruction */
- finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
- progress_update_callback, callback_customdata);
+ finishReconstruction(tracks,
+ *camera_intrinsics,
+ libmv_reconstruction,
+ progress_update_callback,
+ callback_customdata);
- return (struct libmv_Reconstruction *)libmv_reconstruction;
+ return (libmv_Reconstruction *) libmv_reconstruction;
}
-struct libmv_Reconstruction *libmv_solveModal(const struct libmv_Tracks *libmv_tracks,
- const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- const libmv_ReconstructionOptions *libmv_reconstruction_options,
- reconstruct_progress_update_cb progress_update_callback,
- void *callback_customdata)
+libmv_Reconstruction *libmv_solveModal(
+ const libmv_Tracks *libmv_tracks,
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ const libmv_ReconstructionOptions *libmv_reconstruction_options,
+ reconstruct_progress_update_cb progress_update_callback,
+ void *callback_customdata)
{
- struct libmv_Reconstruction *libmv_reconstruction = LIBMV_OBJECT_NEW(libmv_Reconstruction);
+ libmv_Reconstruction *libmv_reconstruction =
+ LIBMV_OBJECT_NEW(libmv_Reconstruction);
- libmv::Tracks &tracks = *((libmv::Tracks *) libmv_tracks);
- libmv::EuclideanReconstruction &reconstruction = libmv_reconstruction->reconstruction;
- libmv::CameraIntrinsics &camera_intrinsics = libmv_reconstruction->intrinsics;
+ Tracks &tracks = *((Tracks *) libmv_tracks);
+ EuclideanReconstruction &reconstruction =
+ libmv_reconstruction->reconstruction;
ReconstructUpdateCallback update_callback =
- ReconstructUpdateCallback(progress_update_callback, callback_customdata);
+ ReconstructUpdateCallback(progress_update_callback,
+ callback_customdata);
- cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
+ /* Retrieve reconstruction options from C-API to libmv API */
+ CameraIntrinsics *camera_intrinsics;
+ camera_intrinsics = libmv_reconstruction->intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(
+ libmv_camera_intrinsics_options);
/* Invert the camera intrinsics. */
- libmv::Tracks normalized_tracks = getNormalizedTracks(tracks, camera_intrinsics);
+ Tracks normalized_tracks;
+ libmv_getNormalizedTracks(tracks, *camera_intrinsics, &normalized_tracks);
/* Actual reconstruction. */
- libmv::ModalSolver(normalized_tracks, &reconstruction, &update_callback);
+ ModalSolver(normalized_tracks, &reconstruction, &update_callback);
- libmv::CameraIntrinsics empty_intrinsics;
- libmv::EuclideanBundleCommonIntrinsics(normalized_tracks,
- libmv::BUNDLE_NO_INTRINSICS,
- libmv::BUNDLE_NO_TRANSLATION,
- &reconstruction,
- &empty_intrinsics);
+ PolynomialCameraIntrinsics empty_intrinsics;
+ EuclideanBundleCommonIntrinsics(normalized_tracks,
+ libmv::BUNDLE_NO_INTRINSICS,
+ libmv::BUNDLE_NO_TRANSLATION,
+ &reconstruction,
+ &empty_intrinsics);
/* Refinement. */
if (libmv_reconstruction_options->refine_intrinsics) {
- libmv_solveRefineIntrinsics(tracks,
- libmv_reconstruction_options->refine_intrinsics,
- libmv::BUNDLE_NO_TRANSLATION,
- progress_update_callback, callback_customdata,
- &reconstruction,
- &camera_intrinsics);
+ libmv_solveRefineIntrinsics(
+ tracks,
+ libmv_reconstruction_options->refine_intrinsics,
+ libmv::BUNDLE_NO_TRANSLATION,
+ progress_update_callback, callback_customdata,
+ &reconstruction,
+ camera_intrinsics);
}
/* Finish reconstruction. */
- finishReconstruction(tracks, camera_intrinsics, libmv_reconstruction,
- progress_update_callback, callback_customdata);
+ finishReconstruction(tracks,
+ *camera_intrinsics,
+ libmv_reconstruction,
+ progress_update_callback,
+ callback_customdata);
- return (struct libmv_Reconstruction *)libmv_reconstruction;
+ return (libmv_Reconstruction *) libmv_reconstruction;
}
-void libmv_reconstructionDestroy(struct libmv_Reconstruction *libmv_reconstruction)
+void libmv_reconstructionDestroy(libmv_Reconstruction *libmv_reconstruction)
{
+ LIBMV_OBJECT_DELETE(libmv_reconstruction->intrinsics, CameraIntrinsics);
LIBMV_OBJECT_DELETE(libmv_reconstruction, libmv_Reconstruction);
}
-int libmv_reprojectionPointForTrack(const struct libmv_Reconstruction *libmv_reconstruction, int track, double pos[3])
+int libmv_reprojectionPointForTrack(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int track,
+ double pos[3])
{
- const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
- const libmv::EuclideanPoint *point = reconstruction->PointForTrack(track);
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(track);
if (point) {
pos[0] = point->X[0];
@@ -788,35 +658,25 @@ int libmv_reprojectionPointForTrack(const struct libmv_Reconstruction *libmv_rec
return 0;
}
-static libmv::Marker ProjectMarker(const libmv::EuclideanPoint &point,
- const libmv::EuclideanCamera &camera,
- const libmv::CameraIntrinsics &intrinsics)
-{
- libmv::Vec3 projected = camera.R * point.X + camera.t;
- projected /= projected(2);
-
- libmv::Marker reprojected_marker;
- intrinsics.ApplyIntrinsics(projected(0), projected(1), &reprojected_marker.x, &reprojected_marker.y);
-
- reprojected_marker.image = camera.image;
- reprojected_marker.track = point.track;
-
- return reprojected_marker;
-}
-
-double libmv_reprojectionErrorForTrack(const struct libmv_Reconstruction *libmv_reconstruction, int track)
+double libmv_reprojectionErrorForTrack(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int track)
{
- const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
- const libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
- libmv::vector<libmv::Marker> markers = libmv_reconstruction->tracks.MarkersForTrack(track);
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ libmv::vector<Marker> markers =
+ libmv_reconstruction->tracks.MarkersForTrack(track);
int num_reprojected = 0;
double total_error = 0.0;
for (int i = 0; i < markers.size(); ++i) {
double weight = markers[i].weight;
- const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(markers[i].image);
- const libmv::EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
+ const EuclideanCamera *camera =
+ reconstruction->CameraForImage(markers[i].image);
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(markers[i].track);
if (!camera || !point || weight == 0.0) {
continue;
@@ -824,7 +684,8 @@ double libmv_reprojectionErrorForTrack(const struct libmv_Reconstruction *libmv_
num_reprojected++;
- libmv::Marker reprojected_marker = ProjectMarker(*point, *camera, *intrinsics);
+ Marker reprojected_marker =
+ libmv_projectMarker(*point, *camera, *intrinsics);
double ex = (reprojected_marker.x - markers[i].x) * weight;
double ey = (reprojected_marker.y - markers[i].y) * weight;
@@ -834,20 +695,26 @@ double libmv_reprojectionErrorForTrack(const struct libmv_Reconstruction *libmv_
return total_error / num_reprojected;
}
-double libmv_reprojectionErrorForImage(const struct libmv_Reconstruction *libmv_reconstruction, int image)
+double libmv_reprojectionErrorForImage(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int image)
{
- const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
- const libmv::CameraIntrinsics *intrinsics = &libmv_reconstruction->intrinsics;
- libmv::vector<libmv::Marker> markers = libmv_reconstruction->tracks.MarkersInImage(image);
- const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const CameraIntrinsics *intrinsics = libmv_reconstruction->intrinsics;
+ libmv::vector<Marker> markers =
+ libmv_reconstruction->tracks.MarkersInImage(image);
+ const EuclideanCamera *camera = reconstruction->CameraForImage(image);
int num_reprojected = 0;
double total_error = 0.0;
- if (!camera)
- return 0;
+ if (!camera) {
+ return 0.0;
+ }
for (int i = 0; i < markers.size(); ++i) {
- const libmv::EuclideanPoint *point = reconstruction->PointForTrack(markers[i].track);
+ const EuclideanPoint *point =
+ reconstruction->PointForTrack(markers[i].track);
if (!point) {
continue;
@@ -855,9 +722,10 @@ double libmv_reprojectionErrorForImage(const struct libmv_Reconstruction *libmv_
num_reprojected++;
- libmv::Marker reprojected_marker = ProjectMarker(*point, *camera, *intrinsics);
- double ex = reprojected_marker.x - markers[i].x;
- double ey = reprojected_marker.y - markers[i].y;
+ Marker reprojected_marker =
+ libmv_projectMarker(*point, *camera, *intrinsics);
+ double ex = (reprojected_marker.x - markers[i].x) * markers[i].weight;
+ double ey = (reprojected_marker.y - markers[i].y) * markers[i].weight;
total_error += sqrt(ex * ex + ey * ey);
}
@@ -865,11 +733,14 @@ double libmv_reprojectionErrorForImage(const struct libmv_Reconstruction *libmv_
return total_error / num_reprojected;
}
-int libmv_reprojectionCameraForImage(const struct libmv_Reconstruction *libmv_reconstruction,
- int image, double mat[4][4])
+int libmv_reprojectionCameraForImage(
+ const libmv_Reconstruction *libmv_reconstruction,
+ int image, double mat[4][4])
{
- const libmv::EuclideanReconstruction *reconstruction = &libmv_reconstruction->reconstruction;
- const libmv::EuclideanCamera *camera = reconstruction->CameraForImage(image);
+ const EuclideanReconstruction *reconstruction =
+ &libmv_reconstruction->reconstruction;
+ const EuclideanCamera *camera =
+ reconstruction->CameraForImage(image);
if (camera) {
for (int j = 0; j < 3; ++j) {
@@ -899,25 +770,27 @@ int libmv_reprojectionCameraForImage(const struct libmv_Reconstruction *libmv_re
return 0;
}
-double libmv_reprojectionError(const struct libmv_Reconstruction *libmv_reconstruction)
+double libmv_reprojectionError(
+ const libmv_Reconstruction *libmv_reconstruction)
{
return libmv_reconstruction->error;
}
-struct libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(struct libmv_Reconstruction *libmv_reconstruction)
+libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
+ libmv_Reconstruction *libmv_reconstruction)
{
- return (struct libmv_CameraIntrinsics *)&libmv_reconstruction->intrinsics;
+ return (libmv_CameraIntrinsics *) libmv_reconstruction->intrinsics;
}
/* ************ Feature detector ************ */
static libmv_Features *libmv_featuresFromVector(
- const libmv::vector<libmv::Feature> &features)
+ const libmv::vector<Feature> &features)
{
- struct libmv_Features *libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
+ libmv_Features *libmv_features = LIBMV_STRUCT_NEW(libmv_Features, 1);
int count = features.size();
if (count) {
- libmv_features->features = LIBMV_STRUCT_NEW(libmv::Feature, count);
+ libmv_features->features = LIBMV_STRUCT_NEW(Feature, count);
for (int i = 0; i < count; i++) {
libmv_features->features[i] = features.at(i);
@@ -933,12 +806,12 @@ static libmv_Features *libmv_featuresFromVector(
}
static void libmv_convertDetectorOptions(libmv_DetectOptions *options,
- libmv::DetectOptions *detector_options)
+ DetectOptions *detector_options)
{
switch (options->detector) {
#define LIBMV_CONVERT(the_detector) \
case LIBMV_DETECTOR_ ## the_detector: \
- detector_options->type = libmv::DetectOptions::the_detector; \
+ detector_options->type = DetectOptions::the_detector; \
break;
LIBMV_CONVERT(FAST)
LIBMV_CONVERT(MORAVEC)
@@ -953,49 +826,52 @@ static void libmv_convertDetectorOptions(libmv_DetectOptions *options,
detector_options->harris_threshold = options->harris_threshold;
}
-struct libmv_Features *libmv_detectFeaturesByte(const unsigned char *image_buffer,
- int width, int height, int channels,
- libmv_DetectOptions *options)
+libmv_Features *libmv_detectFeaturesByte(
+ const unsigned char *image_buffer,
+ int width, int height,
+ int channels,
+ libmv_DetectOptions *options)
{
// Prepare the image.
- libmv::FloatImage image;
- byteBufToImage(image_buffer, width, height, channels, &image);
+ FloatImage image;
+ libmv_byteBufferToImage(image_buffer, width, height, channels, &image);
// Configure detector.
- libmv::DetectOptions detector_options;
+ DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
- libmv::vector<libmv::Feature> detected_features;
- libmv::Detect(image, detector_options, &detected_features);
+ libmv::vector<Feature> detected_features;
+ Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features *result = libmv_featuresFromVector(detected_features);
return result;
}
-struct libmv_Features *libmv_detectFeaturesFloat(const float *image_buffer,
- int width, int height, int channels,
- libmv_DetectOptions *options)
+libmv_Features *libmv_detectFeaturesFloat(const float *image_buffer,
+ int width, int height,
+ int channels,
+ libmv_DetectOptions *options)
{
// Prepare the image.
- libmv::FloatImage image;
- floatBufToImage(image_buffer, width, height, channels, &image);
+ FloatImage image;
+ libmv_floatBufferToImage(image_buffer, width, height, channels, &image);
// Configure detector.
- libmv::DetectOptions detector_options;
+ DetectOptions detector_options;
libmv_convertDetectorOptions(options, &detector_options);
// Run the detector.
- libmv::vector<libmv::Feature> detected_features;
- libmv::Detect(image, detector_options, &detected_features);
+ libmv::vector<Feature> detected_features;
+ Detect(image, detector_options, &detected_features);
// Convert result to C-API.
libmv_Features *result = libmv_featuresFromVector(detected_features);
return result;
}
-void libmv_featuresDestroy(struct libmv_Features *libmv_features)
+void libmv_featuresDestroy(libmv_Features *libmv_features)
{
if (libmv_features->features) {
LIBMV_STRUCT_DELETE(libmv_features->features);
@@ -1004,14 +880,16 @@ void libmv_featuresDestroy(struct libmv_Features *libmv_features)
LIBMV_STRUCT_DELETE(libmv_features);
}
-int libmv_countFeatures(const struct libmv_Features *libmv_features)
+int libmv_countFeatures(const libmv_Features *libmv_features)
{
return libmv_features->count;
}
-void libmv_getFeature(const struct libmv_Features *libmv_features, int number, double *x, double *y, double *score, double *size)
+void libmv_getFeature(const libmv_Features *libmv_features,
+ int number,
+ double *x, double *y, double *score, double *size)
{
- libmv::Feature feature = libmv_features->features[number];
+ Feature &feature = libmv_features->features[number];
*x = feature.x;
*y = feature.y;
@@ -1021,51 +899,67 @@ void libmv_getFeature(const struct libmv_Features *libmv_features, int number, d
/* ************ Camera intrinsics ************ */
-struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNewEmpty(void)
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options)
{
- libmv::CameraIntrinsics *camera_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics);
+ CameraIntrinsics *camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
- return (struct libmv_CameraIntrinsics *) camera_intrinsics;
+ return (libmv_CameraIntrinsics *) camera_intrinsics;
}
-struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options)
+libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
+ const libmv_CameraIntrinsics *libmvIntrinsics)
{
- libmv::CameraIntrinsics *camera_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics);
-
- cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, camera_intrinsics);
-
- return (struct libmv_CameraIntrinsics *) camera_intrinsics;
-}
-
-struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(const libmv_CameraIntrinsics *libmvIntrinsics)
-{
- libmv::CameraIntrinsics *orig_intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
- libmv::CameraIntrinsics *new_intrinsics = LIBMV_OBJECT_NEW(libmv::CameraIntrinsics, *orig_intrinsics);
+ const CameraIntrinsics *orig_intrinsics =
+ (const CameraIntrinsics *) libmvIntrinsics;
+
+ CameraIntrinsics *new_intrinsics = NULL;
+
+ switch (orig_intrinsics->GetDistortionModelType()) {
+ case libmv::DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics*>(orig_intrinsics);
+ new_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics,
+ *polynomial_intrinsics);
+ break;
+ }
+ case libmv::DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics*>(orig_intrinsics);
+ new_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics,
+ *division_intrinsics);
+ break;
+ }
+ default:
+ assert(!"Unknown distortion model");
+ }
- return (struct libmv_CameraIntrinsics *) new_intrinsics;
+ return (libmv_CameraIntrinsics *) new_intrinsics;
}
-void libmv_cameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmvIntrinsics)
+void libmv_cameraIntrinsicsDestroy(libmv_CameraIntrinsics *libmvIntrinsics)
{
- using libmv::CameraIntrinsics;
LIBMV_OBJECT_DELETE(libmvIntrinsics, CameraIntrinsics);
}
-void libmv_cameraIntrinsicsUpdate(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- struct libmv_CameraIntrinsics *libmv_intrinsics)
+void libmv_cameraIntrinsicsUpdate(
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ libmv_CameraIntrinsics *libmv_intrinsics)
{
- libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
double focal_length = libmv_camera_intrinsics_options->focal_length;
double principal_x = libmv_camera_intrinsics_options->principal_point_x;
double principal_y = libmv_camera_intrinsics_options->principal_point_y;
- double k1 = libmv_camera_intrinsics_options->k1;
- double k2 = libmv_camera_intrinsics_options->k2;
- double k3 = libmv_camera_intrinsics_options->k3;
int image_width = libmv_camera_intrinsics_options->image_width;
int image_height = libmv_camera_intrinsics_options->image_height;
- /* try avoid unnecessary updates so pre-computed distortion grids are not freed */
+ /* Try to avoid unnecessary updates,
+ * so pre-computed distortion grids are not freed.
+ */
if (camera_intrinsics->focal_length() != focal_length)
camera_intrinsics->SetFocalLength(focal_length, focal_length);
@@ -1076,105 +970,192 @@ void libmv_cameraIntrinsicsUpdate(const libmv_CameraIntrinsicsOptions *libmv_cam
camera_intrinsics->SetPrincipalPoint(principal_x, principal_y);
}
- if (camera_intrinsics->k1() != k1 ||
- camera_intrinsics->k2() != k2 ||
- camera_intrinsics->k3() != k3)
- {
- camera_intrinsics->SetRadialDistortion(k1, k2, k3);
- }
-
if (camera_intrinsics->image_width() != image_width ||
camera_intrinsics->image_height() != image_height)
{
camera_intrinsics->SetImageSize(image_width, image_height);
}
+
+ switch (libmv_camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_POLYNOMIAL);
+
+ PolynomialCameraIntrinsics *polynomial_intrinsics =
+ (PolynomialCameraIntrinsics *) camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->polynomial_k1;
+ double k2 = libmv_camera_intrinsics_options->polynomial_k2;
+ double k3 = libmv_camera_intrinsics_options->polynomial_k3;
+
+ if (polynomial_intrinsics->k1() != k1 ||
+ polynomial_intrinsics->k2() != k2 ||
+ polynomial_intrinsics->k3() != k3)
+ {
+ polynomial_intrinsics->SetRadialDistortion(k1, k2, k3);
+ }
+
+ break;
+ }
+
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_DIVISION);
+
+ DivisionCameraIntrinsics *division_intrinsics =
+ (DivisionCameraIntrinsics *) camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->division_k1;
+ double k2 = libmv_camera_intrinsics_options->division_k2;
+
+ if (division_intrinsics->k1() != k1 ||
+ division_intrinsics->k2() != k2)
+ {
+ division_intrinsics->SetDistortion(k1, k2);
+ }
+
+ break;
+ }
+
+ default:
+ assert(!"Unknown distortion model");
+ }
}
-void libmv_cameraIntrinsicsSetThreads(struct libmv_CameraIntrinsics *libmv_intrinsics, int threads)
+void libmv_cameraIntrinsicsSetThreads(
+ libmv_CameraIntrinsics *libmv_intrinsics, int threads)
{
- libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
camera_intrinsics->SetThreads(threads);
}
-void libmv_cameraIntrinsicsExtract(const struct libmv_CameraIntrinsics *libmv_intrinsics, double *focal_length,
- double *principal_x, double *principal_y, double *k1, double *k2, double *k3,
- int *width, int *height)
+void libmv_cameraIntrinsicsExtractOptions(
+ const libmv_CameraIntrinsics *libmv_intrinsics,
+ libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
{
- libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
-
- *focal_length = camera_intrinsics->focal_length();
- *principal_x = camera_intrinsics->principal_point_x();
- *principal_y = camera_intrinsics->principal_point_y();
- *k1 = camera_intrinsics->k1();
- *k2 = camera_intrinsics->k2();
- *k3 = camera_intrinsics->k3();
-}
+ const CameraIntrinsics *camera_intrinsics =
+ (const CameraIntrinsics *) libmv_intrinsics;
+
+ // Fill in options which are common for all distortion models.
+ camera_intrinsics_options->focal_length = camera_intrinsics->focal_length();
+ camera_intrinsics_options->principal_point_x =
+ camera_intrinsics->principal_point_x();
+ camera_intrinsics_options->principal_point_y =
+ camera_intrinsics->principal_point_y();
+
+ camera_intrinsics_options->image_width = camera_intrinsics->image_width();
+ camera_intrinsics_options->image_height = camera_intrinsics->image_height();
+
+ switch (camera_intrinsics->GetDistortionModelType()) {
+ case libmv::DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics *>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
+ camera_intrinsics_options->polynomial_k1 = polynomial_intrinsics->k1();
+ camera_intrinsics_options->polynomial_k2 = polynomial_intrinsics->k2();
+ camera_intrinsics_options->polynomial_k3 = polynomial_intrinsics->k3();
+ camera_intrinsics_options->polynomial_p1 = polynomial_intrinsics->p1();
+      camera_intrinsics_options->polynomial_p2 = polynomial_intrinsics->p2();
+ break;
+ }
-void libmv_cameraIntrinsicsUndistortByte(const struct libmv_CameraIntrinsics *libmv_intrinsics,
- unsigned char *src, unsigned char *dst, int width, int height,
- float overscan, int channels)
-{
- libmv::CameraIntrinsics *camera_intrinsics = (libmv::CameraIntrinsics *) libmv_intrinsics;
+ case libmv::DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics *>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
+ camera_intrinsics_options->division_k1 = division_intrinsics->k1();
+ camera_intrinsics_options->division_k2 = division_intrinsics->k2();
+ break;
+ }
- camera_intrinsics->Undistort(src, dst, width, height, overscan, channels);
+    default:
+      assert(!"Unknown distortion model");
+ }
}
-void libmv_cameraIntrinsicsUndistortFloat(const struct libmv_CameraIntrinsics *libmvIntrinsics,
- float *src, float *dst, int width, int height,
- float overscan, int channels)
+void libmv_cameraIntrinsicsUndistortByte(
+ const libmv_CameraIntrinsics *libmv_intrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height,
+ float overscan, int channels)
{
- libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
-
- intrinsics->Undistort(src, dst, width, height, overscan, channels);
+ CameraIntrinsics *camera_intrinsics = (CameraIntrinsics *) libmv_intrinsics;
+ camera_intrinsics->UndistortBuffer(src,
+ width, height, overscan, channels,
+ dst);
}
-void libmv_cameraIntrinsicsDistortByte(const struct libmv_CameraIntrinsics *libmvIntrinsics,
- unsigned char *src, unsigned char *dst, int width, int height,
- float overscan, int channels)
+void libmv_cameraIntrinsicsUndistortFloat(
+ const libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height,
+ float overscan, int channels)
{
- libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
- intrinsics->Distort(src, dst, width, height, overscan, channels);
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmvIntrinsics;
+ intrinsics->UndistortBuffer(src,
+ width, height, overscan, channels,
+ dst);
}
-void libmv_cameraIntrinsicsDistortFloat(const struct libmv_CameraIntrinsics *libmvIntrinsics,
- float *src, float *dst, int width, int height,
- float overscan, int channels)
+void libmv_cameraIntrinsicsDistortByte(
+ const libmv_CameraIntrinsics *libmvIntrinsics,
+ unsigned char *src, unsigned char *dst, int width, int height,
+ float overscan, int channels)
{
- libmv::CameraIntrinsics *intrinsics = (libmv::CameraIntrinsics *) libmvIntrinsics;
-
- intrinsics->Distort(src, dst, width, height, overscan, channels);
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmvIntrinsics;
+ intrinsics->DistortBuffer(src,
+ width, height, overscan, channels,
+ dst);
}
-void libmv_cameraIntrinsicsApply(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- double x, double y, double *x1, double *y1)
+void libmv_cameraIntrinsicsDistortFloat(
+ const libmv_CameraIntrinsics *libmvIntrinsics,
+ float *src, float *dst, int width, int height,
+ float overscan, int channels)
{
- libmv::CameraIntrinsics camera_intrinsics;
-
- cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
+ CameraIntrinsics *intrinsics = (CameraIntrinsics *) libmvIntrinsics;
+ intrinsics->DistortBuffer(src,
+ width, height, overscan, channels,
+ dst);
+}
+void libmv_cameraIntrinsicsApply(
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ double x, double y, double *x1, double *y1)
+{
+  /* Only apply the lens distortion when the focal length is non-zero. */
if (libmv_camera_intrinsics_options->focal_length) {
- /* do a lens undistortion if focal length is non-zero only */
+ CameraIntrinsics *camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+
+ camera_intrinsics->ApplyIntrinsics(x, y, x1, y1);
- camera_intrinsics.ApplyIntrinsics(x, y, x1, y1);
+ LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
}
}
-void libmv_cameraIntrinsicsInvert(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
- double x, double y, double *x1, double *y1)
+void libmv_cameraIntrinsicsInvert(
+ const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
+ double x, double y, double *x1, double *y1)
{
- libmv::CameraIntrinsics camera_intrinsics;
-
- cameraIntrinsicsFromOptions(libmv_camera_intrinsics_options, &camera_intrinsics);
-
+  /* Only invert the lens distortion when the focal length is non-zero. */
if (libmv_camera_intrinsics_options->focal_length) {
- /* do a lens distortion if focal length is non-zero only */
+ CameraIntrinsics *camera_intrinsics =
+ libmv_cameraIntrinsicsCreateFromOptions(libmv_camera_intrinsics_options);
+
+ camera_intrinsics->InvertIntrinsics(x, y, x1, y1);
- camera_intrinsics.InvertIntrinsics(x, y, x1, y1);
+ LIBMV_OBJECT_DELETE(camera_intrinsics, CameraIntrinsics);
}
}
-void libmv_homography2DFromCorrespondencesEuc(double (*x1)[2], double (*x2)[2], int num_points, double H[3][3])
+void libmv_homography2DFromCorrespondencesEuc(double (*x1)[2],
+ double (*x2)[2],
+ int num_points,
+ double H[3][3])
{
libmv::Mat x1_mat, x2_mat;
libmv::Mat3 H_mat;
@@ -1191,7 +1172,10 @@ void libmv_homography2DFromCorrespondencesEuc(double (*x1)[2], double (*x2)[2],
LG << "x2: " << x2_mat;
libmv::EstimateHomographyOptions options;
- libmv::EstimateHomography2DFromCorrespondences(x1_mat, x2_mat, options, &H_mat);
+ libmv::EstimateHomography2DFromCorrespondences(x1_mat,
+ x2_mat,
+ options,
+ &H_mat);
LG << "H: " << H_mat;
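
To make the new option-based flow concrete, here is a minimal usage sketch of the C API above (a sketch only, not part of this patch; the division model and all coefficient values are arbitrary examples):

  libmv_CameraIntrinsicsOptions options = {0};
  options.distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
  options.image_width = 1920;
  options.image_height = 1080;
  options.focal_length = 900.0;
  options.principal_point_x = 960.0;
  options.principal_point_y = 540.0;
  options.division_k1 = 0.25;
  options.division_k2 = 0.05;

  /* libmv_cameraIntrinsicsNew() picks the CameraIntrinsics subclass from
   * options.distortion_model. */
  struct libmv_CameraIntrinsics *intrinsics =
      libmv_cameraIntrinsicsNew(&options);

  /* libmv_cameraIntrinsicsApply() creates a temporary intrinsics object from
   * the options and runs ApplyIntrinsics() on the given point. */
  double x1, y1;
  libmv_cameraIntrinsicsApply(&options, 0.1, -0.2, &x1, &y1);

  libmv_cameraIntrinsicsDestroy(intrinsics);
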
diff --git a/extern/libmv/libmv-capi.h b/extern/libmv/libmv-capi.h
index 53bf40064fb..5cd9936723b 100644
--- a/extern/libmv/libmv-capi.h
+++ b/extern/libmv/libmv-capi.h
@@ -92,12 +92,24 @@ void libmv_tracksInsert(struct libmv_Tracks *libmv_tracks, int image, int track,
#define LIBMV_REFINE_RADIAL_DISTORTION_K1 (1 << 2)
#define LIBMV_REFINE_RADIAL_DISTORTION_K2 (1 << 4)
+enum {
+ LIBMV_DISTORTION_MODEL_POLYNOMIAL = 0,
+ LIBMV_DISTORTION_MODEL_DIVISION = 1,
+};
+
typedef struct libmv_CameraIntrinsicsOptions {
+ /* Common settings of all distortion models. */
+ int distortion_model;
+ int image_width, image_height;
double focal_length;
double principal_point_x, principal_point_y;
- double k1, k2, k3;
- double p1, p2;
- int image_width, image_height;
+
+ /* Radial distortion model. */
+ double polynomial_k1, polynomial_k2, polynomial_k3;
+ double polynomial_p1, polynomial_p2;
+
+ /* Division distortion model. */
+ double division_k1, division_k2;
} libmv_CameraIntrinsicsOptions;
typedef struct libmv_ReconstructionOptions {
@@ -158,7 +170,6 @@ void libmv_getFeature(const struct libmv_Features *libmv_features, int number, d
double *size);
/* Camera intrinsics */
-struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNewEmpty(void);
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options);
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(const struct libmv_CameraIntrinsics *libmv_intrinsics);
@@ -166,9 +177,9 @@ void libmv_cameraIntrinsicsDestroy(struct libmv_CameraIntrinsics *libmv_intrinsi
void libmv_cameraIntrinsicsUpdate(const libmv_CameraIntrinsicsOptions *libmv_camera_intrinsics_options,
struct libmv_CameraIntrinsics *libmv_intrinsics);
void libmv_cameraIntrinsicsSetThreads(struct libmv_CameraIntrinsics *libmv_intrinsics, int threads);
-void libmv_cameraIntrinsicsExtract(const struct libmv_CameraIntrinsics *libmv_intrinsics, double *focal_length,
- double *principal_x, double *principal_y, double *k1, double *k2, double *k3,
- int *width, int *height);
+void libmv_cameraIntrinsicsExtractOptions(
+ const struct libmv_CameraIntrinsics *libmv_intrinsics,
+ struct libmv_CameraIntrinsicsOptions *camera_intrinsics_options);
void libmv_cameraIntrinsicsUndistortByte(const struct libmv_CameraIntrinsics *libmv_intrinsics,
unsigned char *src, unsigned char *dst, int width, int height,
float overscan, int channels);
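
Since libmv_cameraIntrinsicsExtract() is replaced by libmv_cameraIntrinsicsExtractOptions(), callers now receive the whole options struct and dispatch on the model themselves. A hypothetical sketch, assuming `intrinsics` is an existing struct libmv_CameraIntrinsics pointer:

  libmv_CameraIntrinsicsOptions extracted;
  libmv_cameraIntrinsicsExtractOptions(intrinsics, &extracted);
  if (extracted.distortion_model == LIBMV_DISTORTION_MODEL_DIVISION) {
    /* division_k1 / division_k2 hold the current coefficients. */
  }
  else {
    /* polynomial_k1..k3 and polynomial_p1 / polynomial_p2 are filled. */
  }
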
diff --git a/extern/libmv/libmv-capi_stub.cc b/extern/libmv/libmv-capi_stub.cc
index 6e3b7336d86..720e56dbf60 100644
--- a/extern/libmv/libmv-capi_stub.cc
+++ b/extern/libmv/libmv-capi_stub.cc
@@ -199,11 +199,6 @@ struct libmv_CameraIntrinsics *libmv_reconstructionExtractIntrinsics(
return NULL;
}
-struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNewEmpty(void)
-{
- return NULL;
-}
-
struct libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions * /*libmv_camera_intrinsics_options*/)
{
@@ -228,17 +223,12 @@ void libmv_cameraIntrinsicsSetThreads(struct libmv_CameraIntrinsics * /*libmv_in
{
}
-void libmv_cameraIntrinsicsExtract(const struct libmv_CameraIntrinsics * /*libmv_intrinsics*/, double * focal_length,
- double * principal_x, double *principal_y, double *k1, double *k2, double *k3,
- int *width, int *height)
+void libmv_cameraIntrinsicsExtractOptions(
+ const libmv_CameraIntrinsics */*libmv_intrinsics*/,
+    libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
{
- *focal_length = 1.0;
- *principal_x = 0.0;
- *principal_y = 0.0;
- *k1 = 0.0;
- *k2 = 0.0;
- *width = 0.0;
- *height = 0.0;
+ memset(camera_intrinsics_options, 0, sizeof(libmv_CameraIntrinsicsOptions));
+ camera_intrinsics_options->focal_length = 1.0;
}
void libmv_cameraIntrinsicsUndistortByte(const struct libmv_CameraIntrinsics * /*libmv_intrinsics*/,
diff --git a/extern/libmv/libmv-util.cc b/extern/libmv/libmv-util.cc
new file mode 100644
index 00000000000..f969417d6c1
--- /dev/null
+++ b/extern/libmv/libmv-util.cc
@@ -0,0 +1,309 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#include "libmv-util.h"
+#include "libmv-capi_intern.h"
+
+#include <cassert>
+#include <png.h>
+
+using libmv::CameraIntrinsics;
+using libmv::DivisionCameraIntrinsics;
+using libmv::EuclideanCamera;
+using libmv::EuclideanPoint;
+using libmv::FloatImage;
+using libmv::Marker;
+using libmv::PolynomialCameraIntrinsics;
+using libmv::Tracks;
+
+/* Image <-> buffers conversion */
+
+void libmv_byteBufferToImage(const unsigned char *buf,
+ int width, int height, int channels,
+ FloatImage *image)
+{
+ int x, y, k, a = 0;
+
+ image->Resize(height, width, channels);
+
+ for (y = 0; y < height; y++) {
+ for (x = 0; x < width; x++) {
+ for (k = 0; k < channels; k++) {
+ (*image)(y, x, k) = (float)buf[a++] / 255.0f;
+ }
+ }
+ }
+}
+
+void libmv_floatBufferToImage(const float *buf,
+ int width, int height, int channels,
+ FloatImage *image)
+{
+ image->Resize(height, width, channels);
+
+ for (int y = 0, a = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ for (int k = 0; k < channels; k++) {
+ (*image)(y, x, k) = buf[a++];
+ }
+ }
+ }
+}
+
+void libmv_imageToFloatBuffer(const FloatImage &image,
+ float *buf)
+{
+ for (int y = 0, a = 0; y < image.Height(); y++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int k = 0; k < image.Depth(); k++) {
+ buf[a++] = image(y, x, k);
+ }
+ }
+ }
+}
+
+void libmv_imageToByteBuffer(const libmv::FloatImage &image,
+ unsigned char *buf)
+{
+  for (int y = 0, a = 0; y < image.Height(); y++) {
+ for (int x = 0; x < image.Width(); x++) {
+ for (int k = 0; k < image.Depth(); k++) {
+ buf[a++] = image(y, x, k) * 255.0f;
+ }
+ }
+ }
+}
+
+/* Debugging */
+
+static void savePNGImage(png_bytep *row_pointers,
+ int width, int height, int depth, int color_type,
+ const char *file_name)
+{
+ png_infop info_ptr;
+ png_structp png_ptr;
+ FILE *fp = fopen(file_name, "wb");
+
+ if (!fp) {
+ return;
+ }
+
+ /* Initialize stuff */
+ png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
+ info_ptr = png_create_info_struct(png_ptr);
+
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_init_io(png_ptr, fp);
+
+ /* write header */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_set_IHDR(png_ptr, info_ptr,
+ width, height, depth, color_type,
+ PNG_INTERLACE_NONE,
+ PNG_COMPRESSION_TYPE_BASE,
+ PNG_FILTER_TYPE_BASE);
+
+ png_write_info(png_ptr, info_ptr);
+
+ /* write bytes */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_write_image(png_ptr, row_pointers);
+
+ /* end write */
+ if (setjmp(png_jmpbuf(png_ptr))) {
+ fclose(fp);
+ return;
+ }
+
+ png_write_end(png_ptr, NULL);
+
+ fclose(fp);
+}
+
+void libmv_saveImage(const FloatImage &image,
+ const char *prefix,
+ int x0, int y0)
+{
+ int x, y;
+ png_bytep *row_pointers;
+
+ assert(image.Depth() == 1);
+
+ row_pointers = new png_bytep[image.Height()];
+
+ for (y = 0; y < image.Height(); y++) {
+ row_pointers[y] = new png_byte[4 * image.Width()];
+
+ for (x = 0; x < image.Width(); x++) {
+ if (x0 == x && image.Height() - y0 - 1 == y) {
+ row_pointers[y][x * 4 + 0] = 255;
+ row_pointers[y][x * 4 + 1] = 0;
+ row_pointers[y][x * 4 + 2] = 0;
+ row_pointers[y][x * 4 + 3] = 255;
+ }
+ else {
+ float pixel = image(image.Height() - y - 1, x, 0);
+ row_pointers[y][x * 4 + 0] = pixel * 255;
+ row_pointers[y][x * 4 + 1] = pixel * 255;
+ row_pointers[y][x * 4 + 2] = pixel * 255;
+ row_pointers[y][x * 4 + 3] = 255;
+ }
+ }
+ }
+
+ {
+ static int a = 0;
+ char buf[128];
+ snprintf(buf, sizeof(buf), "%s_%02d.png", prefix, ++a);
+ savePNGImage(row_pointers,
+ image.Width(), image.Height(), 8,
+ PNG_COLOR_TYPE_RGBA,
+ buf);
+ }
+
+ for (y = 0; y < image.Height(); y++) {
+ delete [] row_pointers[y];
+ }
+ delete [] row_pointers;
+}
+
+/* Camera intrinsics utility functions */
+
+void libmv_cameraIntrinsicsFillFromOptions(
+ const libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
+ CameraIntrinsics *camera_intrinsics)
+{
+ camera_intrinsics->SetFocalLength(camera_intrinsics_options->focal_length,
+ camera_intrinsics_options->focal_length);
+
+ camera_intrinsics->SetPrincipalPoint(
+ camera_intrinsics_options->principal_point_x,
+ camera_intrinsics_options->principal_point_y);
+
+ camera_intrinsics->SetImageSize(camera_intrinsics_options->image_width,
+ camera_intrinsics_options->image_height);
+
+ switch (camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ {
+ PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<PolynomialCameraIntrinsics*>(camera_intrinsics);
+
+ polynomial_intrinsics->SetRadialDistortion(
+ camera_intrinsics_options->polynomial_k1,
+ camera_intrinsics_options->polynomial_k2,
+ camera_intrinsics_options->polynomial_k3);
+
+ break;
+ }
+
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ {
+ DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<DivisionCameraIntrinsics*>(camera_intrinsics);
+
+ division_intrinsics->SetDistortion(
+ camera_intrinsics_options->division_k1,
+ camera_intrinsics_options->division_k2);
+
+ break;
+ }
+
+ default:
+ assert(!"Unknown distortion model");
+ }
+}
+
+CameraIntrinsics *libmv_cameraIntrinsicsCreateFromOptions(
+ const libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
+{
+ CameraIntrinsics *camera_intrinsics = NULL;
+
+ switch (camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ camera_intrinsics = LIBMV_OBJECT_NEW(PolynomialCameraIntrinsics);
+ break;
+
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ camera_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics);
+ break;
+
+ default:
+ assert(!"Unknown distortion model");
+ }
+
+ libmv_cameraIntrinsicsFillFromOptions(camera_intrinsics_options, camera_intrinsics);
+
+ return camera_intrinsics;
+}
+
+/* Reconstruction utilities */
+
+void libmv_getNormalizedTracks(const Tracks &tracks,
+ const CameraIntrinsics &camera_intrinsics,
+ Tracks *normalized_tracks)
+{
+ libmv::vector<Marker> markers = tracks.AllMarkers();
+
+ for (int i = 0; i < markers.size(); ++i) {
+ Marker &marker = markers[i];
+ camera_intrinsics.InvertIntrinsics(marker.x, marker.y,
+ &marker.x, &marker.y);
+ normalized_tracks->Insert(marker.image, marker.track,
+ marker.x, marker.y,
+ marker.weight);
+ }
+}
+
+Marker libmv_projectMarker(const EuclideanPoint &point,
+ const EuclideanCamera &camera,
+ const CameraIntrinsics &intrinsics)
+{
+ libmv::Vec3 projected = camera.R * point.X + camera.t;
+ projected /= projected(2);
+
+ libmv::Marker reprojected_marker;
+ intrinsics.ApplyIntrinsics(projected(0), projected(1),
+ &reprojected_marker.x,
+ &reprojected_marker.y);
+
+ reprojected_marker.image = camera.image;
+ reprojected_marker.track = point.track;
+
+ return reprojected_marker;
+}
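
A short sketch of how the reconstruction helpers above compose (not part of the patch; it assumes the caller already has `tracks`, `intrinsics`, plus the camera, 3D point and observed marker for one image/track pair):

  libmv::Tracks normalized_tracks;
  libmv_getNormalizedTracks(tracks, intrinsics, &normalized_tracks);

  /* Reprojection error of one observation: project the solved 3D point back
   * through the camera and the lens model, then compare against the original
   * marker position. */
  libmv::Marker reprojected = libmv_projectMarker(point, camera, intrinsics);
  double error_x = reprojected.x - observed.x;
  double error_y = reprojected.y - observed.y;
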
diff --git a/extern/libmv/libmv-util.h b/extern/libmv/libmv-util.h
new file mode 100644
index 00000000000..d755f98d06d
--- /dev/null
+++ b/extern/libmv/libmv-util.h
@@ -0,0 +1,69 @@
+/*
+ * ***** BEGIN GPL LICENSE BLOCK *****
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ *
+ * The Original Code is Copyright (C) 2014 Blender Foundation.
+ * All rights reserved.
+ *
+ * Contributor(s): Blender Foundation,
+ * Sergey Sharybin
+ *
+ * ***** END GPL LICENSE BLOCK *****
+ */
+
+#ifndef LIBMV_UTIL_H
+#define LIBMV_UTIL_H
+
+#include "libmv-capi.h"
+#include "libmv/image/image.h"
+#include "libmv/simple_pipeline/camera_intrinsics.h"
+#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/reconstruction.h"
+
+void libmv_byteBufferToImage(const unsigned char *buf,
+ int width, int height, int channels,
+ libmv::FloatImage *image);
+
+void libmv_floatBufferToImage(const float *buf,
+ int width, int height, int channels,
+ libmv::FloatImage *image);
+
+void libmv_imageToFloatBuffer(const libmv::FloatImage &image,
+ float *buf);
+
+void libmv_imageToByteBuffer(const libmv::FloatImage &image,
+ unsigned char *buf);
+
+void libmv_saveImage(const libmv::FloatImage &image,
+ const char *prefix,
+ int x0, int y0);
+
+void libmv_cameraIntrinsicsFillFromOptions(
+ const libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
+ libmv::CameraIntrinsics *camera_intrinsics);
+
+libmv::CameraIntrinsics *libmv_cameraIntrinsicsCreateFromOptions(
+ const libmv_CameraIntrinsicsOptions *camera_intrinsics_options);
+
+void libmv_getNormalizedTracks(const libmv::Tracks &tracks,
+ const libmv::CameraIntrinsics &camera_intrinsics,
+ libmv::Tracks *normalized_tracks);
+
+libmv::Marker libmv_projectMarker(const libmv::EuclideanPoint &point,
+ const libmv::EuclideanCamera &camera,
+ const libmv::CameraIntrinsics &intrinsics);
+
+#endif
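
The buffer conversion helpers declared here are intended to be used in pairs; a minimal round-trip sketch (not from the patch; sizes and pixel values are arbitrary, std::vector is used for brevity):

  const int width = 640, height = 480, channels = 4;
  std::vector<float> pixels(width * height * channels, 0.5f);

  libmv::FloatImage image;
  libmv_floatBufferToImage(pixels.data(), width, height, channels, &image);
  /* ... run libmv operations on the FloatImage ... */
  libmv_imageToFloatBuffer(image, pixels.data());
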
diff --git a/extern/libmv/libmv/simple_pipeline/bundle.cc b/extern/libmv/libmv/simple_pipeline/bundle.cc
index f571b0fcaab..fc1882a0b72 100644
--- a/extern/libmv/libmv/simple_pipeline/bundle.cc
+++ b/extern/libmv/libmv/simple_pipeline/bundle.cc
@@ -32,6 +32,7 @@
#include "libmv/simple_pipeline/camera_intrinsics.h"
#include "libmv/simple_pipeline/reconstruction.h"
#include "libmv/simple_pipeline/tracks.h"
+#include "libmv/simple_pipeline/distortion_models.h"
#ifdef _OPENMP
# include <omp.h>
@@ -42,16 +43,27 @@ namespace libmv {
// The intrinsics need to get combined into a single parameter block; use these
// enums to index instead of numeric constants.
enum {
+ // Camera calibration values.
OFFSET_FOCAL_LENGTH,
OFFSET_PRINCIPAL_POINT_X,
OFFSET_PRINCIPAL_POINT_Y,
+
+ // Distortion model coefficients.
OFFSET_K1,
OFFSET_K2,
OFFSET_K3,
OFFSET_P1,
OFFSET_P2,
+
+ // Maximal possible offset.
+ OFFSET_MAX,
};
+#define FIRST_DISTORTION_COEFFICIENT OFFSET_K1
+#define LAST_DISTORTION_COEFFICIENT OFFSET_P2
+#define NUM_DISTORTION_COEFFICIENTS \
+ (LAST_DISTORTION_COEFFICIENT - FIRST_DISTORTION_COEFFICIENT + 1)
+
namespace {
// Cost functor which computes reprojection error of 3D point X
@@ -60,10 +72,12 @@ namespace {
//
// This functor uses a radial distortion model.
struct OpenCVReprojectionError {
- OpenCVReprojectionError(const double observed_x,
+ OpenCVReprojectionError(const DistortionModelType distortion_model,
+ const double observed_x,
const double observed_y,
const double weight)
- : observed_x_(observed_x), observed_y_(observed_y),
+ : distortion_model_(distortion_model),
+ observed_x_(observed_x), observed_y_(observed_y),
weight_(weight) {}
template <typename T>
@@ -76,11 +90,6 @@ struct OpenCVReprojectionError {
const T& focal_length = intrinsics[OFFSET_FOCAL_LENGTH];
const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
- const T& k1 = intrinsics[OFFSET_K1];
- const T& k2 = intrinsics[OFFSET_K2];
- const T& k3 = intrinsics[OFFSET_K3];
- const T& p1 = intrinsics[OFFSET_P1];
- const T& p2 = intrinsics[OFFSET_P2];
// Compute projective coordinates: x = RX + t.
T x[3];
@@ -104,15 +113,44 @@ struct OpenCVReprojectionError {
// Apply distortion to the normalized points to get (xd, yd).
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
- ApplyRadialDistortionCameraIntrinsics(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- k1, k2, k3,
- p1, p2,
- xn, yn,
- &predicted_x,
- &predicted_y);
+ switch (distortion_model_) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const T& k1 = intrinsics[OFFSET_K1];
+ const T& k2 = intrinsics[OFFSET_K2];
+ const T& k3 = intrinsics[OFFSET_K3];
+ const T& p1 = intrinsics[OFFSET_P1];
+ const T& p2 = intrinsics[OFFSET_P2];
+
+ ApplyPolynomialDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2, k3,
+ p1, p2,
+ xn, yn,
+ &predicted_x,
+ &predicted_y);
+ break;
+ }
+ case DISTORTION_MODEL_DIVISION:
+ {
+ const T& k1 = intrinsics[OFFSET_K1];
+ const T& k2 = intrinsics[OFFSET_K2];
+
+ ApplyDivisionDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2,
+ xn, yn,
+ &predicted_x,
+ &predicted_y);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown distortion model";
+ }
// The error is the difference between the predicted and observed position.
residuals[0] = (predicted_x - T(observed_x_)) * weight_;
@@ -120,6 +158,7 @@ struct OpenCVReprojectionError {
return true;
}
+ const DistortionModelType distortion_model_;
const double observed_x_;
const double observed_y_;
const double weight_;
@@ -154,32 +193,38 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
// Pack intrinsics from object to an array for easier
// and faster minimization.
void PackIntrinisicsIntoArray(const CameraIntrinsics &intrinsics,
- double ceres_intrinsics[8]) {
+ double ceres_intrinsics[OFFSET_MAX]) {
ceres_intrinsics[OFFSET_FOCAL_LENGTH] = intrinsics.focal_length();
ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X] = intrinsics.principal_point_x();
ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y] = intrinsics.principal_point_y();
- ceres_intrinsics[OFFSET_K1] = intrinsics.k1();
- ceres_intrinsics[OFFSET_K2] = intrinsics.k2();
- ceres_intrinsics[OFFSET_K3] = intrinsics.k3();
- ceres_intrinsics[OFFSET_P1] = intrinsics.p1();
- ceres_intrinsics[OFFSET_P2] = intrinsics.p2();
+
+ int num_distortion_parameters = intrinsics.num_distortion_parameters();
+ assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
+
+ const double *distortion_parameters = intrinsics.distortion_parameters();
+ for (int i = 0; i < num_distortion_parameters; ++i) {
+ ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i] =
+ distortion_parameters[i];
+ }
}
// Unpack intrinsics back from an array to an object.
-void UnpackIntrinsicsFromArray(const double ceres_intrinsics[8],
- CameraIntrinsics *intrinsics) {
- intrinsics->SetFocalLength(ceres_intrinsics[OFFSET_FOCAL_LENGTH],
- ceres_intrinsics[OFFSET_FOCAL_LENGTH]);
+void UnpackIntrinsicsFromArray(const double ceres_intrinsics[OFFSET_MAX],
+ CameraIntrinsics *intrinsics) {
+ intrinsics->SetFocalLength(ceres_intrinsics[OFFSET_FOCAL_LENGTH],
+ ceres_intrinsics[OFFSET_FOCAL_LENGTH]);
- intrinsics->SetPrincipalPoint(ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X],
- ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y]);
+ intrinsics->SetPrincipalPoint(ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X],
+ ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y]);
- intrinsics->SetRadialDistortion(ceres_intrinsics[OFFSET_K1],
- ceres_intrinsics[OFFSET_K2],
- ceres_intrinsics[OFFSET_K3]);
+ int num_distortion_parameters = intrinsics->num_distortion_parameters();
+ assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
- intrinsics->SetTangentialDistortion(ceres_intrinsics[OFFSET_P1],
- ceres_intrinsics[OFFSET_P2]);
+ double *distortion_parameters = intrinsics->distortion_parameters();
+ for (int i = 0; i < num_distortion_parameters; ++i) {
+ distortion_parameters[i] =
+ ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i];
+ }
}
// Get a vector of camera's rotations denoted by angle axis
@@ -330,9 +375,10 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
//
// At this point we only need to bundle points positions, cameras
// are to be totally still here.
-void EuclideanBundlePointsOnly(const vector<Marker> &markers,
+void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
+ const vector<Marker> &markers,
vector<Vec6> &all_cameras_R_t,
- double ceres_intrinsics[8],
+ double ceres_intrinsics[OFFSET_MAX],
EuclideanReconstruction *reconstruction) {
ceres::Problem::Options problem_options;
ceres::Problem problem(problem_options);
@@ -350,8 +396,9 @@ void EuclideanBundlePointsOnly(const vector<Marker> &markers,
double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
- OpenCVReprojectionError, 2, 8, 6, 3>(
+ OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
new OpenCVReprojectionError(
+ distortion_model,
marker.x,
marker.y,
1.0)),
@@ -397,21 +444,22 @@ void EuclideanBundlePointsOnly(const vector<Marker> &markers,
void EuclideanBundle(const Tracks &tracks,
EuclideanReconstruction *reconstruction) {
- CameraIntrinsics intrinsics;
+ PolynomialCameraIntrinsics empty_intrinsics;
EuclideanBundleCommonIntrinsics(tracks,
BUNDLE_NO_INTRINSICS,
BUNDLE_NO_CONSTRAINTS,
reconstruction,
- &intrinsics,
+ &empty_intrinsics,
NULL);
}
-void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
- const int bundle_intrinsics,
- const int bundle_constraints,
- EuclideanReconstruction *reconstruction,
- CameraIntrinsics *intrinsics,
- BundleEvaluation *evaluation) {
+void EuclideanBundleCommonIntrinsics(
+ const Tracks &tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction *reconstruction,
+ CameraIntrinsics *intrinsics,
+ BundleEvaluation *evaluation) {
LG << "Original intrinsics: " << *intrinsics;
vector<Marker> markers = tracks.AllMarkers();
@@ -421,7 +469,7 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
// Residual blocks with 10 parameters are unwieldly with Ceres, so pack the
// intrinsics into a single block and rely on local parameterizations to
// control which intrinsics are allowed to vary.
- double ceres_intrinsics[8];
+ double ceres_intrinsics[OFFSET_MAX];
PackIntrinisicsIntoArray(*intrinsics, ceres_intrinsics);
// Convert cameras rotations to angle axis and merge with translation
@@ -468,8 +516,9 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
// This way ceres is not gonna to go crazy.
if (marker.weight != 0.0) {
problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
- OpenCVReprojectionError, 2, 8, 6, 3>(
+ OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
new OpenCVReprojectionError(
+ intrinsics->GetDistortionModelType(),
marker.x,
marker.y,
marker.weight)),
@@ -501,6 +550,12 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
return;
}
+ if (intrinsics->GetDistortionModelType() == DISTORTION_MODEL_DIVISION &&
+ (bundle_intrinsics & BUNDLE_TANGENTIAL) != 0) {
+ LOG(FATAL) << "Division model doesn't support bundling "
+ "of tangential distortion";
+ }
+
BundleIntrinsicsLogMessage(bundle_intrinsics);
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
@@ -529,7 +584,7 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
constant_intrinsics.push_back(OFFSET_K3);
ceres::SubsetParameterization *subset_parameterization =
- new ceres::SubsetParameterization(8, constant_intrinsics);
+ new ceres::SubsetParameterization(OFFSET_MAX, constant_intrinsics);
problem.SetParameterization(ceres_intrinsics, subset_parameterization);
}
@@ -585,7 +640,8 @@ void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
if (zero_weight_markers.size()) {
LG << "Refining position of constant zero-weighted tracks";
- EuclideanBundlePointsOnly(zero_weight_markers,
+ EuclideanBundlePointsOnly(intrinsics->GetDistortionModelType(),
+ zero_weight_markers,
all_cameras_R_t,
ceres_intrinsics,
reconstruction);
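
What makes the packing above model-agnostic is the pair of virtual accessors num_distortion_parameters() / distortion_parameters() introduced on CameraIntrinsics. A standalone sketch of the same copy loop (illustrative only; the real OFFSET_* layout is local to bundle.cc):

  libmv::PolynomialCameraIntrinsics intrinsics;
  intrinsics.SetRadialDistortion(0.1, 0.01, 0.001);

  /* Focal length and principal point occupy the first three slots of the
   * packed block; the model's distortion coefficients follow them. */
  double packed[8] = {0.0};
  const double *distortion = intrinsics.distortion_parameters();
  for (int i = 0; i < intrinsics.num_distortion_parameters(); ++i) {
    packed[3 + i] = distortion[i];
  }
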
diff --git a/extern/libmv/libmv/simple_pipeline/bundle.h b/extern/libmv/libmv/simple_pipeline/bundle.h
index d19eec16d6d..781bd8476fe 100644
--- a/extern/libmv/libmv/simple_pipeline/bundle.h
+++ b/extern/libmv/libmv/simple_pipeline/bundle.h
@@ -114,12 +114,13 @@ enum BundleConstraints {
BUNDLE_NO_CONSTRAINTS = 0,
BUNDLE_NO_TRANSLATION = 1,
};
-void EuclideanBundleCommonIntrinsics(const Tracks &tracks,
- const int bundle_intrinsics,
- const int bundle_constraints,
- EuclideanReconstruction *reconstruction,
- CameraIntrinsics *intrinsics,
- BundleEvaluation *evaluation = NULL);
+void EuclideanBundleCommonIntrinsics(
+ const Tracks &tracks,
+ const int bundle_intrinsics,
+ const int bundle_constraints,
+ EuclideanReconstruction *reconstruction,
+ CameraIntrinsics *intrinsics,
+ BundleEvaluation *evaluation = NULL);
/*!
Refine camera poses and 3D coordinates using bundle adjustment.
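
A sketch of calling the entry point above with the new division model (not from the patch; `tracks` and an initialized `reconstruction` are assumed to exist). Note the restriction added in bundle.cc: requesting tangential-distortion bundling together with the division model aborts with LOG(FATAL).

  libmv::DivisionCameraIntrinsics intrinsics;
  intrinsics.SetFocalLength(900.0, 900.0);
  intrinsics.SetPrincipalPoint(960.0, 540.0);
  intrinsics.SetDistortion(0.25, 0.05);

  /* Keep the intrinsics constant and only refine cameras and points. */
  libmv::EuclideanBundleCommonIntrinsics(tracks,
                                         libmv::BUNDLE_NO_INTRINSICS,
                                         libmv::BUNDLE_NO_CONSTRAINTS,
                                         &reconstruction,
                                         &intrinsics,
                                         NULL);
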
diff --git a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
index ddbbec58def..5e4e07b3c4c 100644
--- a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
+++ b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -19,358 +19,226 @@
// IN THE SOFTWARE.
#include "libmv/simple_pipeline/camera_intrinsics.h"
-#include "libmv/numeric/levenberg_marquardt.h"
-namespace libmv {
+#include "libmv/logging/logging.h"
+#include "libmv/simple_pipeline/distortion_models.h"
-struct Offset {
- short ix, iy;
- unsigned char fx, fy;
-};
+namespace libmv {
-struct Grid {
- struct Offset *offset;
- int width, height;
- double overscan;
-};
+namespace internal {
-static struct Grid *copyGrid(struct Grid *from) {
- struct Grid *to = NULL;
+LookupWarpGrid::LookupWarpGrid()
+ : offset_(NULL),
+ width_(0),
+ height_(0),
+ overscan_(0.0),
+ threads_(1) {}
- if (from) {
- to = new Grid;
+LookupWarpGrid::LookupWarpGrid(const LookupWarpGrid &from)
+ : offset_(NULL),
+ width_(from.width_),
+ height_(from.height_),
+ overscan_(from.overscan_),
+ threads_(from.threads_) {
+ if (from.offset_) {
+ offset_ = new Offset[width_ * height_];
+ memcpy(offset_, from.offset_, sizeof(Offset) * width_ * height_);
+ }
+}
- to->width = from->width;
- to->height = from->height;
- to->overscan = from->overscan;
+LookupWarpGrid::~LookupWarpGrid() {
+ delete [] offset_;
+}
- to->offset = new Offset[to->width*to->height];
- memcpy(to->offset, from->offset, sizeof(struct Offset)*to->width*to->height);
- }
+void LookupWarpGrid::Reset() {
+ delete [] offset_;
+ offset_ = NULL;
+}
- return to;
+// Set number of threads used for threaded buffer distortion/undistortion.
+void LookupWarpGrid::SetThreads(int threads) {
+ threads_ = threads;
}
+} // namespace internal
+
CameraIntrinsics::CameraIntrinsics()
- : K_(Mat3::Identity()),
- image_width_(0),
+ : image_width_(0),
image_height_(0),
- k1_(0),
- k2_(0),
- k3_(0),
- p1_(0),
- p2_(0),
- distort_(0),
- undistort_(0),
- threads_(1) {}
+ K_(Mat3::Identity()) {}
CameraIntrinsics::CameraIntrinsics(const CameraIntrinsics &from)
- : K_(from.K_),
- image_width_(from.image_width_),
+ : image_width_(from.image_width_),
image_height_(from.image_height_),
- k1_(from.k1_),
- k2_(from.k2_),
- k3_(from.k3_),
- p1_(from.p1_),
- p2_(from.p2_),
- threads_(from.threads_) {
- distort_ = copyGrid(from.distort_);
- undistort_ = copyGrid(from.undistort_);
-}
+ K_(from.K_),
+ distort_(from.distort_),
+ undistort_(from.undistort_) {}
-CameraIntrinsics::~CameraIntrinsics() {
- FreeLookupGrid();
+// Set the image size in pixels.
+void CameraIntrinsics::SetImageSize(int width, int height) {
+ image_width_ = width;
+ image_height_ = height;
+ ResetLookupGrids();
}
-/// Set the entire calibration matrix at once.
+// Set the entire calibration matrix at once.
void CameraIntrinsics::SetK(const Mat3 new_k) {
K_ = new_k;
- FreeLookupGrid();
+ ResetLookupGrids();
}
-/// Set both x and y focal length in pixels.
-void CameraIntrinsics::SetFocalLength(double focal_x, double focal_y) {
+// Set both x and y focal length in pixels.
+void CameraIntrinsics::SetFocalLength(double focal_x,
+ double focal_y) {
K_(0, 0) = focal_x;
K_(1, 1) = focal_y;
- FreeLookupGrid();
+ ResetLookupGrids();
}
-void CameraIntrinsics::SetPrincipalPoint(double cx, double cy) {
+// Set principal point in pixels.
+void CameraIntrinsics::SetPrincipalPoint(double cx,
+ double cy) {
K_(0, 2) = cx;
K_(1, 2) = cy;
- FreeLookupGrid();
+ ResetLookupGrids();
}
-void CameraIntrinsics::SetImageSize(int width, int height) {
- image_width_ = width;
- image_height_ = height;
- FreeLookupGrid();
+// Set number of threads used for threaded buffer distortion/undistortion.
+void CameraIntrinsics::SetThreads(int threads) {
+ distort_.SetThreads(threads);
+ undistort_.SetThreads(threads);
}
-void CameraIntrinsics::SetRadialDistortion(double k1, double k2, double k3) {
- k1_ = k1;
- k2_ = k2;
- k3_ = k3;
- FreeLookupGrid();
+void CameraIntrinsics::ImageSpaceToNormalized(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ *normalized_x = (image_x - principal_point_x()) / focal_length_x();
+ *normalized_y = (image_y - principal_point_y()) / focal_length_y();
}
-void CameraIntrinsics::SetTangentialDistortion(double p1, double p2) {
- p1_ = p1;
- p2_ = p2;
- FreeLookupGrid();
+void CameraIntrinsics::NormalizedToImageSpace(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ *image_x = normalized_x * focal_length_x() + principal_point_x();
+ *image_y = normalized_y * focal_length_y() + principal_point_y();
}
-void CameraIntrinsics::SetThreads(int threads) {
- threads_ = threads;
+// Reset lookup grids after changing the distortion model.
+void CameraIntrinsics::ResetLookupGrids() {
+ distort_.Reset();
+ undistort_.Reset();
}
-void CameraIntrinsics::ApplyIntrinsics(double normalized_x,
- double normalized_y,
- double *image_x,
- double *image_y) const {
- ApplyRadialDistortionCameraIntrinsics(focal_length_x(),
- focal_length_y(),
- principal_point_x(),
- principal_point_y(),
- k1(), k2(), k3(),
- p1(), p2(),
- normalized_x,
- normalized_y,
- image_x,
- image_y);
+PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetRadialDistortion(0.0, 0.0, 0.0);
+ SetTangentialDistortion(0.0, 0.0);
}
-struct InvertIntrinsicsCostFunction {
- public:
- typedef Vec2 FMatrixType;
- typedef Vec2 XMatrixType;
-
- InvertIntrinsicsCostFunction(const CameraIntrinsics &intrinsics,
- double image_x, double image_y)
- : intrinsics(intrinsics), x(image_x), y(image_y) {}
-
- Vec2 operator()(const Vec2 &u) const {
- double xx, yy;
- intrinsics.ApplyIntrinsics(u(0), u(1), &xx, &yy);
- Vec2 fx;
- fx << (xx - x), (yy - y);
- return fx;
- }
- const CameraIntrinsics &intrinsics;
- double x, y;
-};
-
-void CameraIntrinsics::InvertIntrinsics(double image_x,
- double image_y,
- double *normalized_x,
- double *normalized_y) const {
- // Compute the initial guess. For a camera with no distortion, this will also
- // be the final answer; the LM iteration will terminate immediately.
- Vec2 normalized;
- normalized(0) = (image_x - principal_point_x()) / focal_length_x();
- normalized(1) = (image_y - principal_point_y()) / focal_length_y();
-
- typedef LevenbergMarquardt<InvertIntrinsicsCostFunction> Solver;
-
- InvertIntrinsicsCostFunction intrinsics_cost(*this, image_x, image_y);
- Solver::SolverParameters params;
- Solver solver(intrinsics_cost);
-
- /*Solver::Results results =*/ solver.minimize(params, &normalized);
-
- // TODO(keir): Better error handling.
-
- *normalized_x = normalized(0);
- *normalized_y = normalized(1);
+PolynomialCameraIntrinsics::PolynomialCameraIntrinsics(
+ const PolynomialCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+ SetRadialDistortion(from.k1(), from.k2(), from.k3());
+ SetTangentialDistortion(from.p1(), from.p2());
}
-// TODO(MatthiasF): downsample lookup
-template<typename WarpFunction>
-void CameraIntrinsics::ComputeLookupGrid(Grid* grid, int width, int height,
- double overscan) {
- double w = (double)width / (1 + overscan);
- double h = (double)height / (1 + overscan);
- double aspx = (double)w / image_width_;
- double aspy = (double)h / image_height_;
-#if defined(_OPENMP)
-# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
- if (threads_ > 1 && height > 100)
-#endif
- for (int y = 0; y < height; y++) {
- for (int x = 0; x < width; x++) {
- double src_x = (x - 0.5 * overscan * w) / aspx,
- src_y = (y - 0.5 * overscan * h) / aspy;
- double warp_x, warp_y;
- WarpFunction(this, src_x, src_y, &warp_x, &warp_y);
- warp_x = warp_x*aspx + 0.5 * overscan * w;
- warp_y = warp_y*aspy + 0.5 * overscan * h;
- int ix = int(warp_x), iy = int(warp_y);
- int fx = round((warp_x-ix)*256), fy = round((warp_y-iy)*256);
- if (fx == 256) { fx = 0; ix++; } // NOLINT
- if (fy == 256) { fy = 0; iy++; } // NOLINT
- // Use nearest border pixel
- if (ix < 0) { ix = 0, fx = 0; } // NOLINT
- if (iy < 0) { iy = 0, fy = 0; } // NOLINT
- if (ix >= width - 2) ix = width-2;
- if (iy >= height - 2) iy = height-2;
-
- Offset offset = { (short)(ix-x), (short)(iy-y),
- (unsigned char)fx, (unsigned char)fy };
- grid->offset[y*width+x] = offset;
- }
- }
+void PolynomialCameraIntrinsics::SetRadialDistortion(double k1,
+ double k2,
+ double k3) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ parameters_[OFFSET_K3] = k3;
+ ResetLookupGrids();
}
-// TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
-template<typename T>
-static void Warp(const Grid* grid, const T* src, T* dst,
- const int width, const int height, const int channels,
- const int threads) {
- (void) threads; // Ignored if OpenMP is disabled
-#if defined(_OPENMP)
-# pragma omp parallel for schedule(dynamic) num_threads(threads) \
- if (threads > 1 && height > 100)
-#endif
- for (int y = 0; y < height; y++) {
- for (int x = 0; x < width; x++) {
- Offset offset = grid->offset[y*width+x];
- const T* s = &src[((y + offset.iy) * width + (x + offset.ix)) * channels];
- for (int i = 0; i < channels; i++) {
- // TODO(sergey): Finally wrap this into ultiple lines nicely.
- dst[(y*width+x)*channels+i] =
- ((s[ i] * (256-offset.fx) + s[ channels+i] * offset.fx) * (256-offset.fy) // NOLINT
- +(s[width*channels+i] * (256-offset.fx) + s[width*channels+channels+i] * offset.fx) * offset.fy) / (256*256); // NOLINT
- }
- }
- }
+void PolynomialCameraIntrinsics::SetTangentialDistortion(double p1,
+ double p2) {
+ parameters_[OFFSET_P1] = p1;
+ parameters_[OFFSET_P2] = p2;
+ ResetLookupGrids();
}
-void CameraIntrinsics::FreeLookupGrid() {
- if (distort_) {
- delete distort_->offset;
- delete distort_;
- distort_ = NULL;
- }
-
- if (undistort_) {
- delete undistort_->offset;
- delete undistort_;
- undistort_ = NULL;
- }
+void PolynomialCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyPolynomialDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(), k3(),
+ p1(), p2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
}
-// FIXME: C++ templates limitations makes thing complicated,
-// but maybe there is a simpler method.
-struct ApplyIntrinsicsFunction {
- ApplyIntrinsicsFunction(CameraIntrinsics* intrinsics, double x, double y,
- double *warp_x, double *warp_y) {
- intrinsics->ApplyIntrinsics(
- (x-intrinsics->principal_point_x())/intrinsics->focal_length_x(),
- (y-intrinsics->principal_point_y())/intrinsics->focal_length_y(),
- warp_x, warp_y);
- }
-};
-struct InvertIntrinsicsFunction {
- InvertIntrinsicsFunction(CameraIntrinsics* intrinsics, double x, double y,
- double *warp_x, double *warp_y) {
- intrinsics->InvertIntrinsics(x, y, warp_x, warp_y);
-
- *warp_x = *warp_x * intrinsics->focal_length_x() +
- intrinsics->principal_point_x();
-
- *warp_y = *warp_y * intrinsics->focal_length_y() +
- intrinsics->principal_point_y();
- }
-};
-
-void CameraIntrinsics::CheckDistortLookupGrid(int width, int height,
- double overscan) {
- if (distort_) {
- if (distort_->width != width || distort_->height != height ||
- distort_->overscan != overscan) {
- delete [] distort_->offset;
- distort_->offset = NULL;
- }
- } else {
- distort_ = new Grid;
- distort_->offset = NULL;
- }
-
- if (!distort_->offset) {
- distort_->offset = new Offset[width * height];
- ComputeLookupGrid<InvertIntrinsicsFunction>(distort_, width,
- height, overscan);
- }
-
- distort_->width = width;
- distort_->height = height;
- distort_->overscan = overscan;
+void PolynomialCameraIntrinsics::InvertIntrinsics(
+ double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertPolynomialDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(), k3(),
+ p1(), p2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
}
-void CameraIntrinsics::CheckUndistortLookupGrid(int width, int height,
- double overscan) {
- if (undistort_) {
- if (undistort_->width != width || undistort_->height != height ||
- undistort_->overscan != overscan) {
- delete [] undistort_->offset;
- undistort_->offset = NULL;
- }
- } else {
- undistort_ = new Grid;
- undistort_->offset = NULL;
- }
-
- if (!undistort_->offset) {
- undistort_->offset = new Offset[width * height];
- ComputeLookupGrid<ApplyIntrinsicsFunction>(undistort_, width,
- height, overscan);
- }
-
- undistort_->width = width;
- undistort_->height = height;
- undistort_->overscan = overscan;
+DivisionCameraIntrinsics::DivisionCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetDistortion(0.0, 0.0);
}
-void CameraIntrinsics::Distort(const float* src, float* dst,
- int width, int height,
- double overscan,
- int channels) {
- assert(channels >= 1);
- assert(channels <= 4);
- CheckDistortLookupGrid(width, height, overscan);
- Warp<float>(distort_, src, dst, width, height, channels, threads_);
+DivisionCameraIntrinsics::DivisionCameraIntrinsics(
+ const DivisionCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+  SetDistortion(from.k1(), from.k2());
}
-void CameraIntrinsics::Distort(const unsigned char* src,
- unsigned char* dst,
- int width, int height,
- double overscan,
- int channels) {
- assert(channels >= 1);
- assert(channels <= 4);
- CheckDistortLookupGrid(width, height, overscan);
- Warp<unsigned char>(distort_, src, dst, width, height, channels, threads_);
+void DivisionCameraIntrinsics::SetDistortion(double k1,
+ double k2) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ ResetLookupGrids();
}
-void CameraIntrinsics::Undistort(const float* src, float* dst,
- int width, int height,
- double overscan,
- int channels) {
- assert(channels >= 1);
- assert(channels <= 4);
- CheckUndistortLookupGrid(width, height, overscan);
- Warp<float>(undistort_, src, dst, width, height, channels, threads_);
+void DivisionCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyDivisionDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
}
-void CameraIntrinsics::Undistort(const unsigned char* src,
- unsigned char* dst,
- int width, int height,
- double overscan,
- int channels) {
- assert(channels >= 1);
- assert(channels <= 4);
- CheckUndistortLookupGrid(width, height, overscan);
- Warp<unsigned char>(undistort_, src, dst, width, height, channels, threads_);
+void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertDivisionDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ k1(), k2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
}
std::ostream& operator <<(std::ostream &os,
@@ -386,11 +254,38 @@ std::ostream& operator <<(std::ostream &os,
<< " w=" << intrinsics.image_width()
<< " h=" << intrinsics.image_height();
- if (intrinsics.k1() != 0.0) { os << " k1=" << intrinsics.k1(); }
- if (intrinsics.k2() != 0.0) { os << " k2=" << intrinsics.k2(); }
- if (intrinsics.k3() != 0.0) { os << " k3=" << intrinsics.k3(); }
- if (intrinsics.p1() != 0.0) { os << " p1=" << intrinsics.p1(); }
- if (intrinsics.p2() != 0.0) { os << " p2=" << intrinsics.p2(); }
+#define PRINT_NONZERO_COEFFICIENT(intrinsics, coeff) \
+ { \
+ if (intrinsics->coeff() != 0.0) { \
+ os << " " #coeff "=" << intrinsics->coeff(); \
+ } \
+ } (void) 0
+
+ switch (intrinsics.GetDistortionModelType()) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const PolynomialCameraIntrinsics *polynomial_intrinsics =
+ static_cast<const PolynomialCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k2);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, k3);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p1);
+ PRINT_NONZERO_COEFFICIENT(polynomial_intrinsics, p2);
+ break;
+ }
+ case DISTORTION_MODEL_DIVISION:
+ {
+ const DivisionCameraIntrinsics *division_intrinsics =
+ static_cast<const DivisionCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
+ break;
+ }
+ default:
+ LOG(FATAL) << "Unknown distortion model.";
+ }
+
+#undef PRINT_NONZERO_COEFFICIENT
return os;
}
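
To see the subclassed intrinsics in action, a minimal round-trip sketch (not part of the patch; all values are arbitrary):

  libmv::DivisionCameraIntrinsics intrinsics;
  intrinsics.SetFocalLength(900.0, 900.0);
  intrinsics.SetPrincipalPoint(960.0, 540.0);
  intrinsics.SetImageSize(1920, 1080);
  intrinsics.SetDistortion(0.2, 0.05);

  /* Normalized -> pixel coordinates and back; the result should match the
   * input up to the accuracy of the inverse solve. */
  double image_x, image_y, back_x, back_y;
  intrinsics.ApplyIntrinsics(0.1, -0.2, &image_x, &image_y);
  intrinsics.InvertIntrinsics(image_x, image_y, &back_x, &back_y);
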
diff --git a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h
index 5a348899a11..6a3ade81089 100644
--- a/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h
+++ b/extern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -26,181 +26,381 @@
#include <Eigen/Core>
+#include "libmv/numeric/numeric.h"
+#include "libmv/simple_pipeline/distortion_models.h"
+
namespace libmv {
-typedef Eigen::Matrix<double, 3, 3> Mat3;
+class CameraIntrinsics;
-struct Grid;
+namespace internal {
-class CameraIntrinsics {
+// This class is responsible for storing a lookup grid and performing
+// image warping using it. Such a cache is needed to make (un)distortion
+// as fast as possible when multiple images need to be processed.
+class LookupWarpGrid {
public:
- CameraIntrinsics();
- CameraIntrinsics(const CameraIntrinsics &from);
- ~CameraIntrinsics();
-
- const Mat3 &K() const { return K_; }
- double focal_length() const { return K_(0, 0); }
- double focal_length_x() const { return K_(0, 0); }
- double focal_length_y() const { return K_(1, 1); }
- double principal_point_x() const { return K_(0, 2); }
- double principal_point_y() const { return K_(1, 2); }
- int image_width() const { return image_width_; }
- int image_height() const { return image_height_; }
- double k1() const { return k1_; }
- double k2() const { return k2_; }
- double k3() const { return k3_; }
- double p1() const { return p1_; }
- double p2() const { return p2_; }
-
- /// Set the entire calibration matrix at once.
- void SetK(const Mat3 new_k);
-
- /// Set both x and y focal length in pixels.
- void SetFocalLength(double focal_x, double focal_y);
-
- /// Set principal point in pixels.
- void SetPrincipalPoint(double cx, double cy);
-
- /// Set the image size in pixels.
- void SetImageSize(int width, int height);
-
- void SetRadialDistortion(double k1, double k2, double k3 = 0);
-
- void SetTangentialDistortion(double p1, double p2);
-
- /// Set number of threads using for buffer distortion/undistortion
+ LookupWarpGrid();
+ LookupWarpGrid(const LookupWarpGrid &from);
+ ~LookupWarpGrid();
+
+  // Width and height of the image, measured in pixels.
+ int width() const { return width_; }
+ int height() const { return height_; }
+
+ // Overscan factor of the image, so that
+ // - 0.0 is overscan of 0 pixels,
+  //   - 1.0 is overscan of image width pixels in horizontal direction
+ // and image height pixels in vertical direction.
+ double overscan() const { return overscan_; }
+
+  // Update the lookup grid to make sure it is calculated for the
+  // given image width, height and overscan.
+ //
+ // See comment for CameraIntrinsics::DistortBuffer to get more
+ // details about what overscan is.
+ template<typename WarpFunction>
+ void Update(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan);
+
+  // Apply the coordinate lookup grid to a given input buffer.
+  //
+  // See comment for CameraIntrinsics::DistortBuffer to get more
+  // details about the template type.
+ template<typename PixelType>
+ void Apply(const PixelType *input_buffer,
+ int width,
+ int height,
+ int channels,
+ PixelType *output_buffer);
+
+ // Reset lookup grids.
+ // This will tag the grid for update without re-computing it.
+ void Reset();
+
+ // Set number of threads used for threaded buffer distortion/undistortion.
void SetThreads(int threads);
- /*!
- Apply camera intrinsics to the normalized point to get image coordinates.
-
- This applies the lens distortion to a point which is in normalized
- camera coordinates (i.e. the principal point is at (0, 0)) to get image
- coordinates in pixels.
- */
- void ApplyIntrinsics(double normalized_x, double normalized_y,
- double *image_x, double *image_y) const;
+ private:
+  // This structure contains an offset in both x and y directions,
+  // packed in a way that saves some bytes per pixel of memory.
+  //
+  // TODO(sergey): This is a rather questionable optimization; memory
+  // is cheap nowadays and storing the offset this way implies many
+  // more operations compared to using bare floats.
+ struct Offset {
+ // Integer part of the offset.
+ short ix, iy;
+
+    // Fractional part of the offset; divide this value by 256 to get
+    // the real float value.
+ unsigned char fx, fy;
+ };
+
+  // Compute the coordinate lookup grid using a given warp functor.
+  //
+  // width and height correspond to the size of the buffer which will
+  // be warped later.
+ template<typename WarpFunction>
+ void Compute(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan);
+
+  // This is a buffer which contains the per-pixel offset of pixels
+  // from the input buffer, according to the warp function.
+ Offset *offset_;
+
+ // Dimensions of the image this lookup grid processes.
+ int width_, height_;
+
+ // Overscan of the image being processed by this grid.
+ double overscan_;
+
+  // Number of threads which will be used for buffer distortion/undistortion.
+ int threads_;
+};
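
The Offset encoding above keeps the fractional part as an 8-bit fixed-point value. A sketch of how one sample is interpolated with it for a single-channel buffer (mirroring the interpolation of the previous Warp() loop, shown only to make the encoding concrete; `stride` is the row length in pixels):

  /* s points at the source pixel selected by (ix, iy); fx and fy are in
   * [0, 255] and act as 1/256-th bilinear weights. */
  static float SampleBilinear(const float *s, int stride, int fx, int fy) {
    return ((s[0] * (256 - fx) + s[1] * fx) * (256 - fy) +
            (s[stride] * (256 - fx) + s[stride + 1] * fx) * fy) /
           (256.0f * 256.0f);
  }
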
- /*!
- Invert camera intrinsics on the image point to get normalized coordinates.
+} // namespace internal
- This reverses the effect of lens distortion on a point which is in image
- coordinates to get normalized camera coordinates.
- */
- void InvertIntrinsics(double image_x, double image_y,
- double *normalized_x, double *normalized_y) const;
+class CameraIntrinsics {
+ public:
+ CameraIntrinsics();
+ CameraIntrinsics(const CameraIntrinsics &from);
+ virtual ~CameraIntrinsics() {}
- /*!
- Distort an image using the current camera instrinsics
+ virtual DistortionModelType GetDistortionModelType() const = 0;
- The distorted image is computed in \a dst using samples from \a src.
- both buffers should be \a width x \a height x \a channels sized.
+ int image_width() const { return image_width_; }
+ int image_height() const { return image_height_; }
- \note This is the reference implementation using floating point images.
- */
- void Distort(const float* src, float* dst,
- int width, int height, double overscan, int channels);
+ const Mat3 &K() const { return K_; }
- /*!
- Distort an image using the current camera instrinsics
+ double focal_length() const { return K_(0, 0); }
+ double focal_length_x() const { return K_(0, 0); }
+ double focal_length_y() const { return K_(1, 1); }
- The distorted image is computed in \a dst using samples from \a src.
- both buffers should be \a width x \a height x \a channels sized.
+ double principal_point_x() const { return K_(0, 2); }
+ double principal_point_y() const { return K_(1, 2); }
- \note This version is much faster.
- */
- void Distort(const unsigned char* src, unsigned char* dst,
- int width, int height, double overscan, int channels);
+ virtual int num_distortion_parameters() const = 0;
+ virtual double *distortion_parameters() = 0;
+ virtual const double *distortion_parameters() const = 0;
- /*!
- Undistort an image using the current camera instrinsics
+ // Set the image size in pixels.
+  // This is the size of the image the intrinsics were calibrated with.
+ void SetImageSize(int width, int height);
- The undistorted image is computed in \a dst using samples from \a src.
- both buffers should be \a width x \a height x \a channels sized.
+ // Set the entire calibration matrix at once.
+ void SetK(const Mat3 new_k);
- \note This is the reference implementation using floating point images.
- */
- void Undistort(const float* src, float* dst,
- int width, int height, double overscan, int channels);
+ // Set both x and y focal length in pixels.
+ void SetFocalLength(double focal_x, double focal_y);
- /*!
- Undistort an image using the current camera instrinsics
+ // Set principal point in pixels.
+ void SetPrincipalPoint(double cx, double cy);
- The undistorted image is computed in \a dst using samples from \a src.
- both buffers should be \a width x \a height x \a channels sized.
+ // Set number of threads used for threaded buffer distortion/undistortion.
+ void SetThreads(int threads);
- \note This version is much faster.
- */
- void Undistort(const unsigned char* src, unsigned char* dst,
- int width, int height, double overscan, int channels);
+ // Convert image space coordinates to normalized.
+ void ImageSpaceToNormalized(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ // Convert normalized coordinates to image space.
+ void NormalizedToImageSpace(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ virtual void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const = 0;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ virtual void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const = 0;
+
+  // Distort an image using the current camera intrinsics.
+  //
+  // The distorted image is computed in output_buffer using samples from
+  // input_buffer. Both buffers should be width x height x channels sized.
+  //
+  // Overscan is a percentage value of how much overscan the image has.
+  // For example, an overscan value of 0.2 means 20% of overscan in the
+  // buffers.
+  //
+  // Overscan is usually used in cases when one needs to distort an image
+  // without having a barrel in the distorted buffer. For example, when
+  // one needs to render a properly distorted FullHD frame without the
+  // barrel being visible. For such cases renderers usually render bigger
+  // images and crop them after the distortion.
+  //
+  // This method is templated so it can distort byte and float buffers
+  // without having separate methods for the two types.
+  //
+  // In fact, PixelType might be any type for which multiplication by
+  // a scalar and addition are implemented. For example, PixelType might
+  // be Vec3 as well.
+ template<typename PixelType>
+ void DistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer);
+
+  // Undistort an image using the current camera intrinsics.
+  //
+  // The undistorted image is computed in output_buffer using samples from
+  // input_buffer. Both buffers should be width x height x channels sized.
+  //
+  // Overscan is a percentage value of how much overscan the image has.
+  // For example, an overscan value of 0.2 means 20% of overscan in the
+  // buffers.
+  //
+  // Overscan is usually used in cases when one needs to undistort an image
+  // without having a barrel in the undistorted buffer. For example, when
+  // one needs to render a properly undistorted FullHD frame without the
+  // barrel being visible. For such cases renderers usually render bigger
+  // images and crop them after the undistortion.
+  //
+  // This method is templated so it can undistort byte and float buffers
+  // without having separate methods for the two types.
+  //
+  // In fact, PixelType might be any type for which multiplication by
+  // a scalar and addition are implemented. For example, PixelType might
+  // be Vec3 as well.
+ template<typename PixelType>
+ void UndistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer);
private:
- template<typename WarpFunction> void ComputeLookupGrid(struct Grid* grid,
- int width,
- int height,
- double overscan);
- void CheckUndistortLookupGrid(int width, int height, double overscan);
- void CheckDistortLookupGrid(int width, int height, double overscan);
- void FreeLookupGrid();
-
- // The traditional intrinsics matrix from x = K[R|t]X.
- Mat3 K_;
-
// This is the size of the image. This is necessary to, for example, handle
// the case of processing a scaled image.
int image_width_;
int image_height_;
+ // The traditional intrinsics matrix from x = K[R|t]X.
+ Mat3 K_;
+
+ // Coordinate lookup grids for distortion and undistortion.
+ internal::LookupWarpGrid distort_;
+ internal::LookupWarpGrid undistort_;
+
+ protected:
+ // Reset lookup grids after changing the distortion model.
+ void ResetLookupGrids();
+};
+
+class PolynomialCameraIntrinsics : public CameraIntrinsics {
+ public:
+  // These constants define the offsets of the corresponding coefficients
+  // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+ OFFSET_K3,
+ OFFSET_P1,
+ OFFSET_P2,
+
+    // This defines the size of the array which we need in order
+    // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ PolynomialCameraIntrinsics();
+ PolynomialCameraIntrinsics(const PolynomialCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_POLYNOMIAL;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; };
+ const double *distortion_parameters() const { return parameters_; };
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+ double k3() const { return parameters_[OFFSET_K3]; }
+ double p1() const { return parameters_[OFFSET_P1]; }
+ double p2() const { return parameters_[OFFSET_P2]; }
+
+  // Set radial distortion coefficients.
+ void SetRadialDistortion(double k1, double k2, double k3);
+
+  // Set tangential distortion coefficients.
+ void SetTangentialDistortion(double p1, double p2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ private:
// OpenCV's distortion model with third order polynomial radial distortion
// terms and second order tangential distortion. The distortion is applied to
// the normalized coordinates before the focal length, which makes them
// independent of image size.
- double k1_, k2_, k3_, p1_, p2_;
+ double parameters_[NUM_PARAMETERS];
+};
- struct Grid *distort_;
- struct Grid *undistort_;
+class DivisionCameraIntrinsics : public CameraIntrinsics {
+ public:
+  // These constants define the offsets of the corresponding coefficients
+  // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+
+    // This defines the size of the array which we need in order
+    // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ DivisionCameraIntrinsics();
+ DivisionCameraIntrinsics(const DivisionCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_DIVISION;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; };
+ const double *distortion_parameters() const { return parameters_; };
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+
+  // Set radial distortion coefficients.
+ void SetDistortion(double k1, double k2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
- int threads_;
+ private:
+ // Double-parameter division distortion model.
+ double parameters_[NUM_PARAMETERS];
};
/// A human-readable representation of the camera intrinsic parameters.
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics);
-// Apply camera intrinsics to the normalized point to get image coordinates.
-// This applies the radial lens distortion to a point which is in normalized
-// camera coordinates (i.e. the principal point is at (0, 0)) to get image
-// coordinates in pixels. Templated for use with autodifferentiation.
-template <typename T>
-inline void ApplyRadialDistortionCameraIntrinsics(const T &focal_length_x,
- const T &focal_length_y,
- const T &principal_point_x,
- const T &principal_point_y,
- const T &k1,
- const T &k2,
- const T &k3,
- const T &p1,
- const T &p2,
- const T &normalized_x,
- const T &normalized_y,
- T *image_x,
- T *image_y) {
- T x = normalized_x;
- T y = normalized_y;
-
- // Apply distortion to the normalized points to get (xd, yd).
- T r2 = x*x + y*y;
- T r4 = r2 * r2;
- T r6 = r4 * r2;
- T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6);
- T xd = x * r_coeff + T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
- T yd = y * r_coeff + T(2)*p2*x*y + p1*(r2 + T(2)*y*y);
-
- // Apply focal length and principal point to get the final image coordinates.
- *image_x = focal_length_x * xd + principal_point_x;
- *image_y = focal_length_y * yd + principal_point_y;
-}
-
} // namespace libmv
+// Include implementation of all templated methods here,
+// so they're visible to the compiler.
+#include "libmv/simple_pipeline/camera_intrinsics_impl.h"
+
#endif // LIBMV_SIMPLE_PIPELINE_CAMERA_INTRINSICS_H_
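The header above is the whole public surface of the refactored intrinsics API. Below is a minimal usage sketch, assuming it is compiled and linked against the libmv sources in extern/libmv; the image size, focal length and distortion coefficients are illustrative values, not defaults.

#include <vector>
#include "libmv/simple_pipeline/camera_intrinsics.h"

int main() {
  // Intrinsics calibrated for a 1920x1080 image (illustrative values).
  libmv::PolynomialCameraIntrinsics intrinsics;
  intrinsics.SetImageSize(1920, 1080);
  intrinsics.SetFocalLength(1100.0, 1100.0);
  intrinsics.SetPrincipalPoint(960.0, 540.0);
  intrinsics.SetRadialDistortion(0.1, 0.01, 0.0);
  intrinsics.SetTangentialDistortion(0.0, 0.0);
  intrinsics.SetThreads(4);

  // Round-trip a single point: normalized -> pixels -> normalized.
  double image_x, image_y, normalized_x, normalized_y;
  intrinsics.ApplyIntrinsics(0.25, -0.1, &image_x, &image_y);
  intrinsics.InvertIntrinsics(image_x, image_y, &normalized_x, &normalized_y);

  // Undistort a float RGBA buffer with 10% overscan.
  const int width = 1920, height = 1080, channels = 4;
  std::vector<float> source(width * height * channels, 0.0f);
  std::vector<float> result(width * height * channels, 0.0f);
  intrinsics.UndistortBuffer(&source[0], width, height, 0.1, channels, &result[0]);
  return 0;
}

DistortBuffer() and UndistortBuffer() deduce PixelType from the buffer arguments, so the same call site works for unsigned char, float, or any other type supporting scalar multiplication and addition.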
diff --git a/extern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h b/extern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
new file mode 100644
index 00000000000..66264bdbd93
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
@@ -0,0 +1,192 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+namespace libmv {
+
+namespace {
+
+// FIXME: C++ template limitations make things complicated,
+// but maybe there is a simpler method.
+struct ApplyIntrinsicsFunction {
+ ApplyIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ double x,
+ double y,
+ double *warp_x,
+ double *warp_y) {
+ double normalized_x, normalized_y;
+ intrinsics.ImageSpaceToNormalized(x, y, &normalized_x, &normalized_y);
+ intrinsics.ApplyIntrinsics(normalized_x, normalized_y, warp_x, warp_y);
+ }
+};
+
+struct InvertIntrinsicsFunction {
+ InvertIntrinsicsFunction(const CameraIntrinsics &intrinsics,
+ double x,
+ double y,
+ double *warp_x,
+ double *warp_y) {
+ double normalized_x, normalized_y;
+ intrinsics.InvertIntrinsics(x, y, &normalized_x, &normalized_y);
+ intrinsics.NormalizedToImageSpace(normalized_x, normalized_y, warp_x, warp_y);
+ }
+};
+
+} // namespace
+
+namespace internal {
+
+// TODO(MatthiasF): downsample lookup
+template<typename WarpFunction>
+void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan) {
+ double w = (double) width / (1.0 + overscan);
+ double h = (double) height / (1.0 + overscan);
+ double aspx = (double) w / intrinsics.image_width();
+ double aspy = (double) h / intrinsics.image_height();
+#if defined(_OPENMP)
+# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+ if (threads_ > 1 && height > 100)
+#endif
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ double src_x = (x - 0.5 * overscan * w) / aspx,
+ src_y = (y - 0.5 * overscan * h) / aspy;
+ double warp_x, warp_y;
+ WarpFunction(intrinsics, src_x, src_y, &warp_x, &warp_y);
+ warp_x = warp_x * aspx + 0.5 * overscan * w;
+ warp_y = warp_y * aspy + 0.5 * overscan * h;
+ int ix = int(warp_x), iy = int(warp_y);
+ int fx = round((warp_x - ix) * 256), fy = round((warp_y - iy) * 256);
+ if (fx == 256) { fx = 0; ix++; } // NOLINT
+ if (fy == 256) { fy = 0; iy++; } // NOLINT
+ // Use nearest border pixel
+ if (ix < 0) { ix = 0, fx = 0; } // NOLINT
+ if (iy < 0) { iy = 0, fy = 0; } // NOLINT
+ if (ix >= width - 2) ix = width - 2;
+ if (iy >= height - 2) iy = height - 2;
+
+ Offset offset = { (short) (ix - x),
+ (short) (iy - y),
+ (unsigned char) fx,
+ (unsigned char) fy };
+ offset_[y * width + x] = offset;
+ }
+ }
+}
+
+template<typename WarpFunction>
+void LookupWarpGrid::Update(const CameraIntrinsics &intrinsics,
+ int width,
+ int height,
+ double overscan) {
+ if (width_ != width ||
+ height_ != height ||
+ overscan_ != overscan) {
+ Reset();
+ }
+
+ if (offset_ == NULL) {
+    offset_ = new Offset[width * height];
+ Compute<WarpFunction>(intrinsics,
+ width,
+ height,
+ overscan);
+ }
+
+ width_ = width;
+ height_ = height;
+ overscan_ = overscan;
+}
+
+// TODO(MatthiasF): cubic B-Spline image sampling, bilinear lookup
+template<typename PixelType>
+void LookupWarpGrid::Apply(const PixelType *input_buffer,
+ int width,
+ int height,
+ int channels,
+ PixelType *output_buffer) {
+#if defined(_OPENMP)
+# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+ if (threads_ > 1 && height > 100)
+#endif
+ for (int y = 0; y < height; y++) {
+ for (int x = 0; x < width; x++) {
+ Offset offset = offset_[y * width + x];
+ const int pixel_index = ((y + offset.iy) * width +
+ (x + offset.ix)) * channels;
+ const PixelType *s = &input_buffer[pixel_index];
+ for (int i = 0; i < channels; i++) {
+ output_buffer[(y * width + x) * channels + i] =
+ ((s[i] * (256 - offset.fx) +
+ s[channels + i] * offset.fx) * (256 - offset.fy) +
+ (s[width * channels + i] * (256 - offset.fx) +
+ s[width * channels + channels + i] * offset.fx) * offset.fy)
+ / (256 * 256);
+ }
+ }
+ }
+}
+
+} // namespace internal
+
+template<typename PixelType>
+void CameraIntrinsics::DistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer) {
+ assert(channels >= 1);
+ assert(channels <= 4);
+ distort_.Update<InvertIntrinsicsFunction>(*this,
+ width,
+ height,
+ overscan);
+ distort_.Apply<PixelType>(input_buffer,
+ width,
+ height,
+ channels,
+ output_buffer);
+}
+
+template<typename PixelType>
+void CameraIntrinsics::UndistortBuffer(const PixelType *input_buffer,
+ int width,
+ int height,
+ double overscan,
+ int channels,
+ PixelType *output_buffer) {
+ assert(channels >= 1);
+ assert(channels <= 4);
+ undistort_.Update<ApplyIntrinsicsFunction>(*this,
+ width,
+ height,
+ overscan);
+
+ undistort_.Apply<PixelType>(input_buffer,
+ width,
+ height,
+ channels,
+ output_buffer);
+}
+
+} // namespace libmv
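LookupWarpGrid above trades memory for speed: for every output pixel it stores a short integer source offset plus 8-bit fractional weights, so Apply() reduces to fixed-point bilinear blending. A self-contained sketch of that blend for a single one-channel pixel (buffer values and weights are made up for illustration):

#include <cstdio>

int main() {
  // A 2x2 grayscale neighbourhood around the sampling position.
  const int width = 2, channels = 1;
  unsigned char s[4] = { 10, 50, 90, 130 };

  // 8-bit fractional weights, as stored in LookupWarpGrid::Offset.
  const int fx = 64, fy = 192;  // 0.25 and 0.75 in fixed point.

  // Same arithmetic as LookupWarpGrid::Apply(): two horizontal lerps,
  // then a vertical lerp, all in integer math, normalized by 256 * 256.
  int value =
      ((s[0] * (256 - fx) + s[channels] * fx) * (256 - fy) +
       (s[width * channels] * (256 - fx) +
        s[width * channels + channels] * fx) * fy) / (256 * 256);

  printf("interpolated value: %d\n", value);  // Prints 80.
  return 0;
}

Because Apply() reads a 2x2 neighbourhood per output pixel, Compute() clamps the source indices to width - 2 and height - 2 and snaps negative ones to the border, so edge pixels effectively repeat.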
diff --git a/extern/libmv/libmv/simple_pipeline/detect.cc b/extern/libmv/libmv/simple_pipeline/detect.cc
index 4039c83210c..46599a4c49e 100644
--- a/extern/libmv/libmv/simple_pipeline/detect.cc
+++ b/extern/libmv/libmv/simple_pipeline/detect.cc
@@ -45,6 +45,14 @@ namespace libmv {
namespace {
+// Default value for FAST minimal trackness in the DetectOptions structure.
+// TODO(sergey): Think of a better default value here.
+int kDefaultFastMinTrackness = 128;
+
+// Default value for Harris threshold in the DetectOptions structure.
+// TODO(sergey): Think of a better default value here.
+double kDefaultHarrisThreshold = 1e-5;
+
class FeatureComparison {
public:
bool operator() (const Feature &left, const Feature &right) const {
@@ -134,11 +142,10 @@ void DetectFAST(const FloatImage &grayscale_image,
if (num_features) {
vector<Feature> all_features;
for (int i = 0; i < num_features; ++i) {
- Feature new_feature = {(float)nonmax[i].x + margin,
- (float)nonmax[i].y + margin,
- (float)scores[i],
- 0};
- all_features.push_back(new_feature);
+ all_features.push_back(Feature((float)nonmax[i].x + margin,
+ (float)nonmax[i].y + margin,
+ (float)scores[i],
+ 9.0));
}
FilterFeaturesByDistance(all_features, min_distance, detected_features);
}
@@ -233,15 +240,24 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
int min = 255, total = 0;
for (; min > 0; min--) {
int h = histogram[min];
- if (total+h > count) break;
+ if (total + h > count) {
+ break;
+ }
total += h;
}
- for (int y = 16; y < height-16; y++) {
- for (int x = 16; x < width-16; x++) {
- int s = scores[y*width+x];
- Feature f = { (float)x+8.0f, (float)y+8.0f, (float)s, 16 };
+ for (int y = 16; y < height - 16; y++) {
+ for (int x = 16; x < width - 16; x++) {
+ int s = scores[y * width + x];
if (s > min) {
- detected_features->push_back(f);
+        // Currently SAD works with patterns of 16x16 pixels.
+        //
+        // The score calculation above uses the top left corner of the
+        // patch as the origin; here we need to convert this value
+        // to the pattern center by adding 8 pixels.
+ detected_features->push_back(Feature((float) x + 8.0f,
+ (float) y + 8.0f,
+ (float) s,
+ 16.0f));
}
}
}
@@ -288,8 +304,10 @@ void DetectHarris(const FloatImage &grayscale_image,
double traceA = A.trace();
double harris_function = detA - alpha * traceA * traceA;
if (harris_function > threshold) {
- Feature new_feature = {(float)x, (float)y, (float)harris_function, 0.0f};
- all_features.push_back(new_feature);
+ all_features.push_back(Feature((float) x,
+ (float) y,
+ (float) harris_function,
+ 5.0f));
}
}
}
@@ -303,10 +321,10 @@ DetectOptions::DetectOptions()
: type(DetectOptions::HARRIS),
margin(0),
min_distance(120),
- fast_min_trackness(128),
+ fast_min_trackness(kDefaultFastMinTrackness),
moravec_max_count(0),
moravec_pattern(NULL),
- harris_threshold(0.0) {}
+ harris_threshold(kDefaultHarrisThreshold) {}
void Detect(const FloatImage &image,
const DetectOptions &options,
@@ -332,4 +350,12 @@ void Detect(const FloatImage &image,
}
}
+std::ostream& operator <<(std::ostream &os,
+ const Feature &feature) {
+ os << "x: " << feature.x << ", y: " << feature.y;
+ os << ", score: " << feature.score;
+ os << ", size: " << feature.size;
+ return os;
+}
+
} // namespace libmv
diff --git a/extern/libmv/libmv/simple_pipeline/detect.h b/extern/libmv/libmv/simple_pipeline/detect.h
index 2c782294d1b..1035287bcf2 100644
--- a/extern/libmv/libmv/simple_pipeline/detect.h
+++ b/extern/libmv/libmv/simple_pipeline/detect.h
@@ -25,6 +25,7 @@
#ifndef LIBMV_SIMPLE_PIPELINE_DETECT_H_
#define LIBMV_SIMPLE_PIPELINE_DETECT_H_
+#include <iostream>
#include <vector>
#include "libmv/base/vector.h"
@@ -34,22 +35,27 @@ namespace libmv {
typedef unsigned char ubyte;
-/*!
- A Feature is the 2D location of a detected feature in an image.
-
- \a x, \a y is the position of the feature in pixels from the top left corner.
- \a score is an estimate of how well the feature will be tracked.
- \a size can be used as an initial pattern size to track the feature.
-
- \sa Detect
-*/
+// A Feature is the 2D location of a detected feature in an image.
struct Feature {
- /// Position in pixels (from top-left corner)
- /// \note libmv might eventually support subpixel precision.
+ Feature(float x, float y) : x(x), y(y) {}
+ Feature(float x, float y, float score, float size)
+ : x(x), y(y), score(score), size(size) {}
+
+ // Position of the feature in pixels from top-left corner.
+ // Note: Libmv detector might eventually support subpixel precision.
float x, y;
- /// Trackness of the feature
+
+ // An estimate of how well the feature will be tracked.
+ //
+  // Absolute values depend entirely on the particular detector type
+  // used for detection. It is only guaranteed that features with a
+  // higher score from the same Detect() result will be tracked better.
float score;
- /// Size of the feature in pixels
+
+ // An approximate feature size in pixels.
+ //
+ // If the feature is approximately a 5x5 square region, then size will be 5.
+ // It can be used as an initial pattern size to track the feature.
float size;
};
@@ -99,6 +105,9 @@ void Detect(const FloatImage &image,
const DetectOptions &options,
vector<Feature> *detected_features);
+std::ostream& operator <<(std::ostream &os,
+ const Feature &feature);
+
} // namespace libmv
#endif
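The Feature struct above now has explicit constructors and a stream operator, which makes it easy to dump detection results while tuning DetectOptions. A small sketch, assuming compilation and linking against the libmv sources (detect.cc provides the operator); all coordinates and scores are illustrative:

#include <iostream>
#include <vector>
#include "libmv/simple_pipeline/detect.h"

int main() {
  // Illustrative features, similar to what Detect() appends to its
  // output vector for one frame.
  std::vector<libmv::Feature> features;
  features.push_back(libmv::Feature(120.5f, 64.0f, 1532.0f, 16.0f));
  features.push_back(libmv::Feature(411.0f, 300.25f, 987.0f, 16.0f));

  // Within a single Detect() result, a higher score means the feature is
  // expected to track better.
  for (size_t i = 0; i < features.size(); ++i) {
    std::cout << features[i] << std::endl;
  }
  return 0;
}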
diff --git a/extern/libmv/libmv/simple_pipeline/distortion_models.cc b/extern/libmv/libmv/simple_pipeline/distortion_models.cc
new file mode 100644
index 00000000000..9b6dca2678a
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/distortion_models.cc
@@ -0,0 +1,197 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#include "libmv/simple_pipeline/distortion_models.h"
+#include "libmv/numeric/levenberg_marquardt.h"
+
+namespace libmv {
+
+namespace {
+
+struct InvertPolynomialIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ InvertPolynomialIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1), k2_(k2), k3_(k3),
+ p1_(p1), p2_(p2),
+ x_(image_x), y_(image_y) {}
+
+ Vec2 operator()(const Vec2 &u) const {
+ double xx, yy;
+
+ ApplyPolynomialDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ k1_, k2_, k3_,
+ p1_, p2_,
+ u(0), u(1),
+ &xx, &yy);
+
+ Vec2 fx;
+ fx << (xx - x_), (yy - y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ double k1_, k2_, k3_;
+ double p1_, p2_;
+ double x_, y_;
+};
+
+struct InvertDivisionIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ InvertDivisionIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ k1_(k1), k2_(k2),
+ x_(image_x), y_(image_y) {}
+
+ Vec2 operator()(const Vec2 &u) const {
+ double xx, yy;
+
+ ApplyDivisionDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ k1_, k2_,
+ u(0), u(1),
+ &xx, &yy);
+
+ Vec2 fx;
+ fx << (xx - x_), (yy - y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ double k1_, k2_;
+ double x_, y_;
+};
+
+} // namespace
+
+void InvertPolynomialDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 normalized;
+ normalized(0) = (image_x - principal_point_x) / focal_length_x;
+ normalized(1) = (image_y - principal_point_y) / focal_length_y;
+
+ typedef LevenbergMarquardt<InvertPolynomialIntrinsicsCostFunction> Solver;
+
+ InvertPolynomialIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ k1, k2, k3,
+ p1, p2,
+ image_x, image_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &normalized);
+
+ // TODO(keir): Better error handling.
+
+ *normalized_x = normalized(0);
+ *normalized_y = normalized(1);
+}
+
+void InvertDivisionDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 normalized;
+ normalized(0) = (image_x - principal_point_x) / focal_length_x;
+ normalized(1) = (image_y - principal_point_y) / focal_length_y;
+
+ // TODO(sergey): Use Ceres minimizer instead.
+ typedef LevenbergMarquardt<InvertDivisionIntrinsicsCostFunction> Solver;
+
+ InvertDivisionIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ k1, k2,
+ image_x, image_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &normalized);
+
+ // TODO(keir): Better error handling.
+
+ *normalized_x = normalized(0);
+ *normalized_y = normalized(1);
+}
+
+} // namespace libmv
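Both inversion routines above solve a small two-dimensional least-squares problem with Levenberg-Marquardt, seeded with the undistorted projection, so for a distortion-free camera they return immediately. A simple sanity check is to round-trip a point through the forward and inverse polynomial model, as in this sketch (assumes linking against distortion_models.cc; the coefficients are illustrative):

#include <cstdio>
#include "libmv/simple_pipeline/distortion_models.h"

int main() {
  const double focal_x = 1100.0, focal_y = 1100.0;
  const double ppx = 960.0, ppy = 540.0;
  const double k1 = 0.1, k2 = 0.01, k3 = 0.0, p1 = 0.0, p2 = 0.0;

  // Forward: normalized camera coordinates -> distorted pixel coordinates.
  double image_x, image_y;
  libmv::ApplyPolynomialDistortionModel(focal_x, focal_y, ppx, ppy,
                                        k1, k2, k3, p1, p2,
                                        0.3, -0.2,
                                        &image_x, &image_y);

  // Inverse: should recover approximately (0.3, -0.2).
  double normalized_x, normalized_y;
  libmv::InvertPolynomialDistortionModel(focal_x, focal_y, ppx, ppy,
                                         k1, k2, k3, p1, p2,
                                         image_x, image_y,
                                         &normalized_x, &normalized_y);

  printf("%f %f\n", normalized_x, normalized_y);
  return 0;
}

If the recovered point does not match to within a small tolerance, the minimizer did not converge, which is what the TODO about better error handling refers to.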
diff --git a/extern/libmv/libmv/simple_pipeline/distortion_models.h b/extern/libmv/libmv/simple_pipeline/distortion_models.h
new file mode 100644
index 00000000000..4f8e2295a0e
--- /dev/null
+++ b/extern/libmv/libmv/simple_pipeline/distortion_models.h
@@ -0,0 +1,131 @@
+// Copyright (c) 2014 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+#define LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+
+namespace libmv {
+
+enum DistortionModelType {
+ DISTORTION_MODEL_POLYNOMIAL,
+ DISTORTION_MODEL_DIVISION
+};
+
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the radial lens distortion to a point which is in image pixel
+// coordinates to get normalized coordinates.
+void InvertPolynomialDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double k3,
+ const double p1,
+ const double p2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y);
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the radial lens distortion to a point which is in normalized
+// camera coordinates (i.e. the principal point is at (0, 0)) to get image
+// coordinates in pixels. Templated for use with autodifferentiation.
+template <typename T>
+inline void ApplyPolynomialDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const T &k1,
+ const T &k2,
+ const T &k3,
+ const T &p1,
+ const T &p2,
+ const T &normalized_x,
+ const T &normalized_y,
+ T *image_x,
+ T *image_y) {
+ T x = normalized_x;
+ T y = normalized_y;
+
+ // Apply distortion to the normalized points to get (xd, yd).
+ T r2 = x*x + y*y;
+ T r4 = r2 * r2;
+ T r6 = r4 * r2;
+ T r_coeff = (T(1) + k1*r2 + k2*r4 + k3*r6);
+ T xd = x * r_coeff + T(2)*p1*x*y + p2*(r2 + T(2)*x*x);
+ T yd = y * r_coeff + T(2)*p2*x*y + p1*(r2 + T(2)*y*y);
+
+ // Apply focal length and principal point to get the final image coordinates.
+ *image_x = focal_length_x * xd + principal_point_x;
+ *image_y = focal_length_y * yd + principal_point_y;
+}
+
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the radial lens distortion to a point which is in image pixel
+// coordinates to get normalized coordinates.
+//
+// Uses division distortion model.
+void InvertDivisionDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const double k1,
+ const double k2,
+ const double image_x,
+ const double image_y,
+ double *normalized_x,
+ double *normalized_y);
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the radial lens distortion to a point which is in normalized
+// camera coordinates (i.e. the principal point is at (0, 0)) to get image
+// coordinates in pixels. Templated for use with autodifferentiation.
+//
+// Uses division distortion model.
+template <typename T>
+inline void ApplyDivisionDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const T &k1,
+ const T &k2,
+ const T &normalized_x,
+ const T &normalized_y,
+ T *image_x,
+ T *image_y) {
+
+ T x = normalized_x;
+ T y = normalized_y;
+ T r2 = x*x + y*y;
+ T r4 = r2 * r2;
+
+ T xd = x / (T(1) + k1 * r2 + k2 * r4);
+ T yd = y / (T(1) + k1 * r2 + k2 * r4);
+
+ // Apply focal length and principal point to get the final image coordinates.
+ *image_x = focal_length_x * xd + principal_point_x;
+ *image_y = focal_length_y * yd + principal_point_y;
+}
+
+} // namespace libmv
+
+#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
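Since the division model above divides by (1 + k1*r^2 + k2*r^4), positive coefficients pull points towards the principal point, i.e. they produce barrel distortion. A self-contained numeric check of the formula, with made-up coefficients and no libmv dependency:

#include <cstdio>

int main() {
  // Division model from distortion_models.h, written out for one point.
  const double k1 = 0.2, k2 = 0.0;
  const double x = 0.5, y = 0.5;    // Normalized camera coordinates.
  const double r2 = x * x + y * y;  // 0.5
  const double r4 = r2 * r2;        // 0.25

  const double xd = x / (1.0 + k1 * r2 + k2 * r4);
  const double yd = y / (1.0 + k1 * r2 + k2 * r4);

  // With k1 > 0 the radius shrinks: xd = yd = 0.5 / 1.1, roughly 0.4545.
  printf("xd = %f, yd = %f\n", xd, yd);
  return 0;
}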
diff --git a/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc b/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc
index 64504e7af9a..7b90c28bbca 100644
--- a/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc
+++ b/extern/libmv/libmv/simple_pipeline/keyframe_selection.cc
@@ -33,22 +33,6 @@
namespace libmv {
namespace {
-Vec2 NorrmalizedToPixelSpace(const Vec2 &vec,
- const CameraIntrinsics &intrinsics) {
- Vec2 result;
-
- double focal_length_x = intrinsics.focal_length_x();
- double focal_length_y = intrinsics.focal_length_y();
-
- double principal_point_x = intrinsics.principal_point_x();
- double principal_point_y = intrinsics.principal_point_y();
-
- result(0) = vec(0) * focal_length_x + principal_point_x;
- result(1) = vec(1) * focal_length_y + principal_point_y;
-
- return result;
-}
-
Mat3 IntrinsicsNormalizationMatrix(const CameraIntrinsics &intrinsics) {
Mat3 T = Mat3::Identity(), S = Mat3::Identity();
@@ -151,7 +135,7 @@ void filterZeroWeightMarkersFromTracks(const Tracks &tracks,
} // namespace
void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
- CameraIntrinsics &intrinsics,
+ const CameraIntrinsics &intrinsics,
vector<int> &keyframes) {
// Mirza Tahir Ahmed, Matthew N. Dailey
// Robust key frame extraction for 3D reconstruction from video streams
@@ -256,10 +240,13 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
H_e.resize(x1.cols());
F_e.resize(x1.cols());
for (int i = 0; i < x1.cols(); i++) {
- Vec2 current_x1 =
- NorrmalizedToPixelSpace(Vec2(x1(0, i), x1(1, i)), intrinsics);
- Vec2 current_x2 =
- NorrmalizedToPixelSpace(Vec2(x2(0, i), x2(1, i)), intrinsics);
+ Vec2 current_x1, current_x2;
+
+ intrinsics.NormalizedToImageSpace(x1(0, i), x1(1, i),
+ &current_x1(0), &current_x1(1));
+
+ intrinsics.NormalizedToImageSpace(x2(0, i), x2(1, i),
+ &current_x2(0), &current_x2(1));
H_e(i) = SymmetricGeometricDistance(H, current_x1, current_x2);
F_e(i) = SymmetricEpipolarDistance(F, current_x1, current_x2);
@@ -378,7 +365,7 @@ void SelectKeyframesBasedOnGRICAndVariance(const Tracks &_tracks,
success_intersects_factor_best = success_intersects_factor;
Tracks two_frames_tracks(tracked_markers);
- CameraIntrinsics empty_intrinsics;
+ PolynomialCameraIntrinsics empty_intrinsics;
BundleEvaluation evaluation;
evaluation.evaluate_jacobian = true;
diff --git a/extern/libmv/libmv/simple_pipeline/keyframe_selection.h b/extern/libmv/libmv/simple_pipeline/keyframe_selection.h
index da131010d35..aa3eeaf193d 100644
--- a/extern/libmv/libmv/simple_pipeline/keyframe_selection.h
+++ b/extern/libmv/libmv/simple_pipeline/keyframe_selection.h
@@ -43,9 +43,10 @@ namespace libmv {
// \param intrinsics is camera intrinsics
// \param keyframes will contain all images number which are considered
// good to be used for reconstruction
-void SelectKeyframesBasedOnGRICAndVariance(const Tracks &tracks,
- CameraIntrinsics &intrinsics,
- vector<int> &keyframes);
+void SelectKeyframesBasedOnGRICAndVariance(
+ const Tracks &tracks,
+ const CameraIntrinsics &intrinsics,
+ vector<int> &keyframes);
} // namespace libmv
diff --git a/release/scripts/startup/bl_ui/space_clip.py b/release/scripts/startup/bl_ui/space_clip.py
index 76cb666c342..d638fad8d6a 100644
--- a/release/scripts/startup/bl_ui/space_clip.py
+++ b/release/scripts/startup/bl_ui/space_clip.py
@@ -769,11 +769,19 @@ class CLIP_PT_tracking_lens(Panel):
sub.prop(clip.tracking.camera, "focal_length_pixels")
sub.prop(clip.tracking.camera, "units", text="")
- col = layout.column(align=True)
+ col = layout.column()
col.label(text="Lens Distortion:")
- col.prop(clip.tracking.camera, "k1")
- col.prop(clip.tracking.camera, "k2")
- col.prop(clip.tracking.camera, "k3")
+ camera = clip.tracking.camera
+ col.prop(camera, "distortion_model", text="")
+ if camera.distortion_model == 'POLYNOMIAL':
+ col = layout.column(align=True)
+ col.prop(camera, "k1")
+ col.prop(camera, "k2")
+ col.prop(camera, "k3")
+ elif camera.distortion_model == 'DIVISION':
+ col = layout.column(align=True)
+ col.prop(camera, "division_k1")
+ col.prop(camera, "division_k2")
class CLIP_PT_display(CLIP_PT_clip_view_panel, Panel):
diff --git a/source/blender/blenkernel/BKE_tracking.h b/source/blender/blenkernel/BKE_tracking.h
index ecf6b789227..6d155ba37de 100644
--- a/source/blender/blenkernel/BKE_tracking.h
+++ b/source/blender/blenkernel/BKE_tracking.h
@@ -175,7 +175,8 @@ void BKE_tracking_camera_get_reconstructed_interpolate(struct MovieTracking *tra
int framenr, float mat[4][4]);
/* **** Distortion/Undistortion **** */
-struct MovieDistortion *BKE_tracking_distortion_new(void);
+struct MovieDistortion *BKE_tracking_distortion_new(struct MovieTracking *tracking,
+ int calibration_width, int calibration_height);
void BKE_tracking_distortion_update(struct MovieDistortion *distortion, struct MovieTracking *tracking,
int calibration_width, int calibration_height);
void BKE_tracking_distortion_set_threads(struct MovieDistortion *distortion, int threads);
diff --git a/source/blender/blenkernel/intern/movieclip.c b/source/blender/blenkernel/intern/movieclip.c
index de4846e1772..5012c08d95d 100644
--- a/source/blender/blenkernel/intern/movieclip.c
+++ b/source/blender/blenkernel/intern/movieclip.c
@@ -335,7 +335,9 @@ typedef struct MovieClipCache {
/* cache for undistorted shot */
float principal[2];
- float k1, k2, k3;
+ float polynomial_k1, polynomial_k2, polynomial_k3;
+ float division_k1, division_k2;
+ short distortion_model;
bool undistortion_used;
int proxy;
@@ -732,11 +734,21 @@ static bool check_undistortion_cache_flags(MovieClip *clip)
MovieTrackingCamera *camera = &clip->tracking.camera;
/* check for distortion model changes */
- if (!equals_v2v2(camera->principal, cache->postprocessed.principal))
+ if (!equals_v2v2(camera->principal, cache->postprocessed.principal)) {
return false;
+ }
+
+ if (camera->distortion_model != cache->postprocessed.distortion_model) {
+ return false;
+ }
- if (!equals_v3v3(&camera->k1, &cache->postprocessed.k1))
+ if (!equals_v3v3(&camera->k1, &cache->postprocessed.polynomial_k1)) {
return false;
+ }
+
+ if (!equals_v2v2(&camera->division_k1, &cache->postprocessed.division_k1)) {
+ return false;
+ }
return true;
}
@@ -823,8 +835,10 @@ static void put_postprocessed_frame_to_cache(MovieClip *clip, MovieClipUser *use
}
if (need_undistortion_postprocess(user)) {
+ cache->postprocessed.distortion_model = camera->distortion_model;
copy_v2_v2(cache->postprocessed.principal, camera->principal);
- copy_v3_v3(&cache->postprocessed.k1, &camera->k1);
+ copy_v3_v3(&cache->postprocessed.polynomial_k1, &camera->k1);
+ copy_v2_v2(&cache->postprocessed.division_k1, &camera->division_k1);
cache->postprocessed.undistortion_used = true;
}
else {
diff --git a/source/blender/blenkernel/intern/tracking.c b/source/blender/blenkernel/intern/tracking.c
index 8434fde3005..ca0b52bd7c2 100644
--- a/source/blender/blenkernel/intern/tracking.c
+++ b/source/blender/blenkernel/intern/tracking.c
@@ -1733,32 +1733,19 @@ void BKE_tracking_camera_get_reconstructed_interpolate(MovieTracking *tracking,
/*********************** Distortion/Undistortion *************************/
-static void cameraIntrinscisOptionsFromTracking(libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
- MovieTracking *tracking, int calibration_width, int calibration_height)
-{
- MovieTrackingCamera *camera = &tracking->camera;
- float aspy = 1.0f / tracking->camera.pixel_aspect;
-
- camera_intrinsics_options->focal_length = camera->focal;
-
- camera_intrinsics_options->principal_point_x = camera->principal[0];
- camera_intrinsics_options->principal_point_y = camera->principal[1] * aspy;
-
- camera_intrinsics_options->k1 = camera->k1;
- camera_intrinsics_options->k2 = camera->k2;
- camera_intrinsics_options->k3 = camera->k3;
-
- camera_intrinsics_options->image_width = calibration_width;
- camera_intrinsics_options->image_height = (int) (calibration_height * aspy);
-}
-
-MovieDistortion *BKE_tracking_distortion_new(void)
+MovieDistortion *BKE_tracking_distortion_new(MovieTracking *tracking,
+ int calibration_width, int calibration_height)
{
MovieDistortion *distortion;
+ libmv_CameraIntrinsicsOptions camera_intrinsics_options;
- distortion = MEM_callocN(sizeof(MovieDistortion), "BKE_tracking_distortion_create");
+ tracking_cameraIntrinscisOptionsFromTracking(tracking,
+ calibration_width,
+ calibration_height,
+ &camera_intrinsics_options);
- distortion->intrinsics = libmv_cameraIntrinsicsNewEmpty();
+ distortion = MEM_callocN(sizeof(MovieDistortion), "BKE_tracking_distortion_create");
+ distortion->intrinsics = libmv_cameraIntrinsicsNew(&camera_intrinsics_options);
return distortion;
}
@@ -1768,8 +1755,10 @@ void BKE_tracking_distortion_update(MovieDistortion *distortion, MovieTracking *
{
libmv_CameraIntrinsicsOptions camera_intrinsics_options;
- cameraIntrinscisOptionsFromTracking(&camera_intrinsics_options, tracking,
- calibration_width, calibration_height);
+ tracking_cameraIntrinscisOptionsFromTracking(tracking,
+ calibration_width,
+ calibration_height,
+ &camera_intrinsics_options);
libmv_cameraIntrinsicsUpdate(&camera_intrinsics_options, distortion->intrinsics);
}
@@ -1845,7 +1834,9 @@ void BKE_tracking_distort_v2(MovieTracking *tracking, const float co[2], float r
double x, y;
float aspy = 1.0f / tracking->camera.pixel_aspect;
- cameraIntrinscisOptionsFromTracking(&camera_intrinsics_options, tracking, 0, 0);
+ tracking_cameraIntrinscisOptionsFromTracking(tracking,
+ 0, 0,
+ &camera_intrinsics_options);
/* normalize coords */
x = (co[0] - camera->principal[0]) / camera->focal;
@@ -1866,7 +1857,9 @@ void BKE_tracking_undistort_v2(MovieTracking *tracking, const float co[2], float
double x = co[0], y = co[1];
float aspy = 1.0f / tracking->camera.pixel_aspect;
- cameraIntrinscisOptionsFromTracking(&camera_intrinsics_options, tracking, 0, 0);
+ tracking_cameraIntrinscisOptionsFromTracking(tracking,
+ 0, 0,
+ &camera_intrinsics_options);
libmv_cameraIntrinsicsInvert(&camera_intrinsics_options, x, y, &x, &y);
@@ -1879,8 +1872,9 @@ ImBuf *BKE_tracking_undistort_frame(MovieTracking *tracking, ImBuf *ibuf, int ca
{
MovieTrackingCamera *camera = &tracking->camera;
- if (camera->intrinsics == NULL)
- camera->intrinsics = BKE_tracking_distortion_new();
+ if (camera->intrinsics == NULL) {
+ camera->intrinsics = BKE_tracking_distortion_new(tracking, calibration_width, calibration_height);
+ }
return BKE_tracking_distortion_exec(camera->intrinsics, tracking, ibuf, calibration_width,
calibration_height, overscan, true);
@@ -1891,8 +1885,9 @@ ImBuf *BKE_tracking_distort_frame(MovieTracking *tracking, ImBuf *ibuf, int cali
{
MovieTrackingCamera *camera = &tracking->camera;
- if (camera->intrinsics == NULL)
- camera->intrinsics = BKE_tracking_distortion_new();
+ if (camera->intrinsics == NULL) {
+ camera->intrinsics = BKE_tracking_distortion_new(tracking, calibration_width, calibration_height);
+ }
return BKE_tracking_distortion_exec(camera->intrinsics, tracking, ibuf, calibration_width,
calibration_height, overscan, false);
diff --git a/source/blender/blenkernel/intern/tracking_solver.c b/source/blender/blenkernel/intern/tracking_solver.c
index 44a4c11ec92..9ab1643aab1 100644
--- a/source/blender/blenkernel/intern/tracking_solver.c
+++ b/source/blender/blenkernel/intern/tracking_solver.c
@@ -66,11 +66,7 @@ typedef struct MovieReconstructContext {
bool is_camera;
short motion_flag;
- float focal_length;
- float principal_point[2];
- float k1, k2, k3;
-
- int width, height;
+ libmv_CameraIntrinsicsOptions camera_intrinsics_options;
float reprojection_error;
@@ -134,22 +130,11 @@ static void reconstruct_retrieve_libmv_intrinsics(MovieReconstructContext *conte
struct libmv_Reconstruction *libmv_reconstruction = context->reconstruction;
struct libmv_CameraIntrinsics *libmv_intrinsics = libmv_reconstructionExtractIntrinsics(libmv_reconstruction);
- float aspy = 1.0f / tracking->camera.pixel_aspect;
-
- double focal_length, principal_x, principal_y, k1, k2, k3;
- int width, height;
-
- libmv_cameraIntrinsicsExtract(libmv_intrinsics, &focal_length, &principal_x, &principal_y,
- &k1, &k2, &k3, &width, &height);
-
- tracking->camera.focal = focal_length;
-
- tracking->camera.principal[0] = principal_x;
- tracking->camera.principal[1] = principal_y / (double)aspy;
+ libmv_CameraIntrinsicsOptions camera_intrinsics_options;
+ libmv_cameraIntrinsicsExtractOptions(libmv_intrinsics, &camera_intrinsics_options);
- tracking->camera.k1 = k1;
- tracking->camera.k2 = k2;
- tracking->camera.k3 = k3;
+ tracking_trackingCameraFromIntrinscisOptions(tracking,
+ &camera_intrinsics_options);
}
/* Retrieve reconstructed tracks from libmv to blender.
@@ -369,7 +354,6 @@ MovieReconstructContext *BKE_tracking_reconstruction_context_new(MovieClip *clip
{
MovieTracking *tracking = &clip->tracking;
MovieReconstructContext *context = MEM_callocN(sizeof(MovieReconstructContext), "MovieReconstructContext data");
- MovieTrackingCamera *camera = &tracking->camera;
ListBase *tracksbase = BKE_tracking_object_get_tracks(tracking, object);
float aspy = 1.0f / tracking->camera.pixel_aspect;
int num_tracks = BLI_countlist(tracksbase);
@@ -383,16 +367,9 @@ MovieReconstructContext *BKE_tracking_reconstruction_context_new(MovieClip *clip
context->select_keyframes =
(tracking->settings.reconstruction_flag & TRACKING_USE_KEYFRAME_SELECTION) != 0;
- context->focal_length = camera->focal;
- context->principal_point[0] = camera->principal[0];
- context->principal_point[1] = camera->principal[1] * aspy;
-
- context->width = width;
- context->height = height;
-
- context->k1 = camera->k1;
- context->k2 = camera->k2;
- context->k3 = camera->k3;
+ tracking_cameraIntrinscisOptionsFromTracking(tracking,
+ width, height,
+ &context->camera_intrinsics_options);
context->tracks_map = tracks_map_new(context->object_name, context->is_camera, num_tracks, 0);
@@ -461,22 +438,6 @@ static void reconstruct_update_solve_cb(void *customdata, double progress, const
BLI_snprintf(progressdata->stats_message, progressdata->message_size, "Solving camera | %s", message);
}
-/* FIll in camera intrinsics structure from reconstruction context. */
-static void camraIntrincicsOptionsFromContext(libmv_CameraIntrinsicsOptions *camera_intrinsics_options,
- MovieReconstructContext *context)
-{
- camera_intrinsics_options->focal_length = context->focal_length;
-
- camera_intrinsics_options->principal_point_x = context->principal_point[0];
- camera_intrinsics_options->principal_point_y = context->principal_point[1];
-
- camera_intrinsics_options->k1 = context->k1;
- camera_intrinsics_options->k2 = context->k2;
- camera_intrinsics_options->k3 = context->k3;
-
- camera_intrinsics_options->image_width = context->width;
- camera_intrinsics_options->image_height = context->height;
-}
/* Fill in reconstruction options structure from reconstruction context. */
static void reconstructionOptionsFromContext(libmv_ReconstructionOptions *reconstruction_options,
@@ -506,7 +467,6 @@ void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short *
ReconstructProgressData progressdata;
- libmv_CameraIntrinsicsOptions camera_intrinsics_options;
libmv_ReconstructionOptions reconstruction_options;
progressdata.stop = stop;
@@ -515,18 +475,17 @@ void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short *
progressdata.stats_message = stats_message;
progressdata.message_size = message_size;
- camraIntrincicsOptionsFromContext(&camera_intrinsics_options, context);
reconstructionOptionsFromContext(&reconstruction_options, context);
if (context->motion_flag & TRACKING_MOTION_MODAL) {
context->reconstruction = libmv_solveModal(context->tracks,
- &camera_intrinsics_options,
+ &context->camera_intrinsics_options,
&reconstruction_options,
reconstruct_update_solve_cb, &progressdata);
}
else {
context->reconstruction = libmv_solveReconstruction(context->tracks,
- &camera_intrinsics_options,
+ &context->camera_intrinsics_options,
&reconstruction_options,
reconstruct_update_solve_cb, &progressdata);
diff --git a/source/blender/blenkernel/intern/tracking_util.c b/source/blender/blenkernel/intern/tracking_util.c
index 4b3c354dd12..4ebe8494101 100644
--- a/source/blender/blenkernel/intern/tracking_util.c
+++ b/source/blender/blenkernel/intern/tracking_util.c
@@ -51,6 +51,8 @@
#include "tracking_private.h"
+#include "libmv-capi.h"
+
/*********************** Tracks map *************************/
TracksMap *tracks_map_new(const char *object_name, bool is_camera, int num_tracks, int customdata_size)
@@ -386,3 +388,68 @@ void tracking_marker_insert_disabled(MovieTrackingTrack *track, const MovieTrack
if (overwrite || !BKE_tracking_track_has_marker_at_frame(track, marker_new.framenr))
BKE_tracking_marker_insert(track, &marker_new);
}
+
+
+/* Fill in Libmv C-API camera intrinsics options from tracking structure.
+ */
+void tracking_cameraIntrinscisOptionsFromTracking(MovieTracking *tracking,
+ int calibration_width, int calibration_height,
+ libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
+{
+ MovieTrackingCamera *camera = &tracking->camera;
+ float aspy = 1.0f / tracking->camera.pixel_aspect;
+
+ camera_intrinsics_options->focal_length = camera->focal;
+
+ camera_intrinsics_options->principal_point_x = camera->principal[0];
+ camera_intrinsics_options->principal_point_y = camera->principal[1] * aspy;
+
+ switch (camera->distortion_model) {
+ case TRACKING_DISTORTION_MODEL_POLYNOMIAL:
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_POLYNOMIAL;
+ camera_intrinsics_options->polynomial_k1 = camera->k1;
+ camera_intrinsics_options->polynomial_k2 = camera->k2;
+ camera_intrinsics_options->polynomial_k3 = camera->k3;
+ camera_intrinsics_options->polynomial_p1 = 0.0;
+ camera_intrinsics_options->polynomial_p2 = 0.0;
+ break;
+ case TRACKING_DISTORTION_MODEL_DIVISION:
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_DIVISION;
+ camera_intrinsics_options->division_k1 = camera->division_k1;
+ camera_intrinsics_options->division_k2 = camera->division_k2;
+ break;
+ default:
+ BLI_assert(!"Unknown distortion model");
+ }
+
+ camera_intrinsics_options->image_width = calibration_width;
+ camera_intrinsics_options->image_height = (int) (calibration_height * aspy);
+}
+
+void tracking_trackingCameraFromIntrinscisOptions(MovieTracking *tracking,
+ const libmv_CameraIntrinsicsOptions *camera_intrinsics_options)
+{
+ float aspy = 1.0f / tracking->camera.pixel_aspect;
+ MovieTrackingCamera *camera = &tracking->camera;
+
+ camera->focal = camera_intrinsics_options->focal_length;
+
+ camera->principal[0] = camera_intrinsics_options->principal_point_x;
+ camera->principal[1] = camera_intrinsics_options->principal_point_y / (double) aspy;
+
+ switch (camera_intrinsics_options->distortion_model) {
+ case LIBMV_DISTORTION_MODEL_POLYNOMIAL:
+ camera->distortion_model = TRACKING_DISTORTION_MODEL_POLYNOMIAL;
+ camera->k1 = camera_intrinsics_options->polynomial_k1;
+ camera->k2 = camera_intrinsics_options->polynomial_k2;
+ camera->k3 = camera_intrinsics_options->polynomial_k3;
+ break;
+ case LIBMV_DISTORTION_MODEL_DIVISION:
+ camera->distortion_model = TRACKING_DISTORTION_MODEL_DIVISION;
+ camera->division_k1 = camera_intrinsics_options->division_k1;
+ camera->division_k2 = camera_intrinsics_options->division_k2;
+ break;
+ default:
+ BLI_assert(!"Unknown distortion model");
+ }
+}
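As a quick worked example of the pixel-aspect handling in the two helpers above (numbers are illustrative): with pixel_aspect = 2.0 the code computes aspy = 1.0 / 2.0 = 0.5, so a principal point y of 540 px is passed to libmv as 540 * 0.5 = 270 and a calibration height of 1080 becomes (int)(1080 * 0.5) = 540; the inverse helper divides by the same aspy, restoring 270 / 0.5 = 540.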
diff --git a/source/blender/blenkernel/tracking_private.h b/source/blender/blenkernel/tracking_private.h
index 981b7951097..6efa1533e3e 100644
--- a/source/blender/blenkernel/tracking_private.h
+++ b/source/blender/blenkernel/tracking_private.h
@@ -41,6 +41,8 @@ struct GHash;
struct MovieTracking;
struct MovieTrackingMarker;
+struct libmv_CameraIntrinsicsOptions;
+
/*********************** Tracks map *************************/
typedef struct TracksMap {
@@ -86,4 +88,11 @@ void tracking_set_marker_coords_from_tracking(int frame_width, int frame_height,
void tracking_marker_insert_disabled(struct MovieTrackingTrack *track, const struct MovieTrackingMarker *ref_marker,
bool before, bool overwrite);
+void tracking_cameraIntrinscisOptionsFromTracking(struct MovieTracking *tracking,
+ int calibration_width, int calibration_height,
+ struct libmv_CameraIntrinsicsOptions *camera_intrinsics_options);
+
+void tracking_trackingCameraFromIntrinscisOptions(struct MovieTracking *tracking,
+ const struct libmv_CameraIntrinsicsOptions *camera_intrinsics_options);
+
#endif /* __TRACKING_PRIVATE_H__ */
diff --git a/source/blender/editors/space_clip/clip_ops.c b/source/blender/editors/space_clip/clip_ops.c
index 3f0a376d791..b8fe9fe4a17 100644
--- a/source/blender/editors/space_clip/clip_ops.c
+++ b/source/blender/editors/space_clip/clip_ops.c
@@ -1018,8 +1018,11 @@ static void do_movie_proxy(void *pjv, int *UNUSED(build_sizes), int UNUSED(build
if (build_undistort_count) {
int threads = BLI_system_thread_count();
+ int width, height;
- distortion = BKE_tracking_distortion_new();
+ BKE_movieclip_get_size(clip, NULL, &width, &height);
+
+ distortion = BKE_tracking_distortion_new(&clip->tracking, width, height);
BKE_tracking_distortion_set_threads(distortion, threads);
}
@@ -1185,8 +1188,11 @@ static void do_sequence_proxy(void *pjv, int *build_sizes, int build_count,
handle->build_undistort_count = build_undistort_count;
handle->build_undistort_sizes = build_undistort_sizes;
- if (build_undistort_count)
- handle->distortion = BKE_tracking_distortion_new();
+ if (build_undistort_count) {
+ int width, height;
+ BKE_movieclip_get_size(clip, NULL, &width, &height);
+ handle->distortion = BKE_tracking_distortion_new(&clip->tracking, width, height);
+ }
if (tot_thread > 1)
BLI_insert_thread(&threads, handle);
diff --git a/source/blender/makesdna/DNA_tracking_types.h b/source/blender/makesdna/DNA_tracking_types.h
index a5079cb6c6f..c471cb26892 100644
--- a/source/blender/makesdna/DNA_tracking_types.h
+++ b/source/blender/makesdna/DNA_tracking_types.h
@@ -61,14 +61,21 @@ typedef struct MovieReconstructedCamera {
typedef struct MovieTrackingCamera {
void *intrinsics; /* intrinsics handle */
+ short distortion_model;
+ short pad;
+
float sensor_width; /* width of CCD sensor */
float pixel_aspect; /* pixel aspect ratio */
- float pad;
float focal; /* focal length */
short units; /* units of focal length user is working with */
short pad1;
float principal[2]; /* principal point */
- float k1, k2, k3; /* radial distortion */
+
+ /* Polynomial distortion */
+ float k1, k2, k3; /* polynomial radial distortion */
+
+ /* Division distortion model coefficients */
+ float division_k1, division_k2;
} MovieTrackingCamera;
typedef struct MovieTrackingMarker {
@@ -348,6 +355,12 @@ typedef struct MovieTracking {
MovieTrackingDopesheet dopesheet; /* dopesheet data */
} MovieTracking;
+/* MovieTrackingCamera->distortion_model */
+enum {
+ TRACKING_DISTORTION_MODEL_POLYNOMIAL = 0,
+ TRACKING_DISTORTION_MODEL_DIVISION = 1
+};
+
/* MovieTrackingCamera->units */
enum {
CAMERA_UNITS_PX = 0,
diff --git a/source/blender/makesrna/intern/rna_tracking.c b/source/blender/makesrna/intern/rna_tracking.c
index 847efa2fad5..537ffe630a2 100644
--- a/source/blender/makesrna/intern/rna_tracking.c
+++ b/source/blender/makesrna/intern/rna_tracking.c
@@ -372,6 +372,17 @@ static void rna_tracking_stabTracks_active_index_range(PointerRNA *ptr, int *min
*max = max_ii(0, clip->tracking.stabilization.tot_track - 1);
}
+static void rna_tracking_resetIntrinsics(Main *UNUSED(bmain), Scene *UNUSED(scene), PointerRNA *ptr)
+{
+ MovieClip *clip = (MovieClip *)ptr->id.data;
+ MovieTracking *tracking = &clip->tracking;
+
+ if (tracking->camera.intrinsics) {
+ BKE_tracking_distortion_free(tracking->camera.intrinsics);
+ tracking->camera.intrinsics = NULL;
+ }
+}
+
static void rna_tracking_flushUpdate(Main *UNUSED(bmain), Scene *scene, PointerRNA *ptr)
{
MovieClip *clip = (MovieClip *)ptr->id.data;
@@ -990,6 +1001,13 @@ static void rna_def_trackingCamera(BlenderRNA *brna)
StructRNA *srna;
PropertyRNA *prop;
+ static EnumPropertyItem distortion_model_items[] = {
+ {TRACKING_DISTORTION_MODEL_POLYNOMIAL, "POLYNOMIAL", 0, "Polynomial", "Radial distortion model which fits common cameras"},
+		{TRACKING_DISTORTION_MODEL_DIVISION, "DIVISION", 0, "Division", "Division distortion model which "
+		                                                                "better represents wide-angle cameras"},
+ {0, NULL, 0, NULL, NULL}
+ };
+
static EnumPropertyItem camera_units_items[] = {
{CAMERA_UNITS_PX, "PIXELS", 0, "px", "Use pixels for units of focal length"},
{CAMERA_UNITS_MM, "MILLIMETERS", 0, "mm", "Use millimeters for units of focal length"},
@@ -1000,6 +1018,13 @@ static void rna_def_trackingCamera(BlenderRNA *brna)
RNA_def_struct_path_func(srna, "rna_trackingCamera_path");
RNA_def_struct_ui_text(srna, "Movie tracking camera data", "Match-moving camera data for tracking");
+ /* Distortion model */
+ prop = RNA_def_property(srna, "distortion_model", PROP_ENUM, PROP_NONE);
+ RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+ RNA_def_property_enum_items(prop, distortion_model_items);
+ RNA_def_property_ui_text(prop, "Distortion Model", "Distortion model used for camera lenses");
+ RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_resetIntrinsics");
+
/* Sensor */
prop = RNA_def_property(srna, "sensor_width", PROP_FLOAT, PROP_NONE);
RNA_def_property_float_sdna(prop, NULL, "sensor_width");
@@ -1065,6 +1090,19 @@ static void rna_def_trackingCamera(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "K3", "Third coefficient of third order polynomial radial distortion");
RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+ /* Division distortion parameters */
+ prop = RNA_def_property(srna, "division_k1", PROP_FLOAT, PROP_NONE);
+ RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+ RNA_def_property_ui_range(prop, -10, 10, 0.1, 3);
+ RNA_def_property_ui_text(prop, "K1", "First coefficient of second order division distortion");
+ RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+
+ prop = RNA_def_property(srna, "division_k2", PROP_FLOAT, PROP_NONE);
+ RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+ RNA_def_property_ui_range(prop, -10, 10, 0.1, 3);
+	RNA_def_property_ui_text(prop, "K2", "Second coefficient of second order division distortion");
+ RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+
/* pixel aspect */
prop = RNA_def_property(srna, "pixel_aspect", PROP_FLOAT, PROP_XYZ);
RNA_def_property_float_sdna(prop, NULL, "pixel_aspect");