diff options
author | Sergey Sharybin <sergey.vfx@gmail.com> | 2013-05-30 13:03:49 +0400 |
---|---|---|
committer | Sergey Sharybin <sergey.vfx@gmail.com> | 2013-05-30 13:03:49 +0400 |
commit | cf5e979fb4cef064f60d901c89c2a8a2f039d410 (patch) | |
tree | 5d7f4d9937808ab42d1abb78cf9bb769c3efc574 /source/blender/blenkernel | |
parent | 9fb3d3e0322cb6692f822ee374de1ec03ab70454 (diff) |
Motion tracking: automatic keyframe selection
Implements an automatic keyframe selection algorithm which uses
a couple of approaches to find the best keyframe candidates:
- First, a slightly modified Pollefeys's criterion is used, which
limits the correspondence ratio from 80% to 100%. This allows
rejecting a keyframe candidate early without doing heavy math in
cases where there are not many common features with the first keyframe.
- Second step is based on Geometric Robust Information Criteria
(aka GRIC), which checks whether features motion between
candidate keyframes is better defined by homography or
fundamental matrices.
To be a good keyframe candidate, the fundamental matrix needs to
define the motion better than the homography (in this case F-GRIC
will be smaller than H-GRIC).
These two criteria are well described in this paper:
http://www.cs.ait.ac.th/~mdailey/papers/Tahir-KeyFrame.pdf
- Final step is based on estimating reconstruction error of
a full-scene solution using candidate keyframes. This part
is based on the following paper:
ftp://ftp.tnt.uni-hannover.de/pub/papers/2004/ECCV2004-TTHBAW.pdf
This step requires reconstruction using candidate keyframes
and obtaining covariance matrix of 3D points positions.
Reconstruction was done in a pretty straightforward way using
other simple pipeline routines, and for covariance estimation
the pseudo-inverse of the Hessian is used, which in this case is
(J^T * J)+, where + denotes pseudo-inverse.
The Jacobian matrix is estimated using the Ceres evaluate API.
It is also crucial to get rid of possible gauge ambiguity, which
in our case is done by zeroing 7 (the number of gauge freedoms)
eigenvalues in the pseudo-inverse.
There is still room for improving and optimizing the code,
but we need some point to start with anyway :)
Thanks to Keir Mierle and Sameer Agarwal who assisted a lot
to make this feature working.
Diffstat (limited to 'source/blender/blenkernel')
-rw-r--r-- | source/blender/blenkernel/intern/tracking.c | 41 |
1 files changed, 30 insertions, 11 deletions
diff --git a/source/blender/blenkernel/intern/tracking.c b/source/blender/blenkernel/intern/tracking.c index 96d7d7d4fd4..b934640e5a0 100644 --- a/source/blender/blenkernel/intern/tracking.c +++ b/source/blender/blenkernel/intern/tracking.c @@ -2857,6 +2857,7 @@ void BKE_tracking_refine_marker(MovieClip *clip, MovieTrackingTrack *track, Movi typedef struct MovieReconstructContext { struct libmv_Tracks *tracks; + bool select_keyframes; int keyframe1, keyframe2; short refine_flags; @@ -3130,12 +3131,15 @@ int BKE_tracking_reconstruction_check(MovieTracking *tracking, MovieTrackingObje /* TODO: check for number of tracks? */ return TRUE; } - else if (reconstruct_count_tracks_on_both_keyframes(tracking, object) < 8) { - BLI_strncpy(error_msg, - N_("At least 8 common tracks on both of keyframes are needed for reconstruction"), - error_size); + else if ((tracking->settings.reconstruction_flag & TRACKING_USE_KEYFRAME_SELECTION) == 0) { + /* automatic keyframe selection does not require any pre-process checks */ + if (reconstruct_count_tracks_on_both_keyframes(tracking, object) < 8) { + BLI_strncpy(error_msg, + N_("At least 8 common tracks on both of keyframes are needed for reconstruction"), + error_size); - return FALSE; + return FALSE; + } } #ifndef WITH_LIBMV @@ -3166,6 +3170,9 @@ MovieReconstructContext *BKE_tracking_reconstruction_context_new(MovieTracking * context->is_camera = object->flag & TRACKING_OBJECT_CAMERA; context->motion_flag = tracking->settings.motion_flag; + context->select_keyframes = + (tracking->settings.reconstruction_flag & TRACKING_USE_KEYFRAME_SELECTION) != 0; + context->focal_length = camera->focal; context->principal_point[0] = camera->principal[0]; context->principal_point[1] = camera->principal[1] * aspy; @@ -3269,6 +3276,8 @@ static void camraIntrincicsOptionsFromContext(libmv_cameraIntrinsicsOptions *cam static void reconstructionOptionsFromContext(libmv_reconstructionOptions *reconstruction_options, MovieReconstructContext *context) { + 
reconstruction_options->select_keyframes = context->select_keyframes; + reconstruction_options->keyframe1 = context->keyframe1; reconstruction_options->keyframe2 = context->keyframe2; @@ -3317,6 +3326,12 @@ void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short * &camera_intrinsics_options, &reconstruction_options, reconstruct_update_solve_cb, &progressdata); + + if (context->select_keyframes) { + /* store actual keyframes used for reconstruction to update them in the interface later */ + context->keyframe1 = reconstruction_options.keyframe1; + context->keyframe2 = reconstruction_options.keyframe2; + } } error = libmv_reprojectionError(context->reconstruction); @@ -3330,18 +3345,22 @@ void BKE_tracking_reconstruction_solve(MovieReconstructContext *context, short * int BKE_tracking_reconstruction_finish(MovieReconstructContext *context, MovieTracking *tracking) { MovieTrackingReconstruction *reconstruction; + MovieTrackingObject *object; tracks_map_merge(context->tracks_map, tracking); BKE_tracking_dopesheet_tag_update(tracking); - if (context->is_camera) { + object = BKE_tracking_object_get_named(tracking, context->object_name); + + if (context->is_camera) reconstruction = &tracking->reconstruction; - } - else { - MovieTrackingObject *object; - - object = BKE_tracking_object_get_named(tracking, context->object_name); + else reconstruction = &object->reconstruction; + + /* update keyframe in the interface */ + if (context->select_keyframes) { + object->keyframe1 = context->keyframe1; + object->keyframe2 = context->keyframe2; } reconstruction->error = context->reprojection_error; |