Diffstat (limited to 'intern/libmv/libmv')
-rw-r--r--  intern/libmv/libmv/autotrack/reconstruction.h                  |   5
-rw-r--r--  intern/libmv/libmv/base/map.h                                  |  34
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.cc                   | 584
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc        |  63
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.h         |  58
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h    |   4
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.cc        |  92
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.h         |  78
-rw-r--r--  intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h |   4
-rw-r--r--  intern/libmv/libmv/simple_pipeline/intersect.h                 |   8
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.cc           |  65
-rw-r--r--  intern/libmv/libmv/simple_pipeline/reconstruction.h            |  29
-rw-r--r--  intern/libmv/libmv/simple_pipeline/resect.h                    |   4
-rw-r--r--  intern/libmv/libmv/simple_pipeline/tracks.h                    |   6
14 files changed, 782 insertions, 252 deletions
diff --git a/intern/libmv/libmv/autotrack/reconstruction.h b/intern/libmv/libmv/autotrack/reconstruction.h
index e1d4e882cbd..732e74063f1 100644
--- a/intern/libmv/libmv/autotrack/reconstruction.h
+++ b/intern/libmv/libmv/autotrack/reconstruction.h
@@ -23,6 +23,7 @@
#ifndef LIBMV_AUTOTRACK_RECONSTRUCTION_H_
#define LIBMV_AUTOTRACK_RECONSTRUCTION_H_
+#include "libmv/base/map.h"
#include "libmv/base/vector.h"
#include "libmv/numeric/numeric.h"
#include "libmv/simple_pipeline/camera_intrinsics.h"
@@ -51,7 +52,7 @@ class Point {
};
// A reconstruction for a set of tracks. The indexing for clip, frame, and
-// track should match that of a Tracs object, stored elsewhere.
+// track should match that of a Tracks object, stored elsewhere.
class Reconstruction {
public:
// All methods copy their input reference or take ownership of the pointer.
@@ -75,7 +76,7 @@ class Reconstruction {
vector<CameraIntrinsics*> camera_intrinsics_;
// Indexed by Marker::clip then by Marker::frame.
- vector<vector<CameraPose> > camera_poses_;
+ vector<map<int, CameraPose>> camera_poses_;
// Indexed by Marker::track.
vector<Point> points_;
diff --git a/intern/libmv/libmv/base/map.h b/intern/libmv/libmv/base/map.h
new file mode 100644
index 00000000000..88b720f17fe
--- /dev/null
+++ b/intern/libmv/libmv/base/map.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2020 libmv authors.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to
+// deal in the Software without restriction, including without limitation the
+// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+// sell copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+// IN THE SOFTWARE.
+
+#ifndef LIBMV_BASE_MAP_H
+#define LIBMV_BASE_MAP_H
+
+#include <map>
+#include <utility>
+
+namespace libmv {
+
+using std::map;
+using std::make_pair;
+
+} // namespace libmv
+
+#endif // LIBMV_BASE_MAP_H
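
The new header only pulls std::map and std::make_pair into the libmv namespace, mirroring what base/vector.h already does for std::vector. A minimal usage sketch (hypothetical call site, not part of the patch):

  #include "libmv/base/map.h"

  void Example() {
    // Keys may be sparse frame numbers; nothing is allocated in between.
    libmv::map<int, double> frame_to_focal;
    frame_to_focal.insert(libmv::make_pair(5000, 24.0));
  }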
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.cc b/intern/libmv/libmv/simple_pipeline/bundle.cc
index e61650fb371..2ecc0505e1f 100644
--- a/intern/libmv/libmv/simple_pipeline/bundle.cc
+++ b/intern/libmv/libmv/simple_pipeline/bundle.cc
@@ -24,6 +24,7 @@
#include "ceres/ceres.h"
#include "ceres/rotation.h"
+#include "libmv/base/map.h"
#include "libmv/base/vector.h"
#include "libmv/logging/logging.h"
#include "libmv/multiview/fundamental.h"
@@ -66,18 +67,221 @@ enum {
namespace {
-// Cost functor which computes reprojection error of 3D point X
-// on camera defined by angle-axis rotation and it's translation
-// (which are in the same block due to optimization reasons).
+bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
+ const DistortionModelType distortion_model =
+ intrinsics->GetDistortionModelType();
+ return (distortion_model == DISTORTION_MODEL_NUKE);
+}
+
+// Apply the distortion model (distort the input) to the input point in the
+// normalized space to get a distorted coordinate in the image space.
+//
+// Using intrinsics values from the parameter block, which makes this function
+// suitable for use from a cost functor.
+//
+// Only use for distortion models which are analytically defined for their
+// Apply() function.
+//
+// The invariant_intrinsics are used to access intrinsics which are never
+// packed into the parameter block: for example, the distortion model type
+// and image dimensions.
+template<typename T>
+void ApplyDistortionModelUsingIntrinsicsBlock(
+ const CameraIntrinsics *invariant_intrinsics,
+ const T* const intrinsics_block,
+ const T& normalized_x, const T& normalized_y,
+ T* distorted_x, T* distorted_y) {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics_block[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
+
+ // TODO(keir): Do early bailouts for zero distortion; these are expensive
+ // jet operations.
+ switch (invariant_intrinsics->GetDistortionModelType()) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ {
+ const T& k1 = intrinsics_block[OFFSET_K1];
+ const T& k2 = intrinsics_block[OFFSET_K2];
+ const T& k3 = intrinsics_block[OFFSET_K3];
+ const T& p1 = intrinsics_block[OFFSET_P1];
+ const T& p2 = intrinsics_block[OFFSET_P2];
+
+ ApplyPolynomialDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2, k3,
+ p1, p2,
+ normalized_x, normalized_y,
+ distorted_x, distorted_y);
+ return;
+ }
+
+ case DISTORTION_MODEL_DIVISION:
+ {
+ const T& k1 = intrinsics_block[OFFSET_K1];
+ const T& k2 = intrinsics_block[OFFSET_K2];
+
+ ApplyDivisionDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ k1, k2,
+ normalized_x, normalized_y,
+ distorted_x, distorted_y);
+ return;
+ }
+
+ case DISTORTION_MODEL_NUKE:
+ {
+ LOG(FATAL) << "Unsupported distortion model.";
+ return;
+ }
+ }
+
+ LOG(FATAL) << "Unknown distortion model.";
+}
+
+// Invert the distortion model (undistort the input) on the input point in the
+// image space to get an undistorted coordinate in the normalized space.
+//
+// Using intrinsics values from the parameter block, which makes this function
+// suitable for use from a cost functor.
+//
+// Only use for distortion models which are analytically defined for their
+// Invert() function.
+//
+// The invariant_intrinsics are used to access intrinsics which are never
+// packed into the parameter block: for example, the distortion model type
+// and image dimensions.
+template<typename T>
+void InvertDistortionModelUsingIntrinsicsBlock(
+ const CameraIntrinsics *invariant_intrinsics,
+ const T* const intrinsics_block,
+ const T& image_x, const T& image_y,
+ T* normalized_x, T* normalized_y) {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics_block[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
+
+ // TODO(keir): Do early bailouts for zero distortion; these are expensive
+ // jet operations.
+ switch (invariant_intrinsics->GetDistortionModelType()) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ case DISTORTION_MODEL_DIVISION:
+ LOG(FATAL) << "Unsupported distortion model.";
+ return;
+
+ case DISTORTION_MODEL_NUKE:
+ {
+ const T& k1 = intrinsics_block[OFFSET_K1];
+ const T& k2 = intrinsics_block[OFFSET_K2];
+
+ InvertNukeDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ invariant_intrinsics->image_width(),
+ invariant_intrinsics->image_height(),
+ k1, k2,
+ image_x, image_y,
+ normalized_x, normalized_y);
+ return;
+ }
+ }
+
+ LOG(FATAL) << "Unknown distortion model.";
+}
+
+template<typename T>
+void NormalizedToImageSpace(const T* const intrinsics_block,
+ const T& normalized_x, const T& normalized_y,
+ T* image_x, T* image_y) {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics_block[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
+
+ *image_x = normalized_x * focal_length + principal_point_x;
+ *image_y = normalized_y * focal_length + principal_point_y;
+}
+
+// Cost functor which computes reprojection error of 3D point X on camera
+// defined by angle-axis rotation and its translation (which are in the same
+// block due to optimization reasons).
+//
+// This functor can only be used for distortion models which have analytically
+// defined Apply() function.
+struct ReprojectionErrorApplyIntrinsics {
+ ReprojectionErrorApplyIntrinsics(
+ const CameraIntrinsics *invariant_intrinsics,
+ const double observed_distorted_x,
+ const double observed_distorted_y,
+ const double weight)
+ : invariant_intrinsics_(invariant_intrinsics),
+ observed_distorted_x_(observed_distorted_x),
+ observed_distorted_y_(observed_distorted_y),
+ weight_(weight) {}
+
+ template <typename T>
+ bool operator()(const T* const intrinsics,
+ const T* const R_t, // Rotation denoted by angle axis
+ // followed with translation
+ const T* const X, // Point coordinates 3x1.
+ T* residuals) const {
+ // Compute projective coordinates: x = RX + t.
+ T x[3];
+
+ ceres::AngleAxisRotatePoint(R_t, X, x);
+ x[0] += R_t[3];
+ x[1] += R_t[4];
+ x[2] += R_t[5];
+
+ // Prevent points from going behind the camera.
+ if (x[2] < T(0)) {
+ return false;
+ }
+
+ // Compute normalized coordinates: x /= x[2].
+ T xn = x[0] / x[2];
+ T yn = x[1] / x[2];
+
+ T predicted_distorted_x, predicted_distorted_y;
+ ApplyDistortionModelUsingIntrinsicsBlock(
+ invariant_intrinsics_,
+ intrinsics,
+ xn, yn,
+ &predicted_distorted_x, &predicted_distorted_y);
+
+ // The error is the difference between the predicted and observed position.
+ residuals[0] = (predicted_distorted_x - T(observed_distorted_x_)) * weight_;
+ residuals[1] = (predicted_distorted_y - T(observed_distorted_y_)) * weight_;
+ return true;
+ }
+
+ const CameraIntrinsics *invariant_intrinsics_;
+ const double observed_distorted_x_;
+ const double observed_distorted_y_;
+ const double weight_;
+};
+
+// Cost functor which computes reprojection error of 3D point X on camera
+// defined by angle-axis rotation and its translation (which are in the same
+// block due to optimization reasons).
//
-// This functor uses a radial distortion model.
-struct OpenCVReprojectionError {
- OpenCVReprojectionError(const DistortionModelType distortion_model,
- const double observed_x,
- const double observed_y,
- const double weight)
- : distortion_model_(distortion_model),
- observed_x_(observed_x), observed_y_(observed_y),
+// This functor can only be used for distortion models which have analytically
+// defined Invert() function.
+struct ReprojectionErrorInvertIntrinsics {
+ ReprojectionErrorInvertIntrinsics(
+ const CameraIntrinsics *invariant_intrinsics,
+ const double observed_distorted_x,
+ const double observed_distorted_y,
+ const double weight)
+ : invariant_intrinsics_(invariant_intrinsics),
+ observed_distorted_x_(observed_distorted_x),
+ observed_distorted_y_(observed_distorted_y),
weight_(weight) {}
template <typename T>
@@ -108,63 +312,37 @@ struct OpenCVReprojectionError {
T xn = x[0] / x[2];
T yn = x[1] / x[2];
- T predicted_x, predicted_y;
-
- // Apply distortion to the normalized points to get (xd, yd).
- // TODO(keir): Do early bailouts for zero distortion; these are expensive
- // jet operations.
- switch (distortion_model_) {
- case DISTORTION_MODEL_POLYNOMIAL:
- {
- const T& k1 = intrinsics[OFFSET_K1];
- const T& k2 = intrinsics[OFFSET_K2];
- const T& k3 = intrinsics[OFFSET_K3];
- const T& p1 = intrinsics[OFFSET_P1];
- const T& p2 = intrinsics[OFFSET_P2];
-
- ApplyPolynomialDistortionModel(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- k1, k2, k3,
- p1, p2,
- xn, yn,
- &predicted_x,
- &predicted_y);
- break;
- }
- case DISTORTION_MODEL_DIVISION:
- {
- const T& k1 = intrinsics[OFFSET_K1];
- const T& k2 = intrinsics[OFFSET_K2];
+ // Compute image space coordinate from normalized.
+ T predicted_x = focal_length * xn + principal_point_x;
+ T predicted_y = focal_length * yn + principal_point_y;
- ApplyDivisionDistortionModel(focal_length,
- focal_length,
- principal_point_x,
- principal_point_y,
- k1, k2,
- xn, yn,
- &predicted_x,
- &predicted_y);
- break;
- }
- default:
- LOG(FATAL) << "Unknown distortion model";
- }
+ T observed_undistorted_normalized_x, observed_undistorted_normalized_y;
+ InvertDistortionModelUsingIntrinsicsBlock(
+ invariant_intrinsics_,
+ intrinsics,
+ T(observed_distorted_x_), T(observed_distorted_y_),
+ &observed_undistorted_normalized_x, &observed_undistorted_normalized_y);
+
+ T observed_undistorted_image_x, observed_undistorted_image_y;
+ NormalizedToImageSpace(
+ intrinsics,
+ observed_undistorted_normalized_x, observed_undistorted_normalized_y,
+ &observed_undistorted_image_x, &observed_undistorted_image_y);
// The error is the difference between the predicted and observed position.
- residuals[0] = (predicted_x - T(observed_x_)) * weight_;
- residuals[1] = (predicted_y - T(observed_y_)) * weight_;
+ residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
+ residuals[1] = (predicted_y - observed_undistorted_image_y) * weight_;
+
return true;
}
- const DistortionModelType distortion_model_;
- const double observed_x_;
- const double observed_y_;
+ const CameraIntrinsics *invariant_intrinsics_;
+ const double observed_distorted_x_;
+ const double observed_distorted_y_;
const double weight_;
};
-// Print a message to the log which camera intrinsics are gonna to be optimixed.
+// Print a message to the log about which camera intrinsics are going to be
+// optimized.
void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
LOG(INFO) << "Bundling only camera positions.";
@@ -193,29 +371,29 @@ void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
// Pack intrinsics from object to an array for easier
// and faster minimization.
void PackIntrinisicsIntoArray(const CameraIntrinsics &intrinsics,
- double ceres_intrinsics[OFFSET_MAX]) {
- ceres_intrinsics[OFFSET_FOCAL_LENGTH] = intrinsics.focal_length();
- ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X] = intrinsics.principal_point_x();
- ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y] = intrinsics.principal_point_y();
+ double intrinsics_block[OFFSET_MAX]) {
+ intrinsics_block[OFFSET_FOCAL_LENGTH] = intrinsics.focal_length();
+ intrinsics_block[OFFSET_PRINCIPAL_POINT_X] = intrinsics.principal_point_x();
+ intrinsics_block[OFFSET_PRINCIPAL_POINT_Y] = intrinsics.principal_point_y();
int num_distortion_parameters = intrinsics.num_distortion_parameters();
assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
const double *distortion_parameters = intrinsics.distortion_parameters();
for (int i = 0; i < num_distortion_parameters; ++i) {
- ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i] =
+ intrinsics_block[FIRST_DISTORTION_COEFFICIENT + i] =
distortion_parameters[i];
}
}
// Unpack intrinsics back from an array to an object.
-void UnpackIntrinsicsFromArray(const double ceres_intrinsics[OFFSET_MAX],
+void UnpackIntrinsicsFromArray(const double intrinsics_block[OFFSET_MAX],
CameraIntrinsics *intrinsics) {
- intrinsics->SetFocalLength(ceres_intrinsics[OFFSET_FOCAL_LENGTH],
- ceres_intrinsics[OFFSET_FOCAL_LENGTH]);
+ intrinsics->SetFocalLength(intrinsics_block[OFFSET_FOCAL_LENGTH],
+ intrinsics_block[OFFSET_FOCAL_LENGTH]);
- intrinsics->SetPrincipalPoint(ceres_intrinsics[OFFSET_PRINCIPAL_POINT_X],
- ceres_intrinsics[OFFSET_PRINCIPAL_POINT_Y]);
+ intrinsics->SetPrincipalPoint(intrinsics_block[OFFSET_PRINCIPAL_POINT_X],
+ intrinsics_block[OFFSET_PRINCIPAL_POINT_Y]);
int num_distortion_parameters = intrinsics->num_distortion_parameters();
assert(num_distortion_parameters <= NUM_DISTORTION_COEFFICIENTS);
@@ -223,54 +401,46 @@ void UnpackIntrinsicsFromArray(const double ceres_intrinsics[OFFSET_MAX],
double *distortion_parameters = intrinsics->distortion_parameters();
for (int i = 0; i < num_distortion_parameters; ++i) {
distortion_parameters[i] =
- ceres_intrinsics[FIRST_DISTORTION_COEFFICIENT + i];
+ intrinsics_block[FIRST_DISTORTION_COEFFICIENT + i];
}
}
// Get the camera rotations denoted by angle axis, combined with
// translations into a single parameter block.
//
-// Element with index i matches to a rotation+translation for
+// Element with key i corresponds to a rotation+translation for
// camera at image i.
-vector<Vec6> PackCamerasRotationAndTranslation(
- const Tracks &tracks,
+map<int, Vec6> PackCamerasRotationAndTranslation(
const EuclideanReconstruction &reconstruction) {
- vector<Vec6> all_cameras_R_t;
- int max_image = tracks.MaxImage();
-
- all_cameras_R_t.resize(max_image + 1);
-
- for (int i = 0; i <= max_image; i++) {
- const EuclideanCamera *camera = reconstruction.CameraForImage(i);
-
- if (!camera) {
- continue;
- }
-
- ceres::RotationMatrixToAngleAxis(&camera->R(0, 0),
- &all_cameras_R_t[i](0));
- all_cameras_R_t[i].tail<3>() = camera->t;
+ map<int, Vec6> all_cameras_R_t;
+
+ vector<EuclideanCamera> all_cameras = reconstruction.AllCameras();
+ for (const EuclideanCamera& camera : all_cameras) {
+ Vec6 camera_R_t;
+ ceres::RotationMatrixToAngleAxis(&camera.R(0, 0), &camera_R_t(0));
+ camera_R_t.tail<3>() = camera.t;
+ all_cameras_R_t.insert(make_pair(camera.image, camera_R_t));
}
+
return all_cameras_R_t;
}
// Convert camera rotations from angle axis back to rotation matrices.
void UnpackCamerasRotationAndTranslation(
- const Tracks &tracks,
- const vector<Vec6> &all_cameras_R_t,
+ const map<int, Vec6> &all_cameras_R_t,
EuclideanReconstruction *reconstruction) {
- int max_image = tracks.MaxImage();
- for (int i = 0; i <= max_image; i++) {
- EuclideanCamera *camera = reconstruction->CameraForImage(i);
+ for (const map<int, Vec6>::value_type& image_and_camera_R_t :
+ all_cameras_R_t) {
+ const int image = image_and_camera_R_t.first;
+ const Vec6& camera_R_t = image_and_camera_R_t.second;
+ EuclideanCamera *camera = reconstruction->CameraForImage(image);
if (!camera) {
continue;
}
- ceres::AngleAxisToRotationMatrix(&all_cameras_R_t[i](0),
- &camera->R(0, 0));
- camera->t = all_cameras_R_t[i].tail<3>();
+ ceres::AngleAxisToRotationMatrix(&camera_R_t(0), &camera->R(0, 0));
+ camera->t = camera_R_t.tail<3>();
}
}
@@ -299,71 +469,120 @@ void CRSMatrixToEigenMatrix(const ceres::CRSMatrix &crs_matrix,
void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
EuclideanReconstruction *reconstruction,
- vector<Vec6> *all_cameras_R_t,
+ map<int, Vec6> *all_cameras_R_t,
ceres::Problem *problem,
BundleEvaluation *evaluation) {
- int max_track = tracks.MaxTrack();
- // Number of camera rotations equals to number of translation,
- int num_cameras = all_cameras_R_t->size();
- int num_points = 0;
-
- vector<EuclideanPoint*> minimized_points;
- for (int i = 0; i <= max_track; i++) {
- EuclideanPoint *point = reconstruction->PointForTrack(i);
- if (point) {
- // We need to know whether the track is constant zero weight,
- // and it so it wouldn't have parameter block in the problem.
- //
- // Getting all markers for track is not so bac currently since
- // this code is only used by keyframe selection when there are
- // not so much tracks and only 2 frames anyway.
- vector<Marker> markera_of_track = tracks.MarkersForTrack(i);
- for (int j = 0; j < markera_of_track.size(); j++) {
- if (markera_of_track.at(j).weight != 0.0) {
- minimized_points.push_back(point);
- num_points++;
- break;
- }
+ int max_track = tracks.MaxTrack();
+ // The number of camera rotations equals the number of translations.
+ int num_cameras = all_cameras_R_t->size();
+ int num_points = 0;
+
+ vector<EuclideanPoint*> minimized_points;
+ for (int i = 0; i <= max_track; i++) {
+ EuclideanPoint *point = reconstruction->PointForTrack(i);
+ if (point) {
+ // We need to know whether the track is a constant zero weight.
+ // If so, it will not have a parameter block in the problem.
+ //
+ // Usually getting all markers of a track is considered slow, but this
+ // code is only used by the keyframe selection code where there aren't
+ // that many tracks in the storage and there are only 2 frames for each
+ // of the tracks.
+ vector<Marker> markers_of_track = tracks.MarkersForTrack(i);
+ for (int j = 0; j < markers_of_track.size(); j++) {
+ if (markers_of_track.at(j).weight != 0.0) {
+ minimized_points.push_back(point);
+ num_points++;
+ break;
}
}
}
+ }
- LG << "Number of cameras " << num_cameras;
- LG << "Number of points " << num_points;
+ LG << "Number of cameras " << num_cameras;
+ LG << "Number of points " << num_points;
- evaluation->num_cameras = num_cameras;
- evaluation->num_points = num_points;
+ evaluation->num_cameras = num_cameras;
+ evaluation->num_points = num_points;
- if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
- ceres::CRSMatrix evaluated_jacobian;
- ceres::Problem::EvaluateOptions eval_options;
+ if (evaluation->evaluate_jacobian) { // Evaluate jacobian matrix.
+ ceres::CRSMatrix evaluated_jacobian;
+ ceres::Problem::EvaluateOptions eval_options;
- // Cameras goes first in the ordering.
- int max_image = tracks.MaxImage();
- for (int i = 0; i <= max_image; i++) {
- const EuclideanCamera *camera = reconstruction->CameraForImage(i);
- if (camera) {
- double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
+ // Cameras go first in the ordering.
+ int max_image = tracks.MaxImage();
+ for (int i = 0; i <= max_image; i++) {
+ const EuclideanCamera *camera = reconstruction->CameraForImage(i);
+ if (camera) {
+ double *current_camera_R_t = &(*all_cameras_R_t)[i](0);
- // All cameras are variable now.
- problem->SetParameterBlockVariable(current_camera_R_t);
+ // All cameras are variable now.
+ problem->SetParameterBlockVariable(current_camera_R_t);
- eval_options.parameter_blocks.push_back(current_camera_R_t);
- }
+ eval_options.parameter_blocks.push_back(current_camera_R_t);
}
+ }
- // Points goes at the end of ordering,
- for (int i = 0; i < minimized_points.size(); i++) {
- EuclideanPoint *point = minimized_points.at(i);
- eval_options.parameter_blocks.push_back(&point->X(0));
- }
+ // Points go at the end of the ordering,
+ for (int i = 0; i < minimized_points.size(); i++) {
+ EuclideanPoint *point = minimized_points.at(i);
+ eval_options.parameter_blocks.push_back(&point->X(0));
+ }
- problem->Evaluate(eval_options,
- NULL, NULL, NULL,
- &evaluated_jacobian);
+ problem->Evaluate(eval_options,
+ NULL, NULL, NULL,
+ &evaluated_jacobian);
- CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
- }
+ CRSMatrixToEigenMatrix(evaluated_jacobian, &evaluation->jacobian);
+ }
+}
+
+template<typename CostFunction>
+void AddResidualBlockToProblemImpl(const CameraIntrinsics *invariant_intrinsics,
+ double observed_x, double observed_y,
+ double weight,
+ double intrinsics_block[OFFSET_MAX],
+ double *camera_R_t,
+ EuclideanPoint *point,
+ ceres::Problem* problem) {
+ problem->AddResidualBlock(new ceres::AutoDiffCostFunction<
+ CostFunction, 2, OFFSET_MAX, 6, 3>(
+ new CostFunction(
+ invariant_intrinsics,
+ observed_x, observed_y,
+ weight)),
+ NULL,
+ intrinsics_block,
+ camera_R_t,
+ &point->X(0));
+}
+
+void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
+ const Marker &marker,
+ double marker_weight,
+ double intrinsics_block[OFFSET_MAX],
+ double *camera_R_t,
+ EuclideanPoint *point,
+ ceres::Problem* problem) {
+ if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
+ AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
+ invariant_intrinsics,
+ marker.x, marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
+ } else {
+ AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
+ invariant_intrinsics,
+ marker.x, marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
+ }
}
// This is a utility function to only bundle the 3D position of
@@ -375,10 +594,10 @@ void EuclideanBundlerPerformEvaluation(const Tracks &tracks,
//
// At this point we only need to bundle points positions, cameras
// are to be totally still here.
-void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
+void EuclideanBundlePointsOnly(const CameraIntrinsics *invariant_intrinsics,
const vector<Marker> &markers,
- vector<Vec6> &all_cameras_R_t,
- double ceres_intrinsics[OFFSET_MAX],
+ map<int, Vec6> &all_cameras_R_t,
+ double intrinsics_block[OFFSET_MAX],
EuclideanReconstruction *reconstruction) {
ceres::Problem::Options problem_options;
ceres::Problem problem(problem_options);
@@ -392,20 +611,16 @@ void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
}
// Rotation of camera denoted in angle axis followed with
- // camera translaiton.
+ // camera translation.
double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
- problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
- OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
- new OpenCVReprojectionError(
- distortion_model,
- marker.x,
- marker.y,
- 1.0)),
- NULL,
- ceres_intrinsics,
- current_camera_R_t,
- &point->X(0));
+ AddResidualBlockToProblem(invariant_intrinsics,
+ marker,
+ 1.0,
+ intrinsics_block,
+ current_camera_R_t,
+ point,
+ &problem);
problem.SetParameterBlockConstant(current_camera_R_t);
num_residuals++;
@@ -417,7 +632,7 @@ void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
return;
}
- problem.SetParameterBlockConstant(ceres_intrinsics);
+ problem.SetParameterBlockConstant(intrinsics_block);
// Configure the solver.
ceres::Solver::Options options;
@@ -438,7 +653,6 @@ void EuclideanBundlePointsOnly(const DistortionModelType distortion_model,
ceres::Solve(options, &problem, &summary);
LG << "Final report:\n" << summary.FullReport();
-
}
} // namespace
@@ -464,22 +678,22 @@ void EuclideanBundleCommonIntrinsics(
LG << "Original intrinsics: " << *intrinsics;
vector<Marker> markers = tracks.AllMarkers();
- // N-th element denotes whether track N is a constant zero-weigthed track.
+ // N-th element denotes whether track N is a constant zero-weighted track.
vector<bool> zero_weight_tracks_flags(tracks.MaxTrack() + 1, true);
// Residual blocks with 10 parameters are unwieldy with Ceres, so pack the
// intrinsics into a single block and rely on local parameterizations to
// control which intrinsics are allowed to vary.
- double ceres_intrinsics[OFFSET_MAX];
- PackIntrinisicsIntoArray(*intrinsics, ceres_intrinsics);
+ double intrinsics_block[OFFSET_MAX];
+ PackIntrinisicsIntoArray(*intrinsics, intrinsics_block);
// Convert camera rotations to angle axis and merge them with translations
// into a single parameter block for maximal minimization speed.
//
// The block for minimization has the following structure:
// <3 elements for angle-axis> <3 elements for translation>
- vector<Vec6> all_cameras_R_t =
- PackCamerasRotationAndTranslation(tracks, *reconstruction);
+ map<int, Vec6> all_cameras_R_t =
+ PackCamerasRotationAndTranslation(*reconstruction);
// Parameterization used to restrict camera motion for modal solvers.
ceres::SubsetParameterization *constant_translation_parameterization = NULL;
@@ -509,24 +723,20 @@ void EuclideanBundleCommonIntrinsics(
}
// Rotation of camera denoted in angle axis followed with
- // camera translaiton.
+ // camera translation.
double *current_camera_R_t = &all_cameras_R_t[camera->image](0);
// Skip residual blocks for markers which have absolutely no effect
// on the final solution, so Ceres does not go crazy.
if (marker.weight != 0.0) {
- problem.AddResidualBlock(new ceres::AutoDiffCostFunction<
- OpenCVReprojectionError, 2, OFFSET_MAX, 6, 3>(
- new OpenCVReprojectionError(
- intrinsics->GetDistortionModelType(),
- marker.x,
- marker.y,
- marker.weight)),
- NULL,
- ceres_intrinsics,
- current_camera_R_t,
- &point->X(0));
+ AddResidualBlockToProblem(intrinsics,
+ marker,
+ marker.weight,
+ intrinsics_block,
+ current_camera_R_t,
+ point,
+ &problem);
// We lock the first camera to better deal with scene orientation ambiguity.
if (!have_locked_camera) {
@@ -561,7 +771,7 @@ void EuclideanBundleCommonIntrinsics(
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
// No camera intrinsics are being refined,
// set the whole parameter block as constant for best performance.
- problem.SetParameterBlockConstant(ceres_intrinsics);
+ problem.SetParameterBlockConstant(intrinsics_block);
} else {
// Set the camera intrinsics that are not to be bundled as
// constant using some macro trickery.
@@ -586,7 +796,7 @@ void EuclideanBundleCommonIntrinsics(
ceres::SubsetParameterization *subset_parameterization =
new ceres::SubsetParameterization(OFFSET_MAX, constant_intrinsics);
- problem.SetParameterization(ceres_intrinsics, subset_parameterization);
+ problem.SetParameterization(intrinsics_block, subset_parameterization);
}
// Configure the solver.
@@ -610,13 +820,11 @@ void EuclideanBundleCommonIntrinsics(
LG << "Final report:\n" << summary.FullReport();
// Copy rotations and translations back.
- UnpackCamerasRotationAndTranslation(tracks,
- all_cameras_R_t,
- reconstruction);
+ UnpackCamerasRotationAndTranslation(all_cameras_R_t, reconstruction);
// Copy intrinsics back.
if (bundle_intrinsics != BUNDLE_NO_INTRINSICS)
- UnpackIntrinsicsFromArray(ceres_intrinsics, intrinsics);
+ UnpackIntrinsicsFromArray(intrinsics_block, intrinsics);
LG << "Final intrinsics: " << *intrinsics;
@@ -641,10 +849,10 @@ void EuclideanBundleCommonIntrinsics(
if (zero_weight_markers.size()) {
LG << "Refining position of constant zero-weighted tracks";
- EuclideanBundlePointsOnly(intrinsics->GetDistortionModelType(),
+ EuclideanBundlePointsOnly(intrinsics,
zero_weight_markers,
all_cameras_R_t,
- ceres_intrinsics,
+ intrinsics_block,
reconstruction);
}
}
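
To summarize the split introduced above: the polynomial and division models have a closed-form Apply(), so ReprojectionErrorApplyIntrinsics distorts the predicted point and forms the residual in distorted image space. The Nuke model only has a closed-form Invert() (its Apply() runs an iterative solve, which cannot be differentiated with Ceres jets), so ReprojectionErrorInvertIntrinsics undistorts the observation instead and forms the residual in undistorted image space. A double-precision sketch of the Nuke-pipeline residual, using the NukeCameraIntrinsics API from this patch (a schematic, not the functor itself):

  #include "libmv/simple_pipeline/camera_intrinsics.h"

  void NukeResidualSketch(const libmv::NukeCameraIntrinsics &intrinsics,
                          double predicted_xn, double predicted_yn,
                          double observed_x, double observed_y,
                          double *residual_x, double *residual_y) {
    // The predicted point goes straight through the pinhole; no distortion.
    double predicted_x = intrinsics.focal_length_x() * predicted_xn +
                         intrinsics.principal_point_x();
    double predicted_y = intrinsics.focal_length_y() * predicted_yn +
                         intrinsics.principal_point_y();
    // The observation is undistorted with the closed-form Invert(), then
    // mapped back to image space.
    double xn, yn;
    intrinsics.InvertIntrinsics(observed_x, observed_y, &xn, &yn);
    double observed_undistorted_x = intrinsics.focal_length_x() * xn +
                                    intrinsics.principal_point_x();
    double observed_undistorted_y = intrinsics.focal_length_y() * yn +
                                    intrinsics.principal_point_y();
    *residual_x = predicted_x - observed_undistorted_x;
    *residual_y = predicted_y - observed_undistorted_y;
  }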
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
index 5e4e07b3c4c..a95b394ad06 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -131,6 +131,8 @@ void CameraIntrinsics::ResetLookupGrids() {
undistort_.Reset();
}
+// Polynomial model.
+
PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
: CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0);
@@ -193,6 +195,8 @@ void PolynomialCameraIntrinsics::InvertIntrinsics(
normalized_y);
}
+// Division model.
+
DivisionCameraIntrinsics::DivisionCameraIntrinsics()
: CameraIntrinsics() {
SetDistortion(0.0, 0.0);
@@ -241,6 +245,57 @@ void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
normalized_y);
}
+// Nuke model.
+
+NukeCameraIntrinsics::NukeCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetDistortion(0.0, 0.0);
+}
+
+NukeCameraIntrinsics::NukeCameraIntrinsics(
+ const NukeCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+ SetDistortion(from.k1(), from.k2());
+}
+
+void NukeCameraIntrinsics::SetDistortion(double k1, double k2) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ ResetLookupGrids();
+}
+
+void NukeCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyNukeDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ image_width(), image_height(),
+ k1(), k2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
+}
+
+void NukeCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertNukeDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ image_width(), image_height(),
+ k1(), k2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
+}
+
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics) {
if (intrinsics.focal_length_x() == intrinsics.focal_length_y()) {
@@ -281,6 +336,14 @@ std::ostream& operator <<(std::ostream &os,
PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
break;
}
+ case DISTORTION_MODEL_NUKE:
+ {
+ const NukeCameraIntrinsics *nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k2);
+ break;
+ }
default:
LOG(FATAL) << "Unknown distortion model.";
}
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
index 6a3ade81089..782fd56c54c 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -276,7 +276,7 @@ class CameraIntrinsics {
class PolynomialCameraIntrinsics : public CameraIntrinsics {
public:
// These constants define the offsets of the corresponding coefficients
- // in the arameters_ array.
+ // in the parameters_ array.
enum {
OFFSET_K1,
OFFSET_K2,
@@ -342,7 +342,7 @@ class PolynomialCameraIntrinsics : public CameraIntrinsics {
class DivisionCameraIntrinsics : public CameraIntrinsics {
public:
// These constants define the offsets of the corresponding coefficients
- // in the arameters_ array.
+ // in the parameters_ array.
enum {
OFFSET_K1,
OFFSET_K2,
@@ -393,6 +393,60 @@ class DivisionCameraIntrinsics : public CameraIntrinsics {
double parameters_[NUM_PARAMETERS];
};
+class NukeCameraIntrinsics : public CameraIntrinsics {
+ public:
+ // These constants define the offsets of the corresponding coefficients
+ // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+
+ // This defines the size of the array which we need in order
+ // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ NukeCameraIntrinsics();
+ NukeCameraIntrinsics(const NukeCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_NUKE;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; }
+ const double *distortion_parameters() const { return parameters_; }
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+
+ // Set radial distortion coefficients.
+ void SetDistortion(double k1, double k2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ private:
+ // Double-parameter Nuke distortion model.
+ double parameters_[NUM_PARAMETERS];
+};
+
/// A human-readable representation of the camera intrinsic parameters.
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics);
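
A usage sketch for the new class, assuming the inherited CameraIntrinsics setters SetImageSize(), SetFocalLength() and SetPrincipalPoint(); the numbers are illustrative only:

  #include "libmv/simple_pipeline/camera_intrinsics.h"

  void NukeIntrinsicsRoundTrip() {
    libmv::NukeCameraIntrinsics intrinsics;
    intrinsics.SetImageSize(1920, 1080);
    intrinsics.SetFocalLength(960.0, 960.0);
    intrinsics.SetPrincipalPoint(960.0, 540.0);
    intrinsics.SetDistortion(0.1, 0.0);  // k1, k2.

    double image_x, image_y;
    intrinsics.ApplyIntrinsics(0.5, 0.0, &image_x, &image_y);
    double xn, yn;
    intrinsics.InvertIntrinsics(image_x, image_y, &xn, &yn);
    // (xn, yn) should come back to roughly (0.5, 0.0), up to the tolerance
    // of the iterative solve inside ApplyIntrinsics().
  }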
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
index 97abee7ab01..e1b53992dfd 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics_impl.h
@@ -63,7 +63,7 @@ void LookupWarpGrid::Compute(const CameraIntrinsics &intrinsics,
double aspx = (double) w / intrinsics.image_width();
double aspy = (double) h / intrinsics.image_height();
#if defined(_OPENMP)
-# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+# pragma omp parallel for schedule(static) num_threads(threads_) \
if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
@@ -125,7 +125,7 @@ void LookupWarpGrid::Apply(const PixelType *input_buffer,
int channels,
PixelType *output_buffer) {
#if defined(_OPENMP)
-# pragma omp parallel for schedule(dynamic) num_threads(threads_) \
+# pragma omp parallel for schedule(static) num_threads(threads_) \
if (threads_ > 1 && height > 100)
#endif
for (int y = 0; y < height; y++) {
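
Both warp-grid loops do a near-uniform amount of work per row, which is what makes schedule(static) the better fit: rows are divided evenly among the threads once, avoiding the per-chunk dispatch overhead of schedule(dynamic). The pattern, in a standalone sketch:

  void FillGrid(float *grid, int width, int height) {
  #if defined(_OPENMP)
  # pragma omp parallel for schedule(static) if (height > 100)
  #endif
    for (int y = 0; y < height; y++) {
      for (int x = 0; x < width; x++) {
        grid[y * width + x] = 0.0f;  // Uniform per-row cost favors static.
      }
    }
  }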
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.cc b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
index 9b6dca2678a..c069fc6f623 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.cc
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
@@ -194,4 +194,96 @@ void InvertDivisionDistortionModel(const double focal_length_x,
*normalized_y = normalized(1);
}
+struct ApplyNukeIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ ApplyNukeIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double expected_normalized_x,
+ const double expected_normalized_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ image_width_(image_width),
+ image_height_(image_height),
+ k1_(k1), k2_(k2),
+ expected_normalized_x_(expected_normalized_x),
+ expected_normalized_y_(expected_normalized_y) {}
+
+ Vec2 operator()(const Vec2 &image_coordinate) const {
+ double actual_normalized_x, actual_normalized_y;
+
+ InvertNukeDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ image_width_, image_height_,
+ k1_, k2_,
+ image_coordinate(0), image_coordinate(1),
+ &actual_normalized_x, &actual_normalized_y);
+
+ Vec2 fx;
+ fx << (actual_normalized_x - expected_normalized_x_),
+ (actual_normalized_y - expected_normalized_y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ int image_width_;
+ int image_height_;
+ double k1_, k2_;
+ double expected_normalized_x_, expected_normalized_y_;
+};
+
+void ApplyNukeDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double normalized_x,
+ const double normalized_y,
+ double *image_x,
+ double *image_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 image;
+ image(0) = normalized_x * focal_length_x + principal_point_x;
+ image(1) = normalized_y * focal_length_y + principal_point_y;
+
+ // TODO(sergey): Use Ceres minimizer instead.
+ typedef LevenbergMarquardt<ApplyNukeIntrinsicsCostFunction> Solver;
+
+ ApplyNukeIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ image_width,
+ image_height,
+ k1, k2,
+ normalized_x, normalized_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &image);
+
+ // TODO(keir): Better error handling.
+
+ *image_x = image(0);
+ *image_y = image(1);
+}
+
} // namespace libmv
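
In other words, ApplyNukeDistortionModel() has no closed form, so it is computed by numerically inverting the closed-form InvertNukeDistortionModel(): starting from the plain pinhole projection, Levenberg-Marquardt searches for the image-space point whose undistortion matches the requested normalized coordinate, i.e. it minimizes ||Invert(image) - (normalized_x, normalized_y)||^2. A round-trip sketch with illustrative values:

  #include "libmv/simple_pipeline/distortion_models.h"

  void NukeApplyInvertRoundTrip() {
    double image_x, image_y;
    libmv::ApplyNukeDistortionModel(960.0, 960.0,  // Focal length x, y.
                                    960.0, 540.0,  // Principal point.
                                    1920, 1080,    // Image size.
                                    0.1, 0.0,      // k1, k2.
                                    0.5, 0.0,      // Normalized input point.
                                    &image_x, &image_y);
    double xn, yn;
    libmv::InvertNukeDistortionModel(960.0, 960.0, 960.0, 540.0,
                                     1920, 1080, 0.1, 0.0,
                                     image_x, image_y, &xn, &yn);
    // (xn, yn) is approximately (0.5, 0.0), up to the solver tolerance.
  }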
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.h b/intern/libmv/libmv/simple_pipeline/distortion_models.h
index 4f8e2295a0e..6ba351d729d 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.h
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.h
@@ -21,11 +21,14 @@
#ifndef LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
#define LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+#include <algorithm>
+
namespace libmv {
enum DistortionModelType {
DISTORTION_MODEL_POLYNOMIAL,
- DISTORTION_MODEL_DIVISION
+ DISTORTION_MODEL_DIVISION,
+ DISTORTION_MODEL_NUKE,
};
// Invert camera intrinsics on the image point to get normalized coordinates.
@@ -126,6 +129,79 @@ inline void ApplyDivisionDistortionModel(const T &focal_length_x,
*image_y = focal_length_y * yd + principal_point_y;
}
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the radial lens distortion to a point which is in image pixel
+// coordinates to get normalized coordinates.
+//
+// Uses Nuke distortion model.
+template <typename T>
+void InvertNukeDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const int image_width,
+ const int image_height,
+ const T &k1,
+ const T &k2,
+ const T &image_x,
+ const T &image_y,
+ T *normalized_x,
+ T *normalized_y) {
+ // According to the documentation:
+ //
+ // xu = xd / (1 + k0 * rd^2 + k1 * rd^4)
+ // yu = yd / (1 + k0 * rd^2 + k1 * rd^4)
+ //
+ // Legend:
+ // (xd, yd) are the distorted cartesian coordinates,
+ // (rd, phid) are the distorted polar coordinates,
+ // (xu, yu) are the undistorted cartesian coordinates,
+ // (ru, phiu) are the undistorted polar coordinates,
+ // the k-values are the distortion coefficients.
+ //
+ // The coordinate systems are relative to the distortion centre.
+
+ const int max_image_size = std::max(image_width, image_height);
+ const double max_half_image_size = max_image_size * 0.5;
+
+ if (max_half_image_size == 0.0) {
+ *normalized_x = image_x * max_half_image_size / focal_length_x;
+ *normalized_y = image_y * max_half_image_size / focal_length_y;
+ return;
+ }
+
+ const T xd = (image_x - principal_point_x) / max_half_image_size;
+ const T yd = (image_y - principal_point_y) / max_half_image_size;
+
+ T rd2 = xd*xd + yd*yd;
+ T rd4 = rd2 * rd2;
+ T r_coeff = T(1) / (T(1) + k1*rd2 + k2*rd4);
+ T xu = xd * r_coeff;
+ T yu = yd * r_coeff;
+
+ *normalized_x = xu * max_half_image_size / focal_length_x;
+ *normalized_y = yu * max_half_image_size / focal_length_y;
+}
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the radial lens distortion to a point which is in normalized
+// camera coordinates (i.e. the principal point is at (0, 0)) to get image
+// coordinates in pixels. Templated for use with autodifferentiation.
+//
+// Uses Nuke distortion model.
+void ApplyNukeDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double normalized_x,
+ const double normalized_y,
+ double *image_x,
+ double *image_y);
+
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
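
A worked instance of the inversion above: for a 1920x1080 image, max_half_image_size = 960; with focal length 960, principal point (960, 540), k1 = 0.1 and k2 = 0, the image point (1440, 540) gives xd = (1440 - 960) / 960 = 0.5 and yd = 0, hence rd^2 = 0.25, r_coeff = 1 / (1 + 0.1 * 0.25) ≈ 0.9756, xu ≈ 0.4878, and finally normalized_x = 0.4878 * 960 / 960 ≈ 0.4878.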
diff --git a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
index 744436246b0..32cd4285190 100644
--- a/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
+++ b/intern/libmv/libmv/simple_pipeline/initialize_reconstruction.h
@@ -33,7 +33,7 @@ class ProjectiveReconstruction;
Initialize the \link EuclideanReconstruction reconstruction \endlink using
two frames.
- \a markers should contain all \l Marker markers \endlink belonging to
+ \a markers should contain all \link Marker markers \endlink belonging to
tracks visible in both frames. The pose estimation of the camera for
these frames will be inserted into \a *reconstruction.
@@ -54,7 +54,7 @@ bool EuclideanReconstructTwoFrames(const vector<Marker> &markers,
Initialize the \link ProjectiveReconstruction reconstruction \endlink using
two frames.
- \a markers should contain all \l Marker markers \endlink belonging to
+ \a markers should contain all \link Marker markers \endlink belonging to
tracks visible in both frames. An estimate of the projection matrices for
the two frames will get added to the reconstruction.
diff --git a/intern/libmv/libmv/simple_pipeline/intersect.h b/intern/libmv/libmv/simple_pipeline/intersect.h
index 3a0ffa7418b..15d6f998557 100644
--- a/intern/libmv/libmv/simple_pipeline/intersect.h
+++ b/intern/libmv/libmv/simple_pipeline/intersect.h
@@ -35,10 +35,10 @@ namespace libmv {
the frames for which there is a marker for that track must have a
corresponding reconstructed camera in \a *reconstruction.
- \a markers should contain all \l Marker markers \endlink belonging to
+ \a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
- The new \l Point points \endlink will be inserted in \a reconstruction.
+ The new \link Point points \endlink will be inserted in \a reconstruction.
\note This assumes a calibrated reconstruction, e.g. the markers are
already corrected for camera intrinsics and radial distortion.
@@ -57,10 +57,10 @@ bool EuclideanIntersect(const vector<Marker> &markers,
track. Each of the frames for which there is a marker for that track must
have a corresponding reconstructed camera in \a *reconstruction.
- \a markers should contain all \l Marker markers \endlink belonging to
+ \a markers should contain all \link Marker markers \endlink belonging to
tracks visible in all frames.
\a reconstruction should contain the cameras for all frames.
- The new \l Point points \endlink will be inserted in \a reconstruction.
+ The new \link Point points \endlink will be inserted in \a reconstruction.
\note This assumes that radial distortion is already corrected for, but
does not assume that e.g. focal length and principal point are
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.cc b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
index 65e5dd27d5d..851eedb5bb1 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction.cc
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.cc
@@ -27,14 +27,14 @@ namespace libmv {
EuclideanReconstruction::EuclideanReconstruction() {}
EuclideanReconstruction::EuclideanReconstruction(
const EuclideanReconstruction &other) {
- cameras_ = other.cameras_;
+ image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
}
EuclideanReconstruction &EuclideanReconstruction::operator=(
const EuclideanReconstruction &other) {
if (&other != this) {
- cameras_ = other.cameras_;
+ image_to_cameras_map_ = other.image_to_cameras_map_;
points_ = other.points_;
}
return *this;
@@ -44,12 +44,13 @@ void EuclideanReconstruction::InsertCamera(int image,
const Mat3 &R,
const Vec3 &t) {
LG << "InsertCamera " << image << ":\nR:\n"<< R << "\nt:\n" << t;
- if (image >= cameras_.size()) {
- cameras_.resize(image + 1);
- }
- cameras_[image].image = image;
- cameras_[image].R = R;
- cameras_[image].t = t;
+
+ EuclideanCamera camera;
+ camera.image = image;
+ camera.R = R;
+ camera.t = t;
+
+ image_to_cameras_map_.insert(make_pair(image, camera));
}
void EuclideanReconstruction::InsertPoint(int track, const Vec3 &X) {
@@ -69,22 +70,18 @@ EuclideanCamera *EuclideanReconstruction::CameraForImage(int image) {
const EuclideanCamera *EuclideanReconstruction::CameraForImage(
int image) const {
- if (image < 0 || image >= cameras_.size()) {
+ ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
+ if (it == image_to_cameras_map_.end()) {
return NULL;
}
- const EuclideanCamera *camera = &cameras_[image];
- if (camera->image == -1) {
- return NULL;
- }
- return camera;
+ return &it->second;
}
vector<EuclideanCamera> EuclideanReconstruction::AllCameras() const {
vector<EuclideanCamera> cameras;
- for (int i = 0; i < cameras_.size(); ++i) {
- if (cameras_[i].image != -1) {
- cameras.push_back(cameras_[i]);
- }
+ for (const ImageToCameraMap::value_type& image_and_camera :
+ image_to_cameras_map_) {
+ cameras.push_back(image_and_camera.second);
}
return cameras;
}
@@ -115,14 +112,14 @@ vector<EuclideanPoint> EuclideanReconstruction::AllPoints() const {
return points;
}
-void ProjectiveReconstruction::InsertCamera(int image,
- const Mat34 &P) {
+void ProjectiveReconstruction::InsertCamera(int image, const Mat34 &P) {
LG << "InsertCamera " << image << ":\nP:\n"<< P;
- if (image >= cameras_.size()) {
- cameras_.resize(image + 1);
- }
- cameras_[image].image = image;
- cameras_[image].P = P;
+
+ ProjectiveCamera camera;
+ camera.image = image;
+ camera.P = P;
+
+ image_to_cameras_map_.insert(make_pair(image, camera));
}
void ProjectiveReconstruction::InsertPoint(int track, const Vec4 &X) {
@@ -142,22 +139,18 @@ ProjectiveCamera *ProjectiveReconstruction::CameraForImage(int image) {
const ProjectiveCamera *ProjectiveReconstruction::CameraForImage(
int image) const {
- if (image < 0 || image >= cameras_.size()) {
- return NULL;
+ ImageToCameraMap::const_iterator it = image_to_cameras_map_.find(image);
+ if (it == image_to_cameras_map_.end()) {
+ return NULL;
}
- const ProjectiveCamera *camera = &cameras_[image];
- if (camera->image == -1) {
- return NULL;
- }
- return camera;
+ return &it->second;
}
vector<ProjectiveCamera> ProjectiveReconstruction::AllCameras() const {
vector<ProjectiveCamera> cameras;
- for (int i = 0; i < cameras_.size(); ++i) {
- if (cameras_[i].image != -1) {
- cameras.push_back(cameras_[i]);
- }
+ for (const ImageToCameraMap::value_type& image_and_camera :
+ image_to_cameras_map_) {
+ cameras.push_back(image_and_camera.second);
}
return cameras;
}
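
The point of the map-backed storage, in a sketch with hypothetical frame numbers: cameras can now live at arbitrary, sparse frame numbers without the old resize-to-maximum-index cost, and lookups stay keyed by frame.

  #include "libmv/simple_pipeline/reconstruction.h"

  void SparseFrameSketch() {
    libmv::EuclideanReconstruction reconstruction;
    const libmv::Mat3 R = libmv::Mat3::Identity();
    const libmv::Vec3 t = libmv::Vec3::Zero();
    // The old vector storage would have resized to 100001 slots here;
    // the map stores exactly two cameras.
    reconstruction.InsertCamera(7, R, t);
    reconstruction.InsertCamera(100000, R, t);
    const libmv::EuclideanCamera *camera = reconstruction.CameraForImage(7);
    (void)camera;  // NULL is returned for frames with no camera.
  }

One nuance of the new code path: map::insert keeps the existing element for a duplicate key, so re-inserting a camera for a frame that already has one leaves the first pose in place, whereas the old vector code overwrote it.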
diff --git a/intern/libmv/libmv/simple_pipeline/reconstruction.h b/intern/libmv/libmv/simple_pipeline/reconstruction.h
index 947a0636476..544aeac042e 100644
--- a/intern/libmv/libmv/simple_pipeline/reconstruction.h
+++ b/intern/libmv/libmv/simple_pipeline/reconstruction.h
@@ -22,6 +22,7 @@
#define LIBMV_SIMPLE_PIPELINE_RECONSTRUCTION_H_
#include "libmv/base/vector.h"
+#include "libmv/base/map.h"
#include "libmv/numeric/numeric.h"
namespace libmv {
@@ -29,7 +30,7 @@ namespace libmv {
/*!
A EuclideanCamera is the location and rotation of the camera viewing \a image.
- \a image identify which image from \l Tracks this camera represents.
+ \a image identifies which image from \link Tracks this camera represents.
\a R is a 3x3 matrix representing the rotation of the camera.
\a t is a translation vector representing its position.
@@ -47,7 +48,7 @@ struct EuclideanCamera {
/*!
A Point is the 3D location of a track.
- \a track identify which track from \l Tracks this point corresponds to.
+ \a track identifies which track from \link Tracks this point corresponds to.
\a X represents the 3D position of the track.
\sa Reconstruction
@@ -89,7 +90,7 @@ class EuclideanReconstruction {
\a image is the key used to retrieve the cameras with the other methods
in this class.
- \note You should use the same \a image identifier as in \l Tracks.
+ \note You should use the same \a image identifier as in \link Tracks.
*/
void InsertCamera(int image, const Mat3 &R, const Vec3 &t);
@@ -101,7 +102,7 @@ class EuclideanReconstruction {
\a track is the key used to retrieve the points with the
other methods in this class.
- \note You should use the same \a track identifier as in \l Tracks.
+ \note You should use the same \a track identifier as in \link Tracks.
*/
void InsertPoint(int track, const Vec3 &X);
@@ -120,14 +121,18 @@ class EuclideanReconstruction {
vector<EuclideanPoint> AllPoints() const;
private:
- vector<EuclideanCamera> cameras_;
+ // Indexed by frame number.
+ typedef map<int, EuclideanCamera> ImageToCameraMap;
+ ImageToCameraMap image_to_cameras_map_;
+
+ // Indexed by track.
vector<EuclideanPoint> points_;
};
/*!
A ProjectiveCamera is the projection matrix for the camera of \a image.
+ \a image identifies which image from \link Tracks this camera represents.
+ \a image identify which image from \link Tracks this camera represents.
\a P is the 3x4 projection matrix.
\sa ProjectiveReconstruction
@@ -143,7 +148,7 @@ struct ProjectiveCamera {
/*!
A Point is the 3D location of a track.
- \a track identifies which track from \l Tracks this point corresponds to.
+ \a track identifies which track from \link Tracks this point corresponds to.
\a X is the homogeneous 3D position of the track.
\sa Reconstruction
@@ -177,7 +182,7 @@ class ProjectiveReconstruction {
\a image is the key used to retrieve the cameras with the other methods
in this class.
- \note You should use the same \a image identifier as in \l Tracks.
+ \note You should use the same \a image identifier as in \link Tracks.
*/
void InsertCamera(int image, const Mat34 &P);
@@ -189,7 +194,7 @@ class ProjectiveReconstruction {
\a track is the key used to retrieve the points with the
other methods in this class.
- \note You should use the same \a track identifier as in \l Tracks.
+ \note You should use the same \a track identifier as in \link Tracks.
*/
void InsertPoint(int track, const Vec4 &X);
@@ -208,7 +213,11 @@ class ProjectiveReconstruction {
vector<ProjectivePoint> AllPoints() const;
private:
- vector<ProjectiveCamera> cameras_;
+ // Indexed by frame number.
+ typedef map<int, ProjectiveCamera> ImageToCameraMap;
+ ImageToCameraMap image_to_cameras_map_;
+
+ // Indexed by track.
vector<ProjectivePoint> points_;
};
diff --git a/intern/libmv/libmv/simple_pipeline/resect.h b/intern/libmv/libmv/simple_pipeline/resect.h
index 7ca3237437e..f13d2e2d425 100644
--- a/intern/libmv/libmv/simple_pipeline/resect.h
+++ b/intern/libmv/libmv/simple_pipeline/resect.h
@@ -35,7 +35,7 @@ namespace libmv {
reconstruction object, and solves for the pose and orientation of the
camera for that frame.
- \a markers should contain \l Marker markers \endlink belonging to tracks
+ \a markers should contain \link Marker markers \endlink belonging to tracks
visible in the one frame to be resectioned. Each of the tracks associated
with the markers must have a corresponding reconstructed 3D position in the
\a *reconstruction object.
@@ -62,7 +62,7 @@ bool EuclideanResect(const vector<Marker> &markers,
frame in the reconstruction object, and solves for the projective matrix of
the camera for that frame.
- \a markers should contain \l Marker markers \endlink belonging to tracks
+ \a markers should contain \link Marker markers \endlink belonging to tracks
visible in the one frame to be resectioned. Each of the tracks associated
with the markers must have a corresponding reconstructed homogeneous 3D
position in the \a *reconstruction object.
diff --git a/intern/libmv/libmv/simple_pipeline/tracks.h b/intern/libmv/libmv/simple_pipeline/tracks.h
index a54a43659b7..752d2790a1c 100644
--- a/intern/libmv/libmv/simple_pipeline/tracks.h
+++ b/intern/libmv/libmv/simple_pipeline/tracks.h
@@ -36,7 +36,7 @@ namespace libmv {
\a weight is used by bundle adjustment and means how much the
track affects the final solution.
- \note Markers are typically aggregated with the help of the \l Tracks class.
+ \note Markers are typically aggregated with the help of the \link Tracks class.
\sa Tracks
*/
@@ -56,7 +56,7 @@ struct Marker {
images, which must get created before any 3D reconstruction can take place.
The container has several fast lookups for queries typically needed for
- structure from motion algorithms, such as \l MarkersForTracksInBothImages().
+ structure from motion algorithms, such as \link MarkersForTracksInBothImages().
\sa Marker
*/
@@ -81,7 +81,7 @@ class Tracks {
\a weight is used by bundle adjustment and means how much the
track affects the final solution.
- \note To get an identifier for a new track, use \l MaxTrack() + 1.
+ \note To get an identifier for a new track, use \link MaxTrack() + 1.
*/
// TODO(sergey): Consider using InsertWeightedMarker instead of using
// stupid default value?