git.blender.org/blender.git

-rw-r--r--  intern/libmv/intern/camera_intrinsics.cc                 |  53
-rw-r--r--  intern/libmv/intern/camera_intrinsics.h                  |   4
-rw-r--r--  intern/libmv/libmv/simple_pipeline/bundle.cc             | 181
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc  |  63
-rw-r--r--  intern/libmv/libmv/simple_pipeline/camera_intrinsics.h   |  58
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.cc  |  92
-rw-r--r--  intern/libmv/libmv/simple_pipeline/distortion_models.h   |  78
-rw-r--r--  release/scripts/startup/bl_ui/space_clip.py              |   4
-rw-r--r--  source/blender/blenkernel/intern/movieclip.c             |   5
-rw-r--r--  source/blender/blenkernel/intern/tracking_util.c         |  12
-rw-r--r--  source/blender/makesdna/DNA_tracking_types.h             |   4
-rw-r--r--  source/blender/makesrna/intern/rna_tracking.c            |  14
12 files changed, 555 insertions(+), 13 deletions(-)
diff --git a/intern/libmv/intern/camera_intrinsics.cc b/intern/libmv/intern/camera_intrinsics.cc
index 89e3d0d1178..554c4350b0a 100644
--- a/intern/libmv/intern/camera_intrinsics.cc
+++ b/intern/libmv/intern/camera_intrinsics.cc
@@ -24,6 +24,7 @@
using libmv::CameraIntrinsics;
using libmv::DivisionCameraIntrinsics;
using libmv::PolynomialCameraIntrinsics;
+using libmv::NukeCameraIntrinsics;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
const libmv_CameraIntrinsicsOptions* libmv_camera_intrinsics_options) {
@@ -55,6 +56,14 @@ libmv_CameraIntrinsics *libmv_cameraIntrinsicsCopy(
*division_intrinsics);
break;
}
+ case libmv::DISTORTION_MODEL_NUKE:
+ {
+ const NukeCameraIntrinsics *nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics*>(orig_intrinsics);
+ new_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics,
+ *nuke_intrinsics);
+ break;
+ }
default:
assert(!"Unknown distortion model");
}
@@ -136,6 +145,25 @@ void libmv_cameraIntrinsicsUpdate(
break;
}
+ case LIBMV_DISTORTION_MODEL_NUKE:
+ {
+ assert(camera_intrinsics->GetDistortionModelType() ==
+ libmv::DISTORTION_MODEL_NUKE);
+
+ NukeCameraIntrinsics *nuke_intrinsics =
+ (NukeCameraIntrinsics *) camera_intrinsics;
+
+ double k1 = libmv_camera_intrinsics_options->nuke_k1;
+ double k2 = libmv_camera_intrinsics_options->nuke_k2;
+
+ if (nuke_intrinsics->k1() != k1 ||
+ nuke_intrinsics->k2() != k2) {
+ nuke_intrinsics->SetDistortion(k1, k2);
+ }
+
+ break;
+ }
+
default:
assert(!"Unknown distortion model");
}
@@ -189,6 +217,17 @@ void libmv_cameraIntrinsicsExtractOptions(
break;
}
+ case libmv::DISTORTION_MODEL_NUKE:
+ {
+ const NukeCameraIntrinsics *nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics *>(camera_intrinsics);
+ camera_intrinsics_options->distortion_model =
+ LIBMV_DISTORTION_MODEL_NUKE;
+ camera_intrinsics_options->nuke_k1 = nuke_intrinsics->k1();
+ camera_intrinsics_options->nuke_k2 = nuke_intrinsics->k2();
+ break;
+ }
+
default:
assert(!"Unknown distortion model");
}
@@ -316,6 +355,17 @@ static void libmv_cameraIntrinsicsFillFromOptions(
break;
}
+ case LIBMV_DISTORTION_MODEL_NUKE:
+ {
+ NukeCameraIntrinsics *nuke_intrinsics =
+ static_cast<NukeCameraIntrinsics*>(camera_intrinsics);
+
+ nuke_intrinsics->SetDistortion(
+ camera_intrinsics_options->nuke_k1,
+ camera_intrinsics_options->nuke_k2);
+ break;
+ }
+
default:
assert(!"Unknown distortion model");
}
@@ -331,6 +381,9 @@ CameraIntrinsics* libmv_cameraIntrinsicsCreateFromOptions(
case LIBMV_DISTORTION_MODEL_DIVISION:
camera_intrinsics = LIBMV_OBJECT_NEW(DivisionCameraIntrinsics);
break;
+ case LIBMV_DISTORTION_MODEL_NUKE:
+ camera_intrinsics = LIBMV_OBJECT_NEW(NukeCameraIntrinsics);
+ break;
default:
assert(!"Unknown distortion model");
}
diff --git a/intern/libmv/intern/camera_intrinsics.h b/intern/libmv/intern/camera_intrinsics.h
index 40a5826a9c4..b3d259893bd 100644
--- a/intern/libmv/intern/camera_intrinsics.h
+++ b/intern/libmv/intern/camera_intrinsics.h
@@ -29,6 +29,7 @@ typedef struct libmv_CameraIntrinsics libmv_CameraIntrinsics;
enum {
LIBMV_DISTORTION_MODEL_POLYNOMIAL = 0,
LIBMV_DISTORTION_MODEL_DIVISION = 1,
+ LIBMV_DISTORTION_MODEL_NUKE = 2,
};
typedef struct libmv_CameraIntrinsicsOptions {
@@ -45,6 +46,9 @@ typedef struct libmv_CameraIntrinsicsOptions {
// Division distortion model.
double division_k1, division_k2;
+
+ // Nuke distortion model.
+ double nuke_k1, nuke_k2;
} libmv_CameraIntrinsicsOptions;
libmv_CameraIntrinsics *libmv_cameraIntrinsicsNew(
diff --git a/intern/libmv/libmv/simple_pipeline/bundle.cc b/intern/libmv/libmv/simple_pipeline/bundle.cc
index 2976dd5053f..a70fdbc9888 100644
--- a/intern/libmv/libmv/simple_pipeline/bundle.cc
+++ b/intern/libmv/libmv/simple_pipeline/bundle.cc
@@ -66,6 +66,12 @@ enum {
namespace {
+bool NeedUseInvertIntrinsicsPipeline(const CameraIntrinsics *intrinsics) {
+ const DistortionModelType distortion_model =
+ intrinsics->GetDistortionModelType();
+ return (distortion_model == DISTORTION_MODEL_NUKE);
+}
+
// Apply distortion model (distort the input) on the input point in the
// normalized space to get distorted coordinate in the image space.
//
@@ -89,8 +95,6 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
- // Apply distortion to the normalized points to get (xd, yd).
- //
// TODO(keir): Do early bailouts for zero distortion; these are expensive
// jet operations.
switch (invariant_intrinsics->GetDistortionModelType()) {
@@ -127,11 +131,82 @@ void ApplyDistortionModelUsingIntrinsicsBlock(
distorted_x, distorted_y);
return;
}
+
+ case DISTORTION_MODEL_NUKE:
+ {
+ LOG(FATAL) << "Unsupported distortion model.";
+ return;
+ }
+ }
+
+ LOG(FATAL) << "Unknown distortion model.";
+}
+
+// Invert distortion model (undistort the input) on the input point in the
+// image space to get undistorted coordinate in the normalized space.
+//
+// Using intrinsics values from the parameter block, which makes this function
+// suitable for use from a cost functor.
+//
+// Only use for distortion models whose Invert() function is defined
+// analytically.
+//
+// The invariant_intrinsics are used to access intrinsics which are never
+// packed into parameter block: for example, distortion model type and image
+// dimension.
+template<typename T>
+void InvertDistortionModelUsingIntrinsicsBlock(
+ const CameraIntrinsics *invariant_intrinsics,
+ const T* const intrinsics_block,
+ const T& image_x, const T& image_y,
+ T* normalized_x, T* normalized_y) {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics_block[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
+
+ // TODO(keir): Do early bailouts for zero distortion; these are expensive
+ // jet operations.
+ switch (invariant_intrinsics->GetDistortionModelType()) {
+ case DISTORTION_MODEL_POLYNOMIAL:
+ case DISTORTION_MODEL_DIVISION:
+ LOG(FATAL) << "Unsupported distortion model.";
+ return;
+
+ case DISTORTION_MODEL_NUKE:
+ {
+ const T& k1 = intrinsics_block[OFFSET_K1];
+ const T& k2 = intrinsics_block[OFFSET_K2];
+
+ InvertNukeDistortionModel(focal_length,
+ focal_length,
+ principal_point_x,
+ principal_point_y,
+ invariant_intrinsics->image_width(),
+ invariant_intrinsics->image_height(),
+ k1, k2,
+ image_x, image_y,
+ normalized_x, normalized_y);
+ return;
+ }
}
LOG(FATAL) << "Unknown distortion model.";
}
+template<typename T>
+void NormalizedToImageSpace(const T* const intrinsics_block,
+ const T& normalized_x, const T& normalized_y,
+ T* image_x, T* image_y) {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics_block[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics_block[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics_block[OFFSET_PRINCIPAL_POINT_Y];
+
+ *image_x = normalized_x * focal_length + principal_point_x;
+ *image_y = normalized_y * focal_length + principal_point_y;
+}
+
// Cost functor which computes reprojection error of 3D point X on camera
// defined by angle-axis rotation and it's translation (which are in the same
// block due to optimization reasons).
@@ -191,6 +266,81 @@ struct ReprojectionErrorApplyIntrinsics {
const double weight_;
};
+// Cost functor which computes reprojection error of 3D point X on camera
+// defined by angle-axis rotation and its translation (which are in the same
+// block due to optimization reasons).
+//
+// This functor can only be used for distortion models which have analytically
+// defined Invert() function.
+struct ReprojectionErrorInvertIntrinsics {
+ ReprojectionErrorInvertIntrinsics(
+ const CameraIntrinsics *invariant_intrinsics,
+ const double observed_distorted_x,
+ const double observed_distorted_y,
+ const double weight)
+ : invariant_intrinsics_(invariant_intrinsics),
+ observed_distorted_x_(observed_distorted_x),
+ observed_distorted_y_(observed_distorted_y),
+ weight_(weight) {}
+
+ template <typename T>
+ bool operator()(const T* const intrinsics,
+ const T* const R_t, // Rotation denoted by angle axis
+ // followed with translation
+ const T* const X, // Point coordinates 3x1.
+ T* residuals) const {
+ // Unpack the intrinsics.
+ const T& focal_length = intrinsics[OFFSET_FOCAL_LENGTH];
+ const T& principal_point_x = intrinsics[OFFSET_PRINCIPAL_POINT_X];
+ const T& principal_point_y = intrinsics[OFFSET_PRINCIPAL_POINT_Y];
+
+ // Compute projective coordinates: x = RX + t.
+ T x[3];
+
+ ceres::AngleAxisRotatePoint(R_t, X, x);
+ x[0] += R_t[3];
+ x[1] += R_t[4];
+ x[2] += R_t[5];
+
+ // Prevent points from going behind the camera.
+ if (x[2] < T(0)) {
+ return false;
+ }
+
+ // Compute normalized coordinates: x /= x[2].
+ T xn = x[0] / x[2];
+ T yn = x[1] / x[2];
+
+ // Compute image space coordinate from normalized.
+ T predicted_x = focal_length * xn + principal_point_x;
+ T predicted_y = focal_length * yn + principal_point_y;
+
+ T observed_undistorted_normalized_x, observed_undistorted_normalized_y;
+ InvertDistortionModelUsingIntrinsicsBlock(
+ invariant_intrinsics_,
+ intrinsics,
+ T(observed_distorted_x_), T(observed_distorted_y_),
+ &observed_undistorted_normalized_x, &observed_undistorted_normalized_y);
+
+ T observed_undistorted_image_x, observed_undistorted_image_y;
+ NormalizedToImageSpace(
+ intrinsics,
+ observed_undistorted_normalized_x, observed_undistorted_normalized_y,
+ &observed_undistorted_image_x, &observed_undistorted_image_y);
+
+ // The error is the difference between the predicted and observed position.
+ residuals[0] = (predicted_x - observed_undistorted_image_x) * weight_;
+ residuals[1] = (predicted_y - observed_undistorted_image_y) * weight_;
+
+ return true;
+ }
+
+ const CameraIntrinsics *invariant_intrinsics_;
+ const double observed_distorted_x_;
+ const double observed_distorted_y_;
+ const double weight_;
+};
+
// Print a message to the log which camera intrinsics are gonna to be optimized.
void BundleIntrinsicsLogMessage(const int bundle_intrinsics) {
if (bundle_intrinsics == BUNDLE_NO_INTRINSICS) {
@@ -421,14 +571,25 @@ void AddResidualBlockToProblem(const CameraIntrinsics *invariant_intrinsics,
double *camera_R_t,
EuclideanPoint *point,
ceres::Problem* problem) {
- AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
- invariant_intrinsics,
- marker.x, marker.y,
- marker_weight,
- intrinsics_block,
- camera_R_t,
- point,
- problem);
+ if (NeedUseInvertIntrinsicsPipeline(invariant_intrinsics)) {
+ AddResidualBlockToProblemImpl<ReprojectionErrorInvertIntrinsics>(
+ invariant_intrinsics,
+ marker.x, marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
+ } else {
+ AddResidualBlockToProblemImpl<ReprojectionErrorApplyIntrinsics>(
+ invariant_intrinsics,
+ marker.x, marker.y,
+ marker_weight,
+ intrinsics_block,
+ camera_R_t,
+ point,
+ problem);
+ }
}
// This is an utility function to only bundle 3D position of
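
For the Nuke model the bundle step no longer applies forward distortion to the predicted point; it undistorts the observed marker analytically and compares the two points in undistorted image space. A minimal Python sketch of that residual follows; the names, numbers, and the zero-distortion undistort() stand-in are illustrative, the real computation runs inside the ReprojectionErrorInvertIntrinsics functor with Ceres jets.

# Sketch of the invert-intrinsics residual used for the Nuke model.

def undistort(image_x, image_y, focal, ppx, ppy):
    """Stand-in for InvertDistortionModelUsingIntrinsicsBlock.
    Assumes zero distortion, so it only removes the camera matrix."""
    return (image_x - ppx) / focal, (image_y - ppy) / focal

def invert_intrinsics_residual(point_cam, observed_xy, focal, ppx, ppy, weight=1.0):
    """point_cam is the 3D point already transformed into camera space (R*X + t)."""
    xn, yn = point_cam[0] / point_cam[2], point_cam[1] / point_cam[2]
    predicted_x = focal * xn + ppx        # projected point, undistorted image space
    predicted_y = focal * yn + ppy

    # Undistort the observed marker, then bring it back into image space,
    # so both terms of the difference live in the same (undistorted) space.
    un_x, un_y = undistort(observed_xy[0], observed_xy[1], focal, ppx, ppy)
    observed_x = focal * un_x + ppx
    observed_y = focal * un_y + ppy

    return ((predicted_x - observed_x) * weight,
            (predicted_y - observed_y) * weight)

print(invert_intrinsics_residual((0.1, -0.05, 2.0), (1010.0, 515.0),
                                 focal=1000.0, ppx=960.0, ppy=540.0))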
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
index 5e4e07b3c4c..a95b394ad06 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.cc
@@ -131,6 +131,8 @@ void CameraIntrinsics::ResetLookupGrids() {
undistort_.Reset();
}
+// Polynomial model.
+
PolynomialCameraIntrinsics::PolynomialCameraIntrinsics()
: CameraIntrinsics() {
SetRadialDistortion(0.0, 0.0, 0.0);
@@ -193,6 +195,8 @@ void PolynomialCameraIntrinsics::InvertIntrinsics(
normalized_y);
}
+// Division model.
+
DivisionCameraIntrinsics::DivisionCameraIntrinsics()
: CameraIntrinsics() {
SetDistortion(0.0, 0.0);
@@ -241,6 +245,57 @@ void DivisionCameraIntrinsics::InvertIntrinsics(double image_x,
normalized_y);
}
+// Nuke model.
+
+NukeCameraIntrinsics::NukeCameraIntrinsics()
+ : CameraIntrinsics() {
+ SetDistortion(0.0, 0.0);
+}
+
+NukeCameraIntrinsics::NukeCameraIntrinsics(
+ const NukeCameraIntrinsics &from)
+ : CameraIntrinsics(from) {
+ SetDistortion(from.k1(), from.k2());
+}
+
+void NukeCameraIntrinsics::SetDistortion(double k1, double k2) {
+ parameters_[OFFSET_K1] = k1;
+ parameters_[OFFSET_K2] = k2;
+ ResetLookupGrids();
+}
+
+void NukeCameraIntrinsics::ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const {
+ ApplyNukeDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ image_width(), image_height(),
+ k1(), k2(),
+ normalized_x,
+ normalized_y,
+ image_x,
+ image_y);
+}
+
+void NukeCameraIntrinsics::InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const {
+ InvertNukeDistortionModel(focal_length_x(),
+ focal_length_y(),
+ principal_point_x(),
+ principal_point_y(),
+ image_width(), image_height(),
+ k1(), k2(),
+ image_x,
+ image_y,
+ normalized_x,
+ normalized_y);
+}
+
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics) {
if (intrinsics.focal_length_x() == intrinsics.focal_length_x()) {
@@ -281,6 +336,14 @@ std::ostream& operator <<(std::ostream &os,
PRINT_NONZERO_COEFFICIENT(division_intrinsics, k2);
break;
}
+ case DISTORTION_MODEL_NUKE:
+ {
+ const NukeCameraIntrinsics *nuke_intrinsics =
+ static_cast<const NukeCameraIntrinsics *>(&intrinsics);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k1);
+ PRINT_NONZERO_COEFFICIENT(nuke_intrinsics, k2);
+ break;
+ }
default:
LOG(FATAL) << "Unknown distortion model.";
}
diff --git a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
index 6a3ade81089..782fd56c54c 100644
--- a/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
+++ b/intern/libmv/libmv/simple_pipeline/camera_intrinsics.h
@@ -276,7 +276,7 @@ class CameraIntrinsics {
class PolynomialCameraIntrinsics : public CameraIntrinsics {
public:
// This constants defines an offset of corresponding coefficients
- // in the arameters_ array.
+ // in the parameters_ array.
enum {
OFFSET_K1,
OFFSET_K2,
@@ -342,7 +342,7 @@ class PolynomialCameraIntrinsics : public CameraIntrinsics {
class DivisionCameraIntrinsics : public CameraIntrinsics {
public:
// This constants defines an offset of corresponding coefficients
- // in the arameters_ array.
+ // in the parameters_ array.
enum {
OFFSET_K1,
OFFSET_K2,
@@ -393,6 +393,60 @@ class DivisionCameraIntrinsics : public CameraIntrinsics {
double parameters_[NUM_PARAMETERS];
};
+class NukeCameraIntrinsics : public CameraIntrinsics {
+ public:
+ // These constants define the offsets of the corresponding coefficients
+ // in the parameters_ array.
+ enum {
+ OFFSET_K1,
+ OFFSET_K2,
+
+ // This defines the size of array which we need to have in order
+ // to store all the coefficients.
+ NUM_PARAMETERS,
+ };
+
+ NukeCameraIntrinsics();
+ NukeCameraIntrinsics(const NukeCameraIntrinsics &from);
+
+ DistortionModelType GetDistortionModelType() const {
+ return DISTORTION_MODEL_NUKE;
+ }
+
+ int num_distortion_parameters() const { return NUM_PARAMETERS; }
+ double *distortion_parameters() { return parameters_; };
+ const double *distortion_parameters() const { return parameters_; };
+
+ double k1() const { return parameters_[OFFSET_K1]; }
+ double k2() const { return parameters_[OFFSET_K2]; }
+
+ // Set radial distortion coefficients.
+ void SetDistortion(double k1, double k2);
+
+ // Apply camera intrinsics to the normalized point to get image coordinates.
+ //
+ // This applies the lens distortion to a point which is in normalized
+ // camera coordinates (i.e. the principal point is at (0, 0)) to get image
+ // coordinates in pixels.
+ void ApplyIntrinsics(double normalized_x,
+ double normalized_y,
+ double *image_x,
+ double *image_y) const;
+
+ // Invert camera intrinsics on the image point to get normalized coordinates.
+ //
+ // This reverses the effect of lens distortion on a point which is in image
+ // coordinates to get normalized camera coordinates.
+ void InvertIntrinsics(double image_x,
+ double image_y,
+ double *normalized_x,
+ double *normalized_y) const;
+
+ private:
+ // Double-parameter Nuke distortion model.
+ double parameters_[NUM_PARAMETERS];
+};
+
/// A human-readable representation of the camera intrinsic parameters.
std::ostream& operator <<(std::ostream &os,
const CameraIntrinsics &intrinsics);
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.cc b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
index 9b6dca2678a..c069fc6f623 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.cc
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.cc
@@ -194,4 +194,96 @@ void InvertDivisionDistortionModel(const double focal_length_x,
*normalized_y = normalized(1);
}
+struct ApplyNukeIntrinsicsCostFunction {
+ public:
+ typedef Vec2 FMatrixType;
+ typedef Vec2 XMatrixType;
+
+ ApplyNukeIntrinsicsCostFunction(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double expected_normalized_x,
+ const double expected_normalized_y)
+ : focal_length_x_(focal_length_x),
+ focal_length_y_(focal_length_y),
+ principal_point_x_(principal_point_x),
+ principal_point_y_(principal_point_y),
+ image_width_(image_width),
+ image_height_(image_height),
+ k1_(k1), k2_(k2),
+ expected_normalized_x_(expected_normalized_x),
+ expected_normalized_y_(expected_normalized_y) {}
+
+ Vec2 operator()(const Vec2 &image_coordinate) const {
+ double actual_normalized_x, actual_normalized_y;
+
+ InvertNukeDistortionModel(focal_length_x_,
+ focal_length_y_,
+ principal_point_x_,
+ principal_point_y_,
+ image_width_, image_height_,
+ k1_, k2_,
+ image_coordinate(0), image_coordinate(1),
+ &actual_normalized_x, &actual_normalized_y);
+
+ Vec2 fx;
+ fx << (actual_normalized_x - expected_normalized_x_),
+ (actual_normalized_y - expected_normalized_y_);
+ return fx;
+ }
+ double focal_length_x_;
+ double focal_length_y_;
+ double principal_point_x_;
+ double principal_point_y_;
+ int image_width_;
+ int image_height_;
+ double k1_, k2_;
+ double expected_normalized_x_, expected_normalized_y_;
+};
+
+void ApplyNukeDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double normalized_x,
+ const double normalized_y,
+ double *image_x,
+ double *image_y) {
+ // Compute the initial guess. For a camera with no distortion, this will also
+ // be the final answer; the LM iteration will terminate immediately.
+ Vec2 image;
+ image(0) = normalized_x * focal_length_x + principal_point_x;
+ image(1) = normalized_y * focal_length_y + principal_point_y;
+
+ // TODO(sergey): Use Ceres minimizer instead.
+ typedef LevenbergMarquardt<ApplyNukeIntrinsicsCostFunction> Solver;
+
+ ApplyNukeIntrinsicsCostFunction intrinsics_cost(focal_length_x,
+ focal_length_y,
+ principal_point_x,
+ principal_point_y,
+ image_width,
+ image_height,
+ k1, k2,
+ normalized_x, normalized_y);
+ Solver::SolverParameters params;
+ Solver solver(intrinsics_cost);
+
+ /*Solver::Results results =*/ solver.minimize(params, &image);
+
+ // TODO(keir): Better error handling.
+
+ *image_x = image(0);
+ *image_y = image(1);
+}
+
} // namespace libmv
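
ApplyNukeDistortionModel has no closed form, so the patch recovers the distorted position by numerically inverting InvertNukeDistortionModel with libmv's Levenberg-Marquardt solver, seeded from the undistorted position. A simplified Python sketch of the same idea, using plain fixed-point iteration in unit coordinates relative to the principal point (values illustrative), is:

# Apply the Nuke model by numerically inverting its analytic undistortion.
# The patch does this in pixel space with a Levenberg-Marquardt solver;
# this sketch substitutes a simple fixed-point iteration.

def undistort_nuke(xd, yd, k1, k2):
    """Analytic undistortion, mirroring InvertNukeDistortionModel."""
    rd2 = xd * xd + yd * yd
    r_coeff = 1.0 / (1.0 + k1 * rd2 + k2 * rd2 * rd2)
    return xd * r_coeff, yd * r_coeff

def distort_nuke(xu, yu, k1, k2, iterations=50):
    """Find (xd, yd) with undistort_nuke(xd, yd) == (xu, yu) by iterating
    xd = xu * (1 + k1*rd^2 + k2*rd^4); converges for moderate distortion."""
    xd, yd = xu, yu                      # undistorted point as the initial guess
    for _ in range(iterations):
        rd2 = xd * xd + yd * yd
        scale = 1.0 + k1 * rd2 + k2 * rd2 * rd2
        xd, yd = xu * scale, yu * scale
    return xd, yd

# Round trip: distorting and then undistorting recovers the input point.
xd, yd = distort_nuke(0.3, -0.2, k1=0.05, k2=0.01)
print(undistort_nuke(xd, yd, k1=0.05, k2=0.01))   # ~(0.3, -0.2)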
diff --git a/intern/libmv/libmv/simple_pipeline/distortion_models.h b/intern/libmv/libmv/simple_pipeline/distortion_models.h
index 4f8e2295a0e..6ba351d729d 100644
--- a/intern/libmv/libmv/simple_pipeline/distortion_models.h
+++ b/intern/libmv/libmv/simple_pipeline/distortion_models.h
@@ -21,11 +21,14 @@
#ifndef LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
#define LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
+#include <algorithm>
+
namespace libmv {
enum DistortionModelType {
DISTORTION_MODEL_POLYNOMIAL,
- DISTORTION_MODEL_DIVISION
+ DISTORTION_MODEL_DIVISION,
+ DISTORTION_MODEL_NUKE,
};
// Invert camera intrinsics on the image point to get normalized coordinates.
@@ -126,6 +129,79 @@ inline void ApplyDivisionDistortionModel(const T &focal_length_x,
*image_y = focal_length_y * yd + principal_point_y;
}
+// Invert camera intrinsics on the image point to get normalized coordinates.
+// This inverts the radial lens distortion to a point which is in image pixel
+// coordinates to get normalized coordinates.
+//
+// Uses Nuke distortion model.
+template <typename T>
+void InvertNukeDistortionModel(const T &focal_length_x,
+ const T &focal_length_y,
+ const T &principal_point_x,
+ const T &principal_point_y,
+ const int image_width,
+ const int image_height,
+ const T &k1,
+ const T &k2,
+ const T &image_x,
+ const T &image_y,
+ T *normalized_x,
+ T *normalized_y) {
+ // According to the documentation:
+ //
+ // xu = xd / (1 + k0 * rd^2 + k1 * rd^4)
+ // yu = yd / (1 + k0 * rd^2 + k1 * rd^4)
+ //
+ // Legend:
+ // (xd, yd) are the distorted cartesian coordinates,
+ // (rd, phid) are the distorted polar coordinates,
+ // (xu, yu) are the undistorted cartesian coordinates,
+ // (ru, phiu) are the undistorted polar coordinates,
+ // the k-values are the distortion coefficients (the documentation's k0 and
+ // k1 correspond to k1 and k2 in this code).
+ //
+ // The coordinate systems are relative to the distortion centre.
+
+ const int max_image_size = std::max(image_width, image_height);
+ const double max_half_image_size = max_image_size * 0.5;
+
+ if (max_half_image_size == 0.0) {
+ *normalized_x = image_x * max_half_image_size / focal_length_x;
+ *normalized_y = image_y * max_half_image_size / focal_length_y;
+ return;
+ }
+
+ const T xd = (image_x - principal_point_x) / max_half_image_size;
+ const T yd = (image_y - principal_point_y) / max_half_image_size;
+
+ T rd2 = xd*xd + yd*yd;
+ T rd4 = rd2 * rd2;
+ T r_coeff = T(1) / (T(1) + k1*rd2 + k2*rd4);
+ T xu = xd * r_coeff;
+ T yu = yd * r_coeff;
+
+ *normalized_x = xu * max_half_image_size / focal_length_x;
+ *normalized_y = yu * max_half_image_size / focal_length_y;
+}
+
+// Apply camera intrinsics to the normalized point to get image coordinates.
+// This applies the radial lens distortion to a point which is in normalized
+// camera coordinates (i.e. the principal point is at (0, 0)) to get image
+// coordinates in pixels.
+//
+// Uses Nuke distortion model.
+void ApplyNukeDistortionModel(const double focal_length_x,
+ const double focal_length_y,
+ const double principal_point_x,
+ const double principal_point_y,
+ const int image_width,
+ const int image_height,
+ const double k1,
+ const double k2,
+ const double normalized_x,
+ const double normalized_y,
+ double *image_x,
+ double *image_y);
+
} // namespace libmv
#endif // LIBMV_SIMPLE_PIPELINE_DISTORTION_MODELS_H_
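
The inline template above works in pixel space: the point is first scaled by the longer half-dimension of the image, the k1/k2 correction is applied, and the result is rescaled by the focal length. A standalone Python transcription for one concrete pixel (all camera values are assumed, illustrative numbers) looks like:

# Transcription of InvertNukeDistortionModel for a single pixel.
# Camera values below are assumptions chosen only for illustration.
focal_x = focal_y = 1800.0           # focal length in pixels
pp_x, pp_y = 960.0, 540.0            # principal point
width, height = 1920, 1080
k1, k2 = 0.1, 0.01
image_x, image_y = 1500.0, 900.0     # distorted pixel coordinate

max_half = max(width, height) * 0.5  # 960.0, the normalization used by the model
xd = (image_x - pp_x) / max_half
yd = (image_y - pp_y) / max_half
rd2 = xd * xd + yd * yd
r_coeff = 1.0 / (1.0 + k1 * rd2 + k2 * rd2 * rd2)
xu, yu = xd * r_coeff, yd * r_coeff

normalized_x = xu * max_half / focal_x
normalized_y = yu * max_half / focal_y
print(normalized_x, normalized_y)    # undistorted point in normalized camera space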
diff --git a/release/scripts/startup/bl_ui/space_clip.py b/release/scripts/startup/bl_ui/space_clip.py
index f93629a4f03..5b6cc6609e0 100644
--- a/release/scripts/startup/bl_ui/space_clip.py
+++ b/release/scripts/startup/bl_ui/space_clip.py
@@ -918,6 +918,10 @@ class CLIP_PT_tracking_lens(Panel):
col = layout.column(align=True)
col.prop(camera, "division_k1")
col.prop(camera, "division_k2")
+ elif camera.distortion_model == 'NUKE':
+ col = layout.column(align=True)
+ col.prop(camera, "nuke_k1")
+ col.prop(camera, "nuke_k2")
class CLIP_PT_marker(CLIP_PT_tracking_panel, Panel):
diff --git a/source/blender/blenkernel/intern/movieclip.c b/source/blender/blenkernel/intern/movieclip.c
index 7991559d1ce..4c7b791f103 100644
--- a/source/blender/blenkernel/intern/movieclip.c
+++ b/source/blender/blenkernel/intern/movieclip.c
@@ -460,6 +460,7 @@ typedef struct MovieClipCache {
float principal[2];
float polynomial_k[3];
float division_k[2];
+ float nuke_k[2];
short distortion_model;
bool undistortion_used;
@@ -908,6 +909,9 @@ static bool check_undistortion_cache_flags(const MovieClip *clip)
if (!equals_v2v2(&camera->division_k1, cache->postprocessed.division_k)) {
return false;
}
+ if (!equals_v2v2(&camera->nuke_k1, cache->postprocessed.nuke_k)) {
+ return false;
+ }
return true;
}
@@ -1010,6 +1014,7 @@ static void put_postprocessed_frame_to_cache(
copy_v2_v2(cache->postprocessed.principal, camera->principal);
copy_v3_v3(cache->postprocessed.polynomial_k, &camera->k1);
copy_v2_v2(cache->postprocessed.division_k, &camera->division_k1);
+ copy_v2_v2(cache->postprocessed.nuke_k, &camera->nuke_k1);
cache->postprocessed.undistortion_used = true;
}
else {
diff --git a/source/blender/blenkernel/intern/tracking_util.c b/source/blender/blenkernel/intern/tracking_util.c
index 5f7452e4775..629c01ec298 100644
--- a/source/blender/blenkernel/intern/tracking_util.c
+++ b/source/blender/blenkernel/intern/tracking_util.c
@@ -451,6 +451,12 @@ static void distortion_model_parameters_from_tracking(
camera_intrinsics_options->division_k1 = camera->division_k1;
camera_intrinsics_options->division_k2 = camera->division_k2;
return;
+
+ case TRACKING_DISTORTION_MODEL_NUKE:
+ camera_intrinsics_options->distortion_model = LIBMV_DISTORTION_MODEL_NUKE;
+ camera_intrinsics_options->nuke_k1 = camera->nuke_k1;
+ camera_intrinsics_options->nuke_k2 = camera->nuke_k2;
+ return;
}
/* Unknown distortion model, which might be due to opening newer file in older Blender.
@@ -479,6 +485,12 @@ static void distortion_model_parameters_from_options(
camera->division_k1 = camera_intrinsics_options->division_k1;
camera->division_k2 = camera_intrinsics_options->division_k2;
return;
+
+ case LIBMV_DISTORTION_MODEL_NUKE:
+ camera->distortion_model = TRACKING_DISTORTION_MODEL_NUKE;
+ camera->nuke_k1 = camera_intrinsics_options->nuke_k1;
+ camera->nuke_k2 = camera_intrinsics_options->nuke_k2;
+ return;
}
/* Libmv returned distortion model which is not known to Blender. This is a logical error in code
diff --git a/source/blender/makesdna/DNA_tracking_types.h b/source/blender/makesdna/DNA_tracking_types.h
index ab9f3d07849..32a00cc25d1 100644
--- a/source/blender/makesdna/DNA_tracking_types.h
+++ b/source/blender/makesdna/DNA_tracking_types.h
@@ -70,6 +70,9 @@ typedef struct MovieTrackingCamera {
/* Division distortion model coefficients */
float division_k1, division_k2;
+
+ /* Nuke distortion model coefficients */
+ float nuke_k1, nuke_k2;
} MovieTrackingCamera;
typedef struct MovieTrackingMarker {
@@ -455,6 +458,7 @@ typedef struct MovieTracking {
enum {
TRACKING_DISTORTION_MODEL_POLYNOMIAL = 0,
TRACKING_DISTORTION_MODEL_DIVISION = 1,
+ TRACKING_DISTORTION_MODEL_NUKE = 2,
};
/* MovieTrackingCamera->units */
diff --git a/source/blender/makesrna/intern/rna_tracking.c b/source/blender/makesrna/intern/rna_tracking.c
index 131c13c4d90..507d06482df 100644
--- a/source/blender/makesrna/intern/rna_tracking.c
+++ b/source/blender/makesrna/intern/rna_tracking.c
@@ -1149,6 +1149,7 @@ static void rna_def_trackingCamera(BlenderRNA *brna)
"Divisions",
"Division distortion model which "
"better represents wide-angle cameras"},
+ {TRACKING_DISTORTION_MODEL_NUKE, "NUKE", 0, "Nuke", "Nuke distortion model"},
{0, NULL, 0, NULL, NULL},
};
@@ -1252,6 +1253,19 @@ static void rna_def_trackingCamera(BlenderRNA *brna)
RNA_def_property_ui_text(prop, "K2", "First coefficient of second order division distortion");
RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+ /* Nuke distortion parameters */
+ prop = RNA_def_property(srna, "nuke_k1", PROP_FLOAT, PROP_NONE);
+ RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+ RNA_def_property_ui_range(prop, -10, 10, 0.1, 3);
+ RNA_def_property_ui_text(prop, "K1", "First coefficient of second order Nuke distortion");
+ RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+
+ prop = RNA_def_property(srna, "nuke_k2", PROP_FLOAT, PROP_NONE);
+ RNA_def_property_clear_flag(prop, PROP_ANIMATABLE);
+ RNA_def_property_ui_range(prop, -10, 10, 0.1, 3);
+ RNA_def_property_ui_text(prop, "K2", "Second coefficient of second order Nuke distortion");
+ RNA_def_property_update(prop, NC_MOVIECLIP | NA_EDITED, "rna_tracking_flushUpdate");
+
/* pixel aspect */
prop = RNA_def_property(srna, "pixel_aspect", PROP_FLOAT, PROP_XYZ);
RNA_def_property_float_sdna(prop, NULL, "pixel_aspect");
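
With the new RNA properties registered, the model can be selected from Python like the existing ones. In the sketch below the clip name and coefficient values are placeholders:

# Switch a clip's tracking camera to the Nuke distortion model via the
# properties added by this patch.
import bpy

clip = bpy.data.movieclips["shot_010.mov"]
camera = clip.tracking.camera

camera.distortion_model = 'NUKE'     # enum item added in rna_def_trackingCamera
camera.nuke_k1 = 0.05                # first radial coefficient
camera.nuke_k2 = 0.005               # second radial coefficient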