diff options
Diffstat (limited to 'extern/ceres/include')
56 files changed, 3905 insertions, 654 deletions
diff --git a/extern/ceres/include/ceres/autodiff_cost_function.h b/extern/ceres/include/ceres/autodiff_cost_function.h index 207f0a41688..cd256432a98 100644 --- a/extern/ceres/include/ceres/autodiff_cost_function.h +++ b/extern/ceres/include/ceres/autodiff_cost_function.h @@ -151,7 +151,8 @@ namespace ceres { template <typename CostFunctor, int kNumResiduals, // Number of residuals, or ceres::DYNAMIC. int... Ns> // Number of parameters in each parameter block. -class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { +class AutoDiffCostFunction final + : public SizedCostFunction<kNumResiduals, Ns...> { public: // Takes ownership of functor by default. Uses the template-provided // value for the number of residuals ("kNumResiduals"). @@ -178,7 +179,7 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { SizedCostFunction<kNumResiduals, Ns...>::set_num_residuals(num_residuals); } - explicit AutoDiffCostFunction(AutoDiffCostFunction&& other) + AutoDiffCostFunction(AutoDiffCostFunction&& other) : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} virtual ~AutoDiffCostFunction() { @@ -215,6 +216,8 @@ class AutoDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { jacobians); }; + const CostFunctor& functor() const { return *functor_; } + private: std::unique_ptr<CostFunctor> functor_; Ownership ownership_; diff --git a/extern/ceres/include/ceres/autodiff_first_order_function.h b/extern/ceres/include/ceres/autodiff_first_order_function.h index b98d845655b..7c13f4239a6 100644 --- a/extern/ceres/include/ceres/autodiff_first_order_function.h +++ b/extern/ceres/include/ceres/autodiff_first_order_function.h @@ -102,7 +102,7 @@ namespace ceres { // seen where instead of using a_ directly, a_ is wrapped with T(a_). 
template <typename FirstOrderFunctor, int kNumParameters> -class AutoDiffFirstOrderFunction : public FirstOrderFunction { +class AutoDiffFirstOrderFunction final : public FirstOrderFunction { public: // Takes ownership of functor. explicit AutoDiffFirstOrderFunction(FirstOrderFunctor* functor) @@ -110,8 +110,6 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction { static_assert(kNumParameters > 0, "kNumParameters must be positive"); } - virtual ~AutoDiffFirstOrderFunction() {} - bool Evaluate(const double* const parameters, double* cost, double* gradient) const override { @@ -119,7 +117,7 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction { return (*functor_)(parameters, cost); } - typedef Jet<double, kNumParameters> JetT; + using JetT = Jet<double, kNumParameters>; internal::FixedArray<JetT, (256 * 7) / sizeof(JetT)> x(kNumParameters); for (int i = 0; i < kNumParameters; ++i) { x[i].a = parameters[i]; @@ -142,6 +140,8 @@ class AutoDiffFirstOrderFunction : public FirstOrderFunction { int NumParameters() const override { return kNumParameters; } + const FirstOrderFunctor& functor() const { return *functor_; } + private: std::unique_ptr<FirstOrderFunctor> functor_; }; diff --git a/extern/ceres/include/ceres/autodiff_local_parameterization.h b/extern/ceres/include/ceres/autodiff_local_parameterization.h index d694376fdd1..5f9b04d0670 100644 --- a/extern/ceres/include/ceres/autodiff_local_parameterization.h +++ b/extern/ceres/include/ceres/autodiff_local_parameterization.h @@ -40,6 +40,10 @@ namespace ceres { +// WARNING: LocalParameterizations are deprecated, so is +// AutoDiffLocalParameterization. They will be removed from Ceres Solver in +// version 2.2.0. Please use Manifolds and AutoDiffManifold instead. + // Create local parameterization with Jacobians computed via automatic // differentiation. 
For more information on local parameterizations, // see include/ceres/local_parameterization.h @@ -106,7 +110,8 @@ namespace ceres { // seen where instead of using k_ directly, k_ is wrapped with T(k_). template <typename Functor, int kGlobalSize, int kLocalSize> -class AutoDiffLocalParameterization : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use AutoDiffManifold instead.") + AutoDiffLocalParameterization : public LocalParameterization { public: AutoDiffLocalParameterization() : functor_(new Functor()) {} @@ -114,7 +119,6 @@ class AutoDiffLocalParameterization : public LocalParameterization { explicit AutoDiffLocalParameterization(Functor* functor) : functor_(functor) {} - virtual ~AutoDiffLocalParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override { @@ -133,7 +137,7 @@ class AutoDiffLocalParameterization : public LocalParameterization { } const double* parameter_ptrs[2] = {x, zero_delta}; - double* jacobian_ptrs[2] = {NULL, jacobian}; + double* jacobian_ptrs[2] = {nullptr, jacobian}; return internal::AutoDifferentiate< kGlobalSize, internal::StaticParameterDims<kGlobalSize, kLocalSize>>( @@ -143,6 +147,8 @@ class AutoDiffLocalParameterization : public LocalParameterization { int GlobalSize() const override { return kGlobalSize; } int LocalSize() const override { return kLocalSize; } + const Functor& functor() const { return *functor_; } + private: std::unique_ptr<Functor> functor_; }; diff --git a/extern/ceres/include/ceres/autodiff_manifold.h b/extern/ceres/include/ceres/autodiff_manifold.h new file mode 100644 index 00000000000..3063e19e802 --- /dev/null +++ b/extern/ceres/include/ceres/autodiff_manifold.h @@ -0,0 +1,259 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ +#define CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ + +#include <memory> + +#include "ceres/internal/autodiff.h" +#include "ceres/manifold.h" + +namespace ceres { + +// Create a Manifold with Jacobians computed via automatic differentiation. 
For +// more information on manifolds, see include/ceres/manifold.h +// +// To get an auto differentiated manifold, you must define a class/struct with +// templated Plus and Minus functions that compute +// +// x_plus_delta = Plus(x, delta); +// y_minus_x = Minus(y, x); +// +// Where, x, y and x_plus_y are vectors on the manifold in the ambient space (so +// they are kAmbientSize vectors) and delta, y_minus_x are vectors in the +// tangent space (so they are kTangentSize vectors). +// +// The Functor should have the signature: +// +// struct Functor { +// template <typename T> +// bool Plus(const T* x, const T* delta, T* x_plus_delta) const; +// +// template <typename T> +// bool Minus(const T* y, const T* x, T* y_minus_x) const; +// }; +// +// Observe that the Plus and Minus operations are templated on the parameter T. +// The autodiff framework substitutes appropriate "Jet" objects for T in order +// to compute the derivative when necessary. This is the same mechanism that is +// used to compute derivatives when using AutoDiffCostFunction. +// +// Plus and Minus should return true if the computation is successful and false +// otherwise, in which case the result will not be used. +// +// Given this Functor, the corresponding Manifold can be constructed as: +// +// AutoDiffManifold<Functor, kAmbientSize, kTangentSize> manifold; +// +// As a concrete example consider the case of Quaternions. Quaternions form a +// three dimensional manifold embedded in R^4, i.e. they have an ambient +// dimension of 4 and their tangent space has dimension 3. The following Functor +// (taken from autodiff_manifold_test.cc) defines the Plus and Minus operations +// on the Quaternion manifold: +// +// NOTE: The following is only used for illustration purposes. Ceres Solver +// ships with optimized production grade QuaternionManifold implementation. See +// manifold.h. +// +// This functor assumes that the quaternions are laid out as [w,x,y,z] in +// memory, i.e. 
the real or scalar part is the first coordinate. +// +// struct QuaternionFunctor { +// template <typename T> +// bool Plus(const T* x, const T* delta, T* x_plus_delta) const { +// const T squared_norm_delta = +// delta[0] * delta[0] + delta[1] * delta[1] + delta[2] * delta[2]; +// +// T q_delta[4]; +// if (squared_norm_delta > T(0.0)) { +// T norm_delta = sqrt(squared_norm_delta); +// const T sin_delta_by_delta = sin(norm_delta) / norm_delta; +// q_delta[0] = cos(norm_delta); +// q_delta[1] = sin_delta_by_delta * delta[0]; +// q_delta[2] = sin_delta_by_delta * delta[1]; +// q_delta[3] = sin_delta_by_delta * delta[2]; +// } else { +// // We do not just use q_delta = [1,0,0,0] here because that is a +// // constant and when used for automatic differentiation will +// // lead to a zero derivative. Instead we take a first order +// // approximation and evaluate it at zero. +// q_delta[0] = T(1.0); +// q_delta[1] = delta[0]; +// q_delta[2] = delta[1]; +// q_delta[3] = delta[2]; +// } +// +// QuaternionProduct(q_delta, x, x_plus_delta); +// return true; +// } +// +// template <typename T> +// bool Minus(const T* y, const T* x, T* y_minus_x) const { +// T minus_x[4] = {x[0], -x[1], -x[2], -x[3]}; +// T ambient_y_minus_x[4]; +// QuaternionProduct(y, minus_x, ambient_y_minus_x); +// T u_norm = sqrt(ambient_y_minus_x[1] * ambient_y_minus_x[1] + +// ambient_y_minus_x[2] * ambient_y_minus_x[2] + +// ambient_y_minus_x[3] * ambient_y_minus_x[3]); +// if (u_norm > 0.0) { +// T theta = atan2(u_norm, ambient_y_minus_x[0]); +// y_minus_x[0] = theta * ambient_y_minus_x[1] / u_norm; +// y_minus_x[1] = theta * ambient_y_minus_x[2] / u_norm; +// y_minus_x[2] = theta * ambient_y_minus_x[3] / u_norm; +// } else { +// // We do not use [0,0,0] here because even though the value part is +// // a constant, the derivative part is not. 
+// y_minus_x[0] = ambient_y_minus_x[1]; +// y_minus_x[1] = ambient_y_minus_x[2]; +// y_minus_x[2] = ambient_y_minus_x[3]; +// } +// return true; +// } +// }; +// +// Then given this struct, the auto differentiated Quaternion Manifold can now +// be constructed as +// +// Manifold* manifold = new AutoDiffManifold<QuaternionFunctor, 4, 3>; + +template <typename Functor, int kAmbientSize, int kTangentSize> +class AutoDiffManifold final : public Manifold { + public: + AutoDiffManifold() : functor_(std::make_unique<Functor>()) {} + + // Takes ownership of functor. + explicit AutoDiffManifold(Functor* functor) : functor_(functor) {} + + int AmbientSize() const override { return kAmbientSize; } + int TangentSize() const override { return kTangentSize; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override { + return functor_->Plus(x, delta, x_plus_delta); + } + + bool PlusJacobian(const double* x, double* jacobian) const override; + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override { + return functor_->Minus(y, x, y_minus_x); + } + + bool MinusJacobian(const double* x, double* jacobian) const override; + + const Functor& functor() const { return *functor_; } + + private: + std::unique_ptr<Functor> functor_; +}; + +namespace internal { + +// The following two helper structs are needed to interface the Plus and Minus +// methods of the ManifoldFunctor with the automatic differentiation which +// expects a Functor with operator(). 
+template <typename Functor> +struct PlusWrapper { + explicit PlusWrapper(const Functor& functor) : functor(functor) {} + template <typename T> + bool operator()(const T* x, const T* delta, T* x_plus_delta) const { + return functor.Plus(x, delta, x_plus_delta); + } + const Functor& functor; +}; + +template <typename Functor> +struct MinusWrapper { + explicit MinusWrapper(const Functor& functor) : functor(functor) {} + template <typename T> + bool operator()(const T* y, const T* x, T* y_minus_x) const { + return functor.Minus(y, x, y_minus_x); + } + const Functor& functor; +}; +} // namespace internal + +template <typename Functor, int kAmbientSize, int kTangentSize> +bool AutoDiffManifold<Functor, kAmbientSize, kTangentSize>::PlusJacobian( + const double* x, double* jacobian) const { + double zero_delta[kTangentSize]; + for (int i = 0; i < kTangentSize; ++i) { + zero_delta[i] = 0.0; + } + + double x_plus_delta[kAmbientSize]; + for (int i = 0; i < kAmbientSize; ++i) { + x_plus_delta[i] = 0.0; + } + + const double* parameter_ptrs[2] = {x, zero_delta}; + + // PlusJacobian is D_2 Plus(x,0) so we only need to compute the Jacobian + // w.r.t. the second argument. + double* jacobian_ptrs[2] = {nullptr, jacobian}; + return internal::AutoDifferentiate< + kAmbientSize, + internal::StaticParameterDims<kAmbientSize, kTangentSize>>( + internal::PlusWrapper<Functor>(*functor_), + parameter_ptrs, + kAmbientSize, + x_plus_delta, + jacobian_ptrs); +} + +template <typename Functor, int kAmbientSize, int kTangentSize> +bool AutoDiffManifold<Functor, kAmbientSize, kTangentSize>::MinusJacobian( + const double* x, double* jacobian) const { + double y_minus_x[kTangentSize]; + for (int i = 0; i < kTangentSize; ++i) { + y_minus_x[i] = 0.0; + } + + const double* parameter_ptrs[2] = {x, x}; + + // MinusJacobian is D_1 Minus(x,x), so we only need to compute the Jacobian + // w.r.t. the first argument. 
+ double* jacobian_ptrs[2] = {jacobian, nullptr}; + return internal::AutoDifferentiate< + kTangentSize, + internal::StaticParameterDims<kAmbientSize, kAmbientSize>>( + internal::MinusWrapper<Functor>(*functor_), + parameter_ptrs, + kTangentSize, + y_minus_x, + jacobian_ptrs); +} + +} // namespace ceres + +#endif // CERES_PUBLIC_AUTODIFF_MANIFOLD_H_ diff --git a/extern/ceres/include/ceres/c_api.h b/extern/ceres/include/ceres/c_api.h index 91b82bf995f..1be8ca2e077 100644 --- a/extern/ceres/include/ceres/c_api.h +++ b/extern/ceres/include/ceres/c_api.h @@ -39,7 +39,7 @@ #define CERES_PUBLIC_C_API_H_ // clang-format off -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" #include "ceres/internal/disable_warnings.h" // clang-format on diff --git a/extern/ceres/include/ceres/ceres.h b/extern/ceres/include/ceres/ceres.h index d249351694c..c32477d4254 100644 --- a/extern/ceres/include/ceres/ceres.h +++ b/extern/ceres/include/ceres/ceres.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2019 Google Inc. All rights reserved. +// Copyright 2022 Google Inc. All rights reserved. 
// http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -35,7 +35,9 @@ #define CERES_PUBLIC_CERES_H_ #include "ceres/autodiff_cost_function.h" +#include "ceres/autodiff_first_order_function.h" #include "ceres/autodiff_local_parameterization.h" +#include "ceres/autodiff_manifold.h" #include "ceres/conditioned_cost_function.h" #include "ceres/context.h" #include "ceres/cost_function.h" @@ -47,19 +49,25 @@ #include "ceres/dynamic_cost_function_to_functor.h" #include "ceres/dynamic_numeric_diff_cost_function.h" #include "ceres/evaluation_callback.h" +#include "ceres/first_order_function.h" #include "ceres/gradient_checker.h" #include "ceres/gradient_problem.h" #include "ceres/gradient_problem_solver.h" #include "ceres/iteration_callback.h" #include "ceres/jet.h" +#include "ceres/line_manifold.h" #include "ceres/local_parameterization.h" #include "ceres/loss_function.h" +#include "ceres/manifold.h" #include "ceres/numeric_diff_cost_function.h" +#include "ceres/numeric_diff_first_order_function.h" #include "ceres/numeric_diff_options.h" #include "ceres/ordered_groups.h" #include "ceres/problem.h" +#include "ceres/product_manifold.h" #include "ceres/sized_cost_function.h" #include "ceres/solver.h" +#include "ceres/sphere_manifold.h" #include "ceres/types.h" #include "ceres/version.h" diff --git a/extern/ceres/include/ceres/conditioned_cost_function.h b/extern/ceres/include/ceres/conditioned_cost_function.h index a57ee209b80..e4c3decbfd5 100644 --- a/extern/ceres/include/ceres/conditioned_cost_function.h +++ b/extern/ceres/include/ceres/conditioned_cost_function.h @@ -71,18 +71,18 @@ namespace ceres { // ccf_residual[i] = f_i(my_cost_function_residual[i]) // // and the Jacobian will be affected appropriately. 
-class CERES_EXPORT ConditionedCostFunction : public CostFunction { +class CERES_EXPORT ConditionedCostFunction final : public CostFunction { public: // Builds a cost function based on a wrapped cost function, and a // per-residual conditioner. Takes ownership of all of the wrapped cost // functions, or not, depending on the ownership parameter. Conditioners - // may be NULL, in which case the corresponding residual is not modified. + // may be nullptr, in which case the corresponding residual is not modified. // // The conditioners can repeat. ConditionedCostFunction(CostFunction* wrapped_cost_function, const std::vector<CostFunction*>& conditioners, Ownership ownership); - virtual ~ConditionedCostFunction(); + ~ConditionedCostFunction() override; bool Evaluate(double const* const* parameters, double* residuals, diff --git a/extern/ceres/include/ceres/context.h b/extern/ceres/include/ceres/context.h index d08e32b31a8..6c6e8f4c953 100644 --- a/extern/ceres/include/ceres/context.h +++ b/extern/ceres/include/ceres/context.h @@ -31,6 +31,8 @@ #ifndef CERES_PUBLIC_CONTEXT_H_ #define CERES_PUBLIC_CONTEXT_H_ +#include "ceres/internal/export.h" + namespace ceres { // A global context for processing data in Ceres. This provides a mechanism to @@ -39,13 +41,13 @@ namespace ceres { // Problems, either serially or in parallel. When using it with multiple // Problems at the same time, they may end up contending for resources // (e.g. threads) managed by the Context. -class Context { +class CERES_EXPORT Context { public: - Context() {} + Context(); Context(const Context&) = delete; void operator=(const Context&) = delete; - virtual ~Context() {} + virtual ~Context(); // Creates a context object and the caller takes ownership. 
static Context* Create(); diff --git a/extern/ceres/include/ceres/cost_function.h b/extern/ceres/include/ceres/cost_function.h index d1550c119e8..fef972b75af 100644 --- a/extern/ceres/include/ceres/cost_function.h +++ b/extern/ceres/include/ceres/cost_function.h @@ -48,7 +48,7 @@ #include <vector> #include "ceres/internal/disable_warnings.h" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" namespace ceres { @@ -63,11 +63,11 @@ namespace ceres { // when added with AddResidualBlock(). class CERES_EXPORT CostFunction { public: - CostFunction() : num_residuals_(0) {} + CostFunction(); CostFunction(const CostFunction&) = delete; void operator=(const CostFunction&) = delete; - virtual ~CostFunction() {} + virtual ~CostFunction(); // Inputs: // @@ -92,8 +92,8 @@ class CERES_EXPORT CostFunction { // jacobians[i][r*parameter_block_size_[i] + c] = // d residual[r] / d parameters[i][c] // - // If jacobians is NULL, then no derivatives are returned; this is - // the case when computing cost only. If jacobians[i] is NULL, then + // If jacobians is nullptr, then no derivatives are returned; this is + // the case when computing cost only. If jacobians[i] is nullptr, then // the jacobian block corresponding to the i'th parameter block must // not to be returned. 
// diff --git a/extern/ceres/include/ceres/cost_function_to_functor.h b/extern/ceres/include/ceres/cost_function_to_functor.h index 9364293afc5..08a8050c5f8 100644 --- a/extern/ceres/include/ceres/cost_function_to_functor.h +++ b/extern/ceres/include/ceres/cost_function_to_functor.h @@ -94,10 +94,11 @@ #include "ceres/cost_function.h" #include "ceres/dynamic_cost_function_to_functor.h" +#include "ceres/internal/export.h" #include "ceres/internal/fixed_array.h" #include "ceres/internal/parameter_dims.h" -#include "ceres/internal/port.h" #include "ceres/types.h" +#include "glog/logging.h" namespace ceres { diff --git a/extern/ceres/include/ceres/covariance.h b/extern/ceres/include/ceres/covariance.h index 2fe025df3ce..60bcc80b80f 100644 --- a/extern/ceres/include/ceres/covariance.h +++ b/extern/ceres/include/ceres/covariance.h @@ -35,8 +35,9 @@ #include <utility> #include <vector> +#include "ceres/internal/config.h" #include "ceres/internal/disable_warnings.h" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" #include "ceres/types.h" namespace ceres { @@ -145,7 +146,7 @@ class CovarianceImpl; // a. The rank deficiency arises from overparameterization. e.g., a // four dimensional quaternion used to parameterize SO(3), which is // a three dimensional manifold. In cases like this, the user should -// use an appropriate LocalParameterization. Not only will this lead +// use an appropriate LocalParameterization/Manifold. Not only will this lead // to better numerical behaviour of the Solver, it will also expose // the rank deficiency to the Covariance object so that it can // handle it correctly. 
diff --git a/extern/ceres/include/ceres/crs_matrix.h b/extern/ceres/include/ceres/crs_matrix.h index bc618fa0905..faa0f988528 100644 --- a/extern/ceres/include/ceres/crs_matrix.h +++ b/extern/ceres/include/ceres/crs_matrix.h @@ -34,17 +34,17 @@ #include <vector> #include "ceres/internal/disable_warnings.h" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" namespace ceres { // A compressed row sparse matrix used primarily for communicating the // Jacobian matrix to the user. struct CERES_EXPORT CRSMatrix { - CRSMatrix() : num_rows(0), num_cols(0) {} + CRSMatrix() = default; - int num_rows; - int num_cols; + int num_rows{0}; + int num_cols{0}; // A compressed row matrix stores its contents in three arrays, // rows, cols and values. diff --git a/extern/ceres/include/ceres/cubic_interpolation.h b/extern/ceres/include/ceres/cubic_interpolation.h index 9b9ea4a942c..3ca6b11b407 100644 --- a/extern/ceres/include/ceres/cubic_interpolation.h +++ b/extern/ceres/include/ceres/cubic_interpolation.h @@ -32,7 +32,7 @@ #define CERES_PUBLIC_CUBIC_INTERPOLATION_H_ #include "Eigen/Core" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" #include "glog/logging.h" namespace ceres { @@ -59,8 +59,8 @@ namespace ceres { // http://en.wikipedia.org/wiki/Cubic_Hermite_spline // http://en.wikipedia.org/wiki/Bicubic_interpolation // -// f if not NULL will contain the interpolated function values. -// dfdx if not NULL will contain the interpolated derivative values. +// f if not nullptr will contain the interpolated function values. +// dfdx if not nullptr will contain the interpolated derivative values. 
template <int kDataDimension> void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0, const Eigen::Matrix<double, kDataDimension, 1>& p1, @@ -69,7 +69,7 @@ void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0, const double x, double* f, double* dfdx) { - typedef Eigen::Matrix<double, kDataDimension, 1> VType; + using VType = Eigen::Matrix<double, kDataDimension, 1>; const VType a = 0.5 * (-p0 + 3.0 * p1 - 3.0 * p2 + p3); const VType b = 0.5 * (2.0 * p0 - 5.0 * p1 + 4.0 * p2 - p3); const VType c = 0.5 * (-p0 + p2); @@ -79,12 +79,12 @@ void CubicHermiteSpline(const Eigen::Matrix<double, kDataDimension, 1>& p0, // derivative. // f = ax^3 + bx^2 + cx + d - if (f != NULL) { + if (f != nullptr) { Eigen::Map<VType>(f, kDataDimension) = d + x * (c + x * (b + x * a)); } // dfdx = 3ax^2 + 2bx + c - if (dfdx != NULL) { + if (dfdx != nullptr) { Eigen::Map<VType>(dfdx, kDataDimension) = c + x * (2.0 * b + 3.0 * a * x); } } @@ -143,7 +143,7 @@ class CubicInterpolator { // The following two Evaluate overloads are needed for interfacing // with automatic differentiation. The first is for when a scalar // evaluation is done, and the second one is for when Jets are used. - void Evaluate(const double& x, double* f) const { Evaluate(x, f, NULL); } + void Evaluate(const double& x, double* f) const { Evaluate(x, f, nullptr); } template <typename JetT> void Evaluate(const JetT& x, JetT* f) const { @@ -191,7 +191,7 @@ struct Grid1D { } EIGEN_STRONG_INLINE void GetValue(const int n, double* f) const { - const int idx = std::min(std::max(begin_, n), end_ - 1) - begin_; + const int idx = (std::min)((std::max)(begin_, n), end_ - 1) - begin_; if (kInterleaved) { for (int i = 0; i < kDataDimension; ++i) { f[i] = static_cast<double>(data_[kDataDimension * idx + i]); @@ -317,10 +317,10 @@ class BiCubicInterpolator { // Interpolate vertically the interpolated value from each row and // compute the derivative along the columns. 
CubicHermiteSpline<Grid::DATA_DIMENSION>(f0, f1, f2, f3, r - row, f, dfdr); - if (dfdc != NULL) { + if (dfdc != nullptr) { // Interpolate vertically the derivative along the columns. CubicHermiteSpline<Grid::DATA_DIMENSION>( - df0dc, df1dc, df2dc, df3dc, r - row, dfdc, NULL); + df0dc, df1dc, df2dc, df3dc, r - row, dfdc, nullptr); } } @@ -328,7 +328,7 @@ class BiCubicInterpolator { // with automatic differentiation. The first is for when a scalar // evaluation is done, and the second one is for when Jets are used. void Evaluate(const double& r, const double& c, double* f) const { - Evaluate(r, c, f, NULL, NULL); + Evaluate(r, c, f, nullptr, nullptr); } template <typename JetT> @@ -402,9 +402,9 @@ struct Grid2D { EIGEN_STRONG_INLINE void GetValue(const int r, const int c, double* f) const { const int row_idx = - std::min(std::max(row_begin_, r), row_end_ - 1) - row_begin_; + (std::min)((std::max)(row_begin_, r), row_end_ - 1) - row_begin_; const int col_idx = - std::min(std::max(col_begin_, c), col_end_ - 1) - col_begin_; + (std::min)((std::max)(col_begin_, c), col_end_ - 1) - col_begin_; const int n = (kRowMajor) ? num_cols_ * row_idx + col_idx : num_rows_ * col_idx + row_idx; diff --git a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h index 7ccf6a88c32..c21d0517f27 100644 --- a/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h +++ b/extern/ceres/include/ceres/dynamic_autodiff_cost_function.h @@ -77,17 +77,17 @@ namespace ceres { // pass. There is a tradeoff with the size of the passes; you may want // to experiment with the stride. template <typename CostFunctor, int Stride = 4> -class DynamicAutoDiffCostFunction : public DynamicCostFunction { +class DynamicAutoDiffCostFunction final : public DynamicCostFunction { public: // Takes ownership by default. 
- DynamicAutoDiffCostFunction(CostFunctor* functor, - Ownership ownership = TAKE_OWNERSHIP) + explicit DynamicAutoDiffCostFunction(CostFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP) : functor_(functor), ownership_(ownership) {} - explicit DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other) + DynamicAutoDiffCostFunction(DynamicAutoDiffCostFunction&& other) : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} - virtual ~DynamicAutoDiffCostFunction() { + ~DynamicAutoDiffCostFunction() override { // Manually release pointer if configured to not take ownership // rather than deleting only if ownership is taken. This is to // stay maximally compatible to old user code which may have @@ -105,7 +105,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction { << "You must call DynamicAutoDiffCostFunction::SetNumResiduals() " << "before DynamicAutoDiffCostFunction::Evaluate()."; - if (jacobians == NULL) { + if (jacobians == nullptr) { return (*functor_)(parameters, residuals); } @@ -150,7 +150,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction { jet_parameters[i] = &input_jets[parameter_cursor]; const int parameter_block_size = parameter_block_sizes()[i]; - if (jacobians[i] != NULL) { + if (jacobians[i] != nullptr) { if (!in_derivative_section) { start_derivative_section.push_back(parameter_cursor); in_derivative_section = true; @@ -209,7 +209,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction { parameter_cursor >= (start_derivative_section[current_derivative_section] + current_derivative_section_cursor)) { - if (jacobians[i] != NULL) { + if (jacobians[i] != nullptr) { input_jets[parameter_cursor].v[active_parameter_count] = 1.0; ++active_parameter_count; ++current_derivative_section_cursor; @@ -238,7 +238,7 @@ class DynamicAutoDiffCostFunction : public DynamicCostFunction { parameter_cursor >= (start_derivative_section[current_derivative_section] + current_derivative_section_cursor)) { - 
if (jacobians[i] != NULL) { + if (jacobians[i] != nullptr) { for (int k = 0; k < num_residuals(); ++k) { jacobians[i][k * parameter_block_sizes()[i] + j] = output_jets[k].v[active_parameter_count]; diff --git a/extern/ceres/include/ceres/dynamic_cost_function.h b/extern/ceres/include/ceres/dynamic_cost_function.h index 6e8a076ecd0..c84a366dafb 100644 --- a/extern/ceres/include/ceres/dynamic_cost_function.h +++ b/extern/ceres/include/ceres/dynamic_cost_function.h @@ -32,6 +32,7 @@ #define CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_ #include "ceres/cost_function.h" +#include "ceres/internal/disable_warnings.h" namespace ceres { @@ -40,8 +41,6 @@ namespace ceres { // parameter blocks and set the number of residuals at run time. class CERES_EXPORT DynamicCostFunction : public CostFunction { public: - ~DynamicCostFunction() {} - virtual void AddParameterBlock(int size) { mutable_parameter_block_sizes()->push_back(size); } @@ -53,4 +52,6 @@ class CERES_EXPORT DynamicCostFunction : public CostFunction { } // namespace ceres +#include "ceres/internal/reenable_warnings.h" + #endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_H_ diff --git a/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h b/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h index 8d174d8ecc2..5b5feaaf58e 100644 --- a/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h +++ b/extern/ceres/include/ceres/dynamic_cost_function_to_functor.h @@ -37,8 +37,10 @@ #include <vector> #include "ceres/dynamic_cost_function.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/internal/fixed_array.h" -#include "ceres/internal/port.h" +#include "glog/logging.h" namespace ceres { @@ -100,7 +102,7 @@ namespace ceres { // private: // DynamicCostFunctionToFunctor intrinsic_projection_; // }; -class DynamicCostFunctionToFunctor { +class CERES_EXPORT DynamicCostFunctionToFunctor { public: // Takes ownership of cost_function. 
explicit DynamicCostFunctionToFunctor(CostFunction* cost_function) @@ -109,7 +111,7 @@ class DynamicCostFunctionToFunctor { } bool operator()(double const* const* parameters, double* residuals) const { - return cost_function_->Evaluate(parameters, residuals, NULL); + return cost_function_->Evaluate(parameters, residuals, nullptr); } template <typename JetT> @@ -187,4 +189,6 @@ class DynamicCostFunctionToFunctor { } // namespace ceres +#include "ceres/internal/reenable_warnings.h" + #endif // CERES_PUBLIC_DYNAMIC_COST_FUNCTION_TO_FUNCTOR_H_ diff --git a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h index ccc8f66db43..e1892e8ba4a 100644 --- a/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h +++ b/extern/ceres/include/ceres/dynamic_numeric_diff_cost_function.h @@ -77,7 +77,7 @@ namespace ceres { // cost_function.AddParameterBlock(10); // cost_function.SetNumResiduals(21); template <typename CostFunctor, NumericDiffMethodType method = CENTRAL> -class DynamicNumericDiffCostFunction : public DynamicCostFunction { +class DynamicNumericDiffCostFunction final : public DynamicCostFunction { public: explicit DynamicNumericDiffCostFunction( const CostFunctor* functor, @@ -85,11 +85,10 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction { const NumericDiffOptions& options = NumericDiffOptions()) : functor_(functor), ownership_(ownership), options_(options) {} - explicit DynamicNumericDiffCostFunction( - DynamicNumericDiffCostFunction&& other) + DynamicNumericDiffCostFunction(DynamicNumericDiffCostFunction&& other) : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} - virtual ~DynamicNumericDiffCostFunction() { + ~DynamicNumericDiffCostFunction() override { if (ownership_ != TAKE_OWNERSHIP) { functor_.release(); } @@ -111,7 +110,7 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction { const bool status = 
internal::VariadicEvaluate<internal::DynamicParameterDims>( *functor_.get(), parameters, residuals); - if (jacobians == NULL || !status) { + if (jacobians == nullptr || !status) { return status; } @@ -133,7 +132,7 @@ class DynamicNumericDiffCostFunction : public DynamicCostFunction { } for (size_t block = 0; block < block_sizes.size(); ++block) { - if (jacobians[block] != NULL && + if (jacobians[block] != nullptr && !NumericDiff<CostFunctor, method, ceres::DYNAMIC, diff --git a/extern/ceres/include/ceres/evaluation_callback.h b/extern/ceres/include/ceres/evaluation_callback.h index b9f5bbb5194..495d565047a 100644 --- a/extern/ceres/include/ceres/evaluation_callback.h +++ b/extern/ceres/include/ceres/evaluation_callback.h @@ -31,7 +31,7 @@ #ifndef CERES_PUBLIC_EVALUATION_CALLBACK_H_ #define CERES_PUBLIC_EVALUATION_CALLBACK_H_ -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" namespace ceres { @@ -62,7 +62,7 @@ namespace ceres { // execute faster. class CERES_EXPORT EvaluationCallback { public: - virtual ~EvaluationCallback() {} + virtual ~EvaluationCallback(); // Called before Ceres requests residuals or jacobians for a given setting of // the parameters. User parameters (the double* values provided to the cost diff --git a/extern/ceres/include/ceres/first_order_function.h b/extern/ceres/include/ceres/first_order_function.h index 1420153b2aa..d718b6679ce 100644 --- a/extern/ceres/include/ceres/first_order_function.h +++ b/extern/ceres/include/ceres/first_order_function.h @@ -31,7 +31,7 @@ #ifndef CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_ #define CERES_PUBLIC_FIRST_ORDER_FUNCTION_H_ -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" namespace ceres { @@ -39,7 +39,7 @@ namespace ceres { // and its gradient. class CERES_EXPORT FirstOrderFunction { public: - virtual ~FirstOrderFunction() {} + virtual ~FirstOrderFunction(); // cost is never null. gradient may be null. 
The return value // indicates whether the evaluation was successful or not. diff --git a/extern/ceres/include/ceres/gradient_checker.h b/extern/ceres/include/ceres/gradient_checker.h index b79cf86b314..178fa2b0dd2 100644 --- a/extern/ceres/include/ceres/gradient_checker.h +++ b/extern/ceres/include/ceres/gradient_checker.h @@ -40,9 +40,12 @@ #include "ceres/cost_function.h" #include "ceres/dynamic_numeric_diff_cost_function.h" +#include "ceres/internal/disable_warnings.h" #include "ceres/internal/eigen.h" +#include "ceres/internal/export.h" #include "ceres/internal/fixed_array.h" #include "ceres/local_parameterization.h" +#include "ceres/manifold.h" #include "glog/logging.h" namespace ceres { @@ -65,19 +68,42 @@ namespace ceres { // CostFunction, and then call Probe(). Check that the return value is 'true'. class CERES_EXPORT GradientChecker { public: - // This will not take ownership of the cost function or local + // This constructor will not take ownership of the cost function or local // parameterizations. // // function: The cost function to probe. - // local_parameterizations: A vector of local parameterizations for each - // parameter. May be NULL or contain NULL pointers to indicate that the + // + // local_parameterizations: A vector of local parameterizations, one for each + // parameter block. May be nullptr or contain nullptrs to indicate that the // respective parameter does not have a local parameterization. + // // options: Options to use for numerical differentiation. + // + // NOTE: This constructor is deprecated and will be removed in the next public + // release of Ceres Solver. Please transition to using the Manifold based + // version. + CERES_DEPRECATED_WITH_MSG( + "Local Parameterizations are deprecated. 
Use the constructor that uses " + "Manifolds instead.") GradientChecker( const CostFunction* function, const std::vector<const LocalParameterization*>* local_parameterizations, const NumericDiffOptions& options); + // This will not take ownership of the cost function or manifolds. + // + // function: The cost function to probe. + // + // manifolds: A vector of manifolds for each parameter. May be nullptr or + // contain nullptrs to indicate that the respective parameter blocks are + // Euclidean. + // + // options: Options to use for numerical differentiation. + GradientChecker(const CostFunction* function, + const std::vector<const Manifold*>* manifolds, + const NumericDiffOptions& options); + ~GradientChecker(); + // Contains results from a call to Probe for later inspection. struct CERES_EXPORT ProbeResults { // The return value of the cost function. @@ -87,11 +113,11 @@ class CERES_EXPORT GradientChecker { Vector residuals; // The sizes of the Jacobians below are dictated by the cost function's - // parameter block size and residual block sizes. If a parameter block - // has a local parameterization associated with it, the size of the "local" - // Jacobian will be determined by the local parameterization dimension and - // residual block size, otherwise it will be identical to the regular - // Jacobian. + // parameter block size and residual block sizes. If a parameter block has a + // manifold associated with it, the size of the "local" Jacobian will be + // determined by the dimension of the manifold (which is the same as the + // dimension of the tangent space) and residual block size, otherwise it + // will be identical to the regular Jacobian. // Derivatives as computed by the cost function. std::vector<Matrix> jacobians; @@ -114,20 +140,20 @@ class CERES_EXPORT GradientChecker { }; // Call the cost function, compute alternative Jacobians using finite - // differencing and compare results. 
If local parameterizations are given, - // the Jacobians will be multiplied by the local parameterization Jacobians - // before performing the check, which effectively means that all errors along - // the null space of the local parameterization will be ignored. - // Returns false if the Jacobians don't match, the cost function return false, - // or if the cost function returns different residual when called with a - // Jacobian output argument vs. calling it without. Otherwise returns true. + // differencing and compare results. If manifolds are given, the Jacobians + // will be multiplied by the manifold Jacobians before performing the check, + // which effectively means that all errors along the null space of the + // manifold will be ignored. Returns false if the Jacobians don't match, the + // cost function return false, or if a cost function returns a different + // residual when called with a Jacobian output argument vs. calling it + // without. Otherwise returns true. // // parameters: The parameter values at which to probe. // relative_precision: A threshold for the relative difference between the // Jacobians. If the Jacobians differ by more than this amount, then the // probe fails. // results: On return, the Jacobians (and other information) will be stored - // here. May be NULL. + // here. May be nullptr. // // Returns true if no problems are detected and the difference between the // Jacobians is less than error_tolerance. @@ -140,11 +166,24 @@ class CERES_EXPORT GradientChecker { GradientChecker(const GradientChecker&) = delete; void operator=(const GradientChecker&) = delete; - std::vector<const LocalParameterization*> local_parameterizations_; + // This bool is used to determine whether the constructor with the + // LocalParameterizations is called or the one with Manifolds is called. If + // the former, then the vector of manifolds is a vector of ManifoldAdapter + // objects which we own and should be deleted. 
If the latter then they are + // real Manifold objects owned by the caller and will not be deleted. + // + // This bool is only needed during the LocalParameterization to Manifold + // transition, once this transition is complete the LocalParameterization + // based constructor and this bool will be removed. + const bool delete_manifolds_ = false; + + std::vector<const Manifold*> manifolds_; const CostFunction* function_; std::unique_ptr<CostFunction> finite_diff_cost_function_; }; } // namespace ceres +#include "ceres/internal/reenable_warnings.h" + #endif // CERES_PUBLIC_GRADIENT_CHECKER_H_ diff --git a/extern/ceres/include/ceres/gradient_problem.h b/extern/ceres/include/ceres/gradient_problem.h index 49d605ea2d6..b6a8b867421 100644 --- a/extern/ceres/include/ceres/gradient_problem.h +++ b/extern/ceres/include/ceres/gradient_problem.h @@ -34,8 +34,10 @@ #include <memory> #include "ceres/first_order_function.h" -#include "ceres/internal/port.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/local_parameterization.h" +#include "ceres/manifold.h" namespace ceres { @@ -43,23 +45,22 @@ class FirstOrderFunction; // Instances of GradientProblem represent general non-linear // optimization problems that must be solved using just the value of -// the objective function and its gradient. Unlike the Problem class, -// which can only be used to model non-linear least squares problems, -// instances of GradientProblem not restricted in the form of the -// objective function. +// the objective function and its gradient. + +// Unlike the Problem class, which can only be used to model non-linear least +// squares problems, instances of GradientProblem are not restricted in the form +// of the objective function. // -// Structurally GradientProblem is a composition of a -// FirstOrderFunction and optionally a LocalParameterization. 
+// Structurally GradientProblem is a composition of a FirstOrderFunction and +// optionally a Manifold. // -// The FirstOrderFunction is responsible for evaluating the cost and -// gradient of the objective function. +// The FirstOrderFunction is responsible for evaluating the cost and gradient of +// the objective function. // -// The LocalParameterization is responsible for going back and forth -// between the ambient space and the local tangent space. (See -// local_parameterization.h for more details). When a -// LocalParameterization is not provided, then the tangent space is -// assumed to coincide with the ambient Euclidean space that the -// gradient vector lives in. +// The Manifold is responsible for going back and forth between the ambient +// space and the local tangent space. (See manifold.h for more details). When a +// Manifold is not provided, then the tangent space is assumed to coincide with +// the ambient Euclidean space that the gradient vector lives in. // // Example usage: // @@ -78,7 +79,7 @@ class FirstOrderFunction; // const double y = parameters[1]; // // cost[0] = (1.0 - x) * (1.0 - x) + 100.0 * (y - x * x) * (y - x * x); -// if (gradient != NULL) { +// if (gradient != nullptr) { // gradient[0] = -2.0 * (1.0 - x) - 200.0 * (y - x * x) * 2.0 * x; // gradient[1] = 200.0 * (y - x * x); // } @@ -89,28 +90,96 @@ class FirstOrderFunction; // }; // // ceres::GradientProblem problem(new Rosenbrock()); +// +// NOTE: We are currently in the process of transitioning from +// LocalParameterization to Manifolds in the Ceres API. During this period, +// GradientProblem will support using both Manifold and LocalParameterization +// objects interchangably. For methods in the API affected by this change, see +// their documentation below. class CERES_EXPORT GradientProblem { public: // Takes ownership of the function. explicit GradientProblem(FirstOrderFunction* function); // Takes ownership of the function and the parameterization. 
+ // + // NOTE: This constructor is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using the Manifold based + // constructor. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Please use the constructor that " + "uses Manifold instead.") GradientProblem(FirstOrderFunction* function, LocalParameterization* parameterization); + // Takes ownership of the function and the manifold. + GradientProblem(FirstOrderFunction* function, Manifold* manifold); + int NumParameters() const; - int NumLocalParameters() const; + + // Dimension of the manifold (and its tangent space). + // + // During the transition from LocalParameterization to Manifold, this method + // reports the LocalSize of the LocalParameterization or the TangentSize of + // the Manifold object associated with this problem. + int NumTangentParameters() const; + + // Dimension of the manifold (and its tangent space). + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. Please move to using NumTangentParameters() + // instead. + int NumLocalParameters() const { return NumTangentParameters(); } // This call is not thread safe. bool Evaluate(const double* parameters, double* cost, double* gradient) const; bool Plus(const double* x, const double* delta, double* x_plus_delta) const; + const FirstOrderFunction* function() const { return function_.get(); } + FirstOrderFunction* mutable_function() { return function_.get(); } + + // NOTE: During the transition from LocalParameterization to Manifold we need + // to support both The LocalParameterization and Manifold based constructors. + // + // When the user uses the LocalParameterization, internally the solver will + // wrap it in a ManifoldAdapter object and return it when manifold or + // mutable_manifold are called. 
+ // + // As a result this method will return a non-nullptr result if a Manifold or a + // LocalParameterization was used when constructing the GradientProblem. + const Manifold* manifold() const { return manifold_.get(); } + Manifold* mutable_manifold() { return manifold_.get(); } + + // If the problem is constructed without a LocalParameterization or with a + // Manifold this method will return a nullptr. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. + CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.") + const LocalParameterization* parameterization() const { + return parameterization_.get(); + } + + // If the problem is constructed without a LocalParameterization or with a + // Manifold this method will return a nullptr. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. + CERES_DEPRECATED_WITH_MSG("Use Manifolds instead.") + LocalParameterization* mutable_parameterization() { + return parameterization_.get(); + } + private: std::unique_ptr<FirstOrderFunction> function_; + CERES_DEPRECATED_WITH_MSG("") std::unique_ptr<LocalParameterization> parameterization_; + std::unique_ptr<Manifold> manifold_; std::unique_ptr<double[]> scratch_; }; } // namespace ceres +#include "ceres/internal/reenable_warnings.h" + #endif // CERES_PUBLIC_GRADIENT_PROBLEM_H_ diff --git a/extern/ceres/include/ceres/gradient_problem_solver.h b/extern/ceres/include/ceres/gradient_problem_solver.h index 9fab62e6d94..b6290c80c28 100644 --- a/extern/ceres/include/ceres/gradient_problem_solver.h +++ b/extern/ceres/include/ceres/gradient_problem_solver.h @@ -36,6 +36,7 @@ #include <vector> #include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/internal/port.h" #include "ceres/iteration_callback.h" #include "ceres/types.h" @@ -305,8 +306,12 @@ class CERES_EXPORT GradientProblemSolver { int num_parameters = -1; // Dimension of the 
tangent space of the problem. + CERES_DEPRECATED_WITH_MSG("Use num_tangent_parameters.") int num_local_parameters = -1; + // Dimension of the tangent space of the problem. + int num_tangent_parameters = -1; + // Type of line search direction used. LineSearchDirectionType line_search_direction_type = LBFGS; diff --git a/extern/ceres/include/ceres/internal/array_selector.h b/extern/ceres/include/ceres/internal/array_selector.h index 841797f4c69..b4db012f00b 100644 --- a/extern/ceres/include/ceres/internal/array_selector.h +++ b/extern/ceres/include/ceres/internal/array_selector.h @@ -73,20 +73,22 @@ struct ArraySelector<T, true, fits_on_stack> : ceres::internal::FixedArray<T, max_num_elements_on_stack> { - ArraySelector(int s) + explicit ArraySelector(int s) : ceres::internal::FixedArray<T, max_num_elements_on_stack>(s) {} }; template <typename T, int num_elements, int max_num_elements_on_stack> struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, true> : std::array<T, num_elements> { - ArraySelector(int s) { CHECK_EQ(s, num_elements); } + explicit ArraySelector(int s) { CHECK_EQ(s, num_elements); } }; template <typename T, int num_elements, int max_num_elements_on_stack> struct ArraySelector<T, num_elements, max_num_elements_on_stack, false, false> : std::vector<T> { - ArraySelector(int s) : std::vector<T>(s) { CHECK_EQ(s, num_elements); } + explicit ArraySelector(int s) : std::vector<T>(s) { + CHECK_EQ(s, num_elements); + } }; } // namespace internal diff --git a/extern/ceres/include/ceres/internal/autodiff.h b/extern/ceres/include/ceres/internal/autodiff.h index 9d7de758508..c796618cd2d 100644 --- a/extern/ceres/include/ceres/internal/autodiff.h +++ b/extern/ceres/include/ceres/internal/autodiff.h @@ -132,17 +132,16 @@ // respectively. This is how autodiff works for functors taking multiple vector // valued arguments (up to 6). 
// -// Jacobian NULL pointers -// ---------------------- -// In general, the functions below will accept NULL pointers for all or some of -// the Jacobian parameters, meaning that those Jacobians will not be computed. +// Jacobian null pointers (nullptr) +// -------------------------------- +// In general, the functions below will accept nullptr for all or some of the +// Jacobian parameters, meaning that those Jacobians will not be computed. #ifndef CERES_PUBLIC_INTERNAL_AUTODIFF_H_ #define CERES_PUBLIC_INTERNAL_AUTODIFF_H_ -#include <stddef.h> - #include <array> +#include <cstddef> #include <utility> #include "ceres/internal/array_selector.h" @@ -198,7 +197,7 @@ struct Make1stOrderPerturbation { template <int N, int Offset, typename T, typename JetT> struct Make1stOrderPerturbation<N, N, Offset, T, JetT> { public: - static void Apply(const T* src, JetT* dst) {} + static void Apply(const T* /* NOT USED */, JetT* /* NOT USED */) {} }; // Calls Make1stOrderPerturbation for every parameter block. 
@@ -311,7 +310,7 @@ inline bool AutoDifferentiate(const Functor& functor, int dynamic_num_outputs, T* function_value, T** jacobians) { - typedef Jet<T, ParameterDims::kNumParameters> JetT; + using JetT = Jet<T, ParameterDims::kNumParameters>; using Parameters = typename ParameterDims::Parameters; if (kNumResiduals != DYNAMIC) { diff --git a/extern/ceres/include/ceres/internal/eigen.h b/extern/ceres/include/ceres/internal/eigen.h index b6d0b7f610c..111cc7a07bb 100644 --- a/extern/ceres/include/ceres/internal/eigen.h +++ b/extern/ceres/include/ceres/internal/eigen.h @@ -35,39 +35,39 @@ namespace ceres { -typedef Eigen::Matrix<double, Eigen::Dynamic, 1> Vector; -typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> - Matrix; -typedef Eigen::Map<Vector> VectorRef; -typedef Eigen::Map<Matrix> MatrixRef; -typedef Eigen::Map<const Vector> ConstVectorRef; -typedef Eigen::Map<const Matrix> ConstMatrixRef; +using Vector = Eigen::Matrix<double, Eigen::Dynamic, 1>; +using Matrix = + Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; +using VectorRef = Eigen::Map<Vector>; +using MatrixRef = Eigen::Map<Matrix>; +using ConstVectorRef = Eigen::Map<const Vector>; +using ConstMatrixRef = Eigen::Map<const Matrix>; // Column major matrices for DenseSparseMatrix/DenseQRSolver -typedef Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor> - ColMajorMatrix; +using ColMajorMatrix = + Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::ColMajor>; -typedef Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>> - ColMajorMatrixRef; +using ColMajorMatrixRef = + Eigen::Map<ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>; -typedef Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>> - ConstColMajorMatrixRef; +using ConstColMajorMatrixRef = + Eigen::Map<const ColMajorMatrix, 0, Eigen::Stride<Eigen::Dynamic, 1>>; // C++ does not support templated typdefs, thus the need for this // struct so that 
we can support statically sized Matrix and Maps. template <int num_rows = Eigen::Dynamic, int num_cols = Eigen::Dynamic> struct EigenTypes { - typedef Eigen::Matrix<double, - num_rows, - num_cols, - num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor> - Matrix; + using Matrix = + Eigen::Matrix<double, + num_rows, + num_cols, + num_cols == 1 ? Eigen::ColMajor : Eigen::RowMajor>; - typedef Eigen::Map<Matrix> MatrixRef; - typedef Eigen::Map<const Matrix> ConstMatrixRef; - typedef Eigen::Matrix<double, num_rows, 1> Vector; - typedef Eigen::Map<Eigen::Matrix<double, num_rows, 1>> VectorRef; - typedef Eigen::Map<const Eigen::Matrix<double, num_rows, 1>> ConstVectorRef; + using MatrixRef = Eigen::Map<Matrix>; + using ConstMatrixRef = Eigen::Map<const Matrix>; + using Vector = Eigen::Matrix<double, num_rows, 1>; + using VectorRef = Eigen::Map<Eigen::Matrix<double, num_rows, 1>>; + using ConstVectorRef = Eigen::Map<const Eigen::Matrix<double, num_rows, 1>>; }; } // namespace ceres diff --git a/extern/ceres/include/ceres/internal/householder_vector.h b/extern/ceres/include/ceres/internal/householder_vector.h index 55f68e526a0..7700208be22 100644 --- a/extern/ceres/include/ceres/internal/householder_vector.h +++ b/extern/ceres/include/ceres/internal/householder_vector.h @@ -82,6 +82,14 @@ void ComputeHouseholderVector(const XVectorType& x, v->head(v->rows() - 1) /= v_pivot; } +template <typename XVectorType, typename Derived> +typename Derived::PlainObject ApplyHouseholderVector( + const XVectorType& y, + const Eigen::MatrixBase<Derived>& v, + const typename Derived::Scalar& beta) { + return (y - v * (beta * (v.transpose() * y))); +} + } // namespace internal } // namespace ceres diff --git a/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h index 8c0f3bc8ac4..777c119a77f 100644 --- a/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h +++ 
b/extern/ceres/include/ceres/internal/integer_sequence_algorithm.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2018 Google Inc. All rights reserved. +// Copyright 2022 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -27,6 +27,7 @@ // POSSIBILITY OF SUCH DAMAGE. // // Author: jodebo_beck@gmx.de (Johannes Beck) +// sergiu.deitsch@gmail.com (Sergiu Deitsch) // // Algorithms to be used together with integer_sequence, like computing the sum // or the exclusive scan (sometimes called exclusive prefix sum) at compile @@ -37,6 +38,8 @@ #include <utility> +#include "ceres/jet_fwd.h" + namespace ceres { namespace internal { @@ -164,6 +167,124 @@ class ExclusiveScanT { template <typename Seq> using ExclusiveScan = typename ExclusiveScanT<Seq>::Type; +// Removes all elements from a integer sequence corresponding to specified +// ValueToRemove. +// +// This type should not be used directly but instead RemoveValue. +template <typename T, T ValueToRemove, typename... Sequence> +struct RemoveValueImpl; + +// Final filtered sequence +template <typename T, T ValueToRemove, T... Values> +struct RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T, Values...>, + std::integer_sequence<T>> { + using type = std::integer_sequence<T, Values...>; +}; + +// Found a matching value +template <typename T, T ValueToRemove, T... Head, T... Tail> +struct RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T, Head...>, + std::integer_sequence<T, ValueToRemove, Tail...>> + : RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T, Head...>, + std::integer_sequence<T, Tail...>> {}; + +// Move one element from the tail to the head +template <typename T, T ValueToRemove, T... Head, T MiddleValue, T... 
Tail> +struct RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T, Head...>, + std::integer_sequence<T, MiddleValue, Tail...>> + : RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T, Head..., MiddleValue>, + std::integer_sequence<T, Tail...>> {}; + +// Start recursion by splitting the integer sequence into two separate ones +template <typename T, T ValueToRemove, T... Tail> +struct RemoveValueImpl<T, ValueToRemove, std::integer_sequence<T, Tail...>> + : RemoveValueImpl<T, + ValueToRemove, + std::integer_sequence<T>, + std::integer_sequence<T, Tail...>> {}; + +// RemoveValue takes an integer Sequence of arbitrary type and removes all +// elements matching ValueToRemove. +// +// In contrast to RemoveValueImpl, this implementation deduces the value type +// eliminating the need to specify it explicitly. +// +// As an example, RemoveValue<std::integer_sequence<int, 1, 2, 3>, 4>::type will +// not transform the type of the original sequence. However, +// RemoveValue<std::integer_sequence<int, 0, 0, 2>, 2>::type will generate a new +// sequence of type std::integer_sequence<int, 0, 0> by removing the value 2. +template <typename Sequence, typename Sequence::value_type ValueToRemove> +struct RemoveValue + : RemoveValueImpl<typename Sequence::value_type, ValueToRemove, Sequence> { +}; + +// Convenience template alias for RemoveValue. +template <typename Sequence, typename Sequence::value_type ValueToRemove> +using RemoveValue_t = typename RemoveValue<Sequence, ValueToRemove>::type; + +// Determines whether the values of an integer sequence are all the same. +// +// The integer sequence must contain at least one value. The predicate is +// undefined for empty sequences. The evaluation result of the predicate for a +// sequence containing only one value is defined to be true. +template <typename... Sequence> +struct AreAllEqual; + +// The predicate result for a sequence containing one element is defined to be +// true. 
+template <typename T, T Value> +struct AreAllEqual<std::integer_sequence<T, Value>> : std::true_type {}; + +// Recursion end. +template <typename T, T Value1, T Value2> +struct AreAllEqual<std::integer_sequence<T, Value1, Value2>> + : std::integral_constant<bool, Value1 == Value2> {}; + +// Recursion for sequences containing at least two elements. +template <typename T, T Value1, T Value2, T... Values> +// clang-format off +struct AreAllEqual<std::integer_sequence<T, Value1, Value2, Values...> > + : std::integral_constant +< + bool, + AreAllEqual<std::integer_sequence<T, Value1, Value2> >::value && + AreAllEqual<std::integer_sequence<T, Value2, Values...> >::value +> +// clang-format on +{}; + +// Convenience variable template for AreAllEqual. +template <class Sequence> +constexpr bool AreAllEqual_v = AreAllEqual<Sequence>::value; + +// Predicate determining whether an integer sequence is either empty or all +// values are equal. +template <typename Sequence> +struct IsEmptyOrAreAllEqual; + +// Empty case. +template <typename T> +struct IsEmptyOrAreAllEqual<std::integer_sequence<T>> : std::true_type {}; + +// General case for sequences containing at least one value. +template <typename T, T HeadValue, T... Values> +struct IsEmptyOrAreAllEqual<std::integer_sequence<T, HeadValue, Values...>> + : AreAllEqual<std::integer_sequence<T, HeadValue, Values...>> {}; + +// Convenience variable template for IsEmptyOrAreAllEqual. +template <class Sequence> +constexpr bool IsEmptyOrAreAllEqual_v = IsEmptyOrAreAllEqual<Sequence>::value; + } // namespace internal } // namespace ceres diff --git a/extern/ceres/include/ceres/internal/jet_traits.h b/extern/ceres/include/ceres/internal/jet_traits.h new file mode 100644 index 00000000000..2a38c05b7da --- /dev/null +++ b/extern/ceres/include/ceres/internal/jet_traits.h @@ -0,0 +1,223 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch) +// + +#ifndef CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ +#define CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ + +#include <tuple> +#include <type_traits> +#include <utility> + +#include "ceres/internal/integer_sequence_algorithm.h" +#include "ceres/jet_fwd.h" + +namespace ceres { +namespace internal { + +// Predicate that determines whether T is a Jet. 
+template <typename T, typename E = void> +struct IsJet : std::false_type {}; + +template <typename T, int N> +struct IsJet<Jet<T, N>> : std::true_type {}; + +// Convenience variable template for IsJet. +template <typename T> +constexpr bool IsJet_v = IsJet<T>::value; + +// Predicate that determines whether any of the Types is a Jet. +template <typename... Types> +struct AreAnyJet : std::false_type {}; + +template <typename T, typename... Types> +struct AreAnyJet<T, Types...> : AreAnyJet<Types...> {}; + +template <typename T, int N, typename... Types> +struct AreAnyJet<Jet<T, N>, Types...> : std::true_type {}; + +// Convenience variable template for AreAnyJet. +template <typename... Types> +constexpr bool AreAnyJet_v = AreAnyJet<Types...>::value; + +// Extracts the underlying floating-point from a type T. +template <typename T, typename E = void> +struct UnderlyingScalar { + using type = T; +}; + +template <typename T, int N> +struct UnderlyingScalar<Jet<T, N>> : UnderlyingScalar<T> {}; + +// Convenience template alias for UnderlyingScalar type trait. +template <typename T> +using UnderlyingScalar_t = typename UnderlyingScalar<T>::type; + +// Predicate determining whether all Types in the pack are the same. +// +// Specifically, the predicate applies std::is_same recursively to pairs of +// Types in the pack. +// +// The predicate is defined only for template packs containing at least two +// types. +template <typename T1, typename T2, typename... Types> +// clang-format off +struct AreAllSame : std::integral_constant +< + bool, + AreAllSame<T1, T2>::value && + AreAllSame<T2, Types...>::value +> +// clang-format on +{}; + +// AreAllSame pairwise test. +template <typename T1, typename T2> +struct AreAllSame<T1, T2> : std::is_same<T1, T2> {}; + +// Convenience variable template for AreAllSame. +template <typename... Types> +constexpr bool AreAllSame_v = AreAllSame<Types...>::value; + +// Determines the rank of a type. 
This allows to ensure that types passed as +// arguments are compatible to each other. The rank of Jet is determined by the +// dimensions of the dual part. The rank of a scalar is always 0. +// Non-specialized types default to a rank of -1. +template <typename T, typename E = void> +struct Rank : std::integral_constant<int, -1> {}; + +// The rank of a scalar is 0. +template <typename T> +struct Rank<T, std::enable_if_t<std::is_scalar<T>::value>> + : std::integral_constant<int, 0> {}; + +// The rank of a Jet is given by its dimensionality. +template <typename T, int N> +struct Rank<Jet<T, N>> : std::integral_constant<int, N> {}; + +// Convenience variable template for Rank. +template <typename T> +constexpr int Rank_v = Rank<T>::value; + +// Constructs an integer sequence of ranks for each of the Types in the pack. +template <typename... Types> +using Ranks_t = std::integer_sequence<int, Rank_v<Types>...>; + +// Returns the scalar part of a type. This overload acts as an identity. +template <typename T> +constexpr decltype(auto) AsScalar(T&& value) noexcept { + return std::forward<T>(value); +} + +// Recursively unwraps the scalar part of a Jet until a non-Jet scalar type is +// encountered. +template <typename T, int N> +constexpr decltype(auto) AsScalar(const Jet<T, N>& value) noexcept( + noexcept(AsScalar(value.a))) { + return AsScalar(value.a); +} + +} // namespace internal + +// Type trait ensuring at least one of the types is a Jet, +// the underlying scalar types are the same and Jet dimensions match. +// +// The type trait can be further specialized if necessary. +// +// This trait is a candidate for a concept definition once C++20 features can +// be used. +template <typename... 
Types> +// clang-format off +struct CompatibleJetOperands : std::integral_constant +< + bool, + // At least one of the types is a Jet + internal::AreAnyJet_v<Types...> && + // The underlying floating-point types are exactly the same + internal::AreAllSame_v<internal::UnderlyingScalar_t<Types>...> && + // Non-zero ranks of types are equal + internal::IsEmptyOrAreAllEqual_v<internal::RemoveValue_t<internal::Ranks_t<Types...>, 0>> +> +// clang-format on +{}; + +// Single Jet operand is always compatible. +template <typename T, int N> +struct CompatibleJetOperands<Jet<T, N>> : std::true_type {}; + +// Single non-Jet operand is always incompatible. +template <typename T> +struct CompatibleJetOperands<T> : std::false_type {}; + +// Empty operands are always incompatible. +template <> +struct CompatibleJetOperands<> : std::false_type {}; + +// Convenience variable template ensuring at least one of the types is a Jet, +// the underlying scalar types are the same and Jet dimensions match. +// +// This trait is a candidate for a concept definition once C++20 features can +// be used. +template <typename... Types> +constexpr bool CompatibleJetOperands_v = CompatibleJetOperands<Types...>::value; + +// Type trait ensuring at least one of the types is a Jet, +// the underlying scalar types are compatible among each other and Jet +// dimensions match. +// +// The type trait can be further specialized if necessary. +// +// This trait is a candidate for a concept definition once C++20 features can +// be used. +template <typename... 
Types> +// clang-format off +struct PromotableJetOperands : std::integral_constant +< + bool, + // Types can be compatible among each other + internal::AreAnyJet_v<Types...> && + // Non-zero ranks of types are equal + internal::IsEmptyOrAreAllEqual_v<internal::RemoveValue_t<internal::Ranks_t<Types...>, 0>> +> +// clang-format on +{}; + +// Convenience variable template ensuring at least one of the types is a Jet, +// the underlying scalar types are compatible among each other and Jet +// dimensions match. +// +// This trait is a candidate for a concept definition once C++20 features can +// be used. +template <typename... Types> +constexpr bool PromotableJetOperands_v = PromotableJetOperands<Types...>::value; + +} // namespace ceres + +#endif // CERES_PUBLIC_INTERNAL_JET_TRAITS_H_ diff --git a/extern/ceres/include/ceres/internal/numeric_diff.h b/extern/ceres/include/ceres/internal/numeric_diff.h index ff7a2c345e4..351845c05fb 100644 --- a/extern/ceres/include/ceres/internal/numeric_diff.h +++ b/extern/ceres/include/ceres/internal/numeric_diff.h @@ -86,18 +86,18 @@ struct NumericDiff { (kParameterBlockSize != ceres::DYNAMIC ? kParameterBlockSize : parameter_block_size); - typedef Matrix<double, kNumResiduals, 1> ResidualVector; - typedef Matrix<double, kParameterBlockSize, 1> ParameterVector; + using ResidualVector = Matrix<double, kNumResiduals, 1>; + using ParameterVector = Matrix<double, kParameterBlockSize, 1>; // The convoluted reasoning for choosing the Row/Column major // ordering of the matrix is an artifact of the restrictions in // Eigen that prevent it from creating RowMajor matrices with a // single column. In these cases, we ask for a ColMajor matrix. - typedef Matrix<double, - kNumResiduals, - kParameterBlockSize, - (kParameterBlockSize == 1) ? ColMajor : RowMajor> - JacobianMatrix; + using JacobianMatrix = + Matrix<double, + kNumResiduals, + kParameterBlockSize, + (kParameterBlockSize == 1) ? 
ColMajor : RowMajor>; Map<JacobianMatrix> parameter_jacobian( jacobian, num_residuals_internal, parameter_block_size_internal); @@ -121,7 +121,7 @@ struct NumericDiff { // thus ridders_relative_initial_step_size is used. if (kMethod == RIDDERS) { min_step_size = - std::max(min_step_size, options.ridders_relative_initial_step_size); + (std::max)(min_step_size, options.ridders_relative_initial_step_size); } // For each parameter in the parameter block, use finite differences to @@ -132,7 +132,7 @@ struct NumericDiff { num_residuals_internal); for (int j = 0; j < parameter_block_size_internal; ++j) { - const double delta = std::max(min_step_size, step_size(j)); + const double delta = (std::max)(min_step_size, step_size(j)); if (kMethod == RIDDERS) { if (!EvaluateRiddersJacobianColumn(functor, @@ -184,8 +184,8 @@ struct NumericDiff { using Eigen::Map; using Eigen::Matrix; - typedef Matrix<double, kNumResiduals, 1> ResidualVector; - typedef Matrix<double, kParameterBlockSize, 1> ParameterVector; + using ResidualVector = Matrix<double, kNumResiduals, 1>; + using ParameterVector = Matrix<double, kParameterBlockSize, 1>; Map<const ParameterVector> x(x_ptr, parameter_block_size); Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size); @@ -260,10 +260,10 @@ struct NumericDiff { using Eigen::Map; using Eigen::Matrix; - typedef Matrix<double, kNumResiduals, 1> ResidualVector; - typedef Matrix<double, kNumResiduals, Eigen::Dynamic> - ResidualCandidateMatrix; - typedef Matrix<double, kParameterBlockSize, 1> ParameterVector; + using ResidualVector = Matrix<double, kNumResiduals, 1>; + using ResidualCandidateMatrix = + Matrix<double, kNumResiduals, Eigen::Dynamic>; + using ParameterVector = Matrix<double, kParameterBlockSize, 1>; Map<const ParameterVector> x(x_ptr, parameter_block_size); Map<ParameterVector> x_plus_delta(x_plus_delta_ptr, parameter_block_size); @@ -296,7 +296,7 @@ struct NumericDiff { // norm_error is supposed to decrease as the finite 
difference tableau // generation progresses, serving both as an estimate for differentiation // error and as a measure of differentiation numerical stability. - double norm_error = std::numeric_limits<double>::max(); + double norm_error = (std::numeric_limits<double>::max)(); // Loop over decreasing step sizes until: // 1. Error is smaller than a given value (ridders_epsilon), @@ -342,7 +342,7 @@ struct NumericDiff { options.ridders_step_shrink_factor; // Compute the difference between the previous value and the current. - double candidate_error = std::max( + double candidate_error = (std::max)( (current_candidates->col(k) - current_candidates->col(k - 1)) .norm(), (current_candidates->col(k) - previous_candidates->col(k - 1)) diff --git a/extern/ceres/include/ceres/internal/port.h b/extern/ceres/include/ceres/internal/port.h index 040a1efba02..4275b0e15c3 100644 --- a/extern/ceres/include/ceres/internal/port.h +++ b/extern/ceres/include/ceres/internal/port.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2015 Google Inc. All rights reserved. +// Copyright 2022 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -31,80 +31,58 @@ #ifndef CERES_PUBLIC_INTERNAL_PORT_H_ #define CERES_PUBLIC_INTERNAL_PORT_H_ -// This file needs to compile as c code. 
-#include "ceres/internal/config.h" - -#if defined(CERES_USE_OPENMP) -#if defined(CERES_USE_CXX_THREADS) || defined(CERES_NO_THREADS) -#error CERES_USE_OPENMP is mutually exclusive to CERES_USE_CXX_THREADS and CERES_NO_THREADS -#endif -#elif defined(CERES_USE_CXX_THREADS) -#if defined(CERES_USE_OPENMP) || defined(CERES_NO_THREADS) -#error CERES_USE_CXX_THREADS is mutually exclusive to CERES_USE_OPENMP, CERES_USE_CXX_THREADS and CERES_NO_THREADS -#endif -#elif defined(CERES_NO_THREADS) -#if defined(CERES_USE_OPENMP) || defined(CERES_USE_CXX_THREADS) -#error CERES_NO_THREADS is mutually exclusive to CERES_USE_OPENMP and CERES_USE_CXX_THREADS -#endif -#else -# error One of CERES_USE_OPENMP, CERES_USE_CXX_THREADS or CERES_NO_THREADS must be defined. -#endif - -// CERES_NO_SPARSE should be automatically defined by config.h if Ceres was -// compiled without any sparse back-end. Verify that it has not subsequently -// been inconsistently redefined. -#if defined(CERES_NO_SPARSE) -#if !defined(CERES_NO_SUITESPARSE) -#error CERES_NO_SPARSE requires CERES_NO_SUITESPARSE. -#endif -#if !defined(CERES_NO_CXSPARSE) -#error CERES_NO_SPARSE requires CERES_NO_CXSPARSE -#endif -#if !defined(CERES_NO_ACCELERATE_SPARSE) -#error CERES_NO_SPARSE requires CERES_NO_ACCELERATE_SPARSE -#endif -#if defined(CERES_USE_EIGEN_SPARSE) -#error CERES_NO_SPARSE requires !CERES_USE_EIGEN_SPARSE -#endif -#endif - -// A macro to signal which functions and classes are exported when -// building a shared library. +// A macro to mark a function/variable/class as deprecated. +// We use compiler specific attributes rather than the c++ +// attribute because they do not mix well with each other. 
#if defined(_MSC_VER) -#define CERES_API_SHARED_IMPORT __declspec(dllimport) -#define CERES_API_SHARED_EXPORT __declspec(dllexport) +#define CERES_DEPRECATED_WITH_MSG(message) __declspec(deprecated(message)) #elif defined(__GNUC__) -#define CERES_API_SHARED_IMPORT __attribute__((visibility("default"))) -#define CERES_API_SHARED_EXPORT __attribute__((visibility("default"))) +#define CERES_DEPRECATED_WITH_MSG(message) __attribute__((deprecated(message))) #else -#define CERES_API_SHARED_IMPORT -#define CERES_API_SHARED_EXPORT +// In the worst case fall back to c++ attribute. +#define CERES_DEPRECATED_WITH_MSG(message) [[deprecated(message)]] #endif -// CERES_BUILDING_SHARED_LIBRARY is only defined locally when Ceres itself is -// compiled as a shared library, it is never exported to users. In order that -// we do not have to configure config.h separately when building Ceres as either -// a static or dynamic library, we define both CERES_USING_SHARED_LIBRARY and -// CERES_BUILDING_SHARED_LIBRARY when building as a shared library. -#if defined(CERES_USING_SHARED_LIBRARY) -#if defined(CERES_BUILDING_SHARED_LIBRARY) -// Compiling Ceres itself as a shared library. -#define CERES_EXPORT CERES_API_SHARED_EXPORT -#else -// Using Ceres as a shared library. -#define CERES_EXPORT CERES_API_SHARED_IMPORT -#endif -#else -// Ceres was compiled as a static library, export everything. 
-#define CERES_EXPORT +#ifndef CERES_GET_FLAG +#define CERES_GET_FLAG(X) X #endif -// Unit tests reach in and test internal functionality so we need a way to make -// those symbols visible -#ifdef CERES_EXPORT_INTERNAL_SYMBOLS -#define CERES_EXPORT_INTERNAL CERES_EXPORT -#else -#define CERES_EXPORT_INTERNAL -#endif +// Indicates whether C++17 is currently active +#ifndef CERES_HAS_CPP17 +#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) +#define CERES_HAS_CPP17 +#endif // __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= + // 201703L) +#endif // !defined(CERES_HAS_CPP17) + +// Indicates whether C++20 is currently active +#ifndef CERES_HAS_CPP20 +#if __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >= 202002L) +#define CERES_HAS_CPP20 +#endif // __cplusplus >= 202002L || (defined(_MSVC_LANG) && _MSVC_LANG >= + // 202002L) +#endif // !defined(CERES_HAS_CPP20) + +// Prevents symbols from being substituted by the corresponding macro definition +// under the same name. For instance, min and max are defined as macros on +// Windows (unless NOMINMAX is defined) which causes compilation errors when +// defining or referencing symbols under the same name. +// +// To be robust in all cases particularly when NOMINMAX cannot be used, use this +// macro to annotate min/max declarations/definitions. Examples: +// +// int max CERES_PREVENT_MACRO_SUBSTITUTION(); +// min CERES_PREVENT_MACRO_SUBSTITUTION(a, b); +// max CERES_PREVENT_MACRO_SUBSTITUTION(a, b); +// +// NOTE: In case the symbols for which the substitution must be prevented are +// used within another macro, the substitution must be inhibited using parens as +// +// (std::numerical_limits<double>::max)() +// +// since the helper macro will not work here. Do not use this technique in +// general case, because it will prevent argument-dependent lookup (ADL). 
+// +#define CERES_PREVENT_MACRO_SUBSTITUTION // Yes, it's empty #endif // CERES_PUBLIC_INTERNAL_PORT_H_ diff --git a/extern/ceres/include/ceres/internal/sphere_manifold_functions.h b/extern/ceres/include/ceres/internal/sphere_manifold_functions.h new file mode 100644 index 00000000000..5be3321a579 --- /dev/null +++ b/extern/ceres/include/ceres/internal/sphere_manifold_functions.h @@ -0,0 +1,162 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Mike Vitus) +// jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_ +#define CERES_PUBLIC_INTERNAL_SPHERE_MANIFOLD_HELPERS_H_ + +#include "ceres/internal/householder_vector.h" + +// This module contains functions to compute the SphereManifold plus and minus +// operator and their Jacobians. +// +// As the parameters to these functions are shared between them, they are +// described here: The following variable names are used: +// Plus(x, delta) = x + delta = x_plus_delta, +// Minus(y, x) = y - x = y_minus_x. +// +// The remaining ones are v and beta which describe the Householder +// transformation of x, and norm_delta which is the norm of delta. +// +// The types of x, y, x_plus_delta and y_minus_x need to be equivalent to +// Eigen::Matrix<double, AmbientSpaceDimension, 1> and the type of delta needs +// to be equivalent to Eigen::Matrix<double, TangentSpaceDimension, 1>. +// +// The type of Jacobian plus needs to be equivalent to Eigen::Matrix<double, +// AmbientSpaceDimension, TangentSpaceDimension, Eigen::RowMajor> and for +// Jacobian minus Eigen::Matrix<double, TangentSpaceDimension, +// AmbientSpaceDimension, Eigen::RowMajor>. +// +// For all vector / matrix inputs and outputs, template parameters are +// used in order to allow also Eigen::Ref and Eigen block expressions to +// be passed to the function. 
+ +namespace ceres { +namespace internal { + +template <typename VT, typename XT, typename DeltaT, typename XPlusDeltaT> +inline void ComputeSphereManifoldPlus(const VT& v, + double beta, + const XT& x, + const DeltaT& delta, + double norm_delta, + XPlusDeltaT* x_plus_delta) { + constexpr int AmbientDim = VT::RowsAtCompileTime; + + // Map the delta from the minimum representation to the over parameterized + // homogeneous vector. See B.2 p.25 equation (106) - (107) for more details. + const double norm_delta_div_2 = 0.5 * norm_delta; + const double sin_delta_by_delta = + std::sin(norm_delta_div_2) / norm_delta_div_2; + + Eigen::Matrix<double, AmbientDim, 1> y(v.size()); + y << 0.5 * sin_delta_by_delta * delta, std::cos(norm_delta_div_2); + + // Apply the delta update to remain on the sphere. + *x_plus_delta = x.norm() * ApplyHouseholderVector(y, v, beta); +} + +template <typename VT, typename JacobianT> +inline void ComputeSphereManifoldPlusJacobian(const VT& x, + JacobianT* jacobian) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>; + const int ambient_size = x.size(); + const int tangent_size = x.size() - 1; + + AmbientVector v(ambient_size); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + ComputeHouseholderVector<VT, double, AmbientSpaceDim>(x, &v, &beta); + + // The Jacobian is equal to J = 0.5 * H.leftCols(size_ - 1) where H is the + // Householder matrix (H = I - beta * v * v'). 
+ for (int i = 0; i < tangent_size; ++i) { + (*jacobian).col(i) = -0.5 * beta * v(i) * v; + (*jacobian)(i, i) += 0.5; + } + (*jacobian) *= x.norm(); +} + +template <typename VT, typename XT, typename YT, typename YMinusXT> +inline void ComputeSphereManifoldMinus( + const VT& v, double beta, const XT& x, const YT& y, YMinusXT* y_minus_x) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + constexpr int TangentSpaceDim = + AmbientSpaceDim == Eigen::Dynamic ? Eigen::Dynamic : AmbientSpaceDim - 1; + using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>; + + const int tanget_size = v.size() - 1; + + const AmbientVector hy = ApplyHouseholderVector(y, v, beta) / x.norm(); + + // Calculate y - x. See B.2 p.25 equation (108). + double y_last = hy[tanget_size]; + double hy_norm = hy.template head<TangentSpaceDim>(tanget_size).norm(); + if (hy_norm == 0.0) { + y_minus_x->setZero(); + } else { + *y_minus_x = 2.0 * std::atan2(hy_norm, y_last) / hy_norm * + hy.template head<TangentSpaceDim>(tanget_size); + } +} + +template <typename VT, typename JacobianT> +inline void ComputeSphereManifoldMinusJacobian(const VT& x, + JacobianT* jacobian) { + constexpr int AmbientSpaceDim = VT::RowsAtCompileTime; + using AmbientVector = Eigen::Matrix<double, AmbientSpaceDim, 1>; + const int ambient_size = x.size(); + const int tangent_size = x.size() - 1; + + AmbientVector v(ambient_size); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + ComputeHouseholderVector<VT, double, AmbientSpaceDim>(x, &v, &beta); + + // The Jacobian is equal to J = 2.0 * H.leftCols(size_ - 1) where H is the + // Householder matrix (H = I - beta * v * v'). 
+ for (int i = 0; i < tangent_size; ++i) { + (*jacobian).row(i) = -2.0 * beta * v(i) * v; + (*jacobian)(i, i) += 2.0; + } + (*jacobian) /= x.norm(); +} + +} // namespace internal +} // namespace ceres + +#endif diff --git a/extern/ceres/include/ceres/internal/variadic_evaluate.h b/extern/ceres/include/ceres/internal/variadic_evaluate.h index 47ff6b18fa0..b8408237cc3 100644 --- a/extern/ceres/include/ceres/internal/variadic_evaluate.h +++ b/extern/ceres/include/ceres/internal/variadic_evaluate.h @@ -33,8 +33,7 @@ #ifndef CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ #define CERES_PUBLIC_INTERNAL_VARIADIC_EVALUATE_H_ -#include <stddef.h> - +#include <cstddef> #include <type_traits> #include <utility> diff --git a/extern/ceres/include/ceres/iteration_callback.h b/extern/ceres/include/ceres/iteration_callback.h index 4507fdf748c..3d7e8e94f30 100644 --- a/extern/ceres/include/ceres/iteration_callback.h +++ b/extern/ceres/include/ceres/iteration_callback.h @@ -36,6 +36,7 @@ #define CERES_PUBLIC_ITERATION_CALLBACK_H_ #include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/types.h" namespace ceres { @@ -164,8 +165,6 @@ struct CERES_EXPORT IterationSummary { // explicit LoggingCallback(bool log_to_stdout) // : log_to_stdout_(log_to_stdout) {} // -// ~LoggingCallback() {} -// // CallbackReturnType operator()(const IterationSummary& summary) { // const char* kReportRowFormat = // "% 4d: f:% 8e d:% 3.2e g:% 3.2e h:% 3.2e " @@ -194,7 +193,7 @@ struct CERES_EXPORT IterationSummary { // class CERES_EXPORT IterationCallback { public: - virtual ~IterationCallback() {} + virtual ~IterationCallback(); virtual CallbackReturnType operator()(const IterationSummary& summary) = 0; }; diff --git a/extern/ceres/include/ceres/jet.h b/extern/ceres/include/ceres/jet.h index da49f32019f..fba1e2ab6e0 100644 --- a/extern/ceres/include/ceres/jet.h +++ b/extern/ceres/include/ceres/jet.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer 
-// Copyright 2019 Google Inc. All rights reserved. +// Copyright 2022 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -158,20 +158,59 @@ #define CERES_PUBLIC_JET_H_ #include <cmath> +#include <complex> #include <iosfwd> #include <iostream> // NOLINT #include <limits> +#include <numeric> #include <string> +#include <type_traits> #include "Eigen/Core" +#include "ceres/internal/jet_traits.h" #include "ceres/internal/port.h" +#include "ceres/jet_fwd.h" + +// Here we provide partial specializations of std::common_type for the Jet class +// to allow determining a Jet type with a common underlying arithmetic type. +// Such an arithmetic type can be either a scalar or an another Jet. An example +// for a common type, say, between a float and a Jet<double, N> is a Jet<double, +// N> (i.e., std::common_type_t<float, ceres::Jet<double, N>> and +// ceres::Jet<double, N> refer to the same type.) +// +// The partial specialization are also used for determining compatible types by +// means of SFINAE and thus allow such types to be expressed as operands of +// logical comparison operators. Missing (partial) specialization of +// std::common_type for a particular (custom) type will therefore disable the +// use of comparison operators defined by Ceres. +// +// Since these partial specializations are used as SFINAE constraints, they +// enable standard promotion rules between various scalar types and consequently +// their use in comparison against a Jet without providing implicit +// conversions from a scalar, such as an int, to a Jet (see the implementation +// of logical comparison operators below). 
+ +template <typename T, int N, typename U> +struct std::common_type<T, ceres::Jet<U, N>> { + using type = ceres::Jet<common_type_t<T, U>, N>; +}; + +template <typename T, int N, typename U> +struct std::common_type<ceres::Jet<T, N>, U> { + using type = ceres::Jet<common_type_t<T, U>, N>; +}; + +template <typename T, int N, typename U> +struct std::common_type<ceres::Jet<T, N>, ceres::Jet<U, N>> { + using type = ceres::Jet<common_type_t<T, U>, N>; +}; namespace ceres { template <typename T, int N> struct Jet { enum { DIMENSION = N }; - typedef T Scalar; + using Scalar = T; // Default-construct "a" because otherwise this can lead to false errors about // uninitialized uses when other classes relying on default constructed T @@ -352,19 +391,21 @@ inline Jet<T, N> operator/(const Jet<T, N>& f, T s) { return Jet<T, N>(f.a * s_inverse, f.v * s_inverse); } -// Binary comparison operators for both scalars and jets. -#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \ - template <typename T, int N> \ - inline bool operator op(const Jet<T, N>& f, const Jet<T, N>& g) { \ - return f.a op g.a; \ - } \ - template <typename T, int N> \ - inline bool operator op(const T& s, const Jet<T, N>& g) { \ - return s op g.a; \ - } \ - template <typename T, int N> \ - inline bool operator op(const Jet<T, N>& f, const T& s) { \ - return f.a op s; \ +// Binary comparison operators for both scalars and jets. At least one of the +// operands must be a Jet. Promotable scalars (e.g., int, float, double etc.) +// can appear on either side of the operator. std::common_type_t is used as an +// SFINAE constraint to selectively enable compatible operand types. This allows +// comparison, for instance, against int literals without implicit conversion. +// In case the Jet arithmetic type is a Jet itself, a recursive expansion of Jet +// value is performed. 
+#define CERES_DEFINE_JET_COMPARISON_OPERATOR(op) \ + template <typename Lhs, \ + typename Rhs, \ + std::enable_if_t<PromotableJetOperands_v<Lhs, Rhs>>* = nullptr> \ + constexpr bool operator op(const Lhs& f, const Rhs& g) noexcept( \ + noexcept(internal::AsScalar(f) op internal::AsScalar(g))) { \ + using internal::AsScalar; \ + return AsScalar(f) op AsScalar(g); \ } CERES_DEFINE_JET_COMPARISON_OPERATOR(<) // NOLINT CERES_DEFINE_JET_COMPARISON_OPERATOR(<=) // NOLINT @@ -386,43 +427,138 @@ using std::atan; using std::atan2; using std::cbrt; using std::ceil; +using std::copysign; using std::cos; using std::cosh; using std::erf; using std::erfc; using std::exp; using std::exp2; +using std::expm1; +using std::fdim; using std::floor; +using std::fma; using std::fmax; using std::fmin; +using std::fpclassify; using std::hypot; using std::isfinite; using std::isinf; using std::isnan; using std::isnormal; using std::log; +using std::log10; +using std::log1p; using std::log2; +using std::norm; using std::pow; +using std::signbit; using std::sin; using std::sinh; using std::sqrt; using std::tan; using std::tanh; +// MSVC (up to 1930) defines quiet comparison functions as template functions +// which causes compilation errors due to ambiguity in the template parameter +// type resolution for using declarations in the ceres namespace. Workaround the +// issue by defining specific overload and bypass MSVC standard library +// definitions. 
+#if defined(_MSC_VER) +inline bool isgreater(double lhs, + double rhs) noexcept(noexcept(std::isgreater(lhs, rhs))) { + return std::isgreater(lhs, rhs); +} +inline bool isless(double lhs, + double rhs) noexcept(noexcept(std::isless(lhs, rhs))) { + return std::isless(lhs, rhs); +} +inline bool islessequal(double lhs, + double rhs) noexcept(noexcept(std::islessequal(lhs, + rhs))) { + return std::islessequal(lhs, rhs); +} +inline bool isgreaterequal(double lhs, double rhs) noexcept( + noexcept(std::isgreaterequal(lhs, rhs))) { + return std::isgreaterequal(lhs, rhs); +} +inline bool islessgreater(double lhs, double rhs) noexcept( + noexcept(std::islessgreater(lhs, rhs))) { + return std::islessgreater(lhs, rhs); +} +inline bool isunordered(double lhs, + double rhs) noexcept(noexcept(std::isunordered(lhs, + rhs))) { + return std::isunordered(lhs, rhs); +} +#else +using std::isgreater; +using std::isgreaterequal; +using std::isless; +using std::islessequal; +using std::islessgreater; +using std::isunordered; +#endif + +#ifdef CERES_HAS_CPP20 +using std::lerp; +using std::midpoint; +#endif // defined(CERES_HAS_CPP20) + // Legacy names from pre-C++11 days. // clang-format off +CERES_DEPRECATED_WITH_MSG("ceres::IsFinite will be removed in a future Ceres Solver release. Please use ceres::isfinite.") inline bool IsFinite(double x) { return std::isfinite(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsInfinite will be removed in a future Ceres Solver release. Please use ceres::isinf.") inline bool IsInfinite(double x) { return std::isinf(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsNaN will be removed in a future Ceres Solver release. Please use ceres::isnan.") inline bool IsNaN(double x) { return std::isnan(x); } +CERES_DEPRECATED_WITH_MSG("ceres::IsNormal will be removed in a future Ceres Solver release. Please use ceres::isnormal.") inline bool IsNormal(double x) { return std::isnormal(x); } // clang-format on // In general, f(a + h) ~= f(a) + f'(a) h, via the chain rule. 
-// abs(x + h) ~= x + h or -(x + h) +// abs(x + h) ~= abs(x) + sgn(x)h template <typename T, int N> inline Jet<T, N> abs(const Jet<T, N>& f) { - return (f.a < T(0.0) ? -f : f); + return Jet<T, N>(abs(f.a), copysign(T(1), f.a) * f.v); +} + +// copysign(a, b) composes a float with the magnitude of a and the sign of b. +// Therefore, the function can be formally defined as +// +// copysign(a, b) = sgn(b)|a| +// +// where +// +// d/dx |x| = sgn(x) +// d/dx sgn(x) = 2δ(x) +// +// sgn(x) being the signum function. Differentiating copysign(a, b) with respect +// to a and b gives: +// +// d/da sgn(b)|a| = sgn(a) sgn(b) +// d/db sgn(b)|a| = 2|a|δ(b) +// +// with the dual representation given by +// +// copysign(a + da, b + db) ~= sgn(b)|a| + (sgn(a)sgn(b) da + 2|a|δ(b) db) +// +// where δ(b) is the Dirac delta function. +template <typename T, int N> +inline Jet<T, N> copysign(const Jet<T, N>& f, const Jet<T, N> g) { + // The Dirac delta function δ(b) is undefined at b=0 (here it's + // infinite) and 0 everywhere else. + T d = fpclassify(g) == FP_ZERO ? std::numeric_limits<T>::infinity() : T(0); + T sa = copysign(T(1), f.a); // sgn(a) + T sb = copysign(T(1), g.a); // sgn(b) + // The second part of the infinitesimal is 2|a|δ(b) which is either infinity + // or 0 unless a or any of the values of the b infinitesimal are 0. In the + // latter case, the corresponding values become NaNs (multiplying 0 by + // infinity gives NaN). We drop the constant factor 2 since it does not change + // the result (its values will still be either 0, infinity or NaN). + return Jet<T, N>(copysign(f.a, g.a), sa * sb * f.v + abs(f.a) * d * g.v); } // log(a + h) ~= log(a) + h / a @@ -432,6 +568,21 @@ inline Jet<T, N> log(const Jet<T, N>& f) { return Jet<T, N>(log(f.a), f.v * a_inverse); } +// log10(a + h) ~= log10(a) + h / (a log(10)) +template <typename T, int N> +inline Jet<T, N> log10(const Jet<T, N>& f) { + // Most compilers will expand log(10) to a constant. 
+ const T a_inverse = T(1.0) / (f.a * log(T(10.0))); + return Jet<T, N>(log10(f.a), f.v * a_inverse); +} + +// log1p(a + h) ~= log1p(a) + h / (1 + a) +template <typename T, int N> +inline Jet<T, N> log1p(const Jet<T, N>& f) { + const T a_inverse = T(1.0) / (T(1.0) + f.a); + return Jet<T, N>(log1p(f.a), f.v * a_inverse); +} + // exp(a + h) ~= exp(a) + exp(a) h template <typename T, int N> inline Jet<T, N> exp(const Jet<T, N>& f) { @@ -439,6 +590,14 @@ inline Jet<T, N> exp(const Jet<T, N>& f) { return Jet<T, N>(tmp, tmp * f.v); } +// expm1(a + h) ~= expm1(a) + exp(a) h +template <typename T, int N> +inline Jet<T, N> expm1(const Jet<T, N>& f) { + const T tmp = expm1(f.a); + const T expa = tmp + T(1.0); // exp(a) = expm1(a) + 1 + return Jet<T, N>(tmp, expa * f.v); +} + // sqrt(a + h) ~= sqrt(a) + h / (2 sqrt(a)) template <typename T, int N> inline Jet<T, N> sqrt(const Jet<T, N>& f) { @@ -565,29 +724,101 @@ inline Jet<T, N> hypot(const Jet<T, N>& x, const Jet<T, N>& y) { return Jet<T, N>(tmp, x.a / tmp * x.v + y.a / tmp * y.v); } +#ifdef CERES_HAS_CPP17 +// Like sqrt(x^2 + y^2 + z^2), +// but acts to prevent underflow/overflow for small/large x/y/z. +// Note that the function is non-smooth at x=y=z=0, +// so the derivative is undefined there. template <typename T, int N> -inline Jet<T, N> fmax(const Jet<T, N>& x, const Jet<T, N>& y) { - return x < y ? y : x; +inline Jet<T, N> hypot(const Jet<T, N>& x, + const Jet<T, N>& y, + const Jet<T, N>& z) { + // d/da sqrt(a) = 0.5 / sqrt(a) + // d/dx x^2 + y^2 + z^2 = 2x + // So by the chain rule: + // d/dx sqrt(x^2 + y^2 + z^2) + // = 0.5 / sqrt(x^2 + y^2 + z^2) * 2x + // = x / sqrt(x^2 + y^2 + z^2) + // d/dy sqrt(x^2 + y^2 + z^2) = y / sqrt(x^2 + y^2 + z^2) + // d/dz sqrt(x^2 + y^2 + z^2) = z / sqrt(x^2 + y^2 + z^2) + const T tmp = hypot(x.a, y.a, z.a); + return Jet<T, N>(tmp, x.a / tmp * x.v + y.a / tmp * y.v + z.a / tmp * z.v); } +#endif // defined(CERES_HAS_CPP17) +// Like x * y + z but rounded only once. 
template <typename T, int N> -inline Jet<T, N> fmin(const Jet<T, N>& x, const Jet<T, N>& y) { - return y < x ? y : x; +inline Jet<T, N> fma(const Jet<T, N>& x, + const Jet<T, N>& y, + const Jet<T, N>& z) { + // d/dx fma(x, y, z) = y + // d/dy fma(x, y, z) = x + // d/dz fma(x, y, z) = 1 + return Jet<T, N>(fma(x.a, y.a, z.a), y.a * x.v + x.a * y.v + z.v); +} + +// Returns the larger of the two arguments. NaNs are treated as missing data. +// +// NOTE: This function is NOT subject to any of the error conditions specified +// in `math_errhandling`. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline decltype(auto) fmax(const Lhs& f, const Rhs& g) { + using J = std::common_type_t<Lhs, Rhs>; + return (isnan(g) || isgreater(f, g)) ? J{f} : J{g}; +} + +// Returns the smaller of the two arguments. NaNs are treated as missing data. +// +// NOTE: This function is NOT subject to any of the error conditions specified +// in `math_errhandling`. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline decltype(auto) fmin(const Lhs& f, const Rhs& g) { + using J = std::common_type_t<Lhs, Rhs>; + return (isnan(f) || isless(g, f)) ? J{g} : J{f}; +} + +// Returns the positive difference (f - g) of two arguments and zero if f <= g. +// If at least one argument is NaN, a NaN is return. +// +// NOTE At least one of the argument types must be a Jet, the other one can be a +// scalar. In case both arguments are Jets, their dimensionality must match. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline decltype(auto) fdim(const Lhs& f, const Rhs& g) { + using J = std::common_type_t<Lhs, Rhs>; + if (isnan(f) || isnan(g)) { + return std::numeric_limits<J>::quiet_NaN(); + } + return isgreater(f, g) ? 
J{f - g} : J{}; } -// erf is defined as an integral that cannot be expressed analyticaly +// erf is defined as an integral that cannot be expressed analytically // however, the derivative is trivial to compute // erf(x + h) = erf(x) + h * 2*exp(-x^2)/sqrt(pi) template <typename T, int N> inline Jet<T, N> erf(const Jet<T, N>& x) { - return Jet<T, N>(erf(x.a), x.v * M_2_SQRTPI * exp(-x.a * x.a)); + // We evaluate the constant as follows: + // 2 / sqrt(pi) = 1 / sqrt(atan(1.)) + // On POSIX sytems it is defined as M_2_SQRTPI, but this is not + // portable and the type may not be T. The above expression + // evaluates to full precision with IEEE arithmetic and, since it's + // constant, the compiler can generate exactly the same code. gcc + // does so even at -O0. + return Jet<T, N>(erf(x.a), x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1))))); } // erfc(x) = 1-erf(x) // erfc(x + h) = erfc(x) + h * (-2*exp(-x^2)/sqrt(pi)) template <typename T, int N> inline Jet<T, N> erfc(const Jet<T, N>& x) { - return Jet<T, N>(erfc(x.a), -x.v * M_2_SQRTPI * exp(-x.a * x.a)); + // See in erf() above for the evaluation of the constant in the derivative. + return Jet<T, N>(erfc(x.a), + -x.v * exp(-x.a * x.a) * (T(1) / sqrt(atan(T(1))))); } // Bessel functions of the first kind with integer order equal to 0, 1, n. @@ -648,80 +879,210 @@ inline Jet<T, N> BesselJn(int n, const Jet<T, N>& f) { T(0.5) * (BesselJn(n - 1, f.a) - BesselJn(n + 1, f.a)) * f.v); } -// Jet Classification. It is not clear what the appropriate semantics are for -// these classifications. This picks that std::isfinite and std::isnormal are -// "all" operations, i.e. all elements of the jet must be finite for the jet -// itself to be finite (or normal). For IsNaN and IsInfinite, the answer is less -// clear. This takes a "any" approach for IsNaN and IsInfinite such that if any -// part of a jet is nan or inf, then the entire jet is nan or inf. 
This leads -// to strange situations like a jet can be both IsInfinite and IsNaN, but in -// practice the "any" semantics are the most useful for e.g. checking that -// derivatives are sane. - -// The jet is finite if all parts of the jet are finite. +// Classification and comparison functionality referencing only the scalar part +// of a Jet. To classify the derivatives (e.g., for sanity checks), the dual +// part should be referenced explicitly. For instance, to check whether the +// derivatives of a Jet 'f' are reasonable, one can use +// +// isfinite(f.v.array()).all() +// !isnan(f.v.array()).any() +// +// etc., depending on the desired semantics. +// +// NOTE: Floating-point classification and comparison functions and operators +// should be used with care as no derivatives can be propagated by such +// functions directly but only by expressions resulting from corresponding +// conditional statements. At the same time, conditional statements can possibly +// introduce a discontinuity in the cost function making it impossible to +// evaluate its derivative and thus the optimization problem intractable. + +// Determines whether the scalar part of the Jet is finite. template <typename T, int N> inline bool isfinite(const Jet<T, N>& f) { - // Branchless implementation. This is more efficient for the false-case and - // works with the codegen system. - auto result = isfinite(f.a); - for (int i = 0; i < N; ++i) { - result = result & isfinite(f.v[i]); - } - return result; + return isfinite(f.a); } -// The jet is infinite if any part of the Jet is infinite. +// Determines whether the scalar part of the Jet is infinite. template <typename T, int N> inline bool isinf(const Jet<T, N>& f) { - auto result = isinf(f.a); - for (int i = 0; i < N; ++i) { - result = result | isinf(f.v[i]); - } - return result; + return isinf(f.a); } -// The jet is NaN if any part of the jet is NaN. +// Determines whether the scalar part of the Jet is NaN. 
template <typename T, int N> inline bool isnan(const Jet<T, N>& f) { - auto result = isnan(f.a); - for (int i = 0; i < N; ++i) { - result = result | isnan(f.v[i]); - } - return result; + return isnan(f.a); } -// The jet is normal if all parts of the jet are normal. +// Determines whether the scalar part of the Jet is neither zero, subnormal, +// infinite, nor NaN. template <typename T, int N> inline bool isnormal(const Jet<T, N>& f) { - auto result = isnormal(f.a); - for (int i = 0; i < N; ++i) { - result = result & isnormal(f.v[i]); - } - return result; + return isnormal(f.a); +} + +// Determines whether the scalar part of the Jet f is less than the scalar +// part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool isless(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isless(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is greater than the scalar +// part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool isgreater(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isgreater(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is less than or equal to the +// scalar part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool islessequal(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return islessequal(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is less than or greater than +// (f < g || f > g) the scalar part of g. 
+// +// NOTE: This function does NOT set any floating-point exceptions. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool islessgreater(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return islessgreater(AsScalar(f), AsScalar(g)); +} + +// Determines whether the scalar part of the Jet f is greater than or equal to +// the scalar part of g. +// +// NOTE: This function does NOT set any floating-point exceptions. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool isgreaterequal(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isgreaterequal(AsScalar(f), AsScalar(g)); +} + +// Determines if either of the scalar parts of the arguments are NaN and +// thus cannot be ordered with respect to each other. +template <typename Lhs, + typename Rhs, + std::enable_if_t<CompatibleJetOperands_v<Lhs, Rhs>>* = nullptr> +inline bool isunordered(const Lhs& f, const Rhs& g) { + using internal::AsScalar; + return isunordered(AsScalar(f), AsScalar(g)); +} + +// Categorize scalar part as zero, subnormal, normal, infinite, NaN, or +// implementation-defined. +template <typename T, int N> +inline int fpclassify(const Jet<T, N>& f) { + return fpclassify(f.a); +} + +// Determines whether the scalar part of the argument is negative. +template <typename T, int N> +inline bool signbit(const Jet<T, N>& f) { + return signbit(f.a); } // Legacy functions from the pre-C++11 days. template <typename T, int N> +CERES_DEPRECATED_WITH_MSG( + "ceres::IsFinite will be removed in a future Ceres Solver release. Please " + "use ceres::isfinite.") inline bool IsFinite(const Jet<T, N>& f) { return isfinite(f); } template <typename T, int N> +CERES_DEPRECATED_WITH_MSG( + "ceres::IsNaN will be removed in a future Ceres Solver release. 
Please use " + "ceres::isnan.") inline bool IsNaN(const Jet<T, N>& f) { return isnan(f); } template <typename T, int N> +CERES_DEPRECATED_WITH_MSG( + "ceres::IsNormal will be removed in a future Ceres Solver release. Please " + "use ceres::isnormal.") inline bool IsNormal(const Jet<T, N>& f) { return isnormal(f); } // The jet is infinite if any part of the jet is infinite. template <typename T, int N> +CERES_DEPRECATED_WITH_MSG( + "ceres::IsInfinite will be removed in a future Ceres Solver release. " + "Please use ceres::isinf.") inline bool IsInfinite(const Jet<T, N>& f) { return isinf(f); } +#ifdef CERES_HAS_CPP20 +// Computes the linear interpolation a + t(b - a) between a and b at the value +// t. For arguments outside of the range 0 <= t <= 1, the values are +// extrapolated. +// +// Differentiating lerp(a, b, t) with respect to a, b, and t gives: +// +// d/da lerp(a, b, t) = 1 - t +// d/db lerp(a, b, t) = t +// d/dt lerp(a, b, t) = b - a +// +// with the dual representation given by +// +// lerp(a + da, b + db, t + dt) +// ~= lerp(a, b, t) + (1 - t) da + t db + (b - a) dt . +template <typename T, int N> +inline Jet<T, N> lerp(const Jet<T, N>& a, + const Jet<T, N>& b, + const Jet<T, N>& t) { + return Jet<T, N>{lerp(a.a, b.a, t.a), + (T(1) - t.a) * a.v + t.a * b.v + (b.a - a.a) * t.v}; +} + +// Computes the midpoint a + (b - a) / 2. +// +// Differentiating midpoint(a, b) with respect to a and b gives: +// +// d/da midpoint(a, b) = 1/2 +// d/db midpoint(a, b) = 1/2 +// +// with the dual representation given by +// +// midpoint(a + da, b + db) ~= midpoint(a, b) + (da + db) / 2 . +template <typename T, int N> +inline Jet<T, N> midpoint(const Jet<T, N>& a, const Jet<T, N>& b) { + Jet<T, N> result{midpoint(a.a, b.a)}; + // To avoid overflow in the differential, compute + // (da + db) / 2 using midpoint. 
+ for (int i = 0; i < N; ++i) { + result.v[i] = midpoint(a.v[i], b.v[i]); + } + return result; +} +#endif // defined(CERES_HAS_CPP20) + // atan2(b + db, a + da) ~= atan2(b, a) + (- b da + a db) / (a^2 + b^2) // // In words: the rate of change of theta is 1/r times the rate of @@ -737,6 +1098,22 @@ inline Jet<T, N> atan2(const Jet<T, N>& g, const Jet<T, N>& f) { return Jet<T, N>(atan2(g.a, f.a), tmp * (-g.a * f.v + f.a * g.v)); } +// Computes the square x^2 of a real number x (not the Euclidean L^2 norm as +// the name might suggest). +// +// NOTE: While std::norm is primarily intended for computing the squared +// magnitude of a std::complex<> number, the current Jet implementation does not +// support mixing a scalar T in its real part and std::complex<T> and in the +// infinitesimal. Mixed Jet support is necessary for the type decay from +// std::complex<T> to T (the squared magnitude of a complex number is always +// real) performed by std::norm. +// +// norm(x + h) ~= norm(x) + 2x h +template <typename T, int N> +inline Jet<T, N> norm(const Jet<T, N>& f) { + return Jet<T, N>(norm(f.a), T(2) * f.a * f.v); +} + // pow -- base is a differentiable function, exponent is a constant. // (a+da)^p ~= a^p + p*a^(p-1) da template <typename T, int N> @@ -760,14 +1137,14 @@ template <typename T, int N> inline Jet<T, N> pow(T f, const Jet<T, N>& g) { Jet<T, N> result; - if (f == T(0) && g.a > T(0)) { + if (fpclassify(f) == FP_ZERO && g > 0) { // Handle case 2. result = Jet<T, N>(T(0.0)); } else { - if (f < 0 && g.a == floor(g.a)) { // Handle case 3. + if (f < 0 && g == floor(g.a)) { // Handle case 3. result = Jet<T, N>(pow(f, g.a)); for (int i = 0; i < N; i++) { - if (g.v[i] != T(0.0)) { + if (fpclassify(g.v[i]) != FP_ZERO) { // Return a NaN when g.v != 0. 
result.v[i] = std::numeric_limits<T>::quiet_NaN(); } @@ -822,21 +1199,21 @@ template <typename T, int N> inline Jet<T, N> pow(const Jet<T, N>& f, const Jet<T, N>& g) { Jet<T, N> result; - if (f.a == T(0) && g.a >= T(1)) { + if (fpclassify(f) == FP_ZERO && g >= 1) { // Handle cases 2 and 3. - if (g.a > T(1)) { + if (g > 1) { result = Jet<T, N>(T(0.0)); } else { result = f; } } else { - if (f.a < T(0) && g.a == floor(g.a)) { + if (f < 0 && g == floor(g.a)) { // Handle cases 7 and 8. T const tmp = g.a * pow(f.a, g.a - T(1.0)); result = Jet<T, N>(pow(f.a, g.a), tmp * f.v); for (int i = 0; i < N; i++) { - if (g.v[i] != T(0.0)) { + if (fpclassify(g.v[i]) != FP_ZERO) { // Return a NaN when g.v != 0. result.v[i] = T(std::numeric_limits<double>::quiet_NaN()); } @@ -904,8 +1281,9 @@ struct numeric_limits<ceres::Jet<T, N>> { static constexpr bool tinyness_before = std::numeric_limits<T>::tinyness_before; - static constexpr ceres::Jet<T, N> min() noexcept { - return ceres::Jet<T, N>(std::numeric_limits<T>::min()); + static constexpr ceres::Jet<T, N> min + CERES_PREVENT_MACRO_SUBSTITUTION() noexcept { + return ceres::Jet<T, N>((std::numeric_limits<T>::min)()); } static constexpr ceres::Jet<T, N> lowest() noexcept { return ceres::Jet<T, N>(std::numeric_limits<T>::lowest()); @@ -929,8 +1307,9 @@ struct numeric_limits<ceres::Jet<T, N>> { return ceres::Jet<T, N>(std::numeric_limits<T>::denorm_min()); } - static constexpr ceres::Jet<T, N> max() noexcept { - return ceres::Jet<T, N>(std::numeric_limits<T>::max()); + static constexpr ceres::Jet<T, N> max + CERES_PREVENT_MACRO_SUBSTITUTION() noexcept { + return ceres::Jet<T, N>((std::numeric_limits<T>::max)()); } }; @@ -942,10 +1321,10 @@ namespace Eigen { // Eigen arrays, getting all the goodness of Eigen combined with autodiff. 
template <typename T, int N> struct NumTraits<ceres::Jet<T, N>> { - typedef ceres::Jet<T, N> Real; - typedef ceres::Jet<T, N> NonInteger; - typedef ceres::Jet<T, N> Nested; - typedef ceres::Jet<T, N> Literal; + using Real = ceres::Jet<T, N>; + using NonInteger = ceres::Jet<T, N>; + using Nested = ceres::Jet<T, N>; + using Literal = ceres::Jet<T, N>; static typename ceres::Jet<T, N> dummy_precision() { return ceres::Jet<T, N>(1e-12); @@ -984,8 +1363,8 @@ struct NumTraits<ceres::Jet<T, N>> { }; }; - static inline Real highest() { return Real(std::numeric_limits<T>::max()); } - static inline Real lowest() { return Real(-std::numeric_limits<T>::max()); } + static inline Real highest() { return Real((std::numeric_limits<T>::max)()); } + static inline Real lowest() { return Real(-(std::numeric_limits<T>::max)()); } }; // Specifying the return type of binary operations between Jets and scalar types @@ -996,11 +1375,11 @@ struct NumTraits<ceres::Jet<T, N>> { // is only available on Eigen versions >= 3.3 template <typename BinaryOp, typename T, int N> struct ScalarBinaryOpTraits<ceres::Jet<T, N>, T, BinaryOp> { - typedef ceres::Jet<T, N> ReturnType; + using ReturnType = ceres::Jet<T, N>; }; template <typename BinaryOp, typename T, int N> struct ScalarBinaryOpTraits<T, ceres::Jet<T, N>, BinaryOp> { - typedef ceres::Jet<T, N> ReturnType; + using ReturnType = ceres::Jet<T, N>; }; } // namespace Eigen diff --git a/extern/ceres/include/ceres/jet_fwd.h b/extern/ceres/include/ceres/jet_fwd.h new file mode 100644 index 00000000000..fbb6286958c --- /dev/null +++ b/extern/ceres/include/ceres/jet_fwd.h @@ -0,0 +1,44 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. 
+// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sergiu.deitsch@gmail.com (Sergiu Deitsch) +// + +#ifndef CERES_PUBLIC_JET_FWD_H_ +#define CERES_PUBLIC_JET_FWD_H_ + +namespace ceres { + +// Jet forward declaration necessary for the following partial specialization of +// std::common_type and type traits. 
+template <typename T, int N> +struct Jet; + +} // namespace ceres + +#endif // CERES_PUBLIC_JET_FWD_H_ diff --git a/extern/ceres/include/ceres/line_manifold.h b/extern/ceres/include/ceres/line_manifold.h new file mode 100644 index 00000000000..f8f1b235220 --- /dev/null +++ b/extern/ceres/include/ceres/line_manifold.h @@ -0,0 +1,304 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: jodebo_beck@gmx.de (Johannes Beck) +// + +#ifndef CERES_PUBLIC_LINE_MANIFOLD_H_ +#define CERES_PUBLIC_LINE_MANIFOLD_H_ + +#include <Eigen/Core> +#include <algorithm> +#include <array> +#include <memory> +#include <vector> + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/householder_vector.h" +#include "ceres/internal/sphere_manifold_functions.h" +#include "ceres/manifold.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { +// This provides a manifold for lines, where the line is +// over-parameterized by an origin point and a direction vector. So the +// parameter vector size needs to be two times the ambient space dimension, +// where the first half is interpreted as the origin point and the second half +// as the direction. +// +// The plus operator for the line direction is the same as for the +// SphereManifold. The update of the origin point is +// perpendicular to the line direction before the update. +// +// This manifold is a special case of the affine Grassmannian +// manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold)) +// for the case Graff_1(R^n). +// +// The class works with dynamic and static ambient space dimensions. 
If the +// ambient space dimensions is known at compile time use +// +// LineManifold<3> manifold; +// +// If the ambient space dimensions is not known at compile time the template +// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs +// to be provided as a constructor argument: +// +// LineManifold<ceres::DYNAMIC> manifold(ambient_dim); +// +template <int AmbientSpaceDimension> +class LineManifold final : public Manifold { + public: + static_assert(AmbientSpaceDimension == DYNAMIC || AmbientSpaceDimension >= 2, + "The ambient space must be at least 2."); + static_assert(ceres::DYNAMIC == Eigen::Dynamic, + "ceres::DYNAMIC needs to be the same as Eigen::Dynamic."); + + LineManifold(); + explicit LineManifold(int size); + + int AmbientSize() const override { return 2 * size_; } + int TangentSize() const override { return 2 * (size_ - 1); } + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; + + private: + static constexpr bool IsDynamic = (AmbientSpaceDimension == ceres::DYNAMIC); + static constexpr int TangentSpaceDimension = + IsDynamic ? ceres::DYNAMIC : AmbientSpaceDimension - 1; + + static constexpr int DAmbientSpaceDimension = + IsDynamic ? ceres::DYNAMIC : 2 * AmbientSpaceDimension; + static constexpr int DTangentSpaceDimension = + IsDynamic ? 
ceres::DYNAMIC : 2 * TangentSpaceDimension; + + using AmbientVector = Eigen::Matrix<double, AmbientSpaceDimension, 1>; + using TangentVector = Eigen::Matrix<double, TangentSpaceDimension, 1>; + using MatrixPlusJacobian = Eigen::Matrix<double, + DAmbientSpaceDimension, + DTangentSpaceDimension, + Eigen::RowMajor>; + using MatrixMinusJacobian = Eigen::Matrix<double, + DTangentSpaceDimension, + DAmbientSpaceDimension, + Eigen::RowMajor>; + + const int size_{AmbientSpaceDimension}; +}; + +template <int AmbientSpaceDimension> +LineManifold<AmbientSpaceDimension>::LineManifold() + : size_{AmbientSpaceDimension} { + static_assert( + AmbientSpaceDimension != Eigen::Dynamic, + "The size is set to dynamic. Please call the constructor with a size."); +} + +template <int AmbientSpaceDimension> +LineManifold<AmbientSpaceDimension>::LineManifold(int size) : size_{size} { + if (AmbientSpaceDimension != Eigen::Dynamic) { + CHECK_EQ(AmbientSpaceDimension, size) + << "Specified size by template parameter differs from the supplied " + "one."; + } else { + CHECK_GT(size_, 1) + << "The size of the manifold needs to be greater than 1."; + } +} + +template <int AmbientSpaceDimension> +bool LineManifold<AmbientSpaceDimension>::Plus(const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const { + // We seek a box plus operator of the form + // + // [o*, d*] = Plus([o, d], [delta_o, delta_d]) + // + // where o is the origin point, d is the direction vector, delta_o is + // the delta of the origin point and delta_d the delta of the direction and + // o* and d* is the updated origin point and direction. 
+ // + // We separate the Plus operator into the origin point and directional part + // d* = Plus_d(d, delta_d) + // o* = Plus_o(o, d, delta_o) + // + // The direction update function Plus_d is the same as as the SphereManifold: + // + // d* = H_{v(d)} [0.5 sinc(0.5 |delta_d|) delta_d, cos(0.5 |delta_d|)]^T + // + // where H is the householder matrix + // H_{v} = I - (2 / |v|^2) v v^T + // and + // v(d) = d - sign(d_n) |d| e_n. + // + // The origin point update function Plus_o is defined as + // + // o* = o + H_{v(d)} [0.5 delta_o, 0]^T. + + Eigen::Map<const AmbientVector> o(x_ptr, size_); + Eigen::Map<const AmbientVector> d(x_ptr + size_, size_); + + Eigen::Map<const TangentVector> delta_o(delta_ptr, size_ - 1); + Eigen::Map<const TangentVector> delta_d(delta_ptr + size_ - 1, size_ - 1); + Eigen::Map<AmbientVector> o_plus_delta(x_plus_delta_ptr, size_); + Eigen::Map<AmbientVector> d_plus_delta(x_plus_delta_ptr + size_, size_); + + const double norm_delta_d = delta_d.norm(); + + o_plus_delta = o; + + // Shortcut for zero delta direction. + if (norm_delta_d == 0.0) { + d_plus_delta = d; + + if (delta_o.isZero(0.0)) { + return true; + } + } + + // Calculate the householder transformation which is needed for f_d and f_o. + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. + internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>, + double, + AmbientSpaceDimension>(d, &v, &beta); + + if (norm_delta_d != 0.0) { + internal::ComputeSphereManifoldPlus( + v, beta, d, delta_d, norm_delta_d, &d_plus_delta); + } + + // The null space is in the direction of the line, so the tangent space is + // perpendicular to the line direction. This is achieved by using the + // householder matrix of the direction and allow only movements + // perpendicular to e_n. 
+ // + // The factor of 0.5 is used to be consistent with the line direction + // update. + AmbientVector y(size_); + y << 0.5 * delta_o, 0; + o_plus_delta += internal::ApplyHouseholderVector(y, v, beta); + + return true; +} + +template <int AmbientSpaceDimension> +bool LineManifold<AmbientSpaceDimension>::PlusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map<const AmbientVector> d(x_ptr + size_, size_); + Eigen::Map<MatrixPlusJacobian> jacobian( + jacobian_ptr, 2 * size_, 2 * (size_ - 1)); + + // Clear the Jacobian as only half of the matrix is not zero. + jacobian.setZero(); + + auto jacobian_d = + jacobian + .template topLeftCorner<AmbientSpaceDimension, TangentSpaceDimension>( + size_, size_ - 1); + auto jacobian_o = jacobian.template bottomRightCorner<AmbientSpaceDimension, + TangentSpaceDimension>( + size_, size_ - 1); + internal::ComputeSphereManifoldPlusJacobian(d, &jacobian_d); + jacobian_o = jacobian_d; + return true; +} + +template <int AmbientSpaceDimension> +bool LineManifold<AmbientSpaceDimension>::Minus(const double* y_ptr, + const double* x_ptr, + double* y_minus_x) const { + Eigen::Map<const AmbientVector> y_o(y_ptr, size_); + Eigen::Map<const AmbientVector> y_d(y_ptr + size_, size_); + Eigen::Map<const AmbientVector> x_o(x_ptr, size_); + Eigen::Map<const AmbientVector> x_d(x_ptr + size_, size_); + + Eigen::Map<TangentVector> y_minus_x_o(y_minus_x, size_ - 1); + Eigen::Map<TangentVector> y_minus_x_d(y_minus_x + size_ - 1, size_ - 1); + + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. 
+ internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>, + double, + AmbientSpaceDimension>(x_d, &v, &beta); + + internal::ComputeSphereManifoldMinus(v, beta, x_d, y_d, &y_minus_x_d); + + AmbientVector delta_o = y_o - x_o; + const AmbientVector h_delta_o = + 2.0 * internal::ApplyHouseholderVector(delta_o, v, beta); + y_minus_x_o = h_delta_o.template head<TangentSpaceDimension>(size_ - 1); + + return true; +} + +template <int AmbientSpaceDimension> +bool LineManifold<AmbientSpaceDimension>::MinusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map<const AmbientVector> d(x_ptr + size_, size_); + Eigen::Map<MatrixMinusJacobian> jacobian( + jacobian_ptr, 2 * (size_ - 1), 2 * size_); + + // Clear the Jacobian as only half of the matrix is not zero. + jacobian.setZero(); + + auto jacobian_d = + jacobian + .template topLeftCorner<TangentSpaceDimension, AmbientSpaceDimension>( + size_ - 1, size_); + auto jacobian_o = jacobian.template bottomRightCorner<TangentSpaceDimension, + AmbientSpaceDimension>( + size_ - 1, size_); + internal::ComputeSphereManifoldMinusJacobian(d, &jacobian_d); + jacobian_o = jacobian_d; + + return true; +} + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#endif // CERES_PUBLIC_LINE_MANIFOLD_H_ diff --git a/extern/ceres/include/ceres/local_parameterization.h b/extern/ceres/include/ceres/local_parameterization.h index ba7579deca0..5815dd17d15 100644 --- a/extern/ceres/include/ceres/local_parameterization.h +++ b/extern/ceres/include/ceres/local_parameterization.h @@ -37,10 +37,14 @@ #include <vector> #include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/internal/port.h" namespace ceres { +// WARNING: LocalParameterizations are deprecated. They will be removed from +// Ceres Solver in version 2.2.0. Please use Manifolds instead. 
+ // Purpose: Sometimes parameter blocks x can overparameterize a problem // // min f(x) @@ -111,7 +115,10 @@ namespace ceres { // // The class LocalParameterization defines the function Plus and its // Jacobian which is needed to compute the Jacobian of f w.r.t delta. -class CERES_EXPORT LocalParameterization { +class CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations will be removed from the Ceres Solver API in " + "version 2.2.0. Use Manifolds instead.") + CERES_EXPORT LocalParameterization { public: virtual ~LocalParameterization(); @@ -120,6 +127,7 @@ class CERES_EXPORT LocalParameterization { // x_plus_delta = Plus(x, delta) // // with the condition that Plus(x, 0) = x. + // virtual bool Plus(const double* x, const double* delta, double* x_plus_delta) const = 0; @@ -152,10 +160,10 @@ class CERES_EXPORT LocalParameterization { // Some basic parameterizations // Identity Parameterization: Plus(x, delta) = x + delta -class CERES_EXPORT IdentityParameterization : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use EuclideanManifold instead.") + CERES_EXPORT IdentityParameterization : public LocalParameterization { public: explicit IdentityParameterization(int size); - virtual ~IdentityParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; @@ -172,11 +180,11 @@ class CERES_EXPORT IdentityParameterization : public LocalParameterization { }; // Hold a subset of the parameters inside a parameter block constant. 
-class CERES_EXPORT SubsetParameterization : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use SubsetManifold instead.") + CERES_EXPORT SubsetParameterization : public LocalParameterization { public: explicit SubsetParameterization(int size, const std::vector<int>& constant_parameters); - virtual ~SubsetParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; @@ -199,9 +207,9 @@ class CERES_EXPORT SubsetParameterization : public LocalParameterization { // with * being the quaternion multiplication operator. Here we assume // that the first element of the quaternion vector is the real (cos // theta) part. -class CERES_EXPORT QuaternionParameterization : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use QuaternionManifold instead.") + CERES_EXPORT QuaternionParameterization : public LocalParameterization { public: - virtual ~QuaternionParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; @@ -221,10 +229,10 @@ class CERES_EXPORT QuaternionParameterization : public LocalParameterization { // // Plus(x, delta) = [sin(|delta|) delta / |delta|, cos(|delta|)] * x // with * being the quaternion multiplication operator. -class CERES_EXPORT EigenQuaternionParameterization +class CERES_DEPRECATED_WITH_MSG("Use EigenQuaternionManifold instead.") + CERES_EXPORT EigenQuaternionParameterization : public ceres::LocalParameterization { public: - virtual ~EigenQuaternionParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; @@ -234,23 +242,23 @@ class CERES_EXPORT EigenQuaternionParameterization }; // This provides a parameterization for homogeneous vectors which are commonly -// used in Structure for Motion problems. One example where they are used is -// in representing points whose triangulation is ill-conditioned. 
Here -// it is advantageous to use an over-parameterization since homogeneous vectors -// can represent points at infinity. +// used in Structure from Motion problems. One example where they are used is +// in representing points whose triangulation is ill-conditioned. Here it is +// advantageous to use an over-parameterization since homogeneous vectors can +// represent points at infinity. // // The plus operator is defined as // Plus(x, delta) = // [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x +// // with * defined as an operator which applies the update orthogonal to x to // remain on the sphere. We assume that the last element of x is the scalar // component. The size of the homogeneous vector is required to be greater than // 1. -class CERES_EXPORT HomogeneousVectorParameterization - : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use SphereManifold instead.") CERES_EXPORT + HomogeneousVectorParameterization : public LocalParameterization { public: explicit HomogeneousVectorParameterization(int size); - virtual ~HomogeneousVectorParameterization() {} bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; @@ -276,7 +284,8 @@ class CERES_EXPORT HomogeneousVectorParameterization // manifold (see https://en.wikipedia.org/wiki/Affine_Grassmannian_(manifold)) // for the case Graff_1(R^n). template <int AmbientSpaceDimension> -class LineParameterization : public LocalParameterization { +class CERES_DEPRECATED_WITH_MSG("Use LineManifold instead.") + LineParameterization : public LocalParameterization { public: static_assert(AmbientSpaceDimension >= 2, "The ambient space must be at least 2"); @@ -302,21 +311,19 @@ class LineParameterization : public LocalParameterization { // // is the local parameterization for a rigid transformation, where the // rotation is represented using a quaternion. 
-class CERES_EXPORT ProductParameterization : public LocalParameterization { +// +class CERES_DEPRECATED_WITH_MSG("Use ProductManifold instead.") + CERES_EXPORT ProductParameterization : public LocalParameterization { public: ProductParameterization(const ProductParameterization&) = delete; ProductParameterization& operator=(const ProductParameterization&) = delete; - virtual ~ProductParameterization() {} // // NOTE: The constructor takes ownership of the input local // parameterizations. // template <typename... LocalParams> - ProductParameterization(LocalParams*... local_params) - : local_params_(sizeof...(LocalParams)), - local_size_{0}, - global_size_{0}, - buffer_size_{0} { + explicit ProductParameterization(LocalParams*... local_params) + : local_params_(sizeof...(LocalParams)) { constexpr int kNumLocalParams = sizeof...(LocalParams); static_assert(kNumLocalParams >= 2, "At least two local parameterizations must be specified."); @@ -342,22 +349,23 @@ class CERES_EXPORT ProductParameterization : public LocalParameterization { bool Plus(const double* x, const double* delta, double* x_plus_delta) const override; - bool ComputeJacobian(const double* x, - double* jacobian) const override; + bool ComputeJacobian(const double* x, double* jacobian) const override; int GlobalSize() const override { return global_size_; } int LocalSize() const override { return local_size_; } private: std::vector<std::unique_ptr<LocalParameterization>> local_params_; - int local_size_; - int global_size_; - int buffer_size_; + int local_size_{0}; + int global_size_{0}; + int buffer_size_{0}; }; } // namespace ceres // clang-format off #include "ceres/internal/reenable_warnings.h" +// clang-format on + #include "ceres/internal/line_parameterization.h" #endif // CERES_PUBLIC_LOCAL_PARAMETERIZATION_H_ diff --git a/extern/ceres/include/ceres/loss_function.h b/extern/ceres/include/ceres/loss_function.h index 7aabf7dfce1..8a5a37ff665 100644 --- a/extern/ceres/include/ceres/loss_function.h 
+++ b/extern/ceres/include/ceres/loss_function.h @@ -35,7 +35,7 @@ // // For least squares problem where there are no outliers and standard // squared loss is expected, it is not necessary to create a loss -// function; instead passing a NULL to the problem when adding +// function; instead passing a nullptr to the problem when adding // residuals implies a standard squared loss. // // For least squares problems where the minimization may encounter @@ -78,6 +78,7 @@ #include <memory> #include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/types.h" #include "glog/logging.h" @@ -85,7 +86,7 @@ namespace ceres { class CERES_EXPORT LossFunction { public: - virtual ~LossFunction() {} + virtual ~LossFunction(); // For a residual vector with squared 2-norm 'sq_norm', this method // is required to fill in the value and derivatives of the loss @@ -125,10 +126,10 @@ class CERES_EXPORT LossFunction { // // At s = 0: rho = [0, 1, 0]. // -// It is not normally necessary to use this, as passing NULL for the +// It is not normally necessary to use this, as passing nullptr for the // loss function when building the problem accomplishes the same // thing. -class CERES_EXPORT TrivialLoss : public LossFunction { +class CERES_EXPORT TrivialLoss final : public LossFunction { public: void Evaluate(double, double*) const override; }; @@ -171,7 +172,7 @@ class CERES_EXPORT TrivialLoss : public LossFunction { // // The scaling parameter 'a' corresponds to 'delta' on this page: // http://en.wikipedia.org/wiki/Huber_Loss_Function -class CERES_EXPORT HuberLoss : public LossFunction { +class CERES_EXPORT HuberLoss final : public LossFunction { public: explicit HuberLoss(double a) : a_(a), b_(a * a) {} void Evaluate(double, double*) const override; @@ -187,7 +188,7 @@ class CERES_EXPORT HuberLoss : public LossFunction { // rho(s) = 2 (sqrt(1 + s) - 1). // // At s = 0: rho = [0, 1, -1 / (2 * a^2)]. 
-class CERES_EXPORT SoftLOneLoss : public LossFunction { +class CERES_EXPORT SoftLOneLoss final : public LossFunction { public: explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {} void Evaluate(double, double*) const override; @@ -204,7 +205,7 @@ class CERES_EXPORT SoftLOneLoss : public LossFunction { // rho(s) = log(1 + s). // // At s = 0: rho = [0, 1, -1 / a^2]. -class CERES_EXPORT CauchyLoss : public LossFunction { +class CERES_EXPORT CauchyLoss final : public LossFunction { public: explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {} void Evaluate(double, double*) const override; @@ -225,7 +226,7 @@ class CERES_EXPORT CauchyLoss : public LossFunction { // rho(s) = a atan(s / a). // // At s = 0: rho = [0, 1, 0]. -class CERES_EXPORT ArctanLoss : public LossFunction { +class CERES_EXPORT ArctanLoss final : public LossFunction { public: explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {} void Evaluate(double, double*) const override; @@ -264,7 +265,7 @@ class CERES_EXPORT ArctanLoss : public LossFunction { // concentrated in the range a - b to a + b. // // At s = 0: rho = [0, ~0, ~0]. -class CERES_EXPORT TolerantLoss : public LossFunction { +class CERES_EXPORT TolerantLoss final : public LossFunction { public: explicit TolerantLoss(double a, double b); void Evaluate(double, double*) const override; @@ -283,7 +284,7 @@ class CERES_EXPORT TolerantLoss : public LossFunction { // rho(s) = a^2 / 3 for s > a^2. // // At s = 0: rho = [0, 1, -2 / a^2] -class CERES_EXPORT TukeyLoss : public ceres::LossFunction { +class CERES_EXPORT TukeyLoss final : public ceres::LossFunction { public: explicit TukeyLoss(double a) : a_squared_(a * a) {} void Evaluate(double, double*) const override; @@ -294,14 +295,14 @@ class CERES_EXPORT TukeyLoss : public ceres::LossFunction { // Composition of two loss functions. The error is the result of first // evaluating g followed by f to yield the composition f(g(s)). -// The loss functions must not be NULL. 
-class CERES_EXPORT ComposedLoss : public LossFunction { +// The loss functions must not be nullptr. +class CERES_EXPORT ComposedLoss final : public LossFunction { public: explicit ComposedLoss(const LossFunction* f, Ownership ownership_f, const LossFunction* g, Ownership ownership_g); - virtual ~ComposedLoss(); + ~ComposedLoss() override; void Evaluate(double, double*) const override; private: @@ -322,11 +323,11 @@ class CERES_EXPORT ComposedLoss : public LossFunction { // s -> a * rho'(s) // s -> a * rho''(s) // -// Since we treat the a NULL Loss function as the Identity loss -// function, rho = NULL is a valid input and will result in the input +// Since we treat the a nullptr Loss function as the Identity loss +// function, rho = nullptr is a valid input and will result in the input // being scaled by a. This provides a simple way of implementing a // scaled ResidualBlock. -class CERES_EXPORT ScaledLoss : public LossFunction { +class CERES_EXPORT ScaledLoss final : public LossFunction { public: // Constructs a ScaledLoss wrapping another loss function. Takes // ownership of the wrapped loss function or not depending on the @@ -336,7 +337,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction { ScaledLoss(const ScaledLoss&) = delete; void operator=(const ScaledLoss&) = delete; - virtual ~ScaledLoss() { + ~ScaledLoss() override { if (ownership_ == DO_NOT_TAKE_OWNERSHIP) { rho_.release(); } @@ -361,8 +362,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction { // whose scale can be mutated after an optimization problem has been // constructed. // -// Since we treat the a NULL Loss function as the Identity loss -// function, rho = NULL is a valid input. +// Since we treat the a nullptr Loss function as the Identity loss +// function, rho = nullptr is a valid input. 
// // Example usage // @@ -374,7 +375,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction { // new AutoDiffCostFunction < UW_Camera_Mapper, 2, 9, 3>( // new UW_Camera_Mapper(feature_x, feature_y)); // -// LossFunctionWrapper* loss_function(new HuberLoss(1.0), TAKE_OWNERSHIP); +// LossFunctionWrapper* loss_function = new LossFunctionWrapper( +// new HuberLoss(1.0), TAKE_OWNERSHIP); // // problem.AddResidualBlock(cost_function, loss_function, parameters); // @@ -387,7 +389,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction { // // Solve(options, &problem, &summary) // -class CERES_EXPORT LossFunctionWrapper : public LossFunction { +class CERES_EXPORT LossFunctionWrapper final : public LossFunction { public: LossFunctionWrapper(LossFunction* rho, Ownership ownership) : rho_(rho), ownership_(ownership) {} @@ -395,14 +397,14 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction { LossFunctionWrapper(const LossFunctionWrapper&) = delete; void operator=(const LossFunctionWrapper&) = delete; - virtual ~LossFunctionWrapper() { + ~LossFunctionWrapper() override { if (ownership_ == DO_NOT_TAKE_OWNERSHIP) { rho_.release(); } } void Evaluate(double sq_norm, double out[3]) const override { - if (rho_.get() == NULL) { + if (rho_.get() == nullptr) { out[0] = sq_norm; out[1] = 1.0; out[2] = 0.0; diff --git a/extern/ceres/include/ceres/manifold.h b/extern/ceres/include/ceres/manifold.h new file mode 100644 index 00000000000..4d6e9fa0f59 --- /dev/null +++ b/extern/ceres/include/ceres/manifold.h @@ -0,0 +1,411 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_MANIFOLD_H_ +#define CERES_PUBLIC_MANIFOLD_H_ + +#include <Eigen/Core> +#include <algorithm> +#include <array> +#include <memory> +#include <utility> +#include <vector> + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// In sensor fusion problems, often we have to model quantities that live in +// spaces known as Manifolds, for example the rotation/orientation of a sensor +// that is represented by a quaternion. +// +// Manifolds are spaces which locally look like Euclidean spaces. 
More +// precisely, at each point on the manifold there is a linear space that is +// tangent to the manifold. It has dimension equal to the intrinsic dimension of +// the manifold itself, which is less than or equal to the ambient space in +// which the manifold is embedded. +// +// For example, the tangent space to a point on a sphere in three dimensions is +// the two dimensional plane that is tangent to the sphere at that point. There +// are two reasons tangent spaces are interesting: +// +// 1. They are Eucliean spaces so the usual vector space operations apply there, +// which makes numerical operations easy. +// 2. Movement in the tangent space translate into movements along the manifold. +// Movements perpendicular to the tangent space do not translate into +// movements on the manifold. +// +// Returning to our sphere example, moving in the 2 dimensional plane +// tangent to the sphere and projecting back onto the sphere will move you away +// from the point you started from but moving along the normal at the same point +// and the projecting back onto the sphere brings you back to the point. +// +// The Manifold interface defines two operations (and their derivatives) +// involving the tangent space, allowing filtering and optimization to be +// performed on said manifold: +// +// 1. x_plus_delta = Plus(x, delta) +// 2. delta = Minus(x_plus_delta, x) +// +// "Plus" computes the result of moving along delta in the tangent space at x, +// and then projecting back onto the manifold that x belongs to. In Differential +// Geometry this is known as a "Retraction". It is a generalization of vector +// addition in Euclidean spaces. +// +// Given two points on the manifold, "Minus" computes the change delta to x in +// the tangent space at x, that will take it to x_plus_delta. +// +// Let us now consider two examples. +// +// The Euclidean space R^n is the simplest example of a manifold. 
It has +// dimension n (and so does its tangent space) and Plus and Minus are the +// familiar vector sum and difference operations. +// +// Plus(x, delta) = x + delta = y, +// Minus(y, x) = y - x = delta. +// +// A more interesting case is SO(3), the special orthogonal group in three +// dimensions - the space of 3x3 rotation matrices. SO(3) is a three dimensional +// manifold embedded in R^9 or R^(3x3). So points on SO(3) are represented using +// 9 dimensional vectors or 3x3 matrices, and points in its tangent spaces are +// represented by 3 dimensional vectors. +// +// Defining Plus and Minus are defined in terms of the matrix Exp and Log +// operations as follows: +// +// Let Exp(p, q, r) = [cos(theta) + cp^2, -sr + cpq , sq + cpr ] +// [sr + cpq , cos(theta) + cq^2, -sp + cqr ] +// [-sq + cpr , sp + cqr , cos(theta) + cr^2] +// +// where: theta = sqrt(p^2 + q^2 + r^2) +// s = sinc(theta) +// c = (1 - cos(theta))/theta^2 +// +// and Log(x) = 1/(2 sinc(theta))[x_32 - x_23, x_13 - x_31, x_21 - x_12] +// +// where: theta = acos((Trace(x) - 1)/2) +// +// Then, +// +// Plus(x, delta) = x Exp(delta) +// Minus(y, x) = Log(x^T y) +// +// For Plus and Minus to be mathematically consistent, the following identities +// must be satisfied at all points x on the manifold: +// +// 1. Plus(x, 0) = x. +// 2. For all y, Plus(x, Minus(y, x)) = y. +// 3. For all delta, Minus(Plus(x, delta), x) = delta. +// 4. For all delta_1, delta_2 +// |Minus(Plus(x, delta_1), Plus(x, delta_2)) <= |delta_1 - delta_2| +// +// Briefly: +// (1) Ensures that the tangent space is "centered" at x, and the zero vector is +// the identity element. +// (2) Ensures that any y can be reached from x. +// (3) Ensures that Plus is an injective (one-to-one) map. +// (4) Allows us to define a metric on the manifold. +// +// Additionally we require that Plus and Minus be sufficiently smooth. In +// particular they need to be differentiable everywhere on the manifold. 
+// +// For more details, please see +// +// "Integrating Generic Sensor Fusion Algorithms with Sound State +// Representations through Encapsulation of Manifolds" +// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder +// https://arxiv.org/pdf/1107.1119.pdf +class CERES_EXPORT Manifold { + public: + virtual ~Manifold(); + + // Dimension of the ambient space in which the manifold is embedded. + virtual int AmbientSize() const = 0; + + // Dimension of the manifold/tangent space. + virtual int TangentSize() const = 0; + + // x_plus_delta = Plus(x, delta), + // + // A generalization of vector addition in Euclidean space, Plus computes the + // result of moving along delta in the tangent space at x, and then projecting + // back onto the manifold that x belongs to. + // + // x and x_plus_delta are AmbientSize() vectors. + // delta is a TangentSize() vector. + // + // Return value indicates if the operation was successful or not. + virtual bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const = 0; + + // Compute the derivative of Plus(x, delta) w.r.t delta at delta = 0, i.e. + // + // (D_2 Plus)(x, 0) + // + // jacobian is a row-major AmbientSize() x TangentSize() matrix. + // + // Return value indicates whether the operation was successful or not. + virtual bool PlusJacobian(const double* x, double* jacobian) const = 0; + + // tangent_matrix = ambient_matrix * (D_2 Plus)(x, 0) + // + // ambient_matrix is a row-major num_rows x AmbientSize() matrix. + // tangent_matrix is a row-major num_rows x TangentSize() matrix. + // + // Return value indicates whether the operation was successful or not. + // + // This function is only used by the GradientProblemSolver, where the + // dimension of the parameter block can be large and it may be more efficient + // to compute this product directly rather than first evaluating the Jacobian + // into a matrix and then doing a matrix vector product. 
+ // + // Because this is not an often used function, we provide a default + // implementation for convenience. If performance becomes an issue then the + // user should consider implementing a specialization. + virtual bool RightMultiplyByPlusJacobian(const double* x, + const int num_rows, + const double* ambient_matrix, + double* tangent_matrix) const; + + // y_minus_x = Minus(y, x) + // + // Given two points on the manifold, Minus computes the change to x in the + // tangent space at x, that will take it to y. + // + // x and y are AmbientSize() vectors. + // y_minus_x is a TangentSize() vector. + // + // Return value indicates if the operation was successful or not. + virtual bool Minus(const double* y, + const double* x, + double* y_minus_x) const = 0; + + // Compute the derivative of Minus(y, x) w.r.t y at y = x, i.e + // + // (D_1 Minus) (x, x) + // + // Jacobian is a row-major TangentSize() x AmbientSize() matrix. + // + // Return value indicates whether the operation was successful or not. + virtual bool MinusJacobian(const double* x, double* jacobian) const = 0; +}; + +// The Euclidean manifold is another name for the ordinary vector space R^size, +// where the plus and minus operations are the usual vector addition and +// subtraction: +// Plus(x, delta) = x + delta +// Minus(y, x) = y - x. +// +// The class works with dynamic and static ambient space dimensions. 
If the +// ambient space dimensions is know at compile time use +// +// EuclideanManifold<3> manifold; +// +// If the ambient space dimensions is not known at compile time the template +// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs +// to be provided as a constructor argument: +// +// EuclideanManifold<ceres::DYNAMIC> manifold(ambient_dim); +template <int Size> +class EuclideanManifold final : public Manifold { + public: + static_assert(Size == ceres::DYNAMIC || Size >= 0, + "The size of the manifold needs to be non-negative."); + static_assert(ceres::DYNAMIC == Eigen::Dynamic, + "ceres::DYNAMIC needs to be the same as Eigen::Dynamic."); + + EuclideanManifold() : size_{Size} { + static_assert( + Size != ceres::DYNAMIC, + "The size is set to dynamic. Please call the constructor with a size."); + } + + explicit EuclideanManifold(int size) : size_(size) { + if (Size != ceres::DYNAMIC) { + CHECK_EQ(Size, size) + << "Specified size by template parameter differs from the supplied " + "one."; + } else { + CHECK_GE(size_, 0) + << "The size of the manifold needs to be non-negative."; + } + } + + int AmbientSize() const override { return size_; } + int TangentSize() const override { return size_; } + + bool Plus(const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const override { + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<const AmbientVector> delta(delta_ptr, size_); + Eigen::Map<AmbientVector> x_plus_delta(x_plus_delta_ptr, size_); + x_plus_delta = x + delta; + return true; + } + + bool PlusJacobian(const double* x_ptr, double* jacobian_ptr) const override { + Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_); + jacobian.setIdentity(); + return true; + } + + bool RightMultiplyByPlusJacobian(const double* x, + const int num_rows, + const double* ambient_matrix, + double* tangent_matrix) const override { + std::copy_n(ambient_matrix, num_rows * size_, tangent_matrix); + return true; + } 
+ + bool Minus(const double* y_ptr, + const double* x_ptr, + double* y_minus_x_ptr) const override { + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<const AmbientVector> y(y_ptr, size_); + Eigen::Map<AmbientVector> y_minus_x(y_minus_x_ptr, size_); + y_minus_x = y - x; + return true; + } + + bool MinusJacobian(const double* x_ptr, double* jacobian_ptr) const override { + Eigen::Map<MatrixJacobian> jacobian(jacobian_ptr, size_, size_); + jacobian.setIdentity(); + return true; + } + + private: + static constexpr bool IsDynamic = (Size == ceres::DYNAMIC); + using AmbientVector = Eigen::Matrix<double, Size, 1>; + using MatrixJacobian = Eigen::Matrix<double, Size, Size, Eigen::RowMajor>; + + int size_{}; +}; + +// Hold a subset of the parameters inside a parameter block constant. +class CERES_EXPORT SubsetManifold final : public Manifold { + public: + SubsetManifold(int size, const std::vector<int>& constant_parameters); + int AmbientSize() const override; + int TangentSize() const override; + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + bool RightMultiplyByPlusJacobian(const double* x, + const int num_rows, + const double* ambient_matrix, + double* tangent_matrix) const override; + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; + + private: + const int tangent_size_ = 0; + std::vector<bool> constancy_mask_; +}; + +// Implements the manifold for a Hamilton quaternion as defined in +// https://en.wikipedia.org/wiki/Quaternion. Quaternions are represented as +// unit norm 4-vectors, i.e. +// +// q = [q0; q1; q2; q3], |q| = 1 +// +// is the ambient space representation. +// +// q0 scalar part. +// q1 coefficient of i. +// q2 coefficient of j. +// q3 coefficient of k. +// +// where: i*i = j*j = k*k = -1 and i*j = k, j*k = i, k*i = j. 
+// +// The tangent space is R^3, which relates to the ambient space through the +// Plus and Minus operations defined as: +// +// Plus(x, delta) = [cos(|delta|); sin(|delta|) * delta / |delta|] * x +// Minus(y, x) = to_delta(y * x^{-1}) +// +// where "*" is the quaternion product and because q is a unit quaternion +// (|q|=1), q^-1 = [q0; -q1; -q2; -q3] +// +// and to_delta( [q0; u_{3x1}] ) = u / |u| * atan2(|u|, q0) +class CERES_EXPORT QuaternionManifold final : public Manifold { + public: + int AmbientSize() const override { return 4; } + int TangentSize() const override { return 3; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; +}; + +// Implements the quaternion manifold for Eigen's representation of the +// Hamilton quaternion. Geometrically it is exactly the same as the +// QuaternionManifold defined above. However, Eigen uses a different internal +// memory layout for the elements of the quaternion than what is commonly +// used. It stores the quaternion in memory as [q1, q2, q3, q0] or +// [x, y, z, w] where the real (scalar) part is last. +// +// Since Ceres operates on parameter blocks which are raw double pointers this +// difference is important and requires a different manifold. 
+class CERES_EXPORT EigenQuaternionManifold final : public Manifold { + public: + int AmbientSize() const override { return 4; } + int TangentSize() const override { return 3; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; +}; + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#endif // CERES_PUBLIC_MANIFOLD_H_ diff --git a/extern/ceres/include/ceres/manifold_test_utils.h b/extern/ceres/include/ceres/manifold_test_utils.h new file mode 100644 index 00000000000..3f9fb21e8f3 --- /dev/null +++ b/extern/ceres/include/ceres/manifold_test_utils.h @@ -0,0 +1,328 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#include <cmath> +#include <limits> +#include <memory> + +#include "ceres/dynamic_numeric_diff_cost_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/manifold.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace ceres { + +// Matchers and macros for help with testing Manifold objects. +// +// Testing a Manifold has two parts. +// +// 1. Checking that Manifold::Plus is correctly defined. This requires per +// manifold tests. +// +// 2. The other methods of the manifold have mathematical properties that make +// it compatible with Plus, as described in: +// +// "Integrating Generic Sensor Fusion Algorithms with Sound State +// Representations through Encapsulation of Manifolds" +// By C. Hertzberg, R. Wagner, U. Frese and L. Schroder +// https://arxiv.org/pdf/1107.1119.pdf +// +// These tests are implemented using generic matchers defined below which can +// all be called by the macro EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, +// delta, y, tolerance). See manifold_test.cc for example usage. 
+ +// Checks that the invariant Plus(x, 0) == x holds. +MATCHER_P2(XPlusZeroIsXAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + Vector actual = Vector::Zero(ambient_size); + Vector zero = Vector::Zero(tangent_size); + EXPECT_TRUE(arg.Plus(x.data(), zero.data(), actual.data())); + const double n = (actual - x).norm(); + const double d = x.norm(); + const double diffnorm = (d == 0.0) ? n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nexpected (x): " << x.transpose() + << "\nactual: " << actual.transpose() + << "\ndiffnorm: " << diffnorm; + return false; + } + return true; +} + +// Checks that the invariant Minus(x, x) == 0 holds. +MATCHER_P2(XMinusXIsZeroAt, x, tolerance, "") { + const int tangent_size = arg.TangentSize(); + Vector actual = Vector::Zero(tangent_size); + EXPECT_TRUE(arg.Minus(x.data(), x.data(), actual.data())); + const double diffnorm = actual.norm(); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() // + << "\nexpected: 0 0 0" + << "\nactual: " << actual.transpose() + << "\ndiffnorm: " << diffnorm; + return false; + } + return true; +} + +// Helper struct to curry Plus(x, .) so that it can be numerically +// differentiated. +struct PlusFunctor { + PlusFunctor(const Manifold& manifold, const double* x) + : manifold(manifold), x(x) {} + bool operator()(double const* const* parameters, double* x_plus_delta) const { + return manifold.Plus(x, parameters[0], x_plus_delta); + } + + const Manifold& manifold; + const double* x; +}; + +// Checks that the output of PlusJacobian matches the one obtained by +// numerically evaluating D_2 Plus(x,0). 
+MATCHER_P2(HasCorrectPlusJacobianAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + NumericDiffOptions options; + options.ridders_relative_initial_step_size = 1e-4; + + DynamicNumericDiffCostFunction<PlusFunctor, RIDDERS> cost_function( + new PlusFunctor(arg, x.data()), TAKE_OWNERSHIP, options); + cost_function.AddParameterBlock(tangent_size); + cost_function.SetNumResiduals(ambient_size); + + Vector zero = Vector::Zero(tangent_size); + double* parameters[1] = {zero.data()}; + + Vector x_plus_zero = Vector::Zero(ambient_size); + Matrix expected = Matrix::Zero(ambient_size, tangent_size); + double* jacobians[1] = {expected.data()}; + + EXPECT_TRUE( + cost_function.Evaluate(parameters, x_plus_zero.data(), jacobians)); + + Matrix actual = Matrix::Random(ambient_size, tangent_size); + EXPECT_TRUE(arg.PlusJacobian(x.data(), actual.data())); + + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = (d == 0.0) ? n : n / d; + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm : " << diffnorm; + return false; + } + return true; +} + +// Checks that the invariant Minus(Plus(x, delta), x) == delta holds. +MATCHER_P3(MinusPlusIsIdentityAt, x, delta, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + Vector x_plus_delta = Vector::Zero(ambient_size); + EXPECT_TRUE(arg.Plus(x.data(), delta.data(), x_plus_delta.data())); + Vector actual = Vector::Zero(tangent_size); + EXPECT_TRUE(arg.Minus(x_plus_delta.data(), x.data(), actual.data())); + + const double n = (actual - delta).norm(); + const double d = delta.norm(); + const double diffnorm = (d == 0.0) ? 
n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() + << "\nexpected: " << delta.transpose() + << "\nactual:" << actual.transpose() + << "\ndiff:" << (delta - actual).transpose() + << "\ndiffnorm: " << diffnorm; + return false; + } + return true; +} + +// Checks that the invariant Plus(Minus(y, x), x) == y holds. +MATCHER_P3(PlusMinusIsIdentityAt, x, y, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + Vector y_minus_x = Vector::Zero(tangent_size); + EXPECT_TRUE(arg.Minus(y.data(), x.data(), y_minus_x.data())); + + Vector actual = Vector::Zero(ambient_size); + EXPECT_TRUE(arg.Plus(x.data(), y_minus_x.data(), actual.data())); + + const double n = (actual - y).norm(); + const double d = y.norm(); + const double diffnorm = (d == 0.0) ? n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() + << "\nexpected: " << y.transpose() + << "\nactual:" << actual.transpose() + << "\ndiff:" << (y - actual).transpose() + << "\ndiffnorm: " << diffnorm; + return false; + } + return true; +} + +// Helper struct to curry Minus(., x) so that it can be numerically +// differentiated. +struct MinusFunctor { + MinusFunctor(const Manifold& manifold, const double* x) + : manifold(manifold), x(x) {} + bool operator()(double const* const* parameters, double* y_minus_x) const { + return manifold.Minus(parameters[0], x, y_minus_x); + } + + const Manifold& manifold; + const double* x; +}; + +// Checks that the output of MinusJacobian matches the one obtained by +// numerically evaluating D_1 Minus(x,x). 
+MATCHER_P2(HasCorrectMinusJacobianAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + Vector y = x; + Vector y_minus_x = Vector::Zero(tangent_size); + + NumericDiffOptions options; + options.ridders_relative_initial_step_size = 1e-4; + DynamicNumericDiffCostFunction<MinusFunctor, RIDDERS> cost_function( + new MinusFunctor(arg, x.data()), TAKE_OWNERSHIP, options); + cost_function.AddParameterBlock(ambient_size); + cost_function.SetNumResiduals(tangent_size); + + double* parameters[1] = {y.data()}; + + Matrix expected = Matrix::Zero(tangent_size, ambient_size); + double* jacobians[1] = {expected.data()}; + + EXPECT_TRUE(cost_function.Evaluate(parameters, y_minus_x.data(), jacobians)); + + Matrix actual = Matrix::Random(tangent_size, ambient_size); + EXPECT_TRUE(arg.MinusJacobian(x.data(), actual.data())); + + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = (d == 0.0) ? n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm: " << diffnorm; + return false; + } + return true; +} + +// Checks that D_delta Minus(Plus(x, delta), x) at delta = 0 is an identity +// matrix. 
+MATCHER_P2(MinusPlusJacobianIsIdentityAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + Matrix plus_jacobian(ambient_size, tangent_size); + EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data())); + Matrix minus_jacobian(tangent_size, ambient_size); + EXPECT_TRUE(arg.MinusJacobian(x.data(), minus_jacobian.data())); + + const Matrix actual = minus_jacobian * plus_jacobian; + const Matrix expected = Matrix::Identity(tangent_size, tangent_size); + + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = n / d; + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm: " << diffnorm; + + return false; + } + return true; +} + +// Verify that the output of RightMultiplyByPlusJacobian is ambient_matrix * +// plus_jacobian. +MATCHER_P2(HasCorrectRightMultiplyByPlusJacobianAt, x, tolerance, "") { + const int ambient_size = arg.AmbientSize(); + const int tangent_size = arg.TangentSize(); + + constexpr int kMinNumRows = 0; + constexpr int kMaxNumRows = 3; + for (int num_rows = kMinNumRows; num_rows <= kMaxNumRows; ++num_rows) { + Matrix plus_jacobian = Matrix::Random(ambient_size, tangent_size); + EXPECT_TRUE(arg.PlusJacobian(x.data(), plus_jacobian.data())); + + Matrix ambient_matrix = Matrix::Random(num_rows, ambient_size); + Matrix expected = ambient_matrix * plus_jacobian; + + Matrix actual = Matrix::Random(num_rows, tangent_size); + EXPECT_TRUE(arg.RightMultiplyByPlusJacobian( + x.data(), num_rows, ambient_matrix.data(), actual.data())); + const double n = (actual - expected).norm(); + const double d = expected.norm(); + const double diffnorm = (d == 0.0) ? 
n : (n / d); + if (diffnorm > tolerance) { + *result_listener << "\nx: " << x.transpose() << "\nambient_matrix : \n" + << ambient_matrix << "\nplus_jacobian : \n" + << plus_jacobian << "\nexpected: \n" + << expected << "\nactual:\n" + << actual << "\ndiff:\n" + << expected - actual << "\ndiffnorm : " << diffnorm; + return false; + } + } + return true; +} + +#define EXPECT_THAT_MANIFOLD_INVARIANTS_HOLD(manifold, x, delta, y, tolerance) \ + Vector zero_tangent = Vector::Zero(manifold.TangentSize()); \ + EXPECT_THAT(manifold, XPlusZeroIsXAt(x, tolerance)); \ + EXPECT_THAT(manifold, XMinusXIsZeroAt(x, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, delta, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusIsIdentityAt(x, zero_tangent, tolerance)); \ + EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, x, tolerance)); \ + EXPECT_THAT(manifold, PlusMinusIsIdentityAt(x, y, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectPlusJacobianAt(x, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectMinusJacobianAt(x, tolerance)); \ + EXPECT_THAT(manifold, MinusPlusJacobianIsIdentityAt(x, tolerance)); \ + EXPECT_THAT(manifold, HasCorrectRightMultiplyByPlusJacobianAt(x, tolerance)); + +} // namespace ceres diff --git a/extern/ceres/include/ceres/normal_prior.h b/extern/ceres/include/ceres/normal_prior.h index 14ab379f4af..c5c7f3e623e 100644 --- a/extern/ceres/include/ceres/normal_prior.h +++ b/extern/ceres/include/ceres/normal_prior.h @@ -57,7 +57,7 @@ namespace ceres { // which would be the case if the covariance matrix S is rank // deficient. -class CERES_EXPORT NormalPrior : public CostFunction { +class CERES_EXPORT NormalPrior final : public CostFunction { public: // Check that the number of rows in the vector b are the same as the // number of columns in the matrix A, crash otherwise. 
diff --git a/extern/ceres/include/ceres/numeric_diff_cost_function.h b/extern/ceres/include/ceres/numeric_diff_cost_function.h index cf7971cde79..6ec53175030 100644 --- a/extern/ceres/include/ceres/numeric_diff_cost_function.h +++ b/extern/ceres/include/ceres/numeric_diff_cost_function.h @@ -179,9 +179,10 @@ template <typename CostFunctor, NumericDiffMethodType method = CENTRAL, int kNumResiduals = 0, // Number of residuals, or ceres::DYNAMIC int... Ns> // Parameters dimensions for each block. -class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { +class NumericDiffCostFunction final + : public SizedCostFunction<kNumResiduals, Ns...> { public: - NumericDiffCostFunction( + explicit NumericDiffCostFunction( CostFunctor* functor, Ownership ownership = TAKE_OWNERSHIP, int num_residuals = kNumResiduals, @@ -192,7 +193,7 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { } } - explicit NumericDiffCostFunction(NumericDiffCostFunction&& other) + NumericDiffCostFunction(NumericDiffCostFunction&& other) : functor_(std::move(other.functor_)), ownership_(other.ownership_) {} virtual ~NumericDiffCostFunction() { @@ -219,7 +220,7 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { return false; } - if (jacobians == NULL) { + if (jacobians == nullptr) { return true; } @@ -246,6 +247,8 @@ class NumericDiffCostFunction : public SizedCostFunction<kNumResiduals, Ns...> { return true; } + const CostFunctor& functor() const { return *functor_; } + private: std::unique_ptr<CostFunctor> functor_; Ownership ownership_; diff --git a/extern/ceres/include/ceres/numeric_diff_first_order_function.h b/extern/ceres/include/ceres/numeric_diff_first_order_function.h new file mode 100644 index 00000000000..f5bb005be58 --- /dev/null +++ b/extern/ceres/include/ceres/numeric_diff_first_order_function.h @@ -0,0 +1,163 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2019 Google Inc. 
All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: sameeragarwal@google.com (Sameer Agarwal) + +#ifndef CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ +#define CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ + +#include <algorithm> +#include <memory> + +#include "ceres/first_order_function.h" +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/numeric_diff.h" +#include "ceres/internal/parameter_dims.h" +#include "ceres/internal/variadic_evaluate.h" +#include "ceres/numeric_diff_options.h" +#include "ceres/types.h" + +namespace ceres { + +// Creates FirstOrderFunctions as needed by the GradientProblem +// framework, with gradients computed via numeric differentiation. For +// more information on numeric differentiation, see the wikipedia +// article at https://en.wikipedia.org/wiki/Numerical_differentiation +// +// To get a numerically differentiated cost function, you must define +// a class with an operator() (a functor) that computes the cost. +// +// The function must write the computed value in the last argument +// (the only non-const one) and return true to indicate success. +// +// For example, consider a scalar error e = x'y - a, where both x and y are +// two-dimensional column vector parameters, the prime sign indicates +// transposition, and a is a constant. +// +// To write a numerically-differentiable cost function for the above model, +// first define the object +// +// class QuadraticCostFunctor { +// public: +// explicit QuadraticCostFunctor(double a) : a_(a) {} +// bool operator()(const double* const xy, double* cost) const { +// constexpr int kInputVectorLength = 2; +// const double* const x = xy; +// const double* const y = xy + kInputVectorLength; +// *cost = x[0] * y[0] + x[1] * y[1] - a_; +// return true; +// } +// +// private: +// double a_; +// }; +// +// +// Note that in the declaration of operator() the input parameters xy +// come first, and are passed as const pointers to array of +// doubles.
The output cost is the last parameter. +// +// Then given this class definition, the numerically differentiated +// first order function with central differences used for computing the +// derivative can be constructed as follows. +// +// FirstOrderFunction* function +// = new NumericDiffFirstOrderFunction<QuadraticCostFunctor, CENTRAL, 4>( +// new QuadraticCostFunctor(1.0)); ^ ^ ^ +// | | | +// Finite Differencing Scheme -+ | | +// Dimension of xy ------------------------+ +// +// +// In the instantiation above, the template parameters following +// "QuadraticCostFunctor", "CENTRAL, 4", describe the finite +// differencing scheme as "central differencing" and the functor as +// computing its cost from a 4 dimensional input. +template <typename FirstOrderFunctor, + NumericDiffMethodType method, + int kNumParameters> +class NumericDiffFirstOrderFunction final : public FirstOrderFunction { + public: + explicit NumericDiffFirstOrderFunction( + FirstOrderFunctor* functor, + Ownership ownership = TAKE_OWNERSHIP, + const NumericDiffOptions& options = NumericDiffOptions()) + : functor_(functor), ownership_(ownership), options_(options) { + static_assert(kNumParameters > 0, "kNumParameters must be positive"); + } + + ~NumericDiffFirstOrderFunction() override { + if (ownership_ != TAKE_OWNERSHIP) { + functor_.release(); + } + } + + bool Evaluate(const double* const parameters, + double* cost, + double* gradient) const override { + using ParameterDims = internal::StaticParameterDims<kNumParameters>; + constexpr int kNumResiduals = 1; + + // Get the function value (cost) at the point to evaluate. + if (!internal::VariadicEvaluate<ParameterDims>( + *functor_, &parameters, cost)) { + return false; + } + + if (gradient == nullptr) { + return true; + } + + // Create a copy of the parameters which will get mutated.
+ internal::FixedArray<double, 32> parameters_copy(kNumParameters); + std::copy_n(parameters, kNumParameters, parameters_copy.data()); + double* parameters_ptr = parameters_copy.data(); + internal::EvaluateJacobianForParameterBlocks< + ParameterDims>::template Apply<method, kNumResiduals>(functor_.get(), + cost, + options_, + kNumResiduals, + &parameters_ptr, + &gradient); + return true; + } + + int NumParameters() const override { return kNumParameters; } + + const FirstOrderFunctor& functor() const { return *functor_; } + + private: + std::unique_ptr<FirstOrderFunctor> functor_; + Ownership ownership_; + NumericDiffOptions options_; +}; + +} // namespace ceres + +#endif // CERES_PUBLIC_NUMERIC_DIFF_FIRST_ORDER_FUNCTION_H_ diff --git a/extern/ceres/include/ceres/numeric_diff_options.h b/extern/ceres/include/ceres/numeric_diff_options.h index 64919ed5ab1..b025b51d938 100644 --- a/extern/ceres/include/ceres/numeric_diff_options.h +++ b/extern/ceres/include/ceres/numeric_diff_options.h @@ -32,7 +32,8 @@ #ifndef CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ #define CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ -#include "ceres/internal/port.h" +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" namespace ceres { @@ -70,4 +71,6 @@ struct CERES_EXPORT NumericDiffOptions { } // namespace ceres +#include "ceres/internal/reenable_warnings.h" + #endif // CERES_PUBLIC_NUMERIC_DIFF_OPTIONS_H_ diff --git a/extern/ceres/include/ceres/ordered_groups.h b/extern/ceres/include/ceres/ordered_groups.h index 954663c97e6..c1531cce65f 100644 --- a/extern/ceres/include/ceres/ordered_groups.h +++ b/extern/ceres/include/ceres/ordered_groups.h @@ -36,7 +36,7 @@ #include <unordered_map> #include <vector> -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" #include "glog/logging.h" namespace ceres { @@ -190,7 +190,7 @@ class OrderedGroups { }; // Typedef for the most commonly used version of OrderedGroups.
-typedef OrderedGroups<double*> ParameterBlockOrdering; +using ParameterBlockOrdering = OrderedGroups<double*>; } // namespace ceres diff --git a/extern/ceres/include/ceres/problem.h b/extern/ceres/include/ceres/problem.h index add12ea401d..819fa454b21 100644 --- a/extern/ceres/include/ceres/problem.h +++ b/extern/ceres/include/ceres/problem.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2015 Google Inc. All rights reserved. +// Copyright 2021 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -43,6 +43,7 @@ #include "ceres/context.h" #include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" #include "ceres/internal/port.h" #include "ceres/types.h" #include "glog/logging.h" @@ -53,6 +54,7 @@ class CostFunction; class EvaluationCallback; class LossFunction; class LocalParameterization; +class Manifold; class Solver; struct CRSMatrix; @@ -65,7 +67,7 @@ class ResidualBlock; // A ResidualBlockId is an opaque handle clients can use to remove residual // blocks from a Problem after adding them. -typedef internal::ResidualBlock* ResidualBlockId; +using ResidualBlockId = internal::ResidualBlock*; // A class to represent non-linear least squares problems. Such // problems have a cost function that is a sum of error terms (known @@ -78,31 +80,28 @@ typedef internal::ResidualBlock* ResidualBlockId; // // where // -// r_ij is residual number i, component j; the residual is a -// function of some subset of the parameters x1...xk. For -// example, in a structure from motion problem a residual -// might be the difference between a measured point in an -// image and the reprojected position for the matching -// camera, point pair. The residual would have two -// components, error in x and error in y. +// r_ij is residual number i, component j; the residual is a function of some +// subset of the parameters x1...xk. 
For example, in a structure from +// motion problem a residual might be the difference between a measured +// point in an image and the reprojected position for the matching +// camera, point pair. The residual would have two components, error in x +// and error in y. // -// loss(y) is the loss function; for example, squared error or -// Huber L1 loss. If loss(y) = y, then the cost function is -// non-robustified least squares. +// loss(y) is the loss function; for example, squared error or Huber L1 +// loss. If loss(y) = y, then the cost function is non-robustified +// least squares. // -// This class is specifically designed to address the important subset -// of "sparse" least squares problems, where each component of the -// residual depends only on a small number number of parameters, even -// though the total number of residuals and parameters may be very -// large. This property affords tremendous gains in scale, allowing -// efficient solving of large problems that are otherwise -// inaccessible. +// This class is specifically designed to address the important subset of +// "sparse" least squares problems, where each component of the residual depends +// only on a small number of parameters, even though the total number of +// residuals and parameters may be very large. This property affords tremendous +// gains in scale, allowing efficient solving of large problems that are +// otherwise inaccessible. // // The canonical example of a sparse least squares problem is -// "structure-from-motion" (SFM), where the parameters are points and -// cameras, and residuals are reprojection errors. Typically a single -// residual will depend only on 9 parameters (3 for the point, 6 for -// the camera). +// "structure-from-motion" (SFM), where the parameters are points and cameras, +// and residuals are reprojection errors. Typically a single residual will +// depend only on 9 parameters (3 for the point, 6 for the camera).
// // To create a least squares problem, use the AddResidualBlock() and // AddParameterBlock() methods, documented below. Here is an example least @@ -119,41 +118,52 @@ typedef internal::ResidualBlock* ResidualBlockId; // problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x3); // // Please see cost_function.h for details of the CostFunction object. +// +// NOTE: We are currently in the process of transitioning from +// LocalParameterization to Manifolds in the Ceres API. During this period, +// Problem will support using both Manifold and LocalParameterization objects +// interchangeably. In particular, adding a LocalParameterization to a parameter +// block is the same as adding a Manifold to that parameter block. For methods +// in the API affected by this change, see their documentation below. class CERES_EXPORT Problem { public: struct CERES_EXPORT Options { - // These flags control whether the Problem object owns the cost - // functions, loss functions, and parameterizations passed into - // the Problem. If set to TAKE_OWNERSHIP, then the problem object - // will delete the corresponding cost or loss functions on - // destruction. The destructor is careful to delete the pointers - // only once, since sharing cost/loss/parameterizations is - // allowed. + // These flags control whether the Problem object owns the CostFunctions, + // LossFunctions, LocalParameterizations, and Manifolds passed into the + // Problem. + // + // If set to TAKE_OWNERSHIP, then the problem object will delete the + // corresponding object on destruction. The destructor is careful to delete + // the pointers only once, since sharing objects is allowed. Ownership cost_function_ownership = TAKE_OWNERSHIP; Ownership loss_function_ownership = TAKE_OWNERSHIP; + CERES_DEPRECATED_WITH_MSG( + "Local Parameterizations are deprecated.
Use Manifold and " + "manifold_ownership instead.") Ownership local_parameterization_ownership = TAKE_OWNERSHIP; + Ownership manifold_ownership = TAKE_OWNERSHIP; // If true, trades memory for faster RemoveResidualBlock() and // RemoveParameterBlock() operations. // // By default, RemoveParameterBlock() and RemoveResidualBlock() take time - // proportional to the size of the entire problem. If you only ever remove + // proportional to the size of the entire problem. If you only ever remove // parameters or residuals from the problem occasionally, this might be - // acceptable. However, if you have memory to spare, enable this option to + // acceptable. However, if you have memory to spare, enable this option to // make RemoveParameterBlock() take time proportional to the number of // residual blocks that depend on it, and RemoveResidualBlock() take (on // average) constant time. // - // The increase in memory usage is twofold: an additional hash set per + // The increase in memory usage is two-fold: an additional hash set per // parameter block containing all the residuals that depend on the parameter // block; and a hash set in the problem containing all residuals. bool enable_fast_removal = false; // By default, Ceres performs a variety of safety checks when constructing - // the problem. There is a small but measurable performance penalty to - // these checks, typically around 5% of construction time. If you are sure - // your problem construction is correct, and 5% of the problem construction - // time is truly an overhead you want to avoid, then you can set + // the problem. There is a small but measurable performance penalty to these + // checks, typically around 5% of construction time. If you are sure your + // problem construction is correct, and 5% of the problem construction time + // is truly an overhead you want to avoid, then you can set // disable_all_safety_checks to true. 
// // WARNING: Do not set this to true, unless you are absolutely sure of what @@ -167,26 +177,23 @@ class CERES_EXPORT Problem { // Ceres does NOT take ownership of the pointer. Context* context = nullptr; - // Using this callback interface, Ceres can notify you when it is - // about to evaluate the residuals or jacobians. With the - // callback, you can share computation between residual blocks by - // doing the shared computation in + // Using this callback interface, Ceres can notify you when it is about to + // evaluate the residuals or jacobians. With the callback, you can share + // computation between residual blocks by doing the shared computation in // EvaluationCallback::PrepareForEvaluation() before Ceres calls - // CostFunction::Evaluate(). It also enables caching results - // between a pure residual evaluation and a residual & jacobian - // evaluation. + // CostFunction::Evaluate(). It also enables caching results between a pure + // residual evaluation and a residual & jacobian evaluation. // // Problem DOES NOT take ownership of the callback. // - // NOTE: Evaluation callbacks are incompatible with inner - // iterations. So calling Solve with - // Solver::Options::use_inner_iterations = true on a Problem with - // a non-null evaluation callback is an error. + // NOTE: Evaluation callbacks are incompatible with inner iterations. So + // calling Solve with Solver::Options::use_inner_iterations = true on a + // Problem with a non-null evaluation callback is an error. EvaluationCallback* evaluation_callback = nullptr; }; - // The default constructor is equivalent to the - // invocation Problem(Problem::Options()). + // The default constructor is equivalent to the invocation + // Problem(Problem::Options()). Problem(); explicit Problem(const Options& options); Problem(Problem&&); @@ -197,31 +204,29 @@ class CERES_EXPORT Problem { ~Problem(); - // Add a residual block to the overall cost function. 
The cost - // function carries with its information about the sizes of the - // parameter blocks it expects. The function checks that these match - // the sizes of the parameter blocks listed in parameter_blocks. The - // program aborts if a mismatch is detected. loss_function can be - // nullptr, in which case the cost of the term is just the squared norm - // of the residuals. - // - // The user has the option of explicitly adding the parameter blocks - // using AddParameterBlock. This causes additional correctness - // checking; however, AddResidualBlock implicitly adds the parameter - // blocks if they are not present, so calling AddParameterBlock - // explicitly is not required. - // - // The Problem object by default takes ownership of the - // cost_function and loss_function pointers. These objects remain - // live for the life of the Problem object. If the user wishes to - // keep control over the destruction of these objects, then they can + // Add a residual block to the overall cost function. The cost function + // carries with its information about the sizes of the parameter blocks it + // expects. The function checks that these match the sizes of the parameter + // blocks listed in parameter_blocks. The program aborts if a mismatch is + // detected. loss_function can be nullptr, in which case the cost of the term + // is just the squared norm of the residuals. + // + // The user has the option of explicitly adding the parameter blocks using + // AddParameterBlock. This causes additional correctness checking; however, + // AddResidualBlock implicitly adds the parameter blocks if they are not + // present, so calling AddParameterBlock explicitly is not required. + // + // The Problem object by default takes ownership of the cost_function and + // loss_function pointers (See Problem::Options to override this behaviour). + // These objects remain live for the life of the Problem object. 
If the user + // wishes to keep control over the destruction of these objects, then they can // do this by setting the corresponding enums in the Options struct. // - // Note: Even though the Problem takes ownership of cost_function - // and loss_function, it does not preclude the user from re-using - // them in another residual block. The destructor takes care to call - // delete on each cost_function or loss_function pointer only once, - // regardless of how many residual blocks refer to them. + // Note: Even though the Problem takes ownership of cost_function and + // loss_function, it does not preclude the user from re-using them in another + // residual block. The destructor takes care to call delete on each + // cost_function or loss_function pointer only once, regardless of how many + // residual blocks refer to them. // // Example usage: // @@ -234,8 +239,8 @@ class CERES_EXPORT Problem { // problem.AddResidualBlock(new MyUnaryCostFunction(...), nullptr, x1); // problem.AddResidualBlock(new MyBinaryCostFunction(...), nullptr, x2, x1); // - // Add a residual block by listing the parameter block pointers - // directly instead of wapping them in a container. + // Add a residual block by listing the parameter block pointers directly + // instead of wrapping them in a container. template <typename... Ts> ResidualBlockId AddResidualBlock(CostFunction* cost_function, LossFunction* loss_function, @@ -261,29 +266,75 @@ class CERES_EXPORT Problem { double* const* const parameter_blocks, int num_parameter_blocks); - // Add a parameter block with appropriate size to the problem. - // Repeated calls with the same arguments are ignored. Repeated - // calls with the same double pointer but a different size results - // in undefined behaviour. + // Add a parameter block with appropriate size to the problem. Repeated calls + // with the same arguments are ignored. Repeated calls with the same double + // pointer but a different size will result in a crash.
void AddParameterBlock(double* values, int size);
- // Add a parameter block with appropriate size and parameterization
- // to the problem. Repeated calls with the same arguments are
- // ignored. Repeated calls with the same double pointer but a
- // different size results in undefined behaviour.
+ // Add a parameter block with appropriate size and parameterization to the
+ // problem. It is okay for local_parameterization to be nullptr.
+ //
+ // Repeated calls with the same arguments are ignored. Repeated calls
+ // with the same double pointer but a different size results in a crash
+ // (unless Solver::Options::disable_all_safety_checks is set to true).
+ //
+ // Repeated calls with the same double pointer and size but different
+ // LocalParameterization is equivalent to calling
+ // SetParameterization(local_parameterization), i.e., any previously
+ // associated LocalParameterization or Manifold object will be replaced with
+ // the local_parameterization.
+ //
+ // NOTE:
+ // ----
+ //
+ // This method is deprecated and will be removed in the next public
+ // release of Ceres Solver. Please move to using the Manifold based version of
+ // AddParameterBlock.
+ //
+ // During the transition from LocalParameterization to Manifold, internally
+ // the LocalParameterization is treated as a Manifold by wrapping it using a
+ // ManifoldAdapter object. So HasManifold() will return true, GetManifold()
+ // will return the wrapped object and ParameterBlockTangentSize() will return
+ // the LocalSize of the LocalParameterization.
+ CERES_DEPRECATED_WITH_MSG(
+ "LocalParameterizations are deprecated. Use the version with Manifolds "
+ "instead.")
void AddParameterBlock(double* values,
int size,
LocalParameterization* local_parameterization);
- // Remove a parameter block from the problem. The parameterization of the
- // parameter block, if it exists, will persist until the deletion of the
- // problem (similar to cost/loss functions in residual block removal). 
Any
- // residual blocks that depend on the parameter are also removed, as
- // described above in RemoveResidualBlock().
+ // Add a parameter block with appropriate size and Manifold to the
+ // problem. It is okay for manifold to be nullptr.
//
- // If Problem::Options::enable_fast_removal is true, then the
- // removal is fast (almost constant time). Otherwise, removing a parameter
- // block will incur a scan of the entire Problem object.
+ // Repeated calls with the same arguments are ignored. Repeated calls
+ // with the same double pointer but a different size results in a crash
+ // (unless Solver::Options::disable_all_safety_checks is set to true).
+ //
+ // Repeated calls with the same double pointer and size but different Manifold
+ // is equivalent to calling SetManifold(manifold), i.e., any previously
+ // associated LocalParameterization or Manifold object will be replaced with
+ // the manifold.
+ //
+ // Note:
+ // ----
+ //
+ // During the transition from LocalParameterization to Manifold, calling
+ // AddParameterBlock with a Manifold when a LocalParameterization is already
+ // associated with the parameter block is okay. It is equivalent to calling
+ // SetManifold(manifold), i.e., any previously associated
+ // LocalParameterization or Manifold object will be replaced with the
+ // manifold.
+ void AddParameterBlock(double* values, int size, Manifold* manifold);
+
+ // Remove a parameter block from the problem. The LocalParameterization or
+ // Manifold of the parameter block, if it exists, will persist until the
+ // deletion of the problem (similar to cost/loss functions in residual block
+ // removal). Any residual blocks that depend on the parameter are also
+ // removed, as described above in RemoveResidualBlock().
+ //
+ // If Problem::Options::enable_fast_removal is true, then the removal is fast
+ // (almost constant time). Otherwise, removing a parameter block will incur a
+ // scan of the entire Problem object. 
// // WARNING: Removing a residual or parameter block will destroy the implicit // ordering, rendering the jacobian or residuals returned from the solver @@ -308,35 +359,109 @@ class CERES_EXPORT Problem { // Allow the indicated parameter block to vary during optimization. void SetParameterBlockVariable(double* values); - // Returns true if a parameter block is set constant, and false - // otherwise. A parameter block may be set constant in two ways: - // either by calling SetParameterBlockConstant or by associating a - // LocalParameterization with a zero dimensional tangent space with - // it. + // Returns true if a parameter block is set constant, and false otherwise. A + // parameter block may be set constant in two ways: either by calling + // SetParameterBlockConstant or by associating a LocalParameterization or + // Manifold with a zero dimensional tangent space with it. bool IsParameterBlockConstant(const double* values) const; - // Set the local parameterization for one of the parameter blocks. - // The local_parameterization is owned by the Problem by default. It - // is acceptable to set the same parameterization for multiple - // parameters; the destructor is careful to delete local - // parameterizations only once. Calling SetParameterization with - // nullptr will clear any previously set parameterization. + // Set the LocalParameterization for the parameter block. Calling + // SetParameterization with nullptr will clear any previously set + // LocalParameterization or Manifold for the parameter block. + // + // Repeated calls will cause any previously associated LocalParameterization + // or Manifold object to be replaced with the local_parameterization. + // + // The local_parameterization is owned by the Problem by default (See + // Problem::Options to override this behaviour). 
+ //
+ // It is acceptable to set the same LocalParameterization for multiple
+ // parameter blocks; the destructor is careful to delete
+ // LocalParameterizations only once.
+ //
+ // NOTE:
+ // ----
+ //
+ // This method is deprecated and will be removed in the next public
+ // release of Ceres Solver. Please move to using the SetManifold instead.
+ //
+ // During the transition from LocalParameterization to Manifold, internally
+ // the LocalParameterization is treated as a Manifold by wrapping it using a
+ // ManifoldAdapter object. So HasManifold() will return true, GetManifold()
+ // will return the wrapped object and ParameterBlockTangentSize will return
+ // the same value of ParameterBlockLocalSize.
+ CERES_DEPRECATED_WITH_MSG(
+ "LocalParameterizations are deprecated. Use SetManifold instead.")
void SetParameterization(double* values,
LocalParameterization* local_parameterization);
- // Get the local parameterization object associated with this
- // parameter block. If there is no parameterization object
- // associated then nullptr is returned.
+ // Get the LocalParameterization object associated with this parameter block.
+ // If there is no LocalParameterization associated then nullptr is returned.
+ //
+ // NOTE: This method is deprecated and will be removed in the next public
+ // release of Ceres Solver. Use GetManifold instead.
+ //
+ // Note also that if a LocalParameterization is associated with a parameter
+ // block, HasManifold will return true and GetManifold will return the
+ // LocalParameterization wrapped in a ManifoldAdapter.
+ //
+ // The converse is NOT true, i.e., if a Manifold is associated with a
+ // parameter block, HasParameterization will return false and
+ // GetParameterization will return a nullptr.
+ CERES_DEPRECATED_WITH_MSG(
+ "LocalParameterizations are deprecated. 
Use GetManifold " + "instead.") const LocalParameterization* GetParameterization(const double* values) const; + // Returns true if a LocalParameterization is associated with this parameter + // block, false otherwise. + // + // NOTE: This method is deprecated and will be removed in the next public + // release of Ceres Solver. Use HasManifold instead. + // + // Note also that if a Manifold is associated with the parameter block, this + // method will return false. + CERES_DEPRECATED_WITH_MSG( + "LocalParameterizations are deprecated. Use HasManifold instead.") + bool HasParameterization(const double* values) const; + + // Set the Manifold for the parameter block. Calling SetManifold with nullptr + // will clear any previously set LocalParameterization or Manifold for the + // parameter block. + // + // Repeated calls will result in any previously associated + // LocalParameterization or Manifold object to be replaced with the manifold. + // + // The manifold is owned by the Problem by default (See Problem::Options to + // override this behaviour). + // + // It is acceptable to set the same Manifold for multiple parameter blocks. + void SetManifold(double* values, Manifold* manifold); + + // Get the Manifold object associated with this parameter block. + // + // If there is no Manifold Or LocalParameterization object associated then + // nullptr is returned. + // + // NOTE: During the transition from LocalParameterization to Manifold, + // internally the LocalParameterization is treated as a Manifold by wrapping + // it using a ManifoldAdapter object. So calling GetManifold on a parameter + // block with a LocalParameterization associated with it will return the + // LocalParameterization wrapped in a ManifoldAdapter + const Manifold* GetManifold(const double* values) const; + + // Returns true if a Manifold or a LocalParameterization is associated with + // this parameter block, false otherwise. 
+ bool HasManifold(const double* values) const; + // Set the lower/upper bound for the parameter at position "index". void SetParameterLowerBound(double* values, int index, double lower_bound); void SetParameterUpperBound(double* values, int index, double upper_bound); - // Get the lower/upper bound for the parameter at position - // "index". If the parameter is not bounded by the user, then its - // lower bound is -std::numeric_limits<double>::max() and upper - // bound is std::numeric_limits<double>::max(). + // Get the lower/upper bound for the parameter at position "index". If the + // parameter is not bounded by the user, then its lower bound is + // -std::numeric_limits<double>::max() and upper bound is + // std::numeric_limits<double>::max(). double GetParameterLowerBound(const double* values, int index) const; double GetParameterUpperBound(const double* values, int index) const; @@ -344,37 +469,47 @@ class CERES_EXPORT Problem { // parameter_blocks().size() and parameter_block_sizes().size(). int NumParameterBlocks() const; - // The size of the parameter vector obtained by summing over the - // sizes of all the parameter blocks. + // The size of the parameter vector obtained by summing over the sizes of all + // the parameter blocks. int NumParameters() const; // Number of residual blocks in the problem. Always equals // residual_blocks().size(). int NumResidualBlocks() const; - // The size of the residual vector obtained by summing over the - // sizes of all of the residual blocks. + // The size of the residual vector obtained by summing over the sizes of all + // of the residual blocks. int NumResiduals() const; // The size of the parameter block. int ParameterBlockSize(const double* values) const; - // The size of local parameterization for the parameter block. If - // there is no local parameterization associated with this parameter - // block, then ParameterBlockLocalSize = ParameterBlockSize. 
+ // The dimension of the tangent space of the LocalParameterization or Manifold
+ // for the parameter block. If there is no LocalParameterization or Manifold
+ // associated with this parameter block, then ParameterBlockLocalSize =
+ // ParameterBlockSize.
+ CERES_DEPRECATED_WITH_MSG(
+ "LocalParameterizations are deprecated. Use ParameterBlockTangentSize "
+ "instead.")
int ParameterBlockLocalSize(const double* values) const;
+ // The dimension of the tangent space of the LocalParameterization or Manifold
+ // for the parameter block. If there is no LocalParameterization or Manifold
+ // associated with this parameter block, then ParameterBlockTangentSize =
+ // ParameterBlockSize.
+ int ParameterBlockTangentSize(const double* values) const;
+
// Is the given parameter block present in this problem or not?
bool HasParameterBlock(const double* values) const;
- // Fills the passed parameter_blocks vector with pointers to the
- // parameter blocks currently in the problem. After this call,
- // parameter_block.size() == NumParameterBlocks.
+ // Fills the passed parameter_blocks vector with pointers to the parameter
+ // blocks currently in the problem. After this call, parameter_block.size() ==
+ // NumParameterBlocks.
void GetParameterBlocks(std::vector<double*>* parameter_blocks) const;
- // Fills the passed residual_blocks vector with pointers to the
- // residual blocks currently in the problem. After this call,
- // residual_blocks.size() == NumResidualBlocks.
+ // Fills the passed residual_blocks vector with pointers to the residual
+ // blocks currently in the problem. After this call, residual_blocks.size() ==
+ // NumResidualBlocks.
void GetResidualBlocks(std::vector<ResidualBlockId>* residual_blocks) const;
// Get all the parameter blocks that depend on the given residual block.
@@ -393,10 +528,10 @@ class CERES_EXPORT Problem {
// Get all the residual blocks that depend on the given parameter block. 
// - // If Problem::Options::enable_fast_removal is true, then - // getting the residual blocks is fast and depends only on the number of - // residual blocks. Otherwise, getting the residual blocks for a parameter - // block will incur a scan of the entire Problem object. + // If Problem::Options::enable_fast_removal is true, then getting the residual + // blocks is fast and depends only on the number of residual + // blocks. Otherwise, getting the residual blocks for a parameter block will + // incur a scan of the entire Problem object. void GetResidualBlocksForParameterBlock( const double* values, std::vector<ResidualBlockId>* residual_blocks) const; @@ -404,49 +539,45 @@ class CERES_EXPORT Problem { // Options struct to control Problem::Evaluate. struct EvaluateOptions { // The set of parameter blocks for which evaluation should be - // performed. This vector determines the order that parameter - // blocks occur in the gradient vector and in the columns of the - // jacobian matrix. If parameter_blocks is empty, then it is - // assumed to be equal to vector containing ALL the parameter - // blocks. Generally speaking the parameter blocks will occur in - // the order in which they were added to the problem. But, this - // may change if the user removes any parameter blocks from the - // problem. + // performed. This vector determines the order that parameter blocks occur + // in the gradient vector and in the columns of the jacobian matrix. If + // parameter_blocks is empty, then it is assumed to be equal to vector + // containing ALL the parameter blocks. Generally speaking the parameter + // blocks will occur in the order in which they were added to the + // problem. But, this may change if the user removes any parameter blocks + // from the problem. // - // NOTE: This vector should contain the same pointers as the ones - // used to add parameter blocks to the Problem. These parameter - // block should NOT point to new memory locations. 
Bad things will - // happen otherwise. + // NOTE: This vector should contain the same pointers as the ones used to + // add parameter blocks to the Problem. These parameter block should NOT + // point to new memory locations. Bad things will happen otherwise. std::vector<double*> parameter_blocks; - // The set of residual blocks to evaluate. This vector determines - // the order in which the residuals occur, and how the rows of the - // jacobian are ordered. If residual_blocks is empty, then it is - // assumed to be equal to the vector containing ALL the residual - // blocks. Generally speaking the residual blocks will occur in - // the order in which they were added to the problem. But, this - // may change if the user removes any residual blocks from the - // problem. + // The set of residual blocks to evaluate. This vector determines the order + // in which the residuals occur, and how the rows of the jacobian are + // ordered. If residual_blocks is empty, then it is assumed to be equal to + // the vector containing ALL the residual blocks. Generally speaking the + // residual blocks will occur in the order in which they were added to the + // problem. But, this may change if the user removes any residual blocks + // from the problem. std::vector<ResidualBlockId> residual_blocks; // Even though the residual blocks in the problem may contain loss - // functions, setting apply_loss_function to false will turn off - // the application of the loss function to the output of the cost - // function. This is of use for example if the user wishes to - // analyse the solution quality by studying the distribution of - // residuals before and after the solve. + // functions, setting apply_loss_function to false will turn off the + // application of the loss function to the output of the cost function. This + // is of use for example if the user wishes to analyse the solution quality + // by studying the distribution of residuals before and after the solve. 
bool apply_loss_function = true; int num_threads = 1; }; - // Evaluate Problem. Any of the output pointers can be nullptr. Which - // residual blocks and parameter blocks are used is controlled by - // the EvaluateOptions struct above. + // Evaluate Problem. Any of the output pointers can be nullptr. Which residual + // blocks and parameter blocks are used is controlled by the EvaluateOptions + // struct above. // - // Note 1: The evaluation will use the values stored in the memory - // locations pointed to by the parameter block pointers used at the - // time of the construction of the problem. i.e., + // Note 1: The evaluation will use the values stored in the memory locations + // pointed to by the parameter block pointers used at the time of the + // construction of the problem. i.e., // // Problem problem; // double x = 1; @@ -456,8 +587,8 @@ class CERES_EXPORT Problem { // problem.Evaluate(Problem::EvaluateOptions(), &cost, // nullptr, nullptr, nullptr); // - // The cost is evaluated at x = 1. If you wish to evaluate the - // problem at x = 2, then + // The cost is evaluated at x = 1. If you wish to evaluate the problem at x = + // 2, then // // x = 2; // problem.Evaluate(Problem::EvaluateOptions(), &cost, @@ -465,80 +596,75 @@ class CERES_EXPORT Problem { // // is the way to do so. // - // Note 2: If no local parameterizations are used, then the size of - // the gradient vector (and the number of columns in the jacobian) - // is the sum of the sizes of all the parameter blocks. If a - // parameter block has a local parameterization, then it contributes - // "LocalSize" entries to the gradient vector (and the number of - // columns in the jacobian). + // Note 2: If no LocalParameterizations or Manifolds are used, then the size + // of the gradient vector (and the number of columns in the jacobian) is the + // sum of the sizes of all the parameter blocks. 
If a parameter block has a + // LocalParameterization or Manifold, then it contributes "TangentSize" + // entries to the gradient vector (and the number of columns in the jacobian). // - // Note 3: This function cannot be called while the problem is being - // solved, for example it cannot be called from an IterationCallback - // at the end of an iteration during a solve. + // Note 3: This function cannot be called while the problem is being solved, + // for example it cannot be called from an IterationCallback at the end of an + // iteration during a solve. // - // Note 4: If an EvaluationCallback is associated with the problem, - // then its PrepareForEvaluation method will be called every time - // this method is called with new_point = true. + // Note 4: If an EvaluationCallback is associated with the problem, then its + // PrepareForEvaluation method will be called every time this method is called + // with new_point = true. bool Evaluate(const EvaluateOptions& options, double* cost, std::vector<double>* residuals, std::vector<double>* gradient, CRSMatrix* jacobian); - // Evaluates the residual block, storing the scalar cost in *cost, - // the residual components in *residuals, and the jacobians between - // the parameters and residuals in jacobians[i], in row-major order. + // Evaluates the residual block, storing the scalar cost in *cost, the + // residual components in *residuals, and the jacobians between the parameters + // and residuals in jacobians[i], in row-major order. // // If residuals is nullptr, the residuals are not computed. // - // If jacobians is nullptr, no Jacobians are computed. If - // jacobians[i] is nullptr, then the Jacobian for that parameter - // block is not computed. + // If jacobians is nullptr, no Jacobians are computed. If jacobians[i] is + // nullptr, then the Jacobian for that parameter block is not computed. // - // It is not okay to request the Jacobian w.r.t a parameter block - // that is constant. 
+ // It is not okay to request the Jacobian w.r.t a parameter block that is + // constant. // - // The return value indicates the success or failure. Even if the - // function returns false, the caller should expect the output - // memory locations to have been modified. + // The return value indicates the success or failure. Even if the function + // returns false, the caller should expect the output memory locations to have + // been modified. // // The returned cost and jacobians have had robustification and - // local parameterizations applied already; for example, the - // jacobian for a 4-dimensional quaternion parameter using the - // "QuaternionParameterization" is num_residuals by 3 instead of - // num_residuals by 4. + // LocalParameterization/Manifold applied already; for example, the jacobian + // for a 4-dimensional quaternion parameter using the + // "QuaternionParameterization" is num_residuals by 3 instead of num_residuals + // by 4. // - // apply_loss_function as the name implies allows the user to switch - // the application of the loss function on and off. + // apply_loss_function as the name implies allows the user to switch the + // application of the loss function on and off. // // If an EvaluationCallback is associated with the problem, then its - // PrepareForEvaluation method will be called every time this method - // is called with new_point = true. This conservatively assumes that - // the user may have changed the parameter values since the previous - // call to evaluate / solve. For improved efficiency, and only if - // you know that the parameter values have not changed between - // calls, see EvaluateResidualBlockAssumingParametersUnchanged(). + // PrepareForEvaluation method will be called every time this method is called + // with new_point = true. This conservatively assumes that the user may have + // changed the parameter values since the previous call to evaluate / solve. 
+ // For improved efficiency, and only if you know that the parameter values + // have not changed between calls, see + // EvaluateResidualBlockAssumingParametersUnchanged(). bool EvaluateResidualBlock(ResidualBlockId residual_block_id, bool apply_loss_function, double* cost, double* residuals, double** jacobians) const; - // Same as EvaluateResidualBlock except that if an - // EvaluationCallback is associated with the problem, then its - // PrepareForEvaluation method will be called every time this method - // is called with new_point = false. - // - // This means, if an EvaluationCallback is associated with the - // problem then it is the user's responsibility to call - // PrepareForEvaluation before calling this method if necessary, - // i.e. iff the parameter values have been changed since the last - // call to evaluate / solve.' - // - // This is because, as the name implies, we assume that the - // parameter blocks did not change since the last time - // PrepareForEvaluation was called (via Solve, Evaluate or - // EvaluateResidualBlock). + // Same as EvaluateResidualBlock except that if an EvaluationCallback is + // associated with the problem, then its PrepareForEvaluation method will be + // called every time this method is called with new_point = false. + // + // This means, if an EvaluationCallback is associated with the problem then it + // is the user's responsibility to call PrepareForEvaluation before calling + // this method if necessary, i.e. iff the parameter values have been changed + // since the last call to evaluate / solve.' + // + // This is because, as the name implies, we assume that the parameter blocks + // did not change since the last time PrepareForEvaluation was called (via + // Solve, Evaluate or EvaluateResidualBlock). 
bool EvaluateResidualBlockAssumingParametersUnchanged( ResidualBlockId residual_block_id, bool apply_loss_function, diff --git a/extern/ceres/include/ceres/product_manifold.h b/extern/ceres/include/ceres/product_manifold.h new file mode 100644 index 00000000000..33f046da24e --- /dev/null +++ b/extern/ceres/include/ceres/product_manifold.h @@ -0,0 +1,328 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: sameeragarwal@google.com (Sameer Agarwal) +// sergiu.deitsch@gmail.com (Sergiu Deitsch) +// + +#ifndef CERES_PUBLIC_PRODUCT_MANIFOLD_H_ +#define CERES_PUBLIC_PRODUCT_MANIFOLD_H_ + +#include <algorithm> +#include <array> +#include <cassert> +#include <cstddef> +#include <numeric> +#include <tuple> +#include <type_traits> +#include <utility> + +#include "ceres/internal/eigen.h" +#include "ceres/internal/fixed_array.h" +#include "ceres/internal/port.h" +#include "ceres/manifold.h" + +namespace ceres { + +// Construct a manifold by taking the Cartesian product of a number of other +// manifolds. This is useful, when a parameter block is the Cartesian product +// of two or more manifolds. For example the parameters of a camera consist of +// a rotation and a translation, i.e., SO(3) x R^3. +// +// Example usage: +// +// ProductManifold<QuaternionManifold, EuclideanManifold<3>> se3; +// +// is the manifold for a rigid transformation, where the rotation is +// represented using a quaternion. 
+// +// Manifolds can be copied and moved to ProductManifold: +// +// SubsetManifold manifold1(5, {2}); +// SubsetManifold manifold2(3, {0, 1}); +// ProductManifold<SubsetManifold, SubsetManifold> manifold(manifold1, +// manifold2); +// +// In advanced use cases, manifolds can be dynamically allocated and passed as +// (smart) pointers: +// +// ProductManifold<std::unique_ptr<QuaternionManifold>, EuclideanManifold<3>> +// se3{std::make_unique<QuaternionManifold>(), EuclideanManifold<3>{}}; +// +// In C++17, the template parameters can be left out as they are automatically +// deduced making the initialization much simpler: +// +// ProductManifold se3{QuaternionManifold{}, EuclideanManifold<3>{}}; +// +// The manifold implementations must be either default constructible, copyable +// or moveable to be usable in a ProductManifold. +template <typename Manifold0, typename Manifold1, typename... ManifoldN> +class ProductManifold final : public Manifold { + public: + // ProductManifold constructor perfect forwards arguments to store manifolds. + // + // Either use default construction or if you need to copy or move-construct a + // manifold instance, you need to pass an instance as an argument for all + // types given as class template parameters. + template <typename... Args, + std::enable_if_t<std::is_constructible< + std::tuple<Manifold0, Manifold1, ManifoldN...>, + Args...>::value>* = nullptr> + explicit ProductManifold(Args&&... 
manifolds) + : ProductManifold{std::make_index_sequence<kNumManifolds>{}, + std::forward<Args>(manifolds)...} {} + + int AmbientSize() const override { return ambient_size_; } + int TangentSize() const override { return tangent_size_; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override { + return PlusImpl( + x, delta, x_plus_delta, std::make_index_sequence<kNumManifolds>{}); + } + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override { + return MinusImpl( + y, x, y_minus_x, std::make_index_sequence<kNumManifolds>{}); + } + + bool PlusJacobian(const double* x, double* jacobian_ptr) const override { + MatrixRef jacobian(jacobian_ptr, AmbientSize(), TangentSize()); + jacobian.setZero(); + internal::FixedArray<double> buffer(buffer_size_); + + return PlusJacobianImpl( + x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{}); + } + + bool MinusJacobian(const double* x, double* jacobian_ptr) const override { + MatrixRef jacobian(jacobian_ptr, TangentSize(), AmbientSize()); + jacobian.setZero(); + internal::FixedArray<double> buffer(buffer_size_); + + return MinusJacobianImpl( + x, jacobian, buffer, std::make_index_sequence<kNumManifolds>{}); + } + + private: + static constexpr std::size_t kNumManifolds = 2 + sizeof...(ManifoldN); + + template <std::size_t... Indices, typename... Args> + explicit ProductManifold(std::index_sequence<Indices...>, Args&&... 
manifolds) + : manifolds_{std::forward<Args>(manifolds)...}, + buffer_size_{(std::max)( + {(Dereference(std::get<Indices>(manifolds_)).TangentSize() * + Dereference(std::get<Indices>(manifolds_)).AmbientSize())...})}, + ambient_sizes_{ + Dereference(std::get<Indices>(manifolds_)).AmbientSize()...}, + tangent_sizes_{ + Dereference(std::get<Indices>(manifolds_)).TangentSize()...}, + ambient_offsets_{ExclusiveScan(ambient_sizes_)}, + tangent_offsets_{ExclusiveScan(tangent_sizes_)}, + ambient_size_{ + std::accumulate(ambient_sizes_.begin(), ambient_sizes_.end(), 0)}, + tangent_size_{ + std::accumulate(tangent_sizes_.begin(), tangent_sizes_.end(), 0)} {} + + template <std::size_t Index0, std::size_t... Indices> + bool PlusImpl(const double* x, + const double* delta, + double* x_plus_delta, + std::index_sequence<Index0, Indices...>) const { + if (!Dereference(std::get<Index0>(manifolds_)) + .Plus(x + ambient_offsets_[Index0], + delta + tangent_offsets_[Index0], + x_plus_delta + ambient_offsets_[Index0])) { + return false; + } + + return PlusImpl(x, delta, x_plus_delta, std::index_sequence<Indices...>{}); + } + + static constexpr bool PlusImpl(const double* /*x*/, + const double* /*delta*/, + double* /*x_plus_delta*/, + std::index_sequence<>) noexcept { + return true; + } + + template <std::size_t Index0, std::size_t... Indices> + bool MinusImpl(const double* y, + const double* x, + double* y_minus_x, + std::index_sequence<Index0, Indices...>) const { + if (!Dereference(std::get<Index0>(manifolds_)) + .Minus(y + ambient_offsets_[Index0], + x + ambient_offsets_[Index0], + y_minus_x + tangent_offsets_[Index0])) { + return false; + } + + return MinusImpl(y, x, y_minus_x, std::index_sequence<Indices...>{}); + } + + static constexpr bool MinusImpl(const double* /*y*/, + const double* /*x*/, + double* /*y_minus_x*/, + std::index_sequence<>) noexcept { + return true; + } + + template <std::size_t Index0, std::size_t... 
Indices> + bool PlusJacobianImpl(const double* x, + MatrixRef& jacobian, + internal::FixedArray<double>& buffer, + std::index_sequence<Index0, Indices...>) const { + if (!Dereference(std::get<Index0>(manifolds_)) + .PlusJacobian(x + ambient_offsets_[Index0], buffer.data())) { + return false; + } + + jacobian.block(ambient_offsets_[Index0], + tangent_offsets_[Index0], + ambient_sizes_[Index0], + tangent_sizes_[Index0]) = + MatrixRef( + buffer.data(), ambient_sizes_[Index0], tangent_sizes_[Index0]); + + return PlusJacobianImpl( + x, jacobian, buffer, std::index_sequence<Indices...>{}); + } + + static constexpr bool PlusJacobianImpl( + const double* /*x*/, + MatrixRef& /*jacobian*/, + internal::FixedArray<double>& /*buffer*/, + std::index_sequence<>) noexcept { + return true; + } + + template <std::size_t Index0, std::size_t... Indices> + bool MinusJacobianImpl(const double* x, + MatrixRef& jacobian, + internal::FixedArray<double>& buffer, + std::index_sequence<Index0, Indices...>) const { + if (!Dereference(std::get<Index0>(manifolds_)) + .MinusJacobian(x + ambient_offsets_[Index0], buffer.data())) { + return false; + } + + jacobian.block(tangent_offsets_[Index0], + ambient_offsets_[Index0], + tangent_sizes_[Index0], + ambient_sizes_[Index0]) = + MatrixRef( + buffer.data(), tangent_sizes_[Index0], ambient_sizes_[Index0]); + + return MinusJacobianImpl( + x, jacobian, buffer, std::index_sequence<Indices...>{}); + } + + static constexpr bool MinusJacobianImpl( + const double* /*x*/, + MatrixRef& /*jacobian*/, + internal::FixedArray<double>& /*buffer*/, + std::index_sequence<>) noexcept { + return true; + } + + template <typename T, std::size_t N> + static std::array<T, N> ExclusiveScan(const std::array<T, N>& values) { + std::array<T, N> result; + T init = 0; + + // TODO Replace by std::exclusive_scan once C++17 is available + for (std::size_t i = 0; i != N; ++i) { + result[i] = init; + init += values[i]; + } + + return result; + } + + // TODO Replace by std::void_t 
once C++17 is available + template <typename... Types> + struct Void { + using type = void; + }; + + template <typename T, typename E = void> + struct IsDereferenceable : std::false_type {}; + + template <typename T> + struct IsDereferenceable<T, typename Void<decltype(*std::declval<T>())>::type> + : std::true_type {}; + + template <typename T, + std::enable_if_t<!IsDereferenceable<T>::value>* = nullptr> + static constexpr decltype(auto) Dereference(T& value) { + return value; + } + + // Support dereferenceable types such as std::unique_ptr, std::shared_ptr, raw + // pointers etc. + template <typename T, + std::enable_if_t<IsDereferenceable<T>::value>* = nullptr> + static constexpr decltype(auto) Dereference(T& value) { + return *value; + } + + template <typename T> + static constexpr decltype(auto) Dereference(T* p) { + assert(p != nullptr); + return *p; + } + + std::tuple<Manifold0, Manifold1, ManifoldN...> manifolds_; + int buffer_size_; + std::array<int, kNumManifolds> ambient_sizes_; + std::array<int, kNumManifolds> tangent_sizes_; + std::array<int, kNumManifolds> ambient_offsets_; + std::array<int, kNumManifolds> tangent_offsets_; + int ambient_size_; + int tangent_size_; +}; + +#ifdef CERES_HAS_CPP17 +// C++17 deduction guide that allows the user to avoid explicitly specifying +// the template parameters of ProductManifold. The class can instead be +// instantiated as follows: +// +// ProductManifold manifold{QuaternionManifold{}, EuclideanManifold<3>{}}; +// +template <typename Manifold0, typename Manifold1, typename... Manifolds> +ProductManifold(Manifold0&&, Manifold1&&, Manifolds&&...) 
+ -> ProductManifold<Manifold0, Manifold1, Manifolds...>; +#endif + +} // namespace ceres + +#endif // CERES_PUBLIC_PRODUCT_MANIFOLD_H_ diff --git a/extern/ceres/include/ceres/rotation.h b/extern/ceres/include/ceres/rotation.h index 0c82a417a2c..51079901aaf 100644 --- a/extern/ceres/include/ceres/rotation.h +++ b/extern/ceres/include/ceres/rotation.h @@ -521,18 +521,18 @@ inline void UnitQuaternionRotatePoint(const T q[4], DCHECK_NE(pt, result) << "Inplace rotation is not supported."; // clang-format off - const T t2 = q[0] * q[1]; - const T t3 = q[0] * q[2]; - const T t4 = q[0] * q[3]; - const T t5 = -q[1] * q[1]; - const T t6 = q[1] * q[2]; - const T t7 = q[1] * q[3]; - const T t8 = -q[2] * q[2]; - const T t9 = q[2] * q[3]; - const T t1 = -q[3] * q[3]; - result[0] = T(2) * ((t8 + t1) * pt[0] + (t6 - t4) * pt[1] + (t3 + t7) * pt[2]) + pt[0]; // NOLINT - result[1] = T(2) * ((t4 + t6) * pt[0] + (t5 + t1) * pt[1] + (t9 - t2) * pt[2]) + pt[1]; // NOLINT - result[2] = T(2) * ((t7 - t3) * pt[0] + (t2 + t9) * pt[1] + (t5 + t8) * pt[2]) + pt[2]; // NOLINT + T uv0 = q[2] * pt[2] - q[3] * pt[1]; + T uv1 = q[3] * pt[0] - q[1] * pt[2]; + T uv2 = q[1] * pt[1] - q[2] * pt[0]; + uv0 += uv0; + uv1 += uv1; + uv2 += uv2; + result[0] = pt[0] + q[0] * uv0; + result[1] = pt[1] + q[0] * uv1; + result[2] = pt[2] + q[0] * uv2; + result[0] += q[2] * uv2 - q[3] * uv1; + result[1] += q[3] * uv0 - q[1] * uv2; + result[2] += q[1] * uv1 - q[2] * uv0; // clang-format on } @@ -624,16 +624,16 @@ inline void AngleAxisRotatePoint(const T angle_axis[3], result[2] = pt[2] * costheta + w_cross_pt[2] * sintheta + w[2] * tmp; } else { // Near zero, the first order Taylor approximation of the rotation - // matrix R corresponding to a vector w and angle w is + // matrix R corresponding to a vector w and angle theta is // // R = I + hat(w) * sin(theta) // // But sintheta ~ theta and theta * w = angle_axis, which gives us // - // R = I + hat(w) + // R = I + hat(angle_axis) // // and actually performing 
multiplication with the point pt, gives us - // R * pt = pt + w x pt. + // R * pt = pt + angle_axis x pt. // // Switching to the Taylor expansion near zero provides meaningful // derivatives when evaluated using Jets. diff --git a/extern/ceres/include/ceres/sized_cost_function.h b/extern/ceres/include/ceres/sized_cost_function.h index 8e92f1b796c..d76b5c26b4c 100644 --- a/extern/ceres/include/ceres/sized_cost_function.h +++ b/extern/ceres/include/ceres/sized_cost_function.h @@ -61,8 +61,6 @@ class SizedCostFunction : public CostFunction { *mutable_parameter_block_sizes() = std::vector<int32_t>{Ns...}; } - virtual ~SizedCostFunction() {} - // Subclasses must implement Evaluate(). }; diff --git a/extern/ceres/include/ceres/solver.h b/extern/ceres/include/ceres/solver.h index 61b8dd53eb3..026fc1c0830 100644 --- a/extern/ceres/include/ceres/solver.h +++ b/extern/ceres/include/ceres/solver.h @@ -38,8 +38,9 @@ #include <vector> #include "ceres/crs_matrix.h" +#include "ceres/internal/config.h" #include "ceres/internal/disable_warnings.h" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" #include "ceres/iteration_callback.h" #include "ceres/ordered_groups.h" #include "ceres/problem.h" @@ -363,23 +364,23 @@ class CERES_EXPORT Solver { std::unordered_set<ResidualBlockId> residual_blocks_for_subset_preconditioner; - // Ceres supports using multiple dense linear algebra libraries - // for dense matrix factorizations. Currently EIGEN and LAPACK are - // the valid choices. EIGEN is always available, LAPACK refers to - // the system BLAS + LAPACK library which may or may not be + // Ceres supports using multiple dense linear algebra libraries for dense + // matrix factorizations. Currently EIGEN, LAPACK and CUDA are the valid + // choices. EIGEN is always available, LAPACK refers to the system BLAS + + // LAPACK library which may or may not be available. CUDA refers to Nvidia's + // GPU based dense linear algebra library, which may or may not be // available. 
// - // This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and - // DENSE_SCHUR solvers. For small to moderate sized problem EIGEN - // is a fine choice but for large problems, an optimized LAPACK + - // BLAS implementation can make a substantial difference in - // performance. + // This setting affects the DENSE_QR, DENSE_NORMAL_CHOLESKY and DENSE_SCHUR + // solvers. For small to moderate sized problem EIGEN is a fine choice but + // for large problems, an optimized LAPACK + BLAS or CUDA implementation can + // make a substantial difference in performance. DenseLinearAlgebraLibraryType dense_linear_algebra_library_type = EIGEN; - // Ceres supports using multiple sparse linear algebra libraries - // for sparse matrix ordering and factorizations. Currently, - // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on - // whether they are linked into Ceres at build time. + // Ceres supports using multiple sparse linear algebra libraries for sparse + // matrix ordering and factorizations. Currently, SUITE_SPARSE and CX_SPARSE + // are the valid choices, depending on whether they are linked into Ceres at + // build time. SparseLinearAlgebraLibraryType sparse_linear_algebra_library_type = #if !defined(CERES_NO_SUITESPARSE) SUITE_SPARSE; @@ -423,7 +424,7 @@ class CERES_EXPORT Solver { // each group, Ceres is free to order the parameter blocks as it // chooses. // - // If NULL, then all parameter blocks are assumed to be in the + // If nullptr, then all parameter blocks are assumed to be in the // same group and the solver is free to decide the best // ordering. // @@ -536,8 +537,9 @@ class CERES_EXPORT Solver { // max_num_refinement_iterations to 2-3. // // NOTE2: The following two options are currently only applicable - // if sparse_linear_algebra_library_type is EIGEN_SPARSE and - // linear_solver_type is SPARSE_NORMAL_CHOLESKY, or SPARSE_SCHUR. 
+ // if sparse_linear_algebra_library_type is EIGEN_SPARSE or + // ACCELERATE_SPARSE, and linear_solver_type is SPARSE_NORMAL_CHOLESKY + // or SPARSE_SCHUR. bool use_mixed_precision_solves = false; // Number steps of the iterative refinement process to run when @@ -882,7 +884,7 @@ class CERES_EXPORT Solver { // Dimension of the tangent space of the problem (or the number of // columns in the Jacobian for the problem). This is different // from num_parameters if a parameter block is associated with a - // LocalParameterization + // LocalParameterization/Manifold. int num_effective_parameters = -1; // Number of residual blocks in the problem. @@ -903,7 +905,7 @@ class CERES_EXPORT Solver { // number of columns in the Jacobian for the reduced // problem). This is different from num_parameters_reduced if a // parameter block in the reduced problem is associated with a - // LocalParameterization. + // LocalParameterization/Manifold. int num_effective_parameters_reduced = -1; // Number of residual blocks in the reduced problem. diff --git a/extern/ceres/include/ceres/sphere_manifold.h b/extern/ceres/include/ceres/sphere_manifold.h new file mode 100644 index 00000000000..5d71cbbca9a --- /dev/null +++ b/extern/ceres/include/ceres/sphere_manifold.h @@ -0,0 +1,231 @@ +// Ceres Solver - A fast non-linear least squares minimizer +// Copyright 2022 Google Inc. All rights reserved. +// http://ceres-solver.org/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// * Redistributions of source code must retain the above copyright notice, +// this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// * Neither the name of Google Inc. 
nor the names of its contributors may be +// used to endorse or promote products derived from this software without +// specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +// POSSIBILITY OF SUCH DAMAGE. +// +// Author: vitus@google.com (Mike Vitus) +// jodebo_beck@gmx.de (Johannes Beck) + +#ifndef CERES_PUBLIC_SPHERE_MANIFOLD_H_ +#define CERES_PUBLIC_SPHERE_MANIFOLD_H_ + +#include <Eigen/Core> +#include <algorithm> +#include <array> +#include <memory> +#include <vector> + +#include "ceres/internal/disable_warnings.h" +#include "ceres/internal/export.h" +#include "ceres/internal/householder_vector.h" +#include "ceres/internal/sphere_manifold_functions.h" +#include "ceres/manifold.h" +#include "ceres/types.h" +#include "glog/logging.h" + +namespace ceres { + +// This provides a manifold on a sphere meaning that the norm of the vector +// stays the same. Such cases often arise in Structure from Motion +// problems. One example where they are used is in representing points whose +// triangulation is ill-conditioned. Here it is advantageous to use an +// over-parameterization since homogeneous vectors can represent points at +// infinity. 
+// +// The plus operator is defined as +// Plus(x, delta) = +// [sin(0.5 * |delta|) * delta / |delta|, cos(0.5 * |delta|)] * x +// +// The minus operator is defined as +// Minus(x, y) = 2 atan2(nhy, y[-1]) / nhy * hy[0 : size_ - 1] +// with nhy = norm(hy[0 : size_ - 1]) +// +// with * defined as an operator which applies the update orthogonal to x to +// remain on the sphere. The ambient space dimension is required to be greater +// than 1. +// +// The class works with dynamic and static ambient space dimensions. If the +// ambient space dimensions is known at compile time use +// +// SphereManifold<3> manifold; +// +// If the ambient space dimensions is not known at compile time the template +// parameter needs to be set to ceres::DYNAMIC and the actual dimension needs +// to be provided as a constructor argument: +// +// SphereManifold<ceres::DYNAMIC> manifold(ambient_dim); +// +// See section B.2 (p.25) in "Integrating Generic Sensor Fusion Algorithms +// with Sound State Representations through Encapsulation of Manifolds" by C. +// Hertzberg, R. Wagner, U. Frese and L. Schroder for more details +// (https://arxiv.org/pdf/1107.1119.pdf) +template <int AmbientSpaceDimension> +class SphereManifold final : public Manifold { + public: + static_assert( + AmbientSpaceDimension == ceres::DYNAMIC || AmbientSpaceDimension > 1, + "The size of the homogeneous vector needs to be greater than 1."); + static_assert(ceres::DYNAMIC == Eigen::Dynamic, + "ceres::DYNAMIC needs to be the same as Eigen::Dynamic."); + + SphereManifold(); + explicit SphereManifold(int size); + + int AmbientSize() const override { + return AmbientSpaceDimension == ceres::DYNAMIC ? 
size_ + : AmbientSpaceDimension; + } + int TangentSize() const override { return AmbientSize() - 1; } + + bool Plus(const double* x, + const double* delta, + double* x_plus_delta) const override; + bool PlusJacobian(const double* x, double* jacobian) const override; + + bool Minus(const double* y, + const double* x, + double* y_minus_x) const override; + bool MinusJacobian(const double* x, double* jacobian) const override; + + private: + static constexpr int TangentSpaceDimension = + AmbientSpaceDimension > 0 ? AmbientSpaceDimension - 1 : Eigen::Dynamic; + + using AmbientVector = Eigen::Matrix<double, AmbientSpaceDimension, 1>; + using TangentVector = Eigen::Matrix<double, TangentSpaceDimension, 1>; + using MatrixPlusJacobian = Eigen::Matrix<double, + AmbientSpaceDimension, + TangentSpaceDimension, + Eigen::RowMajor>; + using MatrixMinusJacobian = Eigen::Matrix<double, + TangentSpaceDimension, + AmbientSpaceDimension, + Eigen::RowMajor>; + + const int size_{}; +}; + +template <int AmbientSpaceDimension> +SphereManifold<AmbientSpaceDimension>::SphereManifold() + : size_{AmbientSpaceDimension} { + static_assert( + AmbientSpaceDimension != Eigen::Dynamic, + "The size is set to dynamic. 
Please call the constructor with a size."); +} + +template <int AmbientSpaceDimension> +SphereManifold<AmbientSpaceDimension>::SphereManifold(int size) : size_{size} { + if (AmbientSpaceDimension != Eigen::Dynamic) { + CHECK_EQ(AmbientSpaceDimension, size) + << "Specified size by template parameter differs from the supplied " + "one."; + } else { + CHECK_GT(size_, 1) + << "The size of the manifold needs to be greater than 1."; + } +} + +template <int AmbientSpaceDimension> +bool SphereManifold<AmbientSpaceDimension>::Plus( + const double* x_ptr, + const double* delta_ptr, + double* x_plus_delta_ptr) const { + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<const TangentVector> delta(delta_ptr, size_ - 1); + Eigen::Map<AmbientVector> x_plus_delta(x_plus_delta_ptr, size_); + + const double norm_delta = delta.norm(); + + if (norm_delta == 0.0) { + x_plus_delta = x; + return true; + } + + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. 
+ internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>, + double, + AmbientSpaceDimension>(x, &v, &beta); + + internal::ComputeSphereManifoldPlus( + v, beta, x, delta, norm_delta, &x_plus_delta); + + return true; +} + +template <int AmbientSpaceDimension> +bool SphereManifold<AmbientSpaceDimension>::PlusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<MatrixPlusJacobian> jacobian(jacobian_ptr, size_, size_ - 1); + internal::ComputeSphereManifoldPlusJacobian(x, &jacobian); + + return true; +} + +template <int AmbientSpaceDimension> +bool SphereManifold<AmbientSpaceDimension>::Minus(const double* y_ptr, + const double* x_ptr, + double* y_minus_x_ptr) const { + AmbientVector y = Eigen::Map<const AmbientVector>(y_ptr, size_); + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<TangentVector> y_minus_x(y_minus_x_ptr, size_ - 1); + + // Apply Householder transformation. + AmbientVector v(size_); + double beta; + + // NOTE: The explicit template arguments are needed here because + // ComputeHouseholderVector is templated and some versions of MSVC + // have trouble deducing the type of v automatically. 
+ internal::ComputeHouseholderVector<Eigen::Map<const AmbientVector>, + double, + AmbientSpaceDimension>(x, &v, &beta); + internal::ComputeSphereManifoldMinus(v, beta, x, y, &y_minus_x); + return true; +} + +template <int AmbientSpaceDimension> +bool SphereManifold<AmbientSpaceDimension>::MinusJacobian( + const double* x_ptr, double* jacobian_ptr) const { + Eigen::Map<const AmbientVector> x(x_ptr, size_); + Eigen::Map<MatrixMinusJacobian> jacobian(jacobian_ptr, size_ - 1, size_); + + internal::ComputeSphereManifoldMinusJacobian(x, &jacobian); + return true; +} + +} // namespace ceres + +// clang-format off +#include "ceres/internal/reenable_warnings.h" +// clang-format on + +#endif // CERES_PUBLIC_SPHERE_MANIFOLD_H_ diff --git a/extern/ceres/include/ceres/tiny_solver.h b/extern/ceres/include/ceres/tiny_solver.h index 47db5824dc5..86a655dc07d 100644 --- a/extern/ceres/include/ceres/tiny_solver.h +++ b/extern/ceres/include/ceres/tiny_solver.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2019 Google Inc. All rights reserved. +// Copyright 2021 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -84,7 +84,8 @@ namespace ceres { // double* parameters -- NUM_PARAMETERS or NumParameters() // double* residuals -- NUM_RESIDUALS or NumResiduals() // double* jacobian -- NUM_RESIDUALS * NUM_PARAMETERS in column-major format -// (Eigen's default); or NULL if no jacobian requested. +// (Eigen's default); or nullptr if no jacobian +// requested. 
// // An example (fully statically sized): // @@ -126,8 +127,8 @@ namespace ceres { // template <typename Function, typename LinearSolver = - Eigen::LDLT<Eigen::Matrix<typename Function::Scalar, - Function::NUM_PARAMETERS, + Eigen::LDLT<Eigen::Matrix<typename Function::Scalar, // + Function::NUM_PARAMETERS, // Function::NUM_PARAMETERS>>> class TinySolver { public: @@ -139,41 +140,59 @@ class TinySolver { NUM_RESIDUALS = Function::NUM_RESIDUALS, NUM_PARAMETERS = Function::NUM_PARAMETERS }; - typedef typename Function::Scalar Scalar; - typedef typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1> Parameters; + using Scalar = typename Function::Scalar; + using Parameters = typename Eigen::Matrix<Scalar, NUM_PARAMETERS, 1>; enum Status { - GRADIENT_TOO_SMALL, // eps > max(J'*f(x)) - RELATIVE_STEP_SIZE_TOO_SMALL, // eps > ||dx|| / (||x|| + eps) - COST_TOO_SMALL, // eps > ||f(x)||^2 / 2 + // max_norm |J'(x) * f(x)| < gradient_tolerance + GRADIENT_TOO_SMALL, + // ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance) + RELATIVE_STEP_SIZE_TOO_SMALL, + // cost_threshold > ||f(x)||^2 / 2 + COST_TOO_SMALL, + // num_iterations >= max_num_iterations HIT_MAX_ITERATIONS, + // (new_cost - old_cost) < function_tolerance * old_cost + COST_CHANGE_TOO_SMALL, // TODO(sameeragarwal): Deal with numerical failures. 
}; struct Options { - Scalar gradient_tolerance = 1e-10; // eps > max(J'*f(x)) - Scalar parameter_tolerance = 1e-8; // eps > ||dx|| / ||x|| - Scalar cost_threshold = // eps > ||f(x)|| - std::numeric_limits<Scalar>::epsilon(); - Scalar initial_trust_region_radius = 1e4; int max_num_iterations = 50; + + // max_norm |J'(x) * f(x)| < gradient_tolerance + Scalar gradient_tolerance = 1e-10; + + // ||dx|| <= parameter_tolerance * (||x|| + parameter_tolerance) + Scalar parameter_tolerance = 1e-8; + + // (new_cost - old_cost) < function_tolerance * old_cost + Scalar function_tolerance = 1e-6; + + // cost_threshold > ||f(x)||^2 / 2 + Scalar cost_threshold = std::numeric_limits<Scalar>::epsilon(); + + Scalar initial_trust_region_radius = 1e4; }; struct Summary { - Scalar initial_cost = -1; // 1/2 ||f(x)||^2 - Scalar final_cost = -1; // 1/2 ||f(x)||^2 - Scalar gradient_max_norm = -1; // max(J'f(x)) + // 1/2 ||f(x_0)||^2 + Scalar initial_cost = -1; + // 1/2 ||f(x)||^2 + Scalar final_cost = -1; + // max_norm(J'f(x)) + Scalar gradient_max_norm = -1; int iterations = -1; Status status = HIT_MAX_ITERATIONS; }; bool Update(const Function& function, const Parameters& x) { - if (!function(x.data(), error_.data(), jacobian_.data())) { + if (!function(x.data(), residuals_.data(), jacobian_.data())) { return false; } - error_ = -error_; + residuals_ = -residuals_; // On the first iteration, compute a diagonal (Jacobi) scaling // matrix, which we store as a vector. @@ -192,9 +211,9 @@ class TinySolver { // factorization. 
jacobian_ = jacobian_ * jacobi_scaling_.asDiagonal(); jtj_ = jacobian_.transpose() * jacobian_; - g_ = jacobian_.transpose() * error_; + g_ = jacobian_.transpose() * residuals_; summary.gradient_max_norm = g_.array().abs().maxCoeff(); - cost_ = error_.squaredNorm() / 2; + cost_ = residuals_.squaredNorm() / 2; return true; } @@ -231,7 +250,7 @@ class TinySolver { const Scalar max_diagonal = 1e32; for (int i = 0; i < lm_diagonal_.rows(); ++i) { lm_diagonal_[i] = std::sqrt( - u * std::min(std::max(jtj_(i, i), min_diagonal), max_diagonal)); + u * (std::min)((std::max)(jtj_(i, i), min_diagonal), max_diagonal)); jtj_regularized_(i, i) += lm_diagonal_[i] * lm_diagonal_[i]; } @@ -253,10 +272,9 @@ class TinySolver { // TODO(keir): Add proper handling of errors from user eval of cost // functions. - function(&x_new_[0], &f_x_new_[0], NULL); + function(&x_new_[0], &f_x_new_[0], nullptr); const Scalar cost_change = (2 * cost_ - f_x_new_.squaredNorm()); - // TODO(sameeragarwal): Better more numerically stable evaluation. const Scalar model_cost_change = lm_step_.dot(2 * g_ - jtj_ * lm_step_); @@ -269,6 +287,12 @@ class TinySolver { // model fits well. x = x_new_; + if (std::abs(cost_change) < options.function_tolerance) { + cost_ = f_x_new_.squaredNorm() / 2; + summary.status = COST_CHANGE_TOO_SMALL; + break; + } + // TODO(sameeragarwal): Deal with failure. Update(function, x); if (summary.gradient_max_norm < options.gradient_tolerance) { @@ -282,16 +306,24 @@ class TinySolver { } Scalar tmp = Scalar(2 * rho - 1); - u = u * std::max(1 / 3., 1 - tmp * tmp * tmp); + u = u * (std::max)(Scalar(1 / 3.), Scalar(1) - tmp * tmp * tmp); v = 2; - continue; - } - // Reject the update because either the normal equations failed to solve - // or the local linear model was not good (rho < 0). Instead, increase u - // to move closer to gradient descent. 
- u *= v; - v *= 2; + } else { + // Reject the update because either the normal equations failed to solve + // or the local linear model was not good (rho < 0). + + // Additionally if the cost change is too small, then terminate. + if (std::abs(cost_change) < options.function_tolerance) { + // Terminate + summary.status = COST_CHANGE_TOO_SMALL; + break; + } + + // Reduce the size of the trust region. + u *= v; + v *= 2; + } } summary.final_cost = cost_; @@ -307,7 +339,7 @@ class TinySolver { LinearSolver linear_solver_; Scalar cost_; Parameters dx_, x_new_, g_, jacobi_scaling_, lm_diagonal_, lm_step_; - Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> error_, f_x_new_; + Eigen::Matrix<Scalar, NUM_RESIDUALS, 1> residuals_, f_x_new_; Eigen::Matrix<Scalar, NUM_RESIDUALS, NUM_PARAMETERS> jacobian_; Eigen::Matrix<Scalar, NUM_PARAMETERS, NUM_PARAMETERS> jtj_, jtj_regularized_; @@ -317,7 +349,7 @@ class TinySolver { template <typename T> struct enable_if<true, T> { - typedef T type; + using type = T; }; // The number of parameters and residuals are dynamically sized. @@ -355,7 +387,7 @@ class TinySolver { jacobi_scaling_.resize(num_parameters); lm_diagonal_.resize(num_parameters); lm_step_.resize(num_parameters); - error_.resize(num_residuals); + residuals_.resize(num_residuals); f_x_new_.resize(num_residuals); jacobian_.resize(num_residuals, num_parameters); jtj_.resize(num_parameters, num_parameters); diff --git a/extern/ceres/include/ceres/tiny_solver_autodiff_function.h b/extern/ceres/include/ceres/tiny_solver_autodiff_function.h index b782f549cc1..3e3675ff070 100644 --- a/extern/ceres/include/ceres/tiny_solver_autodiff_function.h +++ b/extern/ceres/include/ceres/tiny_solver_autodiff_function.h @@ -113,12 +113,12 @@ class TinySolverAutoDiffFunction { // as a member a Jet type, which itself has a fixed-size Eigen type as member. 
EIGEN_MAKE_ALIGNED_OPERATOR_NEW - TinySolverAutoDiffFunction(const CostFunctor& cost_functor) + explicit TinySolverAutoDiffFunction(const CostFunctor& cost_functor) : cost_functor_(cost_functor) { Initialize<kNumResiduals>(cost_functor); } - typedef T Scalar; + using Scalar = T; enum { NUM_PARAMETERS = kNumParameters, NUM_RESIDUALS = kNumResiduals, @@ -127,7 +127,7 @@ class TinySolverAutoDiffFunction { // This is similar to AutoDifferentiate(), but since there is only one // parameter block it is easier to inline to avoid overhead. bool operator()(const T* parameters, T* residuals, T* jacobian) const { - if (jacobian == NULL) { + if (jacobian == nullptr) { // No jacobian requested, so just directly call the cost function with // doubles, skipping jets and derivatives. return cost_functor_(parameters, residuals); diff --git a/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h b/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h index 18ccb398f90..cc5ca16af5d 100644 --- a/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h +++ b/extern/ceres/include/ceres/tiny_solver_cost_function_adapter.h @@ -75,7 +75,7 @@ template <int kNumResiduals = Eigen::Dynamic, int kNumParameters = Eigen::Dynamic> class TinySolverCostFunctionAdapter { public: - typedef double Scalar; + using Scalar = double; enum ComponentSizeType { NUM_PARAMETERS = kNumParameters, NUM_RESIDUALS = kNumResiduals @@ -85,7 +85,7 @@ class TinySolverCostFunctionAdapter { // fixed-size Eigen types. 
EIGEN_MAKE_ALIGNED_OPERATOR_NEW - TinySolverCostFunctionAdapter(const CostFunction& cost_function) + explicit TinySolverCostFunctionAdapter(const CostFunction& cost_function) : cost_function_(cost_function) { CHECK_EQ(cost_function_.parameter_block_sizes().size(), 1) << "Only CostFunctions with exactly one parameter blocks are allowed."; @@ -108,7 +108,7 @@ class TinySolverCostFunctionAdapter { double* residuals, double* jacobian) const { if (!jacobian) { - return cost_function_.Evaluate(&parameters, residuals, NULL); + return cost_function_.Evaluate(&parameters, residuals, nullptr); } double* jacobians[1] = {row_major_jacobian_.data()}; diff --git a/extern/ceres/include/ceres/types.h b/extern/ceres/include/ceres/types.h index 5ee6fdca576..e5224238129 100644 --- a/extern/ceres/include/ceres/types.h +++ b/extern/ceres/include/ceres/types.h @@ -40,7 +40,7 @@ #include <string> #include "ceres/internal/disable_warnings.h" -#include "ceres/internal/port.h" +#include "ceres/internal/export.h" namespace ceres { @@ -186,6 +186,7 @@ enum SparseLinearAlgebraLibraryType { enum DenseLinearAlgebraLibraryType { EIGEN, LAPACK, + CUDA, }; // Logging options diff --git a/extern/ceres/include/ceres/version.h b/extern/ceres/include/ceres/version.h index a76cc1099c5..e0d61972896 100644 --- a/extern/ceres/include/ceres/version.h +++ b/extern/ceres/include/ceres/version.h @@ -1,5 +1,5 @@ // Ceres Solver - A fast non-linear least squares minimizer -// Copyright 2019 Google Inc. All rights reserved. +// Copyright 2021 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without @@ -32,7 +32,7 @@ #define CERES_PUBLIC_VERSION_H_ #define CERES_VERSION_MAJOR 2 -#define CERES_VERSION_MINOR 0 +#define CERES_VERSION_MINOR 1 #define CERES_VERSION_REVISION 0 // Classic CPP stringifcation; the extra level of indirection allows the