git.blender.org/blender.git
Diffstat (limited to 'extern/ceres/include/ceres/loss_function.h')
-rw-r--r--  extern/ceres/include/ceres/loss_function.h | 48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/extern/ceres/include/ceres/loss_function.h b/extern/ceres/include/ceres/loss_function.h
index 7aabf7dfce1..8a5a37ff665 100644
--- a/extern/ceres/include/ceres/loss_function.h
+++ b/extern/ceres/include/ceres/loss_function.h
@@ -35,7 +35,7 @@
//
// For least squares problems where there are no outliers and standard
// squared loss is expected, it is not necessary to create a loss
-// function; instead passing a NULL to the problem when adding
+// function; instead passing a nullptr to the problem when adding
// residuals implies a standard squared loss.
//
// For least squares problems where the minimization may encounter
@@ -78,6 +78,7 @@
#include <memory>
#include "ceres/internal/disable_warnings.h"
+#include "ceres/internal/export.h"
#include "ceres/types.h"
#include "glog/logging.h"
@@ -85,7 +86,7 @@ namespace ceres {
class CERES_EXPORT LossFunction {
public:
- virtual ~LossFunction() {}
+ virtual ~LossFunction();
// For a residual vector with squared 2-norm 'sq_norm', this method
// is required to fill in the value and derivatives of the loss
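For context (a sketch, not part of this change): a concrete loss fills out[0] = rho(s), out[1] = rho'(s), out[2] = rho''(s) for the squared norm s it is given. The class and constant below are hypothetical.

#include "ceres/loss_function.h"

// Hypothetical loss rho(s) = k * s, illustrating the Evaluate() contract.
class LinearRescaleLoss final : public ceres::LossFunction {
 public:
  explicit LinearRescaleLoss(double k) : k_(k) {}
  void Evaluate(double s, double out[3]) const override {
    out[0] = k_ * s;  // rho(s)
    out[1] = k_;      // rho'(s)
    out[2] = 0.0;     // rho''(s)
  }

 private:
  const double k_;
};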
@@ -125,10 +126,10 @@ class CERES_EXPORT LossFunction {
//
// At s = 0: rho = [0, 1, 0].
//
-// It is not normally necessary to use this, as passing NULL for the
+// It is not normally necessary to use this, as passing nullptr for the
// loss function when building the problem accomplishes the same
// thing.
-class CERES_EXPORT TrivialLoss : public LossFunction {
+class CERES_EXPORT TrivialLoss final : public LossFunction {
public:
void Evaluate(double, double*) const override;
};
@@ -171,7 +172,7 @@ class CERES_EXPORT TrivialLoss : public LossFunction {
//
// The scaling parameter 'a' corresponds to 'delta' on this page:
// http://en.wikipedia.org/wiki/Huber_Loss_Function
-class CERES_EXPORT HuberLoss : public LossFunction {
+class CERES_EXPORT HuberLoss final : public LossFunction {
public:
explicit HuberLoss(double a) : a_(a), b_(a * a) {}
void Evaluate(double, double*) const override;
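A quick worked check (a sketch; assumes the usual Ceres Huber definition, quadratic for s <= a^2 and 2a*sqrt(s) - a^2 beyond it):

#include <cstdio>

#include "ceres/loss_function.h"

int main() {
  ceres::HuberLoss huber(1.0);
  double rho[3];
  huber.Evaluate(0.25, rho);  // inlier region (s <= a^2): rho(s) = s = 0.25
  std::printf("rho(0.25) = %g\n", rho[0]);
  huber.Evaluate(4.0, rho);   // outlier region: rho(s) = 2*sqrt(4) - 1 = 3
  std::printf("rho(4)    = %g\n", rho[0]);
  return 0;
}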
@@ -187,7 +188,7 @@ class CERES_EXPORT HuberLoss : public LossFunction {
// rho(s) = 2 (sqrt(1 + s) - 1).
//
// At s = 0: rho = [0, 1, -1 / (2 * a^2)].
-class CERES_EXPORT SoftLOneLoss : public LossFunction {
+class CERES_EXPORT SoftLOneLoss final : public LossFunction {
public:
explicit SoftLOneLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
@@ -204,7 +205,7 @@ class CERES_EXPORT SoftLOneLoss : public LossFunction {
// rho(s) = log(1 + s).
//
// At s = 0: rho = [0, 1, -1 / a^2].
-class CERES_EXPORT CauchyLoss : public LossFunction {
+class CERES_EXPORT CauchyLoss final : public LossFunction {
public:
explicit CauchyLoss(double a) : b_(a * a), c_(1 / b_) {}
void Evaluate(double, double*) const override;
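For illustration (a hedged sketch, not part of the diff; the residual functor and its target are made up): attaching one of these robust losses to a residual block. Passing nullptr instead of the loss would give the plain squared loss, as noted at the top of the header.

#include "ceres/ceres.h"

// Hypothetical 1-D residual r(x) = x - 5, used only to have something to solve.
struct ExampleResidual {
  template <typename T>
  bool operator()(const T* const x, T* residual) const {
    residual[0] = x[0] - T(5.0);
    return true;
  }
};

int main() {
  double x = 0.0;
  ceres::Problem problem;
  problem.AddResidualBlock(
      new ceres::AutoDiffCostFunction<ExampleResidual, 1, 1>(new ExampleResidual),
      new ceres::CauchyLoss(0.5),  // robust loss; nullptr would mean plain squared loss
      &x);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);
  return 0;
}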
@@ -225,7 +226,7 @@ class CERES_EXPORT CauchyLoss : public LossFunction {
// rho(s) = a atan(s / a).
//
// At s = 0: rho = [0, 1, 0].
-class CERES_EXPORT ArctanLoss : public LossFunction {
+class CERES_EXPORT ArctanLoss final : public LossFunction {
public:
explicit ArctanLoss(double a) : a_(a), b_(1 / (a * a)) {}
void Evaluate(double, double*) const override;
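A small numeric illustration (sketch): since rho(s) = a atan(s / a), the loss saturates near a * pi / 2, so arbitrarily large residuals contribute only a bounded amount.

#include <cstdio>

#include "ceres/loss_function.h"

int main() {
  ceres::ArctanLoss arctan(1.0);
  double rho[3];
  arctan.Evaluate(1000.0, rho);  // approaches a * pi / 2 ~= 1.5708 for large s
  std::printf("rho(1000) = %g\n", rho[0]);
  return 0;
}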
@@ -264,7 +265,7 @@ class CERES_EXPORT ArctanLoss : public LossFunction {
// concentrated in the range a - b to a + b.
//
// At s = 0: rho = [0, ~0, ~0].
-class CERES_EXPORT TolerantLoss : public LossFunction {
+class CERES_EXPORT TolerantLoss final : public LossFunction {
public:
explicit TolerantLoss(double a, double b);
void Evaluate(double, double*) const override;
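To see the tolerance band described above (a sketch with arbitrary parameters a = 1, b = 0.1): the loss and its gradient stay close to zero for s well below a, then grow roughly linearly in s well above it.

#include <cstdio>

#include "ceres/loss_function.h"

int main() {
  ceres::TolerantLoss tolerant(1.0, 0.1);
  double rho[3];
  tolerant.Evaluate(0.0, rho);  // well below a: rho and rho' are ~0
  std::printf("rho(0) = %g, rho'(0) = %g\n", rho[0], rho[1]);
  tolerant.Evaluate(2.0, rho);  // well above a: approximately linear growth
  std::printf("rho(2) = %g, rho'(2) = %g\n", rho[0], rho[1]);
  return 0;
}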
@@ -283,7 +284,7 @@ class CERES_EXPORT TolerantLoss : public LossFunction {
// rho(s) = a^2 / 3 for s > a^2.
//
// At s = 0: rho = [0, 1, -2 / a^2]
-class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
+class CERES_EXPORT TukeyLoss final : public ceres::LossFunction {
public:
explicit TukeyLoss(double a) : a_squared_(a * a) {}
void Evaluate(double, double*) const override;
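A quick check of the saturation stated above (a sketch): for s > a^2 the loss is constant at a^2 / 3, so gross outliers exert no pull on the solution.

#include <cstdio>

#include "ceres/loss_function.h"

int main() {
  ceres::TukeyLoss tukey(2.0);  // a = 2, so the loss caps at a^2 / 3 = 4 / 3
  double rho[3];
  tukey.Evaluate(100.0, rho);   // s > a^2: rho = 4/3 and rho' = 0
  std::printf("rho(100) = %g, rho'(100) = %g\n", rho[0], rho[1]);
  return 0;
}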
@@ -294,14 +295,14 @@ class CERES_EXPORT TukeyLoss : public ceres::LossFunction {
// Composition of two loss functions. The error is the result of first
// evaluating g followed by f to yield the composition f(g(s)).
-// The loss functions must not be NULL.
-class CERES_EXPORT ComposedLoss : public LossFunction {
+// The loss functions must not be nullptr.
+class CERES_EXPORT ComposedLoss final : public LossFunction {
public:
explicit ComposedLoss(const LossFunction* f,
Ownership ownership_f,
const LossFunction* g,
Ownership ownership_g);
- virtual ~ComposedLoss();
+ ~ComposedLoss() override;
void Evaluate(double, double*) const override;
private:
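For reference (a sketch, not part of the change): building the composition f(g(s)) from two of the losses above; the Ownership flags decide whether ComposedLoss deletes the wrapped functions.

#include "ceres/loss_function.h"

// f(g(s)) with f = Huber(1.0) and g = SoftLOne(0.5); both wrapped losses are
// owned, and deleted, by the ComposedLoss.
ceres::LossFunction* MakeComposedLoss() {
  return new ceres::ComposedLoss(new ceres::HuberLoss(1.0), ceres::TAKE_OWNERSHIP,
                                 new ceres::SoftLOneLoss(0.5), ceres::TAKE_OWNERSHIP);
}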
@@ -322,11 +323,11 @@ class CERES_EXPORT ComposedLoss : public LossFunction {
// s -> a * rho'(s)
// s -> a * rho''(s)
//
-// Since we treat the a NULL Loss function as the Identity loss
-// function, rho = NULL is a valid input and will result in the input
+// Since we treat a nullptr Loss function as the Identity loss
+// function, rho = nullptr is a valid input and will result in the input
// being scaled by a. This provides a simple way of implementing a
// scaled ResidualBlock.
-class CERES_EXPORT ScaledLoss : public LossFunction {
+class CERES_EXPORT ScaledLoss final : public LossFunction {
public:
// Constructs a ScaledLoss wrapping another loss function. Takes
// ownership of the wrapped loss function or not depending on the
@@ -336,7 +337,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
ScaledLoss(const ScaledLoss&) = delete;
void operator=(const ScaledLoss&) = delete;
- virtual ~ScaledLoss() {
+ ~ScaledLoss() override {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
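As the comment above notes, a nullptr inner loss is treated as the identity, so ScaledLoss can also act as a plain per-residual-block weight (a sketch assuming the usual (rho, a, ownership) constructor; the weights are arbitrary).

#include "ceres/loss_function.h"

// Scales the plain squared loss by 2.0 (nullptr inner loss == identity).
ceres::LossFunction* MakeWeightedSquaredLoss() {
  return new ceres::ScaledLoss(nullptr, 2.0, ceres::TAKE_OWNERSHIP);
}

// Scales a robust Cauchy loss by 0.5.
ceres::LossFunction* MakeWeightedCauchyLoss() {
  return new ceres::ScaledLoss(new ceres::CauchyLoss(1.0), 0.5, ceres::TAKE_OWNERSHIP);
}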
@@ -361,8 +362,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// whose scale can be mutated after an optimization problem has been
// constructed.
//
-// Since we treat the a NULL Loss function as the Identity loss
-// function, rho = NULL is a valid input.
+// Since we treat a nullptr Loss function as the Identity loss
+// function, rho = nullptr is a valid input.
//
// Example usage
//
@@ -374,7 +375,8 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
// new AutoDiffCostFunction<UW_Camera_Mapper, 2, 9, 3>(
// new UW_Camera_Mapper(feature_x, feature_y));
//
-// LossFunctionWrapper* loss_function(new HuberLoss(1.0), TAKE_OWNERSHIP);
+// LossFunctionWrapper* loss_function = new LossFunctionWrapper(
+// new HuberLoss(1.0), TAKE_OWNERSHIP);
//
// problem.AddResidualBlock(cost_function, loss_function, parameters);
//
@@ -387,7 +389,7 @@ class CERES_EXPORT ScaledLoss : public LossFunction {
//
// Solve(options, &problem, &summary)
//
-class CERES_EXPORT LossFunctionWrapper : public LossFunction {
+class CERES_EXPORT LossFunctionWrapper final : public LossFunction {
public:
LossFunctionWrapper(LossFunction* rho, Ownership ownership)
: rho_(rho), ownership_(ownership) {}
@@ -395,14 +397,14 @@ class CERES_EXPORT LossFunctionWrapper : public LossFunction {
LossFunctionWrapper(const LossFunctionWrapper&) = delete;
void operator=(const LossFunctionWrapper&) = delete;
- virtual ~LossFunctionWrapper() {
+ ~LossFunctionWrapper() override {
if (ownership_ == DO_NOT_TAKE_OWNERSHIP) {
rho_.release();
}
}
void Evaluate(double sq_norm, double out[3]) const override {
- if (rho_.get() == NULL) {
+ if (rho_.get() == nullptr) {
out[0] = sq_norm;
out[1] = 1.0;
out[2] = 0.0;
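Finally, a compact end-to-end sketch of the wrapper usage from the corrected comment above (the cost function and parameter block are assumed to be supplied by the caller): the wrapped loss can be swapped via Reset() after the problem is built.

#include "ceres/ceres.h"

// Assumes cost_function has exactly one parameter block, 'parameters'.
void SolveWithMutableLoss(ceres::CostFunction* cost_function, double* parameters) {
  ceres::LossFunctionWrapper* loss_function =
      new ceres::LossFunctionWrapper(new ceres::HuberLoss(1.0), ceres::TAKE_OWNERSHIP);

  ceres::Problem problem;
  problem.AddResidualBlock(cost_function, loss_function, parameters);

  ceres::Solver::Options options;
  ceres::Solver::Summary summary;
  ceres::Solve(options, &problem, &summary);

  // Swap in a different robust loss and re-solve without rebuilding the problem.
  loss_function->Reset(new ceres::CauchyLoss(1.0), ceres::TAKE_OWNERSHIP);
  ceres::Solve(options, &problem, &summary);
}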