git.blender.org/blender.git
Diffstat (limited to 'intern/libmv/libmv/simple_pipeline/detect.cc')
-rw-r--r--  intern/libmv/libmv/simple_pipeline/detect.cc | 172
1 file changed, 91 insertions(+), 81 deletions(-)
diff --git a/intern/libmv/libmv/simple_pipeline/detect.cc b/intern/libmv/libmv/simple_pipeline/detect.cc
index 46599a4c49e..0897c5598c6 100644
--- a/intern/libmv/libmv/simple_pipeline/detect.cc
+++ b/intern/libmv/libmv/simple_pipeline/detect.cc
@@ -22,14 +22,14 @@
**
****************************************************************************/
-#include <stdlib.h>
#include <memory.h>
+#include <stdlib.h>
#include <queue>
#include "libmv/base/scoped_ptr.h"
#include "libmv/image/array_nd.h"
-#include "libmv/image/image_converter.h"
#include "libmv/image/convolve.h"
+#include "libmv/image/image_converter.h"
#include "libmv/logging/logging.h"
#include "libmv/simple_pipeline/detect.h"
@@ -55,7 +55,7 @@ double kDefaultHarrisThreshold = 1e-5;
class FeatureComparison {
public:
- bool operator() (const Feature &left, const Feature &right) const {
+ bool operator()(const Feature& left, const Feature& right) const {
return right.score > left.score;
}
};
@@ -63,18 +63,17 @@ class FeatureComparison {
// Filter the features so there are no features closer than
// minimal distance to each other.
// This is a naive implementation with O(n^2) asymptotic.
-void FilterFeaturesByDistance(const vector<Feature> &all_features,
+void FilterFeaturesByDistance(const vector<Feature>& all_features,
int min_distance,
- vector<Feature> *detected_features) {
+ vector<Feature>* detected_features) {
const int min_distance_squared = min_distance * min_distance;
// Use priority queue to sort the features by their score.
//
// Do this on copy of the input features to prevent possible
// distortion in callee function behavior.
- std::priority_queue<Feature,
- std::vector<Feature>,
- FeatureComparison> priority_features;
+ std::priority_queue<Feature, std::vector<Feature>, FeatureComparison>
+ priority_features;
for (int i = 0; i < all_features.size(); i++) {
priority_features.push(all_features.at(i));
@@ -85,7 +84,7 @@ void FilterFeaturesByDistance(const vector<Feature> &all_features,
Feature a = priority_features.top();
for (int i = 0; i < detected_features->size(); i++) {
- Feature &b = detected_features->at(i);
+ Feature& b = detected_features->at(i);
if (Square(a.x - b.x) + Square(a.y - b.y) < min_distance_squared) {
ok = false;
break;
@@ -100,9 +99,9 @@ void FilterFeaturesByDistance(const vector<Feature> &all_features,
}
}
-void DetectFAST(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectFAST(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
#ifndef LIBMV_NO_FAST_DETECTOR
const int min_distance = options.min_distance;
const int min_trackness = options.fast_min_trackness;
@@ -111,12 +110,14 @@ void DetectFAST(const FloatImage &grayscale_image,
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
- scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+ scoped_array<unsigned char> byte_image(
+ FloatImageToUCharArray(grayscale_image));
const int byte_image_offset = margin * stride + margin;
- // TODO(MatthiasF): Support targetting a feature count (binary search trackness)
+ // TODO(MatthiasF): Support targetting a feature count (binary search
+ // trackness)
int num_features;
- xy *all = fast9_detect(byte_image.get() + byte_image_offset,
+ xy* all = fast9_detect(byte_image.get() + byte_image_offset,
width,
height,
stride,
@@ -126,13 +127,13 @@ void DetectFAST(const FloatImage &grayscale_image,
free(all);
return;
}
- int *scores = fast9_score(byte_image.get() + byte_image_offset,
+ int* scores = fast9_score(byte_image.get() + byte_image_offset,
stride,
all,
num_features,
min_trackness);
// TODO(MatthiasF): merge with close feature suppression
- xy *nonmax = nonmax_suppression(all, scores, num_features, &num_features);
+ xy* nonmax = nonmax_suppression(all, scores, num_features, &num_features);
free(all);
// Remove too close features
// TODO(MatthiasF): A resolution independent parameter would be better than
@@ -152,89 +153,104 @@ void DetectFAST(const FloatImage &grayscale_image,
free(scores);
free(nonmax);
#else
- (void) grayscale_image; // Ignored.
- (void) options; // Ignored.
- (void) detected_features; // Ignored.
+ (void)grayscale_image; // Ignored.
+ (void)options; // Ignored.
+ (void)detected_features; // Ignored.
LOG(FATAL) << "FAST detector is disabled in this build.";
#endif
}
#ifdef __SSE2__
-static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
- int strideA, int strideB) {
+static unsigned int SAD(const ubyte* imageA,
+ const ubyte* imageB,
+ int strideA,
+ int strideB) {
__m128i a = _mm_setzero_si128();
for (int i = 0; i < 16; i++) {
- a = _mm_adds_epu16(a,
- _mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA+i*strideA)),
- _mm_loadu_si128((__m128i*)(imageB+i*strideB))));
+ a = _mm_adds_epu16(
+ a,
+ _mm_sad_epu8(_mm_loadu_si128((__m128i*)(imageA + i * strideA)),
+ _mm_loadu_si128((__m128i*)(imageB + i * strideB))));
}
return _mm_extract_epi16(a, 0) + _mm_extract_epi16(a, 4);
}
#else
-static unsigned int SAD(const ubyte* imageA, const ubyte* imageB,
- int strideA, int strideB) {
+static unsigned int SAD(const ubyte* imageA,
+ const ubyte* imageB,
+ int strideA,
+ int strideB) {
unsigned int sad = 0;
for (int i = 0; i < 16; i++) {
for (int j = 0; j < 16; j++) {
- sad += abs((int)imageA[i*strideA+j] - imageB[i*strideB+j]);
+ sad += abs((int)imageA[i * strideA + j] - imageB[i * strideB + j]);
}
}
return sad;
}
#endif
-void DetectMORAVEC(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectMORAVEC(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
const int distance = options.min_distance;
const int margin = options.margin;
- const unsigned char *pattern = options.moravec_pattern;
+ const unsigned char* pattern = options.moravec_pattern;
const int count = options.moravec_max_count;
const int width = grayscale_image.Width() - 2 * margin;
const int height = grayscale_image.Width() - 2 * margin;
const int stride = grayscale_image.Width();
- scoped_array<unsigned char> byte_image(FloatImageToUCharArray(grayscale_image));
+ scoped_array<unsigned char> byte_image(
+ FloatImageToUCharArray(grayscale_image));
unsigned short histogram[256];
memset(histogram, 0, sizeof(histogram));
- scoped_array<ubyte> scores(new ubyte[width*height]);
- memset(scores.get(), 0, width*height);
+ scoped_array<ubyte> scores(new ubyte[width * height]);
+ memset(scores.get(), 0, width * height);
const int r = 1; // radius for self similarity comparison
- for (int y = distance; y < height-distance; y++) {
- for (int x = distance; x < width-distance; x++) {
- const ubyte* s = &byte_image[y*stride+x];
- int score = // low self-similarity with overlapping patterns
- // OPTI: load pattern once
+ for (int y = distance; y < height - distance; y++) {
+ for (int x = distance; x < width - distance; x++) {
+ const ubyte* s = &byte_image[y * stride + x];
+ // low self-similarity with overlapping patterns
+ // OPTI: load pattern once
+ // clang-format off
+ int score =
SAD(s, s-r*stride-r, stride, stride)+SAD(s, s-r*stride, stride, stride)+SAD(s, s-r*stride+r, stride, stride)+
SAD(s, s -r, stride, stride)+ SAD(s, s +r, stride, stride)+
SAD(s, s+r*stride-r, stride, stride)+SAD(s, s+r*stride, stride, stride)+SAD(s, s+r*stride+r, stride, stride);
+ // clang-format on
+
score /= 256; // normalize
- if (pattern) // find only features similar to pattern
+ if (pattern) // find only features similar to pattern
score -= SAD(s, pattern, stride, 16);
- if (score <= 16) continue; // filter very self-similar features
+ if (score <= 16)
+ continue; // filter very self-similar features
score -= 16; // translate to score/histogram values
- if (score>255) score=255; // clip
- ubyte* c = &scores[y*width+x];
+ if (score > 255)
+ score = 255; // clip
+ ubyte* c = &scores[y * width + x];
for (int i = -distance; i < 0; i++) {
for (int j = -distance; j < distance; j++) {
- int s = c[i*width+j];
- if (s == 0) continue;
- if (s >= score) goto nonmax;
- c[i*width+j] = 0;
+ int s = c[i * width + j];
+ if (s == 0)
+ continue;
+ if (s >= score)
+ goto nonmax;
+ c[i * width + j] = 0;
histogram[s]--;
}
}
for (int i = 0, j = -distance; j < 0; j++) {
- int s = c[i*width+j];
- if (s == 0) continue;
- if (s >= score) goto nonmax;
- c[i*width+j] = 0;
+ int s = c[i * width + j];
+ if (s == 0)
+ continue;
+ if (s >= score)
+ goto nonmax;
+ c[i * width + j] = 0;
histogram[s]--;
}
c[0] = score, histogram[score]++;
- nonmax:
- { } // Do nothing.
+ nonmax : {} // Do nothing.
}
}
int min = 255, total = 0;
@@ -254,18 +270,16 @@ void DetectMORAVEC(const FloatImage &grayscale_image,
// Score calculation above uses top left corner of the
// patch as the origin, here we need to convert this value
// to a pattrn center by adding 8 pixels.
- detected_features->push_back(Feature((float) x + 8.0f,
- (float) y + 8.0f,
- (float) s,
- 16.0f));
+ detected_features->push_back(
+ Feature((float)x + 8.0f, (float)y + 8.0f, (float)s, 16.0f));
}
}
}
}
-void DetectHarris(const FloatImage &grayscale_image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+void DetectHarris(const FloatImage& grayscale_image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
const double alpha = 0.06;
const double sigma = 0.9;
@@ -281,9 +295,7 @@ void DetectHarris(const FloatImage &grayscale_image,
MultiplyElements(gradient_y, gradient_y, &gradient_yy);
MultiplyElements(gradient_x, gradient_y, &gradient_xy);
- FloatImage gradient_xx_blurred,
- gradient_yy_blurred,
- gradient_xy_blurred;
+ FloatImage gradient_xx_blurred, gradient_yy_blurred, gradient_xy_blurred;
ConvolveGaussian(gradient_xx, sigma, &gradient_xx_blurred);
ConvolveGaussian(gradient_yy, sigma, &gradient_yy_blurred);
ConvolveGaussian(gradient_xy, sigma, &gradient_xy_blurred);
@@ -304,10 +316,8 @@ void DetectHarris(const FloatImage &grayscale_image,
double traceA = A.trace();
double harris_function = detA - alpha * traceA * traceA;
if (harris_function > threshold) {
- all_features.push_back(Feature((float) x,
- (float) y,
- (float) harris_function,
- 5.0f));
+ all_features.push_back(
+ Feature((float)x, (float)y, (float)harris_function, 5.0f));
}
}
}
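
For reference, the quantity computed in the hunk above is the standard Harris corner response R = det(A) - alpha * trace(A)^2, evaluated on the Gaussian-blurred structure tensor A built from the gradient products. A minimal standalone sketch of that arithmetic (the function name and the plain-double interface are illustrative, not taken from the file):

// Harris response for one pixel, given the blurred gradient products
// gxx = <Ix*Ix>, gyy = <Iy*Iy>, gxy = <Ix*Iy> at that pixel.  With
//   A = | gxx gxy |
//       | gxy gyy |
// this is det(A) - alpha * trace(A)^2, the same expression as in the
// hunk above (alpha = 0.06, compared against harris_threshold,
// default 1e-5).
static double HarrisResponse(double gxx, double gyy, double gxy,
                             double alpha) {
  const double det = gxx * gyy - gxy * gxy;
  const double trace = gxx + gyy;
  return det - alpha * trace * trace;
}
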
@@ -318,17 +328,18 @@ void DetectHarris(const FloatImage &grayscale_image,
} // namespace
DetectOptions::DetectOptions()
- : type(DetectOptions::HARRIS),
- margin(0),
- min_distance(120),
- fast_min_trackness(kDefaultFastMinTrackness),
- moravec_max_count(0),
- moravec_pattern(NULL),
- harris_threshold(kDefaultHarrisThreshold) {}
-
-void Detect(const FloatImage &image,
- const DetectOptions &options,
- vector<Feature> *detected_features) {
+ : type(DetectOptions::HARRIS),
+ margin(0),
+ min_distance(120),
+ fast_min_trackness(kDefaultFastMinTrackness),
+ moravec_max_count(0),
+ moravec_pattern(NULL),
+ harris_threshold(kDefaultHarrisThreshold) {
+}
+
+void Detect(const FloatImage& image,
+ const DetectOptions& options,
+ vector<Feature>* detected_features) {
// Currently all the detectors requires image to be grayscale.
// Do it here to avoid code duplication.
FloatImage grayscale_image;
@@ -350,8 +361,7 @@ void Detect(const FloatImage &image,
}
}
-std::ostream& operator <<(std::ostream &os,
- const Feature &feature) {
+std::ostream& operator<<(std::ostream& os, const Feature& feature) {
os << "x: " << feature.x << ", y: " << feature.y;
os << ", score: " << feature.score;
os << ", size: " << feature.size;