git.blender.org/blender.git

Diffstat (limited to 'extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc')
-rw-r--r-- extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc | 682
1 file changed, 424 insertions(+), 258 deletions(-)
diff --git a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
index 91d18bbd604..e56de16bf92 100644
--- a/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
+++ b/extern/ceres/internal/ceres/compressed_row_sparse_matrix.cc
@@ -1,5 +1,5 @@
// Ceres Solver - A fast non-linear least squares minimizer
-// Copyright 2015 Google Inc. All rights reserved.
+// Copyright 2017 Google Inc. All rights reserved.
// http://ceres-solver.org/
//
// Redistribution and use in source and binary forms, with or without
@@ -35,6 +35,7 @@
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/port.h"
+#include "ceres/random.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"
@@ -54,9 +55,7 @@ namespace {
//
// If this is the case, this functor will not be a StrictWeakOrdering.
struct RowColLessThan {
- RowColLessThan(const int* rows, const int* cols)
- : rows(rows), cols(cols) {
- }
+ RowColLessThan(const int* rows, const int* cols) : rows(rows), cols(cols) {}
bool operator()(const int x, const int y) const {
if (rows[x] == rows[y]) {
@@ -69,6 +68,91 @@ struct RowColLessThan {
const int* cols;
};
+void TransposeForCompressedRowSparseStructure(const int num_rows,
+ const int num_cols,
+ const int num_nonzeros,
+ const int* rows,
+ const int* cols,
+ const double* values,
+ int* transpose_rows,
+ int* transpose_cols,
+ double* transpose_values) {
+ // Explicitly zero out transpose_rows.
+ std::fill(transpose_rows, transpose_rows + num_cols + 1, 0);
+
+ // Count the number of entries in each column of the original matrix
+ // and assign to transpose_rows[col + 1].
+ for (int idx = 0; idx < num_nonzeros; ++idx) {
+ ++transpose_rows[cols[idx] + 1];
+ }
+
+ // Compute the starting position for each row in the transpose by
+ // computing the cumulative sum of the entries of transpose_rows.
+ for (int i = 1; i < num_cols + 1; ++i) {
+ transpose_rows[i] += transpose_rows[i - 1];
+ }
+
+ // Populate transpose_cols and (optionally) transpose_values by
+ // walking the entries of the source matrix. For each entry that
+ // is added, the value of transpose_rows is incremented, allowing us
+ // to keep track of where the next entry for that row should go.
+ //
+ // As a result transpose_rows is shifted to the left by one entry.
+ for (int r = 0; r < num_rows; ++r) {
+ for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
+ const int c = cols[idx];
+ const int transpose_idx = transpose_rows[c]++;
+ transpose_cols[transpose_idx] = r;
+ if (values != NULL && transpose_values != NULL) {
+ transpose_values[transpose_idx] = values[idx];
+ }
+ }
+ }
+
+ // This loop undoes the left shift to transpose_rows introduced by
+ // the previous loop.
+ for (int i = num_cols - 1; i > 0; --i) {
+ transpose_rows[i] = transpose_rows[i - 1];
+ }
+ transpose_rows[0] = 0;
+}
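For reference, a standalone trace of the counting-sort transpose above on a hypothetical 2x3 matrix (illustration only, not part of the patch):

// Hypothetical 2x3 matrix [[1, 0, 2], [0, 3, 0]] in CSR form.
const int rows[] = {0, 2, 3};
const int cols[] = {0, 2, 1};
const double values[] = {1.0, 2.0, 3.0};
int transpose_rows[4];  // num_cols + 1 entries.
int transpose_cols[3];
double transpose_values[3];
TransposeForCompressedRowSparseStructure(2, 3, 3, rows, cols, values,
                                         transpose_rows, transpose_cols,
                                         transpose_values);
// The 3x2 transpose [[1, 0], [0, 3], [2, 0]] comes out as:
//   transpose_rows   == {0, 1, 2, 3}
//   transpose_cols   == {0, 1, 0}
//   transpose_values == {1.0, 3.0, 2.0}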
+
+void AddRandomBlock(const int num_rows,
+ const int num_cols,
+ const int row_block_begin,
+ const int col_block_begin,
+ std::vector<int>* rows,
+ std::vector<int>* cols,
+ std::vector<double>* values) {
+ for (int r = 0; r < num_rows; ++r) {
+ for (int c = 0; c < num_cols; ++c) {
+ rows->push_back(row_block_begin + r);
+ cols->push_back(col_block_begin + c);
+ values->push_back(RandNormal());
+ }
+ }
+}
+
+void AddSymmetricRandomBlock(const int num_rows,
+ const int row_block_begin,
+ std::vector<int>* rows,
+ std::vector<int>* cols,
+ std::vector<double>* values) {
+ for (int r = 0; r < num_rows; ++r) {
+ for (int c = r; c < num_rows; ++c) {
+ const double v = RandNormal();
+ rows->push_back(row_block_begin + r);
+ cols->push_back(row_block_begin + c);
+ values->push_back(v);
+ if (r != c) {
+ rows->push_back(row_block_begin + c);
+ cols->push_back(row_block_begin + r);
+ values->push_back(v);
+ }
+ }
+ }
+}
+
} // namespace
// This constructor gives you a semi-initialized CompressedRowSparseMatrix.
@@ -77,69 +161,96 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(int num_rows,
int max_num_nonzeros) {
num_rows_ = num_rows;
num_cols_ = num_cols;
+ storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1, 0);
cols_.resize(max_num_nonzeros, 0);
values_.resize(max_num_nonzeros, 0.0);
+ VLOG(1) << "# of rows: " << num_rows_ << " # of columns: " << num_cols_
+ << " max_num_nonzeros: " << cols_.size() << ". Allocating "
+ << (num_rows_ + 1) * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(int) + // NOLINT
+ cols_.size() * sizeof(double); // NOLINT
+}
+
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ const TripletSparseMatrix& input) {
+ return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, false);
+}
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating " << (num_rows_ + 1) * sizeof(int) + // NOLINT
- cols_.size() * sizeof(int) + // NOLINT
- cols_.size() * sizeof(double); // NOLINT
+CompressedRowSparseMatrix*
+CompressedRowSparseMatrix::FromTripletSparseMatrixTransposed(
+ const TripletSparseMatrix& input) {
+ return CompressedRowSparseMatrix::FromTripletSparseMatrix(input, true);
}
-CompressedRowSparseMatrix::CompressedRowSparseMatrix(
- const TripletSparseMatrix& m) {
- num_rows_ = m.num_rows();
- num_cols_ = m.num_cols();
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ const TripletSparseMatrix& input, bool transpose) {
+ int num_rows = input.num_rows();
+ int num_cols = input.num_cols();
+ const int* rows = input.rows();
+ const int* cols = input.cols();
+ const double* values = input.values();
- rows_.resize(num_rows_ + 1, 0);
- cols_.resize(m.num_nonzeros(), 0);
- values_.resize(m.max_num_nonzeros(), 0.0);
+ if (transpose) {
+ std::swap(num_rows, num_cols);
+ std::swap(rows, cols);
+ }
- // index is the list of indices into the TripletSparseMatrix m.
- vector<int> index(m.num_nonzeros(), 0);
- for (int i = 0; i < m.num_nonzeros(); ++i) {
+ // index is the list of indices into the TripletSparseMatrix input.
+ vector<int> index(input.num_nonzeros(), 0);
+ for (int i = 0; i < input.num_nonzeros(); ++i) {
index[i] = i;
}
// Sort index such that the entries of m are ordered by row and ties
// are broken by column.
- sort(index.begin(), index.end(), RowColLessThan(m.rows(), m.cols()));
+ std::sort(index.begin(), index.end(), RowColLessThan(rows, cols));
+
+ VLOG(1) << "# of rows: " << num_rows << " # of columns: " << num_cols
+ << " num_nonzeros: " << input.num_nonzeros() << ". Allocating "
+ << ((num_rows + 1) * sizeof(int) + // NOLINT
+ input.num_nonzeros() * sizeof(int) + // NOLINT
+ input.num_nonzeros() * sizeof(double)); // NOLINT
+
+ CompressedRowSparseMatrix* output =
+ new CompressedRowSparseMatrix(num_rows, num_cols, input.num_nonzeros());
- VLOG(1) << "# of rows: " << num_rows_
- << " # of columns: " << num_cols_
- << " max_num_nonzeros: " << cols_.size()
- << ". Allocating "
- << ((num_rows_ + 1) * sizeof(int) + // NOLINT
- cols_.size() * sizeof(int) + // NOLINT
- cols_.size() * sizeof(double)); // NOLINT
+ if (num_rows == 0) {
+ // No data to copy.
+ return output;
+ }
// Copy the contents of the cols and values array in the order given
// by index and count the number of entries in each row.
- for (int i = 0; i < m.num_nonzeros(); ++i) {
+ int* output_rows = output->mutable_rows();
+ int* output_cols = output->mutable_cols();
+ double* output_values = output->mutable_values();
+
+ output_rows[0] = 0;
+ for (int i = 0; i < index.size(); ++i) {
const int idx = index[i];
- ++rows_[m.rows()[idx] + 1];
- cols_[i] = m.cols()[idx];
- values_[i] = m.values()[idx];
+ ++output_rows[rows[idx] + 1];
+ output_cols[i] = cols[idx];
+ output_values[i] = values[idx];
}
// Find the cumulative sum of the row counts.
- for (int i = 1; i < num_rows_ + 1; ++i) {
- rows_[i] += rows_[i - 1];
+ for (int i = 1; i < num_rows + 1; ++i) {
+ output_rows[i] += output_rows[i - 1];
}
- CHECK_EQ(num_nonzeros(), m.num_nonzeros());
+ CHECK_EQ(output->num_nonzeros(), input.num_nonzeros());
+ return output;
}
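For reference, a hypothetical usage sketch of the triplet-to-CSR conversion above (illustration only, not part of the patch; it assumes the vector-based TripletSparseMatrix constructor that is also used in CreateRandomMatrix below):

// Unordered triplet entries of a hypothetical 3x3 matrix:
// (2,1)=4, (0,0)=1, (0,2)=2, (1,1)=3.
std::vector<int> triplet_rows = {2, 0, 0, 1};
std::vector<int> triplet_cols = {1, 0, 2, 1};
std::vector<double> triplet_values = {4.0, 1.0, 2.0, 3.0};
TripletSparseMatrix triplet(3, 3, triplet_rows, triplet_cols, triplet_values);
CompressedRowSparseMatrix* csr =
    CompressedRowSparseMatrix::FromTripletSparseMatrix(triplet);
// After sorting by (row, col) and accumulating per-row counts:
//   csr->rows()   == {0, 2, 3, 4}
//   csr->cols()   == {0, 2, 1, 1}
//   csr->values() == {1.0, 2.0, 3.0, 4.0}
delete csr;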
CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
int num_rows) {
- CHECK_NOTNULL(diagonal);
+ CHECK(diagonal != nullptr);
num_rows_ = num_rows;
num_cols_ = num_rows;
+ storage_type_ = UNSYMMETRIC;
rows_.resize(num_rows + 1);
cols_.resize(num_rows);
values_.resize(num_rows);
@@ -154,47 +265,150 @@ CompressedRowSparseMatrix::CompressedRowSparseMatrix(const double* diagonal,
CHECK_EQ(num_nonzeros(), num_rows);
}
-CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {
-}
+CompressedRowSparseMatrix::~CompressedRowSparseMatrix() {}
void CompressedRowSparseMatrix::SetZero() {
std::fill(values_.begin(), values_.end(), 0);
}
+// TODO(sameeragarwal): Make RightMultiply and LeftMultiply
+// block-aware for higher performance.
void CompressedRowSparseMatrix::RightMultiply(const double* x,
double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
+
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ }
+ }
+ } else if (storage_type_ == UPPER_TRIANGULAR) {
+ // Because of their block structure, we will have entries that lie
+ // above (below) the diagonal for lower (upper) triangular matrices,
+ // so the loops below need to account for this.
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+
+ // For upper triangular matrices r <= c, so skip entries with
+ // r > c.
+ while (idx < idx_end && r > cols_[idx]) {
+ ++idx;
+ }
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- y[r] += values_[idx] * x[cols_[idx]];
+ for (; idx < idx_end; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ // Since we are only iterating over the upper triangular part
+ // of the matrix, add contributions for the strictly lower
+ // triangular part.
+ if (r != c) {
+ y[c] += v * x[r];
+ }
+ }
+ }
+ } else if (storage_type_ == LOWER_TRIANGULAR) {
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+ // For lower triangular matrices, we only iterate over entries with
+ // r >= c.
+ for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+ const int c = cols_[idx];
+ const double v = values_[idx];
+ y[r] += v * x[c];
+ // Since we are only iterating over the lower triangular part
+ // of the matrix, add contributions for the strictly upper
+ // triangular part.
+ if (r != c) {
+ y[c] += v * x[r];
+ }
+ }
}
+ } else {
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
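For reference, a small standalone illustration of the symmetric storage handling above (hypothetical values, not part of the patch): the 2x2 symmetric matrix [[1, 2], [2, 3]] stored as its upper triangle only, multiplied with x = [1, 1] using the same loop structure as the UPPER_TRIANGULAR branch.

// Upper-triangle CSR of [[1, 2], [2, 3]]: entries (0,0)=1, (0,1)=2, (1,1)=3.
const int rows[] = {0, 2, 3};
const int cols[] = {0, 1, 1};
const double values[] = {1.0, 2.0, 3.0};
const double x[] = {1.0, 1.0};
double y[] = {0.0, 0.0};
for (int r = 0; r < 2; ++r) {
  for (int idx = rows[r]; idx < rows[r + 1]; ++idx) {
    const int c = cols[idx];
    y[r] += values[idx] * x[c];
    if (r != c) {
      y[c] += values[idx] * x[r];  // Contribution of the mirrored (c, r) entry.
    }
  }
}
// y == {3.0, 5.0}, the product of the full symmetric matrix with x.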
void CompressedRowSparseMatrix::LeftMultiply(const double* x, double* y) const {
- CHECK_NOTNULL(x);
- CHECK_NOTNULL(y);
+ CHECK(x != nullptr);
+ CHECK(y != nullptr);
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- y[cols_[idx]] += values_[idx] * x[r];
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int r = 0; r < num_rows_; ++r) {
+ for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
+ y[cols_[idx]] += values_[idx] * x[r];
+ }
}
+ } else {
+ // Since the matrix is symmetric, LeftMultiply = RightMultiply.
+ RightMultiply(x, y);
}
}
void CompressedRowSparseMatrix::SquaredColumnNorm(double* x) const {
- CHECK_NOTNULL(x);
+ CHECK(x != nullptr);
std::fill(x, x + num_cols_, 0.0);
- for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
- x[cols_[idx]] += values_[idx] * values_[idx];
+ if (storage_type_ == UNSYMMETRIC) {
+ for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
+ x[cols_[idx]] += values_[idx] * values_[idx];
+ }
+ } else if (storage_type_ == UPPER_TRIANGULAR) {
+ // Because of their block structure, we will have entries that lie
+ // above (below) the diagonal for lower (upper) triangular
+ // matrices, so the loops below need to account for this.
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+
+ // For upper triangular matrices r <= c, so skip entries with
+ // r > c.
+ while (idx < idx_end && r > cols_[idx]) {
+ ++idx;
+ }
+
+ for (; idx < idx_end; ++idx) {
+ const int c = cols_[idx];
+ const double v2 = values_[idx] * values_[idx];
+ x[c] += v2;
+ // Since we are only iterating over the upper triangular part
+ // of the matrix, add contributions for the strictly lower
+ // triangular part.
+ if (r != c) {
+ x[r] += v2;
+ }
+ }
+ }
+ } else if (storage_type_ == LOWER_TRIANGULAR) {
+ for (int r = 0; r < num_rows_; ++r) {
+ int idx = rows_[r];
+ const int idx_end = rows_[r + 1];
+ // For lower triangular matrices, we only iterate over entries with
+ // r >= c.
+ for (; idx < idx_end && r >= cols_[idx]; ++idx) {
+ const int c = cols_[idx];
+ const double v2 = values_[idx] * values_[idx];
+ x[c] += v2;
+ // Since we are only iterating over the lower triangular part
+ // of the matrix, add contributions for the strictly upper
+ // triangular part.
+ if (r != c) {
+ x[r] += v2;
+ }
+ }
+ }
+ } else {
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
}
}
-
void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
- CHECK_NOTNULL(scale);
+ CHECK(scale != nullptr);
for (int idx = 0; idx < rows_[num_rows_]; ++idx) {
values_[idx] *= scale[cols_[idx]];
@@ -202,7 +416,7 @@ void CompressedRowSparseMatrix::ScaleColumns(const double* scale) {
}
void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
- CHECK_NOTNULL(dense_matrix);
+ CHECK(dense_matrix != nullptr);
dense_matrix->resize(num_rows_, num_cols_);
dense_matrix->setZero();
@@ -216,10 +430,17 @@ void CompressedRowSparseMatrix::ToDenseMatrix(Matrix* dense_matrix) const {
void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
CHECK_GE(delta_rows, 0);
CHECK_LE(delta_rows, num_rows_);
+ CHECK_EQ(storage_type_, UNSYMMETRIC);
num_rows_ -= delta_rows;
rows_.resize(num_rows_ + 1);
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return;
+ }
+
// Walk the list of row blocks until we reach the new number of rows
// and then drop the rest of the row blocks.
int num_row_blocks = 0;
@@ -233,9 +454,11 @@ void CompressedRowSparseMatrix::DeleteRows(int delta_rows) {
}
void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
+ CHECK_EQ(storage_type_, UNSYMMETRIC);
CHECK_EQ(m.num_cols(), num_cols_);
- CHECK(row_blocks_.size() == 0 || m.row_blocks().size() !=0)
+ CHECK((row_blocks_.empty() && m.row_blocks().empty()) ||
+ (!row_blocks_.empty() && !m.row_blocks().empty()))
<< "Cannot append a matrix with row blocks to one without and vice versa."
<< "This matrix has : " << row_blocks_.size() << " row blocks."
<< "The matrix being appended has: " << m.row_blocks().size()
@@ -254,9 +477,8 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
DCHECK_LT(num_nonzeros(), cols_.size());
if (m.num_nonzeros() > 0) {
std::copy(m.cols(), m.cols() + m.num_nonzeros(), &cols_[num_nonzeros()]);
- std::copy(m.values(),
- m.values() + m.num_nonzeros(),
- &values_[num_nonzeros()]);
+ std::copy(
+ m.values(), m.values() + m.num_nonzeros(), &values_[num_nonzeros()]);
}
rows_.resize(num_rows_ + m.num_rows() + 1);
@@ -270,20 +492,22 @@ void CompressedRowSparseMatrix::AppendRows(const CompressedRowSparseMatrix& m) {
}
num_rows_ += m.num_rows();
- row_blocks_.insert(row_blocks_.end(),
- m.row_blocks().begin(),
- m.row_blocks().end());
+
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return;
+ }
+
+ row_blocks_.insert(
+ row_blocks_.end(), m.row_blocks().begin(), m.row_blocks().end());
}
void CompressedRowSparseMatrix::ToTextFile(FILE* file) const {
- CHECK_NOTNULL(file);
+ CHECK(file != nullptr);
for (int r = 0; r < num_rows_; ++r) {
for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- fprintf(file,
- "% 10d % 10d %17f\n",
- r,
- cols_[idx],
- values_[idx]);
+ fprintf(file, "% 10d % 10d %17f\n", r, cols_[idx], values_[idx]);
}
}
}
@@ -308,29 +532,8 @@ void CompressedRowSparseMatrix::SetMaxNumNonZeros(int num_nonzeros) {
values_.resize(num_nonzeros);
}
-void CompressedRowSparseMatrix::SolveLowerTriangularInPlace(
- double* solution) const {
- for (int r = 0; r < num_rows_; ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1] - 1; ++idx) {
- solution[r] -= values_[idx] * solution[cols_[idx]];
- }
- solution[r] /= values_[rows_[r + 1] - 1];
- }
-}
-
-void CompressedRowSparseMatrix::SolveLowerTriangularTransposeInPlace(
- double* solution) const {
- for (int r = num_rows_ - 1; r >= 0; --r) {
- solution[r] /= values_[rows_[r + 1] - 1];
- for (int idx = rows_[r + 1] - 2; idx >= rows_[r]; --idx) {
- solution[cols_[idx]] -= values_[idx] * solution[r];
- }
- }
-}
-
CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateBlockDiagonalMatrix(
- const double* diagonal,
- const vector<int>& blocks) {
+ const double* diagonal, const vector<int>& blocks) {
int num_rows = 0;
int num_nonzeros = 0;
for (int i = 0; i < blocks.size(); ++i) {
@@ -373,189 +576,152 @@ CompressedRowSparseMatrix* CompressedRowSparseMatrix::Transpose() const {
CompressedRowSparseMatrix* transpose =
new CompressedRowSparseMatrix(num_cols_, num_rows_, num_nonzeros());
- int* transpose_rows = transpose->mutable_rows();
- int* transpose_cols = transpose->mutable_cols();
- double* transpose_values = transpose->mutable_values();
-
- for (int idx = 0; idx < num_nonzeros(); ++idx) {
- ++transpose_rows[cols_[idx] + 1];
- }
-
- for (int i = 1; i < transpose->num_rows() + 1; ++i) {
- transpose_rows[i] += transpose_rows[i - 1];
- }
-
- for (int r = 0; r < num_rows(); ++r) {
- for (int idx = rows_[r]; idx < rows_[r + 1]; ++idx) {
- const int c = cols_[idx];
- const int transpose_idx = transpose_rows[c]++;
- transpose_cols[transpose_idx] = r;
- transpose_values[transpose_idx] = values_[idx];
- }
- }
-
- for (int i = transpose->num_rows() - 1; i > 0 ; --i) {
- transpose_rows[i] = transpose_rows[i - 1];
+ switch (storage_type_) {
+ case UNSYMMETRIC:
+ transpose->set_storage_type(UNSYMMETRIC);
+ break;
+ case LOWER_TRIANGULAR:
+ transpose->set_storage_type(UPPER_TRIANGULAR);
+ break;
+ case UPPER_TRIANGULAR:
+ transpose->set_storage_type(LOWER_TRIANGULAR);
+ break;
+ default:
+ LOG(FATAL) << "Unknown storage type: " << storage_type_;
+ };
+
+ TransposeForCompressedRowSparseStructure(num_rows(),
+ num_cols(),
+ num_nonzeros(),
+ rows(),
+ cols(),
+ values(),
+ transpose->mutable_rows(),
+ transpose->mutable_cols(),
+ transpose->mutable_values());
+
+ // The rest of the code updates the block information. Immediately
+ // return in case of no block information.
+ if (row_blocks_.empty()) {
+ return transpose;
}
- transpose_rows[0] = 0;
*(transpose->mutable_row_blocks()) = col_blocks_;
*(transpose->mutable_col_blocks()) = row_blocks_;
-
return transpose;
}
-namespace {
-// A ProductTerm is a term in the outer product of a matrix with
-// itself.
-struct ProductTerm {
- ProductTerm(const int row, const int col, const int index)
- : row(row), col(col), index(index) {
- }
-
- bool operator<(const ProductTerm& right) const {
- if (row == right.row) {
- if (col == right.col) {
- return index < right.index;
- }
- return col < right.col;
- }
- return row < right.row;
- }
-
- int row;
- int col;
- int index;
-};
-
-CompressedRowSparseMatrix*
-CompressAndFillProgram(const int num_rows,
- const int num_cols,
- const vector<ProductTerm>& product,
- vector<int>* program) {
- CHECK_GT(product.size(), 0);
-
- // Count the number of unique product term, which in turn is the
- // number of non-zeros in the outer product.
- int num_nonzeros = 1;
- for (int i = 1; i < product.size(); ++i) {
- if (product[i].row != product[i - 1].row ||
- product[i].col != product[i - 1].col) {
- ++num_nonzeros;
- }
- }
-
- CompressedRowSparseMatrix* matrix =
- new CompressedRowSparseMatrix(num_rows, num_cols, num_nonzeros);
-
- int* crsm_rows = matrix->mutable_rows();
- std::fill(crsm_rows, crsm_rows + num_rows + 1, 0);
- int* crsm_cols = matrix->mutable_cols();
- std::fill(crsm_cols, crsm_cols + num_nonzeros, 0);
-
- CHECK_NOTNULL(program)->clear();
- program->resize(product.size());
-
- // Iterate over the sorted product terms. This means each row is
- // filled one at a time, and we are able to assign a position in the
- // values array to each term.
- //
- // If terms repeat, i.e., they contribute to the same entry in the
- // result matrix), then they do not affect the sparsity structure of
- // the result matrix.
- int nnz = 0;
- crsm_cols[0] = product[0].col;
- crsm_rows[product[0].row + 1]++;
- (*program)[product[0].index] = nnz;
- for (int i = 1; i < product.size(); ++i) {
- const ProductTerm& previous = product[i - 1];
- const ProductTerm& current = product[i];
-
- // Sparsity structure is updated only if the term is not a repeat.
- if (previous.row != current.row || previous.col != current.col) {
- crsm_cols[++nnz] = current.col;
- crsm_rows[current.row + 1]++;
- }
-
- // All terms get assigned the position in the values array where
- // their value is accumulated.
- (*program)[current.index] = nnz;
- }
-
- for (int i = 1; i < num_rows + 1; ++i) {
- crsm_rows[i] += crsm_rows[i - 1];
- }
-
- return matrix;
-}
-
-} // namespace
-
-CompressedRowSparseMatrix*
-CompressedRowSparseMatrix::CreateOuterProductMatrixAndProgram(
- const CompressedRowSparseMatrix& m,
- vector<int>* program) {
- CHECK_NOTNULL(program)->clear();
- CHECK_GT(m.num_nonzeros(), 0)
- << "Congratulations, "
- << "you found a bug in Ceres. Please report it.";
-
- vector<ProductTerm> product;
- const vector<int>& row_blocks = m.row_blocks();
- int row_block_begin = 0;
- // Iterate over row blocks
- for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
- const int row_block_end = row_block_begin + row_blocks[row_block];
- // Compute the outer product terms for just one row per row block.
- const int r = row_block_begin;
- // Compute the lower triangular part of the product.
- for (int idx1 = m.rows()[r]; idx1 < m.rows()[r + 1]; ++idx1) {
- for (int idx2 = m.rows()[r]; idx2 <= idx1; ++idx2) {
- product.push_back(ProductTerm(m.cols()[idx1],
- m.cols()[idx2],
- product.size()));
- }
+CompressedRowSparseMatrix* CompressedRowSparseMatrix::CreateRandomMatrix(
+ CompressedRowSparseMatrix::RandomMatrixOptions options) {
+ CHECK_GT(options.num_row_blocks, 0);
+ CHECK_GT(options.min_row_block_size, 0);
+ CHECK_GT(options.max_row_block_size, 0);
+ CHECK_LE(options.min_row_block_size, options.max_row_block_size);
+
+ if (options.storage_type == UNSYMMETRIC) {
+ CHECK_GT(options.num_col_blocks, 0);
+ CHECK_GT(options.min_col_block_size, 0);
+ CHECK_GT(options.max_col_block_size, 0);
+ CHECK_LE(options.min_col_block_size, options.max_col_block_size);
+ } else {
+ // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
+ options.num_col_blocks = options.num_row_blocks;
+ options.min_col_block_size = options.min_row_block_size;
+ options.max_col_block_size = options.max_row_block_size;
+ }
+
+ CHECK_GT(options.block_density, 0.0);
+ CHECK_LE(options.block_density, 1.0);
+
+ vector<int> row_blocks;
+ vector<int> col_blocks;
+
+ // Generate the row block structure.
+ for (int i = 0; i < options.num_row_blocks; ++i) {
+ // Generate a random integer in [min_row_block_size, max_row_block_size]
+ const int delta_block_size =
+ Uniform(options.max_row_block_size - options.min_row_block_size);
+ row_blocks.push_back(options.min_row_block_size + delta_block_size);
+ }
+
+ if (options.storage_type == UNSYMMETRIC) {
+ // Generate the col block structure.
+ for (int i = 0; i < options.num_col_blocks; ++i) {
+ // Generate a random integer in [min_col_block_size, max_col_block_size]
+ const int delta_block_size =
+ Uniform(options.max_col_block_size - options.min_col_block_size);
+ col_blocks.push_back(options.min_col_block_size + delta_block_size);
}
- row_block_begin = row_block_end;
- }
- CHECK_EQ(row_block_begin, m.num_rows());
- sort(product.begin(), product.end());
- return CompressAndFillProgram(m.num_cols(), m.num_cols(), product, program);
-}
+ } else {
+ // Symmetric matrices (LOWER_TRIANGULAR or UPPER_TRIANGULAR).
+ col_blocks = row_blocks;
+ }
+
+ vector<int> tsm_rows;
+ vector<int> tsm_cols;
+ vector<double> tsm_values;
+
+ // For ease of construction, we are going to generate the
+ // CompressedRowSparseMatrix by generating it as a
+ // TripletSparseMatrix and then converting it to a
+ // CompressedRowSparseMatrix.
+
+ // It is possible that the random matrix is empty, which is likely
+ // not what the user wants, so keep generating until the matrix has
+ // at least one non-zero entry.
+ while (tsm_values.empty()) {
+ tsm_rows.clear();
+ tsm_cols.clear();
+ tsm_values.clear();
+
+ int row_block_begin = 0;
+ for (int r = 0; r < options.num_row_blocks; ++r) {
+ int col_block_begin = 0;
+ for (int c = 0; c < options.num_col_blocks; ++c) {
+ if (((options.storage_type == UPPER_TRIANGULAR) && (r > c)) ||
+ ((options.storage_type == LOWER_TRIANGULAR) && (r < c))) {
+ col_block_begin += col_blocks[c];
+ continue;
+ }
-void CompressedRowSparseMatrix::ComputeOuterProduct(
- const CompressedRowSparseMatrix& m,
- const vector<int>& program,
- CompressedRowSparseMatrix* result) {
- result->SetZero();
- double* values = result->mutable_values();
- const vector<int>& row_blocks = m.row_blocks();
-
- int cursor = 0;
- int row_block_begin = 0;
- const double* m_values = m.values();
- const int* m_rows = m.rows();
- // Iterate over row blocks.
- for (int row_block = 0; row_block < row_blocks.size(); ++row_block) {
- const int row_block_end = row_block_begin + row_blocks[row_block];
- const int saved_cursor = cursor;
- for (int r = row_block_begin; r < row_block_end; ++r) {
- // Reuse the program segment for each row in this row block.
- cursor = saved_cursor;
- const int row_begin = m_rows[r];
- const int row_end = m_rows[r + 1];
- for (int idx1 = row_begin; idx1 < row_end; ++idx1) {
- const double v1 = m_values[idx1];
- for (int idx2 = row_begin; idx2 <= idx1; ++idx2, ++cursor) {
- values[program[cursor]] += v1 * m_values[idx2];
+ // Randomly determine if this block is present or not.
+ if (RandDouble() <= options.block_density) {
+ // If the matrix is symmetric, then we take care to generate
+ // symmetric diagonal blocks.
+ if (options.storage_type == UNSYMMETRIC || r != c) {
+ AddRandomBlock(row_blocks[r],
+ col_blocks[c],
+ row_block_begin,
+ col_block_begin,
+ &tsm_rows,
+ &tsm_cols,
+ &tsm_values);
+ } else {
+ AddSymmetricRandomBlock(row_blocks[r],
+ row_block_begin,
+ &tsm_rows,
+ &tsm_cols,
+ &tsm_values);
+ }
}
+ col_block_begin += col_blocks[c];
}
+ row_block_begin += row_blocks[r];
}
- row_block_begin = row_block_end;
}
- CHECK_EQ(row_block_begin, m.num_rows());
- CHECK_EQ(cursor, program.size());
+ const int num_rows = std::accumulate(row_blocks.begin(), row_blocks.end(), 0);
+ const int num_cols = std::accumulate(col_blocks.begin(), col_blocks.end(), 0);
+ const bool kDoNotTranspose = false;
+ CompressedRowSparseMatrix* matrix =
+ CompressedRowSparseMatrix::FromTripletSparseMatrix(
+ TripletSparseMatrix(
+ num_rows, num_cols, tsm_rows, tsm_cols, tsm_values),
+ kDoNotTranspose);
+ (*matrix->mutable_row_blocks()) = row_blocks;
+ (*matrix->mutable_col_blocks()) = col_blocks;
+ matrix->set_storage_type(options.storage_type);
+ return matrix;
}
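For reference, a hypothetical usage sketch of the random matrix factory above (values chosen only for illustration; it relies only on the option fields read by CreateRandomMatrix):

CompressedRowSparseMatrix::RandomMatrixOptions options;
options.num_row_blocks = 4;
options.min_row_block_size = 1;
options.max_row_block_size = 3;
options.num_col_blocks = 3;
options.min_col_block_size = 1;
options.max_col_block_size = 2;
options.block_density = 0.25;
options.storage_type = CompressedRowSparseMatrix::UNSYMMETRIC;
CompressedRowSparseMatrix* random_matrix =
    CompressedRowSparseMatrix::CreateRandomMatrix(options);
// random_matrix carries row/column block structure and at least one non-zero.
delete random_matrix;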
} // namespace internal