Diffstat (limited to 'extern/Eigen3/Eigen/src/SparseCore')
21 files changed, 1135 insertions, 652 deletions
diff --git a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
index 6cfaadbaa9a..17fff96a78b 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
@@ -288,9 +288,10 @@ class AmbiVector<_Scalar,_Index>::Iterator
       * In practice, all coefficients having a magnitude smaller than \a epsilon
       * are skipped.
       */
-    Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
+    Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
       : m_vector(vec)
     {
+      using std::abs;
       m_epsilon = epsilon;
       m_isDense = m_vector.m_mode==IsDense;
       if (m_isDense)
@@ -304,7 +305,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
       {
         ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
         m_currentEl = m_vector.m_llStart;
-        while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon)
+        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
           m_currentEl = llElements[m_currentEl].next;
         if (m_currentEl<0)
         {
@@ -326,11 +327,12 @@ class AmbiVector<_Scalar,_Index>::Iterator

     Iterator& operator++()
     {
+      using std::abs;
       if (m_isDense)
       {
         do {
           ++m_cachedIndex;
-        } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
         if (m_cachedIndex<m_vector.m_end)
           m_cachedValue = m_vector.m_buffer[m_cachedIndex];
         else
@@ -341,7 +343,7 @@ class AmbiVector<_Scalar,_Index>::Iterator
         ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
         do {
           m_currentEl = llElements[m_currentEl].next;
-        } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<m_epsilon);
         if (m_currentEl<0)
         {
           m_cachedIndex = -1;
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
index 85a998aff10..3321fab4a8a 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
@@ -139,7 +139,7 @@ class CompressedStorage
     /** \returns the stored value at index \a key
       * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
-    inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+    inline Scalar at(Index key, const Scalar& defaultValue = Scalar(0)) const
     {
       if (m_size==0)
         return defaultValue;
@@ -152,7 +152,7 @@ class CompressedStorage
     }

     /** Like at(), but the search is performed in the range [start,end) */
-    inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+    inline Scalar atInRange(size_t start, size_t end, Index key, const Scalar& defaultValue = Scalar(0)) const
     {
       if (start>=end)
         return Scalar(0);
@@ -167,7 +167,7 @@ class CompressedStorage
     /** \returns a reference to the value at index \a key
       * If the value does not exist, then the value \a defaultValue is inserted
       * such that the keys are sorted. */
-    inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+    inline Scalar& atWithInsertion(Index key, const Scalar& defaultValue = Scalar(0))
     {
       size_t id = searchLowerIndex(0,m_size,key);
       if (id>=m_size || m_indices[id]!=key)
@@ -184,7 +184,7 @@ class CompressedStorage
       return m_values[id];
     }

-    void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
     {
       size_t k = 0;
       size_t n = size();
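Note: the hunks above replace the removed internal::abs() with an unqualified abs() call after a `using std::abs;` declaration, and pass scalar parameters by const reference. A minimal standalone sketch of why the lookup idiom matters for user-defined scalar types (the Interval type and isNegligible() helper are hypothetical illustrations, not Eigen code):

    #include <algorithm>
    #include <cmath>

    namespace mylib {
      struct Interval { double lo, hi; };
      // User-provided overload, found via argument-dependent lookup (ADL).
      inline double abs(const Interval& x) { return (std::max)(std::abs(x.lo), std::abs(x.hi)); }
    }

    template<typename Scalar>
    bool isNegligible(const Scalar& x, double epsilon)
    {
      using std::abs;           // builtin types fall back to std::abs
      return abs(x) <= epsilon; // unqualified call: ADL finds mylib::abs for Interval
    }

    int main()
    {
      bool a = isNegligible(1e-12, 1e-9);        // uses std::abs
      mylib::Interval iv = { -1e-12, 1e-12 };
      bool b = isNegligible(iv, 1e-9);           // uses mylib::abs
      return (a && b) ? 0 : 1;
    }

A hard-coded internal::abs() would reject such types; the const-reference parameters likewise avoid copying expensive multiprecision scalars.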
diff --git a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
index 16b5e1dba6c..5c320e2d2db 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -66,9 +66,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
     }

     // unordered insertion
-    for(int k=0; k<nnz; ++k)
+    for(Index k=0; k<nnz; ++k)
     {
-      int i = indices[k];
+      Index i = indices[k];
       res.insertBackByOuterInnerUnordered(j,i) = values[i];
       mask[i] = false;
     }
@@ -76,8 +76,8 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
 #if 0
     // alternative ordered insertion code:
-    int t200 = rows/(log2(200)*1.39);
-    int t = (rows*100)/139;
+    Index t200 = rows/(log2(200)*1.39);
+    Index t = (rows*100)/139;

     // FIXME reserve nnz non zeros
     // FIXME implement fast sort algorithms for very small nnz
@@ -90,9 +90,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
     if(true)
     {
       if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
-      for(int k=0; k<nnz; ++k)
+      for(Index k=0; k<nnz; ++k)
       {
-        int i = indices[k];
+        Index i = indices[k];
         res.insertBackByOuterInner(j,i) = values[i];
         mask[i] = false;
       }
@@ -100,7 +100,7 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
     else
     {
       // dense path
-      for(int i=0; i<rows; ++i)
+      for(Index i=0; i<rows; ++i)
       {
         if(mask[i])
         {
@@ -121,9 +121,9 @@ static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& r
 namespace internal {

 template<typename Lhs, typename Rhs, typename ResultType,
-  int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
-  int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
-  int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+  int LhsStorageOrder = (traits<Lhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+  int RhsStorageOrder = (traits<Rhs>::Flags&RowMajorBit) ? RowMajor : ColMajor,
+  int ResStorageOrder = (traits<ResultType>::Flags&RowMajorBit) ? RowMajor : ColMajor>
 struct conservative_sparse_sparse_product_selector;

 template<typename Lhs, typename Rhs, typename ResultType>
@@ -134,8 +134,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
     ColMajorMatrix resCol(lhs.rows(),rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
     // sort the non zeros:
@@ -149,7 +149,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
     RowMajorMatrix rhsRow = rhs;
     RowMajorMatrix resRow(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
@@ -162,7 +162,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
     RowMajorMatrix lhsRow = lhs;
     RowMajorMatrix resRow(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
@@ -175,7 +175,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
     RowMajorMatrix resRow(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
     res = resRow;
@@ -190,7 +190,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,C
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
     ColMajorMatrix resCol(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
     res = resCol;
@@ -202,7 +202,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,C
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
     ColMajorMatrix lhsCol = lhs;
     ColMajorMatrix resCol(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
@@ -215,7 +215,7 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,R
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
     ColMajorMatrix rhsCol = rhs;
     ColMajorMatrix resCol(lhs.rows(), rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
@@ -228,8 +228,8 @@ struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,R
 {
   static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
   {
-    typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
-    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,RowMajor,typename ResultType::Index> RowMajorMatrix;
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> ColMajorMatrix;
     RowMajorMatrix resRow(lhs.rows(),rhs.cols());
     internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
     // sort the non zeros:
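Note: the selector above now carries the result's Index type into its temporaries and maps the storage-order flags to proper RowMajor/ColMajor values; conversions are inserted so the kernel always sees matching orders. From user code this remains a plain operator*. A small usage sketch, assuming an Eigen 3.2-era SparseCore:

    #include <Eigen/SparseCore>

    int main()
    {
      typedef Eigen::SparseMatrix<double, Eigen::ColMajor> SpColMajor;
      typedef Eigen::SparseMatrix<double, Eigen::RowMajor> SpRowMajor;

      SpColMajor A(3,3), B(3,3);
      A.insert(0,0) = 1.0; A.insert(2,1) = 2.0;
      B.insert(1,2) = 3.0; B.insert(0,0) = 4.0;
      A.makeCompressed(); B.makeCompressed();

      // Mixed storage orders are fine: the selector adds the conversions.
      SpRowMajor C = A * B;
      return C.nonZeros() > 0 ? 0 : 1;
    }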
diff --git a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
index 93cd4832dea..ab1a266a905 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -50,6 +50,8 @@ class MappedSparseMatrix
     inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
     inline Index innerSize() const { return m_innerSize; }
     inline Index outerSize() const { return m_outerSize; }
+
+    bool isCompressed() const { return true; }

     //----------------------------------------
     // direct access interface
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
index eefd8070251..16a20a5744e 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -12,51 +12,37 @@
 namespace Eigen {

-namespace internal {
-template<typename MatrixType, int Size>
-struct traits<SparseInnerVectorSet<MatrixType, Size> >
+template<typename XprType, int BlockRows, int BlockCols>
+class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
 {
-  typedef typename traits<MatrixType>::Scalar Scalar;
-  typedef typename traits<MatrixType>::Index Index;
-  typedef typename traits<MatrixType>::StorageKind StorageKind;
-  typedef MatrixXpr XprKind;
-  enum {
-    IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
-    Flags = MatrixType::Flags,
-    RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
-    ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
-    MaxRowsAtCompileTime = RowsAtCompileTime,
-    MaxColsAtCompileTime = ColsAtCompileTime,
-    CoeffReadCost = MatrixType::CoeffReadCost
-  };
-};
-} // end namespace internal
-
-template<typename MatrixType, int Size>
-class SparseInnerVectorSet : internal::no_assignment_operator,
-  public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
-{
-  public:
-
-    enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
-    class InnerIterator: public MatrixType::InnerIterator
+    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+    class InnerIterator: public XprType::InnerIterator
     {
+        typedef typename BlockImpl::Index Index;
       public:
-        inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : XprType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
       protected:
        Index m_outer;
    };
-    class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+    class ReverseInnerIterator: public XprType::ReverseInnerIterator
     {
+        typedef typename BlockImpl::Index Index;
      public:
-        inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : XprType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
         inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
         inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -64,39 +50,24 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
        Index m_outer;
    };

-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
-      : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
-    {
-      eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
-    }
-
-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
-      : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
-    {
-      eigen_assert(Size!=Dynamic);
-      eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
-    }
-
-//    template<typename OtherDerived>
-//    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//    {
-//      return *this;
-//    }
+    inline BlockImpl(const XprType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}

-//    template<typename Sparse>
-//    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//    {
-//      return *this;
-//    }
+    inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}

     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

   protected:
-    const typename MatrixType::Nested m_matrix;
+    typename XprType::Nested m_matrix;
     Index m_outerStart;
-    const internal::variable_if_dynamic<Index, Size> m_outerSize;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
 };


@@ -104,32 +75,36 @@ class SparseInnerVectorSet : internal::no_assignment_operator,
 * specialisation for SparseMatrix
 ***************************************************************************/

-template<typename _Scalar, int _Options, typename _Index, int Size>
-class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
-  : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+template<typename _Scalar, int _Options, typename _Index, int BlockRows, int BlockCols>
+class BlockImpl<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true,Sparse>
+  : public SparseMatrixBase<Block<SparseMatrix<_Scalar, _Options, _Index>,BlockRows,BlockCols,true> >
 {
-    typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
-  public:
-
-    enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
-
-    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
-    class InnerIterator: public MatrixType::InnerIterator
+    typedef SparseMatrix<_Scalar, _Options, _Index> SparseMatrixType;
+    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
+    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
+public:
+    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+protected:
+    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
+public:
+
+    class InnerIterator: public SparseMatrixType::InnerIterator
     {
      public:
-        inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline InnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
      protected:
        Index m_outer;
    };
-    class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+    class ReverseInnerIterator: public SparseMatrixType::ReverseInnerIterator
     {
      public:
-        inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
-          : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+        inline ReverseInnerIterator(const BlockType& xpr, Index outer)
+          : SparseMatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
         {}
        inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
        inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
@@ -137,23 +112,18 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
        Index m_outer;
    };

-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
-      : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
-    {
-      eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
-    }
+    inline BlockImpl(const SparseMatrixType& xpr, int i)
+      : m_matrix(xpr), m_outerStart(i), m_outerSize(OuterSize)
+    {}

-    inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
-      : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
-    {
-      eigen_assert(Size==1);
-      eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
-    }
+    inline BlockImpl(const SparseMatrixType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+      : m_matrix(xpr), m_outerStart(IsRowMajor ? startRow : startCol), m_outerSize(IsRowMajor ? blockRows : blockCols)
+    {}

     template<typename OtherDerived>
-    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
     {
-      typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
       _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);;
       // This assignement is slow if this vector set is not empty
       // and/or it is not at the end of the nonzeros of the underlying matrix.
@@ -163,70 +133,69 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
       // 2 - let's check whether there is enough allocated memory
       Index nnz = tmp.nonZeros();
-      Index nnz_previous = nonZeros();
-      Index free_size  = Index(matrix.data().allocatedSize()) + nnz_previous;
-      Index nnz_head = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart];
-      Index tail = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];
-      Index nnz_tail = matrix.nonZeros() - tail;
-
-      if(nnz>free_size)
+      Index start = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
+      Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending posiiton of the current block
+      Index block_size = end - start;                                         // available room in the current block
+      Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
+
+      Index free_size = m_matrix.isCompressed()
+                      ? Index(matrix.data().allocatedSize()) + block_size
+                      : block_size;
+
+      if(nnz>free_size)
       {
         // realloc manually to reduce copies
-        typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

-        std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
-        std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+        std::memcpy(&newdata.value(0), &m_matrix.data().value(0), start*sizeof(Scalar));
+        std::memcpy(&newdata.index(0), &m_matrix.data().index(0), start*sizeof(Index));

-        std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
-        std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+        std::memcpy(&newdata.value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&newdata.index(start), &tmp.data().index(0), nnz*sizeof(Index));

-        std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
-        std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+        std::memcpy(&newdata.value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memcpy(&newdata.index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

         matrix.data().swap(newdata);
       }
       else
       {
         // no need to realloc, simply copy the tail at its respective position and insert tmp
-        matrix.data().resize(nnz_head + nnz + nnz_tail);
-
-        if(nnz<nnz_previous)
-        {
-          std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
-          std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
-        }
-        else
-        {
-          for(Index i=nnz_tail-1; i>=0; --i)
-          {
-            matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
-            matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
-          }
-        }
-
-        std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
-        std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+        matrix.data().resize(start + nnz + tail_size);
+
+        std::memmove(&matrix.data().value(start+nnz), &matrix.data().value(end), tail_size*sizeof(Scalar));
+        std::memmove(&matrix.data().index(start+nnz), &matrix.data().index(end), tail_size*sizeof(Index));
+
+        std::memcpy(&matrix.data().value(start), &tmp.data().value(0), nnz*sizeof(Scalar));
+        std::memcpy(&matrix.data().index(start), &tmp.data().index(0), nnz*sizeof(Index));
       }
+
+      // update innerNonZeros
+      if(!m_matrix.isCompressed())
+        for(Index j=0; j<m_outerSize.value(); ++j)
+          matrix.innerNonZeroPtr()[m_outerStart+j] = tmp.innerVector(j).nonZeros();

       // update outer index pointers
-      Index p = nnz_head;
+      Index p = start;
       for(Index k=0; k<m_outerSize.value(); ++k)
       {
         matrix.outerIndexPtr()[m_outerStart+k] = p;
         p += tmp.innerVector(k).nonZeros();
       }
-      std::ptrdiff_t offset = nnz - nnz_previous;
+      std::ptrdiff_t offset = nnz - block_size;
       for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
       {
         matrix.outerIndexPtr()[k] += offset;
       }

-      return *this;
+      return derived();
     }

-    inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+    inline BlockType& operator=(const BlockType& other)
     {
-      return operator=<SparseInnerVectorSet>(other);
+      return operator=<BlockType>(other);
     }

     inline const Scalar* valuePtr() const
@@ -252,12 +221,12 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
       else if(m_outerSize.value()==0)
         return 0;
       else
-        return Map<const Matrix<Index,Size,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+        return Map<const Matrix<Index,OuterSize,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
     }

     const Scalar& lastCoeff() const
     {
-      EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+      EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockImpl);
       eigen_assert(nonZeros()>0);
       if(m_matrix.isCompressed())
         return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
@@ -265,122 +234,175 @@ class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
         return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
     }

-//    template<typename Sparse>
-//    inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
-//    {
-//      return *this;
-//    }
-
     EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
     EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

   protected:
-    typename MatrixType::Nested m_matrix;
+    typename SparseMatrixType::Nested m_matrix;
     Index m_outerStart;
-    const internal::variable_if_dynamic<Index, Size> m_outerSize;
+    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
 };

//----------

-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
-{
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
-{
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVector(i);
-}
-
 /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
   * is col-major (resp. row-major).
   */
 template<typename Derived>
-SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return InnerVectorReturnType(derived(), outer); }

 /** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
   * is col-major (resp. row-major). Read-only.
   */
 template<typename Derived>
-const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
-{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return ConstInnerVectorReturnType(derived(), outer); }

-/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major).
+  */
 template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
+Block<Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
 {
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVectors(start, size);
+  return Block<Derived,Dynamic,Dynamic,true>(derived(),
+                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
 }

-/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
-  * (read-only version) */
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+  * is col-major (resp. row-major). Read-only.
+  */
 template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size) const
+const Block<const Derived,Dynamic,Dynamic,true> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
 {
-  EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
-  return innerVectors(start, size);
+  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
+                                                   IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
+                                                   IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
+
 }

-/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size)
+/** Generic implementation of sparse Block expression.
+  * Real-only.
+  */
+template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
+class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
+  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
 {
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVectors(start, size);
-}
+  typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
+  typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
+public:
+  enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
+  EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
+
+  /** Column or Row constructor
+    */
+  inline BlockImpl(const XprType& xpr, int i)
+    : m_matrix(xpr),
+      m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? i : 0),
+      m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? i : 0),
+      m_blockRows(xpr.rows()),
+      m_blockCols(xpr.cols())
+  {}
+
+  /** Dynamic-size constructor
+    */
+  inline BlockImpl(const XprType& xpr, int startRow, int startCol, int blockRows, int blockCols)
+    : m_matrix(xpr), m_startRow(startRow), m_startCol(startCol), m_blockRows(blockRows), m_blockCols(blockCols)
+  {}
+
+  inline int rows() const { return m_blockRows.value(); }
+  inline int cols() const { return m_blockCols.value(); }
+
+  inline Scalar& coeffRef(int row, int col)
+  {
+    return m_matrix.const_cast_derived()
+           .coeffRef(row + m_startRow.value(), col + m_startCol.value());
+  }

-/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
-  * (read-only version) */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size) const
-{
-  EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
-  return innerVectors(start, size);
-}
+  inline const Scalar coeff(int row, int col) const
+  {
+    return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
+  }
+  inline Scalar& coeffRef(int index)
+  {
+    return m_matrix.const_cast_derived()
+           .coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                     m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+  }
+  inline const Scalar coeff(int index) const
+  {
+    return m_matrix
+           .coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
+                  m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
+  }
+
+  inline const _MatrixTypeNested& nestedExpression() const { return m_matrix; }
+
+  class InnerIterator : public _MatrixTypeNested::InnerIterator
+  {
+      typedef typename _MatrixTypeNested::InnerIterator Base;
+      const BlockType& m_block;
+      Index m_end;
+    public:
+
+      EIGEN_STRONG_INLINE InnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_end(IsRowMajor ? block.m_startCol.value()+block.m_blockCols.value() : block.m_startRow.value()+block.m_blockRows.value())
+      {
+        while( (Base::operator bool()) && (Base::index() < (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value())) )
+          Base::operator++();
+      }

-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major).
-  */
-template<typename Derived>
-SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row() - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col() - m_block.m_startCol.value(); }
+
+      inline operator bool() const { return Base::operator bool() && Base::index() < m_end; }
+  };
+  class ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator
+  {
+      typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+      const BlockType& m_block;
+      Index m_begin;
+    public:
+
+      EIGEN_STRONG_INLINE ReverseInnerIterator(const BlockType& block, Index outer)
+        : Base(block.derived().nestedExpression(), outer + (IsRowMajor ? block.m_startRow.value() : block.m_startCol.value())),
+          m_block(block),
+          m_begin(IsRowMajor ? block.m_startCol.value() : block.m_startRow.value())
+      {
+        while( (Base::operator bool()) && (Base::index() >= (IsRowMajor ? m_block.m_startCol.value()+block.m_blockCols.value() : m_block.m_startRow.value()+block.m_blockRows.value())) )
+          Base::operator--();
+      }

-/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
-  * is col-major (resp. row-major). Read-only.
-  */
-template<typename Derived>
-const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
-{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+      inline Index index()  const { return Base::index() - (IsRowMajor ? m_block.m_startCol.value() : m_block.m_startRow.value()); }
+      inline Index outer()  const { return Base::outer() - (IsRowMajor ? m_block.m_startRow.value() : m_block.m_startCol.value()); }
+      inline Index row()    const { return Base::row() - m_block.m_startRow.value(); }
+      inline Index col()    const { return Base::col() - m_block.m_startCol.value(); }
+
+      inline operator bool() const { return Base::operator bool() && Base::index() >= m_begin; }
+  };
+  protected:
+    friend class InnerIterator;
+    friend class ReverseInnerIterator;
+
+    EIGEN_INHERIT_ASSIGNMENT_OPERATORS(BlockImpl)
+
+    typename XprType::Nested m_matrix;
+    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
+    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
+    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
+    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
+
+};

 } // end namespace Eigen
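Note: the SparseInnerVectorSet expression is gone; sparse inner panels are now ordinary Block expressions, with a specialized, writable implementation for SparseMatrix. A short sketch of the user-visible API under these semantics (assumes the Eigen 3.2-era block methods on SparseMatrixBase):

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(5,5), B(5,5);   // column-major by default
      A.insert(0,1) = 1.0; A.insert(3,2) = 2.0;
      B.insert(1,0) = 4.0;
      A.makeCompressed(); B.makeCompressed();

      // Reading through the block API; innerVector(j) is column j here.
      double s = A.innerVector(1).sum()
               + A.innerVectors(1,2).sum()
               + A.middleCols(1,2).sum();

      // Writing a whole inner vector of a SparseMatrix goes through the
      // specialized BlockImpl::operator= above (cheap when the block sits
      // at the end of the nonzero array).
      A.col(4) = B.col(0);

      return s > 0 ? 0 : 1;
    }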
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h b/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h
new file mode 100644
index 00000000000..f8745f46100
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseColEtree.h
@@ -0,0 +1,206 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+
+/*
+
+ * NOTE: This file is the modified version of sp_coletree.c file in SuperLU
+
+ * -- SuperLU routine (version 3.1) --
+ * Univ. of California Berkeley, Xerox Palo Alto Research Center,
+ * and Lawrence Berkeley National Lab.
+ * August 1, 2008
+ *
+ * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
+ *
+ * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY
+ * EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
+ *
+ * Permission is hereby granted to use or copy this program for any
+ * purpose, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is
+ * granted, provided the above notices are retained, and a notice that
+ * the code was modified is included with the above copyright notice.
+ */
+#ifndef SPARSE_COLETREE_H
+#define SPARSE_COLETREE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** Find the root of the tree/set containing the vertex i : Use Path halving */
+template<typename Index, typename IndexVector>
+Index etree_find (Index i, IndexVector& pp)
+{
+  Index p = pp(i); // Parent
+  Index gp = pp(p); // Grand parent
+  while (gp != p)
+  {
+    pp(i) = gp; // Parent pointer on find path is changed to former grand parent
+    i = gp;
+    p = pp(i);
+    gp = pp(p);
+  }
+  return p;
+}
+
+/** Compute the column elimination tree of a sparse matrix
+  * \param mat The matrix in column-major format.
+  * \param parent The elimination tree
+  * \param firstRowElt The column index of the first element in each row
+  * \param perm The permutation to apply to the column of \b mat
+  */
+template <typename MatrixType, typename IndexVector>
+int coletree(const MatrixType& mat, IndexVector& parent, IndexVector& firstRowElt, typename MatrixType::Index *perm=0)
+{
+  typedef typename MatrixType::Index Index;
+  Index nc = mat.cols(); // Number of columns
+  Index m = mat.rows();
+  Index diagSize = (std::min)(nc,m);
+  IndexVector root(nc); // root of subtree of etree
+  root.setZero();
+  IndexVector pp(nc); // disjoint sets
+  pp.setZero(); // Initialize disjoint sets
+  parent.resize(mat.cols());
+  //Compute first nonzero column in each row
+  Index row,col;
+  firstRowElt.resize(m);
+  firstRowElt.setConstant(nc);
+  firstRowElt.segment(0, diagSize).setLinSpaced(diagSize, 0, diagSize-1);
+  bool found_diag;
+  for (col = 0; col < nc; col++)
+  {
+    Index pcol = col;
+    if(perm) pcol = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it; ++it)
+    {
+      row = it.row();
+      firstRowElt(row) = (std::min)(firstRowElt(row), col);
+    }
+  }
+  /* Compute etree by Liu's algorithm for symmetric matrices,
+  except use (firstRowElt[r],c) in place of an edge (r,c) of A.
+  Thus each row clique in A'*A is replaced by a star
+  centered at its first vertex, which has the same fill. */
+  Index rset, cset, rroot;
+  for (col = 0; col < nc; col++)
+  {
+    found_diag = col>=m;
+    pp(col) = col;
+    cset = col;
+    root(cset) = col;
+    parent(col) = nc;
+    /* The diagonal element is treated here even if it does not exist in the matrix
+     * hence the loop is executed once more */
+    Index pcol = col;
+    if(perm) pcol = perm[col];
+    for (typename MatrixType::InnerIterator it(mat, pcol); it||!found_diag; ++it)
+    { // A sequence of interleaved find and union is performed
+      Index i = col;
+      if(it) i = it.index();
+      if (i == col) found_diag = true;
+
+      row = firstRowElt(i);
+      if (row >= col) continue;
+      rset = internal::etree_find(row, pp); // Find the name of the set containing row
+      rroot = root(rset);
+      if (rroot != col)
+      {
+        parent(rroot) = col;
+        pp(cset) = rset;
+        cset = rset;
+        root(cset) = col;
+      }
+    }
+  }
+  return 0;
+}
+
+/**
+ * Depth-first search from vertex n. No recursion.
+ * This routine was contributed by Cédric Doucet, CEDRAT Group, Meylan, France.
+*/
+template <typename Index, typename IndexVector>
+void nr_etdfs (Index n, IndexVector& parent, IndexVector& first_kid, IndexVector& next_kid, IndexVector& post, Index postnum)
+{
+  Index current = n, first, next;
+  while (postnum != n)
+  {
+    // No kid for the current node
+    first = first_kid(current);
+
+    // no kid for the current node
+    if (first == -1)
+    {
+      // Numbering this node because it has no kid
+      post(current) = postnum++;
+
+      // looking for the next kid
+      next = next_kid(current);
+      while (next == -1)
+      {
+        // No more kids : back to the parent node
+        current = parent(current);
+        // numbering the parent node
+        post(current) = postnum++;
+
+        // Get the next kid
+        next = next_kid(current);
+      }
+      // stopping criterion
+      if (postnum == n+1) return;
+
+      // Updating current node
+      current = next;
+    }
+    else
+    {
+      current = first;
+    }
+  }
+}
+
+
+/**
+ * \brief Post order a tree
+ * \param n the number of nodes
+ * \param parent Input tree
+ * \param post postordered tree
+ */
+template <typename Index, typename IndexVector>
+void treePostorder(Index n, IndexVector& parent, IndexVector& post)
+{
+  IndexVector first_kid, next_kid; // Linked list of children
+  Index postnum;
+  // Allocate storage for working arrays and results
+  first_kid.resize(n+1);
+  next_kid.setZero(n+1);
+  post.setZero(n+1);
+
+  // Set up structure describing children
+  Index v, dad;
+  first_kid.setConstant(-1);
+  for (v = n-1; v >= 0; v--)
+  {
+    dad = parent(v);
+    next_kid(v) = first_kid(dad);
+    first_kid(dad) = v;
+  }
+
+  // Depth-first search from dummy root vertex #n
+  postnum = 0;
+  internal::nr_etdfs(n, parent, first_kid, next_kid, post, postnum);
+}
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // SPARSE_COLETREE_H
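Note: coletree() builds the column elimination tree of A'A with a path-halving union-find, and treePostorder() renumbers it depth-first without recursion. A hedged sketch of driving these helpers directly; they live in Eigen::internal and are subject to change, and this assumes the header is reachable through <Eigen/SparseCore> as in this tree:

    #include <Eigen/SparseCore>
    #include <iostream>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;
      typedef Eigen::Matrix<SpMat::Index, Eigen::Dynamic, 1> IndexVector;

      SpMat A(4,4);
      A.insert(0,0) = 1; A.insert(1,1) = 1; A.insert(0,2) = 1;
      A.insert(2,2) = 1; A.insert(1,3) = 1; A.insert(3,3) = 1;
      A.makeCompressed();

      IndexVector parent, firstRowElt, post;
      Eigen::internal::coletree(A, parent, firstRowElt);  // etree of A'A; resizes parent itself
      Eigen::internal::treePostorder(SpMat::Index(A.cols()), parent, post);

      std::cout << "parent: " << parent.transpose()
                << "\npost:   " << post.transpose() << "\n";
      return 0;
    }

SparseLU and SparseQR use exactly this pair to predict fill-in before factorizing.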
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
index d5f97f78fc9..ec86ca933c2 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -73,7 +73,7 @@ class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
     typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
       BinaryOp,Lhs,Rhs, InnerIterator> Base;

-    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
+    EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
       : Base(binOp.derived(),outer)
     {}
 };
@@ -300,7 +300,7 @@ template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
 {
-  return *this = derived() - other.derived();
+  return derived() = derived() - other.derived();
 }

 template<typename Derived>
@@ -308,7 +308,7 @@ template<typename OtherDerived>
 EIGEN_STRONG_INLINE Derived &
 SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
 {
-  return *this = derived() + other.derived();
+  return derived() = derived() + other.derived();
 }

 template<typename Derived>
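Note: returning `derived() = ...` instead of `*this = ...` makes operator+=/-= assign through, and return, the most-derived type; the sum expression is still evaluated into a temporary and then assigned. Minimal usage sketch:

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(3,3), B(3,3);
      A.insert(0,0) = 1.0;
      B.insert(0,0) = 2.0; B.insert(2,1) = 3.0;
      A.makeCompressed(); B.makeCompressed();

      A += B;   // evaluates derived() = derived() + other.derived()
      A -= B;   // same mechanism for subtraction
      return A.coeff(0,0) == 1.0 ? 0 : 1;
    }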
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
index 6f32940d6c1..54fd633a10c 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -39,7 +39,7 @@ struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
 {
   typedef Sparse StorageKind;
   typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
-                                        typename traits<Rhs>::Scalar>::ReturnType Scalar;
+                                         typename traits<Rhs>::Scalar>::ReturnType Scalar;
   typedef typename Lhs::Index Index;
   typedef typename Lhs::Nested LhsNested;
   typedef typename Rhs::Nested RhsNested;
@@ -111,6 +111,7 @@ template<typename Lhs, typename Rhs, bool Transpose>
 class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
 {
     typedef typename _LhsNested::InnerIterator Base;
+    typedef typename SparseDenseOuterProduct::Index Index;
   public:
     EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
       : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
@@ -124,7 +125,7 @@ class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNes
     inline Scalar value() const { return Base::value() * m_factor; }

   protected:
-    int m_outer;
+    Index m_outer;
     Scalar m_factor;
 };

@@ -150,11 +151,11 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::Index Index;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
-      int n = lhs.outerSize();
+      Index n = lhs.outerSize();
       for(Index j=0; j<n; ++j)
       {
         typename Res::Scalar tmp(0);
@@ -174,7 +175,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index c=0; c<rhs.cols(); ++c)
     {
@@ -196,7 +197,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, R
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
  {
    for(Index j=0; j<lhs.outerSize(); ++j)
    {
@@ -215,7 +216,7 @@ struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, C
   typedef typename internal::remove_all<DenseResType>::type Res;
   typedef typename Lhs::InnerIterator LhsInnerIterator;
   typedef typename Lhs::Index Index;
-  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+  static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const typename Res::Scalar& alpha)
   {
     for(Index j=0; j<lhs.outerSize(); ++j)
     {
@@ -244,7 +245,7 @@ class SparseTimeDenseProduct
     SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}

-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
     }
@@ -274,7 +275,7 @@ class DenseTimeSparseProduct
     DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
     {}

-    template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
     {
       Transpose<const _LhsNested> lhs_t(m_lhs);
       Transpose<const _RhsNested> rhs_t(m_rhs);
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
index 095bf6863fc..1bb590e64d4 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -78,7 +78,11 @@ class SparseDiagonalProduct
     EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)

     typedef internal::sparse_diagonal_product_inner_iterator_selector
-                   <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+                      <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+    // We do not want ReverseInnerIterator for diagonal-sparse products,
+    // but this dummy declaration is needed to make diag * sparse * diag compile.
+    class ReverseInnerIterator;

     EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
       : m_lhs(lhs), m_rhs(rhs)
@@ -118,19 +122,23 @@ class sparse_diagonal_product_inner_iterator_selector
   <Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
   : public CwiseBinaryOp<
       scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator
 {
     typedef typename CwiseBinaryOp<
       scalar_product_op<typename Lhs::Scalar>,
-      SparseInnerVectorSet<Rhs,1>,
-      typename Lhs::DiagonalVectorType>::InnerIterator Base;
+      const typename Rhs::ConstInnerVectorReturnType,
+      const typename Lhs::DiagonalVectorType>::InnerIterator Base;
    typedef typename Lhs::Index Index;
+    Index m_outer;
   public:
     inline sparse_diagonal_product_inner_iterator_selector(
               const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0)
+      : Base(expr.rhs().innerVector(outer) .cwiseProduct(expr.lhs().diagonal()), 0), m_outer(outer)
    {}
+
+    inline Index outer() const { return m_outer; }
+    inline Index col() const { return m_outer; }
 };

 template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
@@ -152,19 +160,23 @@ class sparse_diagonal_product_inner_iterator_selector
   <Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
   : public CwiseBinaryOp<
       scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
 {
     typedef typename CwiseBinaryOp<
      scalar_product_op<typename Rhs::Scalar>,
-      SparseInnerVectorSet<Lhs,1>,
-      Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+      const typename Lhs::ConstInnerVectorReturnType,
+      const Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
    typedef typename Lhs::Index Index;
+    Index m_outer;
   public:
     inline sparse_diagonal_product_inner_iterator_selector(
               const SparseDiagonalProductType& expr, Index outer)
-      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+      : Base(expr.lhs().innerVector(outer) .cwiseProduct(expr.rhs().diagonal().transpose()), 0), m_outer(outer)
    {}
+
+    inline Index outer() const { return m_outer; }
+    inline Index row() const { return m_outer; }
 };

 } // end namespace internal
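Note: the dummy ReverseInnerIterator declaration above exists only so nested diagonal products type-check; the iterators also learned outer()/row()/col() so they compose with other sparse expressions. Sketch of the expression this unblocks:

    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> A(3,3);
      A.insert(0,0) = 1.0; A.insert(1,2) = 2.0;
      A.makeCompressed();

      Eigen::VectorXd d(3); d << 1.0, 2.0, 3.0;

      // diag * sparse * diag: scales row i by d(i) and column j by d(j).
      Eigen::SparseMatrix<double> B = d.asDiagonal() * A * d.asDiagonal();
      return B.nonZeros() == 2 ? 0 : 1;
    }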
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
index 5c4a593dc01..db39c9aecc7 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
@@ -30,7 +30,7 @@ SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
   Scalar res(0);
   while (i)
   {
-    res += internal::conj(i.value()) * other.coeff(i.index());
+    res += numext::conj(i.value()) * other.coeff(i.index());
     ++i;
   }
   return res;
@@ -54,8 +54,8 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
   typedef typename internal::remove_all<Nested>::type NestedCleaned;
   typedef typename internal::remove_all<OtherNested>::type OtherNestedCleaned;

-  const Nested nthis(derived());
-  const OtherNested nother(other.derived());
+  Nested nthis(derived());
+  OtherNested nother(other.derived());

   typename NestedCleaned::InnerIterator i(nthis,0);
   typename OtherNestedCleaned::InnerIterator j(nother,0);
@@ -64,7 +64,7 @@ SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) cons
   {
     if (i.index()==j.index())
     {
-      res += internal::conj(i.value()) * j.value();
+      res += numext::conj(i.value()) * j.value();
       ++i; ++j;
     }
     else if (i.index()<j.index())
@@ -79,16 +79,23 @@ template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 SparseMatrixBase<Derived>::squaredNorm() const
 {
-  return internal::real((*this).cwiseAbs2().sum());
+  return numext::real((*this).cwiseAbs2().sum());
 }

 template<typename Derived>
 inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
 SparseMatrixBase<Derived>::norm() const
 {
-  return internal::sqrt(squaredNorm());
+  using std::sqrt;
+  return sqrt(squaredNorm());
 }

+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::blueNorm() const
+{
+  return internal::blueNorm_impl(*this);
+}
 } // end namespace Eigen

 #endif // EIGEN_SPARSE_DOT_H
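Note: dot() now conjugates through numext::conj, norm() uses the std::sqrt/ADL idiom, and blueNorm() (an overflow/underflow-safe norm after Blue's algorithm) becomes available on sparse expressions. Sketch:

    #include <Eigen/SparseCore>
    #include <complex>

    int main()
    {
      Eigen::SparseVector<std::complex<double> > v(4), w(4);
      v.insert(1) = std::complex<double>(1.0, -2.0);
      w.insert(1) = std::complex<double>(0.5,  1.0);

      std::complex<double> d = v.dot(w);  // conj(v) . w, via numext::conj above
      double n  = v.norm();               // sqrt(squaredNorm())
      double bn = v.blueNorm();           // scaling-safe norm, new for sparse here
      return (n > 0 && bn > 0 && std::abs(d) > 0) ? 0 : 1;
    }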
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
index efb774f031b..01ce0dcfee3 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -31,7 +31,7 @@ namespace Eigen {
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
-  *                 is RowMajor. The default is 0 which means column-major.
+  *                 is ColMajor or RowMajor. The default is 0 which means column-major.
  * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
@@ -170,6 +170,8 @@ class SparseMatrix
      * This function returns Scalar(0) if the element is an explicit \em zero */
    inline Scalar coeff(Index row, Index col) const
    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
@@ -186,6 +188,8 @@ class SparseMatrix
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
@@ -213,11 +217,13 @@ class SparseMatrix
      * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
      *
      */
-    EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+    Scalar& insert(Index row, Index col)
    {
+      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
      if(isCompressed())
      {
-        reserve(VectorXi::Constant(outerSize(), 2));
+        reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
@@ -281,12 +287,12 @@ class SparseMatrix
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
-
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
-        m_innerNonZeros = new Index[m_outerSize];
+        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerSizes to hold the new starting points.
        Index* newOuterIndex = m_innerNonZeros;
@@ -299,11 +305,11 @@ class SparseMatrix
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
-        std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
-        for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        Index previousOuterIndex = m_outerIndex[m_outerSize];
+        for(Index j=m_outerSize-1; j>=0; --j)
        {
-          ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
-          for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -318,25 +324,27 @@ class SparseMatrix
      }
      else
      {
-        Index* newOuterIndex = new Index[m_outerSize+1];
+        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+        if (!newOuterIndex) internal::throw_std_bad_alloc();
+
        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
-          Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
-        for(ptrdiff_t j=m_outerSize-1; j>=0; --j)
+        for(Index j=m_outerSize-1; j>=0; --j)
        {
-          std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
-            std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
-            for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+            Index innerNNZ = m_innerNonZeros[j];
+            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
@@ -345,7 +353,7 @@ class SparseMatrix
        }

        std::swap(m_outerIndex, newOuterIndex);
-        delete[] newOuterIndex;
+        std::free(newOuterIndex);
      }
    }
@@ -394,7 +402,7 @@ class SparseMatrix
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
-      eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }
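Note: insert() now reserves room with the matrix's own Index type rather than a hard-coded VectorXi, and random access is range-checked by the new asserts. Typical build workflow under these semantics:

    #include <Eigen/SparseCore>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;  // signed Index type, int here
      SpMat A(1000, 1000);

      // Per-inner-vector reservation through the reserveInnerVectors path above.
      A.reserve(Eigen::VectorXi::Constant(A.outerSize(), 3));

      for (int j = 0; j < A.outerSize(); ++j)
        A.insert(j, j) = 1.0;   // indices checked by the new eigen_assert
      A.makeCompressed();
      return A.isCompressed() ? 0 : 1;
    }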
+439,7 @@ class SparseMatrix /** \internal * same as insert(Index,Index) except that the indices are given relative to the storage order */ - EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i) + Scalar& insertByOuterInner(Index j, Index i) { return insert(IsRowMajor ? j : i, IsRowMajor ? i : j); } @@ -448,7 +456,7 @@ class SparseMatrix for(Index j=1; j<m_outerSize; ++j) { Index nextOldStart = m_outerIndex[j+1]; - std::ptrdiff_t offset = oldStart - m_outerIndex[j]; + Index offset = oldStart - m_outerIndex[j]; if(offset>0) { for(Index k=0; k<m_innerNonZeros[j]; ++k) @@ -460,14 +468,26 @@ class SparseMatrix m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j]; oldStart = nextOldStart; } - delete[] m_innerNonZeros; + std::free(m_innerNonZeros); m_innerNonZeros = 0; m_data.resize(m_outerIndex[m_outerSize]); m_data.squeeze(); } + /** Turns the matrix into the uncompressed mode */ + void uncompress() + { + if(m_innerNonZeros != 0) + return; + m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index))); + for (Index i = 0; i < m_outerSize; i++) + { + m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; + } + } + /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */ - void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) { prune(default_prunning_func(reference,epsilon)); } @@ -506,6 +526,70 @@ class SparseMatrix m_data.resize(k,0); } + /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched. + * \sa resizeNonZeros(Index), reserve(), setZero() + */ + void conservativeResize(Index rows, Index cols) + { + // No change + if (this->rows() == rows && this->cols() == cols) return; + + // If one dimension is null, then there is nothing to be preserved + if(rows==0 || cols==0) return resize(rows,cols); + + Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows(); + Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols(); + Index newInnerSize = IsRowMajor ?
cols : rows; + + // Deals with inner non zeros + if (m_innerNonZeros) + { + // Resize m_innerNonZeros + Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index))); + if (!newInnerNonZeros) internal::throw_std_bad_alloc(); + m_innerNonZeros = newInnerNonZeros; + + for(Index i=m_outerSize; i<m_outerSize+outerChange; i++) + m_innerNonZeros[i] = 0; + } + else if (innerChange < 0) + { + // Inner size decreased: allocate a new m_innerNonZeros + m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index))); + if (!m_innerNonZeros) internal::throw_std_bad_alloc(); + for(Index i = 0; i < m_outerSize; i++) + m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i]; + } + + // Change the m_innerNonZeros in case of a decrease of inner size + if (m_innerNonZeros && innerChange < 0) + { + for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++) + { + Index &n = m_innerNonZeros[i]; + Index start = m_outerIndex[i]; + while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n; + } + } + + m_innerSize = newInnerSize; + + // Re-allocate outer index structure if necessary + if (outerChange == 0) + return; + + Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index))); + if (!newOuterIndex) internal::throw_std_bad_alloc(); + m_outerIndex = newOuterIndex; + if (outerChange > 0) + { + Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize]; + for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++) + m_outerIndex[i] = last; + } + m_outerSize += outerChange; + } + /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero. * \sa resizeNonZeros(Index), reserve(), setZero() */ @@ -516,13 +600,15 @@ class SparseMatrix m_data.clear(); if (m_outerSize != outerSize || m_outerSize==0) { - delete[] m_outerIndex; - m_outerIndex = new Index [outerSize+1]; + std::free(m_outerIndex); + m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index))); + if (!m_outerIndex) internal::throw_std_bad_alloc(); + m_outerSize = outerSize; } if(m_innerNonZeros) { - delete[] m_innerNonZeros; + std::free(m_innerNonZeros); m_innerNonZeros = 0; } memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index)); @@ -560,9 +646,20 @@ class SparseMatrix inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other) : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) { + EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) check_template_parameters(); *this = other.derived(); } + + /** Constructs a sparse matrix from the sparse selfadjoint view \a other */ + template<typename OtherDerived, unsigned int UpLo> + inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other) + : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + *this = other; + } /** Copy constructor (it performs a deep copy) */ inline SparseMatrix(const SparseMatrix& other) @@ -572,6 +669,16 @@ class SparseMatrix *this = other.derived(); } + /** \brief Copy constructor with in-place evaluation */ + template<typename OtherDerived> + SparseMatrix(const ReturnByValue<OtherDerived>& other) + : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0) + { + check_template_parameters(); + initAssignment(other); + 
other.evalTo(*this); + } + /** Swaps the content of two sparse matrices of the same type. * This is a fast operation that simply swaps the underlying pointers and parameters. */ inline void swap(SparseMatrix& other) @@ -584,13 +691,22 @@ class SparseMatrix m_data.swap(other.m_data); } + /** Sets *this to the identity matrix */ + inline void setIdentity() + { + eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES"); + this->m_data.resize(rows()); + Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1); + Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes(); + Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows()); + } inline SparseMatrix& operator=(const SparseMatrix& other) { if (other.isRValue()) { swap(other.const_cast_derived()); } - else + else if(this!=&other) { initAssignment(other); if(other.isCompressed()) @@ -613,7 +729,10 @@ class SparseMatrix template<typename OtherDerived> inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other) - { return Base::operator=(other.derived()); } + { + initAssignment(other); + return Base::operator=(other.derived()); + } template<typename OtherDerived> inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other) @@ -621,58 +740,7 @@ class SparseMatrix #endif template<typename OtherDerived> - EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other) - { - initAssignment(other.derived()); - const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); - if (needToTranspose) - { - // two passes algorithm: - // 1 - compute the number of coeffs per dest inner vector - // 2 - do the actual copy/eval - // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed - typedef typename internal::nested<OtherDerived,2>::type OtherCopy; - typedef typename internal::remove_all<OtherCopy>::type _OtherCopy; - OtherCopy otherCopy(other.derived()); - - Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero(); - // pass 1 - // FIXME the above copy could be merged with that pass - for (Index j=0; j<otherCopy.outerSize(); ++j) - for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it) - ++m_outerIndex[it.index()]; - - // prefix sum - Index count = 0; - VectorXi positions(outerSize()); - for (Index j=0; j<outerSize(); ++j) - { - Index tmp = m_outerIndex[j]; - m_outerIndex[j] = count; - positions[j] = count; - count += tmp; - } - m_outerIndex[outerSize()] = count; - // alloc - m_data.resize(count); - // pass 2 - for (Index j=0; j<otherCopy.outerSize(); ++j) - { - for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it) - { - Index pos = positions[it.index()]++; - m_data.index(pos) = j; - m_data.value(pos) = it.value(); - } - } - return *this; - } - else - { - // there is no special optimization - return Base::operator=(other.derived()); - } - } + EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other); friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m) { @@ -684,8 +752,8 @@ class SparseMatrix else for (Index i=0; i<m.outerSize(); ++i) { - int p = m.m_outerIndex[i]; - int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i]; + Index p = m.m_outerIndex[i]; + Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i]; Index k=p; for (; k<pe; ++k) s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") "; @@ -714,8 +782,8 @@ class SparseMatrix /** Destructor */ inline 
~SparseMatrix() { - delete[] m_outerIndex; - delete[] m_innerNonZeros; + std::free(m_outerIndex); + std::free(m_innerNonZeros); } #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -735,118 +803,14 @@ protected: resize(other.rows(), other.cols()); if(m_innerNonZeros) { - delete[] m_innerNonZeros; + std::free(m_innerNonZeros); m_innerNonZeros = 0; } } /** \internal * \sa insert(Index,Index) */ - EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col) - { - eigen_assert(isCompressed()); - - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; - - Index previousOuter = outer; - if (m_outerIndex[outer+1]==0) - { - // we start a new inner vector - while (previousOuter>=0 && m_outerIndex[previousOuter]==0) - { - m_outerIndex[previousOuter] = static_cast<Index>(m_data.size()); - --previousOuter; - } - m_outerIndex[outer+1] = m_outerIndex[outer]; - } - - // here we have to handle the tricky case where the outerIndex array - // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., - // the 2nd inner vector... - bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) - && (size_t(m_outerIndex[outer+1]) == m_data.size()); - - size_t startId = m_outerIndex[outer]; - // FIXME let's make sure sizeof(long int) == sizeof(size_t) - size_t p = m_outerIndex[outer+1]; - ++m_outerIndex[outer+1]; - - float reallocRatio = 1; - if (m_data.allocatedSize()<=m_data.size()) - { - // if there is no preallocated memory, let's reserve a minimum of 32 elements - if (m_data.size()==0) - { - m_data.reserve(32); - } - else - { - // we need to reallocate the data, to reduce multiple reallocations - // we use a smart resize algorithm based on the current filling ratio - // in addition, we use float to avoid integers overflows - float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); - reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); - // furthermore we bound the realloc ratio to: - // 1) reduce multiple minor realloc when the matrix is almost filled - // 2) avoid to allocate too much memory when the matrix is almost empty - reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); - } - } - m_data.resize(m_data.size()+1,reallocRatio); - - if (!isLastVec) - { - if (previousOuter==-1) - { - // oops wrong guess. 
- // let's correct the outer offsets - for (Index k=0; k<=(outer+1); ++k) - m_outerIndex[k] = 0; - Index k=outer+1; - while(m_outerIndex[k]==0) - m_outerIndex[k++] = 1; - while (k<=m_outerSize && m_outerIndex[k]!=0) - m_outerIndex[k++]++; - p = 0; - --k; - k = m_outerIndex[k]-1; - while (k>0) - { - m_data.index(k) = m_data.index(k-1); - m_data.value(k) = m_data.value(k-1); - k--; - } - } - else - { - // we are not inserting into the last inner vec - // update outer indices: - Index j = outer+2; - while (j<=m_outerSize && m_outerIndex[j]!=0) - m_outerIndex[j++]++; - --j; - // shift data of last vecs: - Index k = m_outerIndex[j]-1; - while (k>=Index(p)) - { - m_data.index(k) = m_data.index(k-1); - m_data.value(k) = m_data.value(k-1); - k--; - } - } - } - - while ( (p > startId) && (m_data.index(p-1) > inner) ) - { - m_data.index(p) = m_data.index(p-1); - m_data.value(p) = m_data.value(p-1); - --p; - } - - m_data.index(p) = inner; - return (m_data.value(p) = 0); - } + EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col); /** \internal * A vector object that is equal to 0 everywhere but v at the position i */ @@ -865,40 +829,12 @@ protected: /** \internal * \sa insert(Index,Index) */ - EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col) - { - eigen_assert(!isCompressed()); - - const Index outer = IsRowMajor ? row : col; - const Index inner = IsRowMajor ? col : row; - - std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer]; - std::ptrdiff_t innerNNZ = m_innerNonZeros[outer]; - if(innerNNZ>=room) - { - // this inner vector is full, we need to reallocate the whole buffer :( - reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ))); - } - - Index startId = m_outerIndex[outer]; - Index p = startId + m_innerNonZeros[outer]; - while ( (p > startId) && (m_data.index(p-1) > inner) ) - { - m_data.index(p) = m_data.index(p-1); - m_data.value(p) = m_data.value(p-1); - --p; - } - - m_innerNonZeros[outer]++; - - m_data.index(p) = inner; - return (m_data.value(p) = 0); - } + EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col); public: /** \internal * \sa insert(Index,Index) */ - inline Scalar& insertBackUncompressed(Index row, Index col) + EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col) { const Index outer = IsRowMajor ? row : col; const Index inner = IsRowMajor ? 
col : row; @@ -906,8 +842,7 @@ public: eigen_assert(!isCompressed()); eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer])); - Index p = m_outerIndex[outer] + m_innerNonZeros[outer]; - m_innerNonZeros[outer]++; + Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++; m_data.index(p) = inner; return (m_data.value(p) = 0); } @@ -916,10 +851,11 @@ private: static void check_template_parameters() { EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE); + EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS); } struct default_prunning_func { - default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {} + default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {} inline bool operator() (const Index&, const Index&, const Scalar& value) const { return !internal::isMuchSmallerThan(value, reference, epsilon); @@ -1006,19 +942,25 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa typedef typename SparseMatrixType::Index Index; SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols()); - // pass 1: count the nnz per inner-vector - VectorXi wi(trMat.outerSize()); - wi.setZero(); - for(InputIterator it(begin); it!=end; ++it) - wi(IsRowMajor ? it->col() : it->row())++; + if(begin!=end) + { + // pass 1: count the nnz per inner-vector + Matrix<Index,Dynamic,1> wi(trMat.outerSize()); + wi.setZero(); + for(InputIterator it(begin); it!=end; ++it) + { + eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols()); + wi(IsRowMajor ? it->col() : it->row())++; + } - // pass 2: insert all the elements into trMat - trMat.reserve(wi); - for(InputIterator it(begin); it!=end; ++it) - trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); + // pass 2: insert all the elements into trMat + trMat.reserve(wi); + for(InputIterator it(begin); it!=end; ++it) + trMat.insertBackUncompressed(it->row(),it->col()) = it->value(); - // pass 3: - trMat.sumupDuplicates(); + // pass 3: + trMat.sumupDuplicates(); + } // pass 4: transposed copy -> implicit sorting mat = trMat; @@ -1027,7 +969,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa } -/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \b. +/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end. * * A \em triplet is a tuple (i,j,value) defining a non-zero element. * The input list of triplets does not have to be sorted, and can contain duplicated elements.
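For reference, the three-pass set_from_triplets() kernel above backs the public setFromTriplets() API. A minimal usage sketch (the 3x3 size, the values, and the variable names are illustrative assumptions only):

  #include <Eigen/Sparse>
  #include <vector>

  std::vector<Eigen::Triplet<double> > triplets;
  triplets.push_back(Eigen::Triplet<double>(0,0,1.0));
  triplets.push_back(Eigen::Triplet<double>(0,0,2.0)); // duplicate coordinates: summed up by pass 3
  triplets.push_back(Eigen::Triplet<double>(2,1,3.0)); // unsorted input is fine
  Eigen::SparseMatrix<double> A(3,3);
  A.setFromTriplets(triplets.begin(), triplets.end()); // A.coeff(0,0)==3.0, A.coeff(2,1)==3.0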
@@ -1077,11 +1019,11 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates() { eigen_assert(!isCompressed()); // TODO, in practice we should be able to use m_innerNonZeros for that task - VectorXi wi(innerSize()); + Matrix<Index,Dynamic,1> wi(innerSize()); wi.fill(-1); Index count = 0; // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers - for(int j=0; j<outerSize(); ++j) + for(Index j=0; j<outerSize(); ++j) { Index start = count; Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j]; @@ -1106,11 +1048,212 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates() m_outerIndex[m_outerSize] = count; // turn the matrix into compressed form - delete[] m_innerNonZeros; + std::free(m_innerNonZeros); m_innerNonZeros = 0; m_data.resize(m_outerIndex[m_outerSize]); } +template<typename Scalar, int _Options, typename _Index> +template<typename OtherDerived> +EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other) +{ + EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value), + YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY) + + const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit); + if (needToTranspose) + { + // two passes algorithm: + // 1 - compute the number of coeffs per dest inner vector + // 2 - do the actual copy/eval + // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed + typedef typename internal::nested<OtherDerived,2>::type OtherCopy; + typedef typename internal::remove_all<OtherCopy>::type _OtherCopy; + OtherCopy otherCopy(other.derived()); + + SparseMatrix dest(other.rows(),other.cols()); + Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero(); + + // pass 1 + // FIXME the above copy could be merged with that pass + for (Index j=0; j<otherCopy.outerSize(); ++j) + for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it) + ++dest.m_outerIndex[it.index()]; + + // prefix sum + Index count = 0; + Matrix<Index,Dynamic,1> positions(dest.outerSize()); + for (Index j=0; j<dest.outerSize(); ++j) + { + Index tmp = dest.m_outerIndex[j]; + dest.m_outerIndex[j] = count; + positions[j] = count; + count += tmp; + } + dest.m_outerIndex[dest.outerSize()] = count; + // alloc + dest.m_data.resize(count); + // pass 2 + for (Index j=0; j<otherCopy.outerSize(); ++j) + { + for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it) + { + Index pos = positions[it.index()]++; + dest.m_data.index(pos) = j; + dest.m_data.value(pos) = it.value(); + } + } + this->swap(dest); + return *this; + } + else + { + if(other.isRValue()) + initAssignment(other.derived()); + // there is no special optimization + return Base::operator=(other.derived()); + } +} + +template<typename _Scalar, int _Options, typename _Index> +EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col) +{ + eigen_assert(!isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? 
col : row; + + Index room = m_outerIndex[outer+1] - m_outerIndex[outer]; + Index innerNNZ = m_innerNonZeros[outer]; + if(innerNNZ>=room) + { + // this inner vector is full, we need to reallocate the whole buffer :( + reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ))); + } + + Index startId = m_outerIndex[outer]; + Index p = startId + m_innerNonZeros[outer]; + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end"); + + m_innerNonZeros[outer]++; + + m_data.index(p) = inner; + return (m_data.value(p) = 0); +} + +template<typename _Scalar, int _Options, typename _Index> +EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col) +{ + eigen_assert(isCompressed()); + + const Index outer = IsRowMajor ? row : col; + const Index inner = IsRowMajor ? col : row; + + Index previousOuter = outer; + if (m_outerIndex[outer+1]==0) + { + // we start a new inner vector + while (previousOuter>=0 && m_outerIndex[previousOuter]==0) + { + m_outerIndex[previousOuter] = static_cast<Index>(m_data.size()); + --previousOuter; + } + m_outerIndex[outer+1] = m_outerIndex[outer]; + } + + // here we have to handle the tricky case where the outerIndex array + // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g., + // the 2nd inner vector... + bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0)) + && (size_t(m_outerIndex[outer+1]) == m_data.size()); + + size_t startId = m_outerIndex[outer]; + // FIXME let's make sure sizeof(long int) == sizeof(size_t) + size_t p = m_outerIndex[outer+1]; + ++m_outerIndex[outer+1]; + + float reallocRatio = 1; + if (m_data.allocatedSize()<=m_data.size()) + { + // if there is no preallocated memory, let's reserve a minimum of 32 elements + if (m_data.size()==0) + { + m_data.reserve(32); + } + else + { + // we need to reallocate the data, to reduce multiple reallocations + // we use a smart resize algorithm based on the current filling ratio + // in addition, we use float to avoid integers overflows + float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1); + reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size()); + // furthermore we bound the realloc ratio to: + // 1) reduce multiple minor realloc when the matrix is almost filled + // 2) avoid to allocate too much memory when the matrix is almost empty + reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f); + } + } + m_data.resize(m_data.size()+1,reallocRatio); + + if (!isLastVec) + { + if (previousOuter==-1) + { + // oops wrong guess. 
+ // let's correct the outer offsets + for (Index k=0; k<=(outer+1); ++k) + m_outerIndex[k] = 0; + Index k=outer+1; + while(m_outerIndex[k]==0) + m_outerIndex[k++] = 1; + while (k<=m_outerSize && m_outerIndex[k]!=0) + m_outerIndex[k++]++; + p = 0; + --k; + k = m_outerIndex[k]-1; + while (k>0) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + else + { + // we are not inserting into the last inner vec + // update outer indices: + Index j = outer+2; + while (j<=m_outerSize && m_outerIndex[j]!=0) + m_outerIndex[j++]++; + --j; + // shift data of last vecs: + Index k = m_outerIndex[j]-1; + while (k>=Index(p)) + { + m_data.index(k) = m_data.index(k-1); + m_data.value(k) = m_data.value(k-1); + k--; + } + } + } + + while ( (p > startId) && (m_data.index(p-1) > inner) ) + { + m_data.index(p) = m_data.index(p-1); + m_data.value(p) = m_data.value(p-1); + --p; + } + + m_data.index(p) = inner; + return (m_data.value(p) = 0); +} + } // end namespace Eigen #endif // EIGEN_SPARSEMATRIX_H diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h index 9a1258097fe..bbcf7fb1c62 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h @@ -89,6 +89,9 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> */ IsRowMajor = Flags&RowMajorBit ? 1 : 0, + + InnerSizeAtCompileTime = int(IsVectorAtCompileTime) ? int(SizeAtCompileTime) + : int(IsRowMajor) ? int(ColsAtCompileTime) : int(RowsAtCompileTime), #ifndef EIGEN_PARSED_BY_DOXYGEN _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC @@ -102,7 +105,7 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> >::type AdjointReturnType; - typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject; + typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor, Index> PlainObject; #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -136,13 +139,13 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> # include "../plugins/CommonCwiseBinaryOps.h" # include "../plugins/MatrixCwiseUnaryOps.h" # include "../plugins/MatrixCwiseBinaryOps.h" +# include "../plugins/BlockMethods.h" # ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN # include EIGEN_SPARSEMATRIXBASE_PLUGIN # endif # undef EIGEN_CURRENT_STORAGE_BASE_CLASS #undef EIGEN_CURRENT_STORAGE_BASE_CLASS - /** \returns the number of rows. \sa cols() */ inline Index rows() const { return derived().rows(); } /** \returns the number of columns. 
\sa rows() */ @@ -299,8 +302,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> } else { - SparseMatrix<Scalar, RowMajorBit> trans = m; - s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans); + SparseMatrix<Scalar, RowMajorBit, Index> trans = m; + s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit, Index> >&>(trans); } } return s; @@ -322,8 +325,8 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> typename internal::traits<OtherDerived>::Scalar \ >::ReturnType \ >, \ - Derived, \ - OtherDerived \ + const Derived, \ + const OtherDerived \ > template<typename OtherDerived> @@ -387,55 +390,45 @@ template<typename Derived> class SparseMatrixBase : public EigenBase<Derived> template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const; RealScalar squaredNorm() const; RealScalar norm() const; + RealScalar blueNorm() const; Transpose<Derived> transpose() { return derived(); } const Transpose<const Derived> transpose() const { return derived(); } const AdjointReturnType adjoint() const { return transpose(); } - // sub-vector - SparseInnerVectorSet<Derived,1> row(Index i); - const SparseInnerVectorSet<Derived,1> row(Index i) const; - SparseInnerVectorSet<Derived,1> col(Index j); - const SparseInnerVectorSet<Derived,1> col(Index j) const; - SparseInnerVectorSet<Derived,1> innerVector(Index outer); - const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const; - - // set of sub-vectors - SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size); - const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const; - SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size); - const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const; - - SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size); - const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const; - SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size); - const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const; - SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize); - const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const; - - /** \internal use operator= */ - template<typename DenseDerived> - void evalTo(MatrixBase<DenseDerived>& dst) const - { - dst.setZero(); - for (Index j=0; j<outerSize(); ++j) - for (typename Derived::InnerIterator i(derived(),j); i; ++i) - dst.coeffRef(i.row(),i.col()) = i.value(); - } + // inner-vector + typedef Block<Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> InnerVectorReturnType; + typedef Block<const Derived,IsRowMajor?1:Dynamic,IsRowMajor?Dynamic:1,true> ConstInnerVectorReturnType; + InnerVectorReturnType innerVector(Index outer); + const ConstInnerVectorReturnType innerVector(Index outer) const; - Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const - { - return derived(); - } + // set of inner-vectors + Block<Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize); + const Block<const Derived,Dynamic,Dynamic,true> innerVectors(Index outerStart, Index outerSize) const; + + /** \internal use operator= */ + template<typename DenseDerived> + void evalTo(MatrixBase<DenseDerived>& dst) const + { + dst.setZero(); + for (Index j=0; j<outerSize(); ++j) + for (typename 
Derived::InnerIterator i(derived(),j); i; ++i) + dst.coeffRef(i.row(),i.col()) = i.value(); + } + + Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const + { + return derived(); + } template<typename OtherDerived> bool isApprox(const SparseMatrixBase<OtherDerived>& other, - RealScalar prec = NumTraits<Scalar>::dummy_precision()) const + const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const { return toDense().isApprox(other.toDense(),prec); } template<typename OtherDerived> bool isApprox(const MatrixBase<OtherDerived>& other, - RealScalar prec = NumTraits<Scalar>::dummy_precision()) const + const RealScalar& prec = NumTraits<Scalar>::dummy_precision()) const { return toDense().isApprox(other,prec); } /** \returns the matrix or vector obtained by evaluating this expression. diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h index b897b7595b5..b85be93f6f9 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h @@ -57,7 +57,7 @@ struct permut_sparsematrix_product_retval if(MoveOuter) { SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols()); - VectorXi sizes(m_matrix.outerSize()); + Matrix<Index,Dynamic,1> sizes(m_matrix.outerSize()); for(Index j=0; j<m_matrix.outerSize(); ++j) { Index jp = m_permutation.indices().coeff(j); @@ -77,7 +77,7 @@ struct permut_sparsematrix_product_retval else { SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols()); - VectorXi sizes(tmp.outerSize()); + Matrix<Index,Dynamic,1> sizes(tmp.outerSize()); sizes.setZero(); PermutationMatrix<Dynamic,Dynamic,Index> perm; if((Side==OnTheLeft) ^ Transposed) diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h index 6a555b83434..cf766307008 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h @@ -16,6 +16,7 @@ template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType { typedef typename internal::traits<Lhs>::Scalar Scalar; + typedef typename internal::traits<Lhs>::Index Index; enum { LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit, RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit, @@ -24,11 +25,11 @@ struct SparseSparseProductReturnType }; typedef typename internal::conditional<TransposeLhs, - SparseMatrix<Scalar,0>, + SparseMatrix<Scalar,0,Index>, typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested; typedef typename internal::conditional<TransposeRhs, - SparseMatrix<Scalar,0>, + SparseMatrix<Scalar,0,Index>, typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested; typedef SparseSparseProduct<LhsNested, RhsNested> Type; @@ -99,15 +100,16 @@ class SparseSparseProduct : internal::no_assignment_operator, } template<typename Lhs, typename Rhs> - EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance) + EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, const RealScalar& tolerance) : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false) { init(); } - SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const + SparseSparseProduct pruned(const Scalar& reference = 0, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) const 
{ - return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon); + using std::abs; + return SparseSparseProduct(m_lhs,m_rhs,abs(reference)*epsilon); } template<typename Dest> diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h index 86ec0a6c5e2..0eda96bc471 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h @@ -69,6 +69,30 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView const _MatrixTypeNested& matrix() const { return m_matrix; } _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); } + /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs. + * + * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. + * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. + */ + template<typename OtherDerived> + SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived> + operator*(const SparseMatrixBase<OtherDerived>& rhs) const + { + return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived()); + } + + /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs. + * + * Note that there is no algorithmic advantage of performing such a product compared to a general sparse-sparse matrix product. + * Indeed, the SparseSelfadjointView operand is first copied into a temporary SparseMatrix before computing the product. + */ + template<typename OtherDerived> friend + SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject > + operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs) + { + return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs); + } + /** Efficient sparse self-adjoint matrix times dense vector/matrix product */ template<typename OtherDerived> SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo> @@ -94,7 +118,7 @@ template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView * call this function with u.adjoint(). 
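 * For instance, a minimal sketch (assuming a square SparseMatrix<double> m and a
 * compatible sparse vector u; both names are placeholders):
 * \code
 * m.selfadjointView<Lower>().rankUpdate(u, 2.0); // m += 2*u*u.adjoint() on the stored triangle
 * \endcode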
*/ template<typename DerivedU> - SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1)); + SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1)); /** \internal triggered by sparse_matrix = SparseSelfadjointView; */ template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const @@ -173,7 +197,7 @@ SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView( template<typename MatrixType, unsigned int UpLo> template<typename DerivedU> SparseSelfAdjointView<MatrixType,UpLo>& -SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha) +SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha) { SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint(); if(alpha==Scalar(0)) @@ -207,12 +231,12 @@ class SparseSelfAdjointTimeDenseProduct SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const + template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const { + EIGEN_ONLY_USED_FOR_DEBUG(alpha); // TODO use alpha eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry"); typedef typename internal::remove_all<Lhs>::type _Lhs; - typedef typename internal::remove_all<Rhs>::type _Rhs; typedef typename _Lhs::InnerIterator LhsInnerIterator; enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit, @@ -240,7 +264,7 @@ class SparseSelfAdjointTimeDenseProduct Index b = LhsIsRowMajor ? i.index() : j; typename Lhs::Scalar v = i.value(); dest.row(a) += (v) * m_rhs.row(b); - dest.row(b) += internal::conj(v) * m_rhs.row(a); + dest.row(b) += numext::conj(v) * m_rhs.row(a); } if (ProcessFirstHalf && i && (i.index()==j)) dest.row(j) += i.value() * m_rhs.row(j); @@ -268,7 +292,7 @@ class DenseTimeSparseSelfAdjointProduct DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {} - template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const + template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const { // TODO } @@ -367,7 +391,7 @@ void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename Matri dest.valuePtr()[k] = it.value(); k = count[ip]++; dest.innerIndexPtr()[k] = jp; - dest.valuePtr()[k] = internal::conj(it.value()); + dest.valuePtr()[k] = numext::conj(it.value()); } } } @@ -428,7 +452,7 @@ void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixTyp if(!StorageOrderMatch) std::swap(ip,jp); if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp))) - dest.valuePtr()[k] = conj(it.value()); + dest.valuePtr()[k] = numext::conj(it.value()); else dest.valuePtr()[k] = it.value(); } @@ -461,7 +485,10 @@ class SparseSymmetricPermutationProduct template<typename DestScalar, int Options, typename DstIndex> void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const { - internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data()); +// internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data()); + SparseMatrix<DestScalar,(Options&RowMajor)==RowMajor ? 
ColMajor : RowMajor, DstIndex> tmp; + internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data()); + _dest = tmp; } template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h index 2438ac573d0..fcc18f5c9c8 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h @@ -17,7 +17,7 @@ namespace internal { // perform a pseudo in-place sparse * sparse product assuming all matrices are col major template<typename Lhs, typename Rhs, typename ResultType> -static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance) +static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, const typename ResultType::RealScalar& tolerance) { // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res); @@ -27,7 +27,7 @@ static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& r // make sure to call innerSize/outerSize since we fake the storage order. Index rows = lhs.innerSize(); Index cols = rhs.outerSize(); - //int size = lhs.outerSize(); + //Index size = lhs.outerSize(); eigen_assert(lhs.outerSize() == rhs.innerSize()); // allocate a temporary buffer @@ -85,7 +85,7 @@ struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,C typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar; typedef typename ResultType::RealScalar RealScalar; - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { typename remove_all<ResultType>::type _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance); @@ -97,10 +97,10 @@ template<typename Lhs, typename Rhs, typename ResultType> struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor> { typedef typename ResultType::RealScalar RealScalar; - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // we need a col-major matrix to hold the result - typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType; + typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename ResultType::Index> SparseTemporaryType; SparseTemporaryType _res(res.rows(), res.cols()); internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance); res = _res; @@ -111,7 +111,7 @@ template<typename Lhs, typename Rhs, typename ResultType> struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor> { typedef typename ResultType::RealScalar RealScalar; - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { // let's transpose the product to get a column x column product typename remove_all<ResultType>::type _res(res.rows(), res.cols()); @@ -124,12 +124,13 @@ template<typename Lhs, typename Rhs, typename ResultType> struct 
sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor> { typedef typename ResultType::RealScalar RealScalar; - static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance) + static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, const RealScalar& tolerance) { - typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix; - ColMajorMatrix colLhs(lhs); - ColMajorMatrix colRhs(rhs); - internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance); + typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixLhs; + typedef SparseMatrix<typename ResultType::Scalar,ColMajor,typename Lhs::Index> ColMajorMatrixRhs; + ColMajorMatrixLhs colLhs(lhs); + ColMajorMatrixRhs colRhs(rhs); + internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrixLhs,ColMajorMatrixRhs,ResultType>(colLhs, colRhs, res, tolerance); // let's transpose the product to get a column x column product // typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType; diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h index 273f9de688f..7c300ee8dbc 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h @@ -18,7 +18,7 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse> typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested; public: - EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>) + EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType> ) class InnerIterator; class ReverseInnerIterator; @@ -34,26 +34,28 @@ template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerItera : public _MatrixTypeNested::InnerIterator { typedef typename _MatrixTypeNested::InnerIterator Base; + typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer) : Base(trans.derived().nestedExpression(), outer) {} - inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); } - inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); } + Index row() const { return Base::col(); } + Index col() const { return Base::row(); } }; template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator : public _MatrixTypeNested::ReverseInnerIterator { typedef typename _MatrixTypeNested::ReverseInnerIterator Base; + typedef typename TransposeImpl::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer) : Base(xpr.derived().nestedExpression(), outer) {} - inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); } - inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); } + Index row() const { return Base::col(); } + Index col() const { return Base::row(); } }; } // end namespace Eigen diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h index 477e4bd94b0..333127b78e8 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h @@ -2,6 +2,7 @@ // for linear algebra. 
// // Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr> +// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed @@ -27,6 +28,7 @@ template<typename MatrixType, int Mode> class SparseTriangularView enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit)) || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)), SkipLast = !SkipFirst, + SkipDiag = (Mode&ZeroDiag) ? 1 : 0, HasUnitDiag = (Mode&UnitDiag) ? 1 : 0 }; @@ -64,6 +66,7 @@ template<typename MatrixType, int Mode> class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator { typedef typename MatrixTypeNestedCleaned::InnerIterator Base; + typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer) @@ -71,7 +74,7 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe { if(SkipFirst) { - while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer)) + while((*this) && ((HasUnitDiag||SkipDiag) ? this->index()<=outer : this->index()<outer)) Base::operator++(); if(HasUnitDiag) m_returnOne = true; @@ -101,8 +104,8 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe return *this; } - inline Index row() const { return Base::row(); } - inline Index col() const { return Base::col(); } + inline Index row() const { return (MatrixType::Flags&RowMajorBit ? Base::outer() : this->index()); } + inline Index col() const { return (MatrixType::Flags&RowMajorBit ? this->index() : Base::outer()); } inline Index index() const { if(HasUnitDiag && m_returnOne) return Base::outer(); @@ -118,7 +121,12 @@ class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNe { if(HasUnitDiag && m_returnOne) return true; - return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer())); + if(SkipFirst) return Base::operator bool(); + else + { + if (SkipDiag) return (Base::operator bool() && this->index() < this->outer()); + else return (Base::operator bool() && this->index() <= this->outer()); + } } protected: bool m_returnOne; @@ -128,18 +136,20 @@ template<typename MatrixType, int Mode> class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator { typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base; + typedef typename SparseTriangularView::Index Index; public: EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer) : Base(view.nestedExpression(), outer) { eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not support yet triangular views with a unit diagonal"); - if(SkipLast) - while((*this) && this->index()>outer) + if(SkipLast) { + while((*this) && (SkipDiag ? this->index()>=outer : this->index()>outer)) --(*this); + } } - EIGEN_STRONG_INLINE InnerIterator& operator--() + EIGEN_STRONG_INLINE ReverseInnerIterator& operator--() { Base::operator--(); return *this; } inline Index row() const { return Base::row(); } @@ -147,7 +157,12 @@ class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public Matri EIGEN_STRONG_INLINE operator bool() const { - return SkipLast ? 
Base::operator bool() : (Base::operator bool() && this->index() >= this->outer()); + if (SkipLast) return Base::operator bool() ; + else + { + if(SkipDiag) return (Base::operator bool() && this->index() > this->outer()); + else return (Base::operator bool() && this->index() >= this->outer()); + } } }; diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h index 6062a086ff7..05023858b16 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h @@ -73,7 +73,6 @@ template<typename _Scalar, int _Flags = 0, typename _Index = int> class Dynamic template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector; template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix; -template<typename MatrixType, int Size> class SparseInnerVectorSet; template<typename MatrixType, int Mode> class SparseTriangularView; template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView; template<typename Lhs, typename Rhs> class SparseDiagonalProduct; @@ -99,23 +98,24 @@ template<typename T> struct eval<T,Sparse> template<typename T,int Cols> struct sparse_eval<T,1,Cols> { typedef typename traits<T>::Scalar _Scalar; - enum { _Flags = traits<T>::Flags| RowMajorBit }; + typedef typename traits<T>::Index _Index; public: - typedef SparseVector<_Scalar, _Flags> type; + typedef SparseVector<_Scalar, RowMajor, _Index> type; }; template<typename T,int Rows> struct sparse_eval<T,Rows,1> { typedef typename traits<T>::Scalar _Scalar; - enum { _Flags = traits<T>::Flags & (~RowMajorBit) }; + typedef typename traits<T>::Index _Index; public: - typedef SparseVector<_Scalar, _Flags> type; + typedef SparseVector<_Scalar, ColMajor, _Index> type; }; template<typename T,int Rows,int Cols> struct sparse_eval { typedef typename traits<T>::Scalar _Scalar; - enum { _Flags = traits<T>::Flags }; + typedef typename traits<T>::Index _Index; + enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Flags> type; + typedef SparseMatrix<_Scalar, _Options, _Index> type; }; template<typename T> struct sparse_eval<T,1,1> { @@ -127,12 +127,10 @@ template<typename T> struct sparse_eval<T,1,1> { template<typename T> struct plain_matrix_type<T,Sparse> { typedef typename traits<T>::Scalar _Scalar; - enum { - _Flags = traits<T>::Flags - }; - + typedef typename traits<T>::Index _Index; + enum { _Options = ((traits<T>::Flags&RowMajorBit)==RowMajorBit) ? 
RowMajor : ColMajor }; public: - typedef SparseMatrix<_Scalar, _Flags> type; + typedef SparseMatrix<_Scalar, _Options, _Index> type; }; } // end namespace internal @@ -145,7 +143,7 @@ template<typename T> struct plain_matrix_type<T,Sparse> * * \sa SparseMatrix::setFromTriplets() */ -template<typename Scalar, typename Index=unsigned int> +template<typename Scalar, typename Index=typename SparseMatrix<Scalar>::Index > class Triplet { public: diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h index c952f654038..7e15c814b6f 100644 --- a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h +++ b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h @@ -45,35 +45,40 @@ struct traits<SparseVector<_Scalar, _Options, _Index> > SupportedAccessPatterns = InnerRandomAccessPattern }; }; + +// Sparse-Vector-Assignment kinds: +enum { + SVA_RuntimeSwitch, + SVA_Inner, + SVA_Outer +}; + +template< typename Dest, typename Src, + int AssignmentKind = !bool(Src::IsVectorAtCompileTime) ? SVA_RuntimeSwitch + : Src::InnerSizeAtCompileTime==1 ? SVA_Outer + : SVA_Inner> +struct sparse_vector_assign_selector; + } template<typename _Scalar, int _Options, typename _Index> class SparseVector : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> > { + typedef SparseMatrixBase<SparseVector> SparseBase; + public: EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=) EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=) - - protected: - public: - - typedef SparseMatrixBase<SparseVector> SparseBase; + + typedef internal::CompressedStorage<Scalar,Index> Storage; enum { IsColVector = internal::traits<SparseVector>::IsColVector }; enum { Options = _Options }; - - internal::CompressedStorage<Scalar,Index> m_data; - Index m_size; - - internal::CompressedStorage<Scalar,Index>& _data() { return m_data; } - internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; } - - public: - + EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; } EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; } EIGEN_STRONG_INLINE Index innerSize() const { return m_size; } @@ -84,17 +89,26 @@ class SparseVector EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); } EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); } + + /** \internal */ + inline Storage& data() { return m_data; } + /** \internal */ + inline const Storage& data() const { return m_data; } inline Scalar coeff(Index row, Index col) const { - eigen_assert((IsColVector ? col : row)==0); + eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); return coeff(IsColVector ? row : col); } - inline Scalar coeff(Index i) const { return m_data.at(i); } + inline Scalar coeff(Index i) const + { + eigen_assert(i>=0 && i<m_size); + return m_data.at(i); + } inline Scalar& coeffRef(Index row, Index col) { - eigen_assert((IsColVector ? col : row)==0); + eigen_assert(IsColVector ? (col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); return coeff(IsColVector ? row : col); } @@ -106,6 +120,7 @@ class SparseVector */ inline Scalar& coeffRef(Index i) { + eigen_assert(i>=0 && i<m_size); return m_data.atWithInsertion(i); } @@ -139,6 +154,8 @@ class SparseVector inline Scalar& insert(Index row, Index col) { + eigen_assert(IsColVector ? 
(col==0 && row>=0 && row<m_size) : (row==0 && col>=0 && col<m_size)); + Index inner = IsColVector ? row : col; Index outer = IsColVector ? col : row; eigen_assert(outer==0); @@ -146,6 +163,8 @@ class SparseVector } Scalar& insert(Index i) { + eigen_assert(i>=0 && i<m_size); + Index startId = 0; Index p = Index(m_data.size()) - 1; // TODO smart realloc @@ -169,7 +188,7 @@ class SparseVector inline void finalize() {} - void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) + void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision()) { m_data.prune(reference,epsilon); } @@ -188,25 +207,31 @@ class SparseVector void resizeNonZeros(Index size) { m_data.resize(size); } - inline SparseVector() : m_size(0) { resize(0); } + inline SparseVector() : m_size(0) { check_template_parameters(); resize(0); } - inline SparseVector(Index size) : m_size(0) { resize(size); } + inline SparseVector(Index size) : m_size(0) { check_template_parameters(); resize(size); } - inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); } + inline SparseVector(Index rows, Index cols) : m_size(0) { check_template_parameters(); resize(rows,cols); } template<typename OtherDerived> inline SparseVector(const SparseMatrixBase<OtherDerived>& other) : m_size(0) { + check_template_parameters(); *this = other.derived(); } inline SparseVector(const SparseVector& other) - : m_size(0) + : SparseBase(other), m_size(0) { + check_template_parameters(); *this = other.derived(); } + /** Swaps the values of \c *this and \a other. + * Overloaded for performance: this version performs a \em shallow swap by swapping pointers and attributes only. + * \sa SparseMatrixBase::swap() + */ inline void swap(SparseVector& other) { std::swap(m_size, other.m_size); @@ -230,10 +255,10 @@ class SparseVector template<typename OtherDerived> inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other) { - if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime)) - return assign(other.transpose()); - else - return assign(other); + SparseVector tmp(other.size()); + internal::sparse_vector_assign_selector<SparseVector,OtherDerived>::run(tmp,other.derived()); + this->swap(tmp); + return *this; } #ifndef EIGEN_PARSED_BY_DOXYGEN @@ -260,73 +285,63 @@ class SparseVector public: - /** \deprecated use setZero() and reserve() */ + /** \internal \deprecated use setZero() and reserve() */ EIGEN_DEPRECATED void startFill(Index reserve) { setZero(); m_data.reserve(reserve); } - /** \deprecated use insertBack(Index,Index) */ + /** \internal \deprecated use insertBack(Index,Index) */ EIGEN_DEPRECATED Scalar& fill(Index r, Index c) { eigen_assert(r==0 || c==0); return fill(IsColVector ? r : c); } - /** \deprecated use insertBack(Index) */ + /** \internal \deprecated use insertBack(Index) */ EIGEN_DEPRECATED Scalar& fill(Index i) { m_data.append(0, i); return m_data.value(m_data.size()-1); } - /** \deprecated use insert(Index,Index) */ + /** \internal \deprecated use insert(Index,Index) */ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c) { eigen_assert(r==0 || c==0); return fillrand(IsColVector ?
@@ -260,73 +285,63 @@ class SparseVector
 
   public:
 
-    /** \deprecated use setZero() and reserve() */
+    /** \internal \deprecated use setZero() and reserve() */
     EIGEN_DEPRECATED void startFill(Index reserve)
     {
       setZero();
       m_data.reserve(reserve);
     }
 
-    /** \deprecated use insertBack(Index,Index) */
+    /** \internal \deprecated use insertBack(Index,Index) */
     EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
     {
       eigen_assert(r==0 || c==0);
       return fill(IsColVector ? r : c);
     }
 
-    /** \deprecated use insertBack(Index) */
+    /** \internal \deprecated use insertBack(Index) */
     EIGEN_DEPRECATED Scalar& fill(Index i)
     {
       m_data.append(0, i);
       return m_data.value(m_data.size()-1);
     }
 
-    /** \deprecated use insert(Index,Index) */
+    /** \internal \deprecated use insert(Index,Index) */
     EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
    {
       eigen_assert(r==0 || c==0);
       return fillrand(IsColVector ? r : c);
     }
 
-    /** \deprecated use insert(Index) */
+    /** \internal \deprecated use insert(Index) */
     EIGEN_DEPRECATED Scalar& fillrand(Index i)
     {
       return insert(i);
     }
 
-    /** \deprecated use finalize() */
+    /** \internal \deprecated use finalize() */
     EIGEN_DEPRECATED void endFill() {}
 
+    // These two functions were here in the 3.1 release, so let's keep them in case some code rely on them.
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED Storage& _data() { return m_data; }
+    /** \internal \deprecated use data() */
+    EIGEN_DEPRECATED const Storage& _data() const { return m_data; }
+
 #   ifdef EIGEN_SPARSEVECTOR_PLUGIN
 #     include EIGEN_SPARSEVECTOR_PLUGIN
 #   endif
 
   protected:
-    template<typename OtherDerived>
-    EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+
+    static void check_template_parameters()
     {
-      const OtherDerived& other(_other.derived());
-      const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
-      if(needToTranspose)
-      {
-        Index size = other.size();
-        Index nnz = other.nonZeros();
-        resize(size);
-        reserve(nnz);
-        for(Index i=0; i<size; ++i)
-        {
-          typename OtherDerived::InnerIterator it(other, i);
-          if(it)
-            insert(i) = it.value();
-        }
-        return *this;
-      }
-      else
-      {
-        // there is no special optimization
-        return Base::operator=(other);
-      }
+      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+      EIGEN_STATIC_ASSERT((_Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
     }
+
+    Storage m_data;
+    Index m_size;
 };
 
 template<typename Scalar, int _Options, typename _Index>
@@ -393,6 +408,40 @@ class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
     const Index m_start;
 };
 
+namespace internal {
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Inner> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.innerSize()==src.size());
+    for(typename Src::InnerIterator it(src, 0); it; ++it)
+      dst.insert(it.index()) = it.value();
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_Outer> {
+  static void run(Dest& dst, const Src& src) {
+    eigen_internal_assert(src.outerSize()==src.size());
+    for(typename Dest::Index i=0; i<src.size(); ++i)
+    {
+      typename Src::InnerIterator it(src, i);
+      if(it)
+        dst.insert(i) = it.value();
+    }
+  }
+};
+
+template< typename Dest, typename Src>
+struct sparse_vector_assign_selector<Dest,Src,SVA_RuntimeSwitch> {
+  static void run(Dest& dst, const Src& src) {
+    if(src.outerSize()==1)  sparse_vector_assign_selector<Dest,Src,SVA_Inner>::run(dst, src);
+    else                    sparse_vector_assign_selector<Dest,Src,SVA_Outer>::run(dst, src);
+  }
+};
+
+}
+
 } // end namespace Eigen
 
 #endif // EIGEN_SPARSEVECTOR_H
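
These three specializations implement the dispatch declared at the top of the file: sources that are vectors at compile time map to SVA_Inner or SVA_Outer depending on whether their inner size is 1, and generic expressions fall back to a runtime test on outerSize(). A small usage sketch, assuming the default column-major SparseMatrix and the sparse block methods (sizes and values are illustrative):

    #include <Eigen/Sparse>

    int main()
    {
      Eigen::SparseMatrix<double> A(4, 4);   // column-major by default
      A.insert(1, 2) = 3.0;
      A.insert(3, 2) = 7.0;

      // A.col(2) is a vector at compile time with a dynamic inner size,
      // so the assignment resolves to the SVA_Inner specialization.
      Eigen::SparseVector<double> v = A.col(2);

      // A.middleCols(2, 1) is not a vector at compile time; SVA_RuntimeSwitch
      // checks outerSize()==1 at runtime and takes the inner path here too.
      Eigen::SparseVector<double> w = A.middleCols(2, 1);
      return 0;
    }
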
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
index 8b0b9ea0304..fd8450463fe 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
@@ -18,7 +18,7 @@ namespace internal {
 template<typename MatrixType>
 struct traits<SparseView<MatrixType> > : traits<MatrixType>
 {
-  typedef int Index;
+  typedef typename MatrixType::Index Index;
   typedef Sparse StorageKind;
   enum {
     Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
@@ -56,6 +56,7 @@ protected:
 template<typename MatrixType>
 class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
 {
+  typedef typename SparseView::Index Index;
 public:
   typedef typename _MatrixTypeNested::InnerIterator IterBase;
 
   InnerIterator(const SparseView& view, Index outer) :
@@ -88,7 +89,7 @@ private:
 
 template<typename Derived>
 const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& m_reference,
-                                                          typename NumTraits<Scalar>::Real m_epsilon) const
+                                                          const typename NumTraits<Scalar>::Real& m_epsilon) const
 {
   return SparseView<Derived>(derived(), m_reference, m_epsilon);
 }
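
MatrixBase::sparseView() now takes its epsilon by const reference, so existing call sites are unchanged. A brief usage sketch (the matrix values and the explicit reference/epsilon arguments are illustrative):

    #include <Eigen/Dense>
    #include <Eigen/Sparse>

    int main()
    {
      Eigen::MatrixXd D(2, 2);
      D << 1.0, 0.0,
           0.0, 2.0;

      // View the dense matrix as sparse; coefficients matching the reference
      // value (first argument) up to epsilon (second argument) are skipped.
      Eigen::SparseMatrix<double> S = D.sparseView(0.0, 1e-8);
      return 0;
    }
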