git.blender.org/blender.git
Diffstat (limited to 'extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h')
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h | 647
1 file changed, 394 insertions(+), 253 deletions(-)
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
index 2ff2015512f..0a2490bcc3d 100644
--- a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -1,7 +1,7 @@
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
-// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
@@ -32,18 +32,22 @@ namespace Eigen {
* \tparam _Scalar the scalar type, i.e. the type of the coefficients
* \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
* is ColMajor or RowMajor. The default is 0 which means column-major.
- * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ *
+ * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
+ * whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
+ * Code making use of \c SparseMatrix::Index will thus likely have to be changed to use \c SparseMatrix::StorageIndex instead.
*
* This class can be extended with the help of the plugin mechanism described on the page
- * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
*/
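
A minimal sketch (assuming the Eigen 3.3 semantics described above) of the three template parameters and of the Index/StorageIndex distinction; the typedef name SpMat and the sizes are illustrative only:

#include <Eigen/SparseCore>

// _Scalar = double, _Options = ColMajor, _StorageIndex = int
typedef Eigen::SparseMatrix<double, Eigen::ColMajor, int> SpMat;

void storageIndexSketch()
{
  SpMat A(100, 100);
  Eigen::Index n = A.rows();                            // SpMat::Index is now always Eigen::Index
  const SpMat::StorageIndex* outer = A.outerIndexPtr(); // the storage index type (int here)
  (void)n; (void)outer;
}
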
namespace internal {
-template<typename _Scalar, int _Options, typename _Index>
-struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
typedef _Scalar Scalar;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef Sparse StorageKind;
typedef MatrixXpr XprKind;
enum {
@@ -51,22 +55,21 @@ struct traits<SparseMatrix<_Scalar, _Options, _Index> >
ColsAtCompileTime = Dynamic,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = Dynamic,
- Flags = _Options | NestByRefBit | LvalueBit,
- CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
SupportedAccessPatterns = InnerRandomAccessPattern
};
};
-template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
-struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
- typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
- typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
+ typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
typedef _Scalar Scalar;
typedef Dense StorageKind;
- typedef _Index Index;
+ typedef _StorageIndex StorageIndex;
typedef MatrixXpr XprKind;
enum {
@@ -74,47 +77,61 @@ struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex>
ColsAtCompileTime = 1,
MaxRowsAtCompileTime = Dynamic,
MaxColsAtCompileTime = 1,
- Flags = 0,
- CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+ Flags = LvalueBit
+ };
+};
+
+template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+ : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
+{
+ enum {
+ Flags = 0
};
};
} // end namespace internal
-template<typename _Scalar, int _Options, typename _Index>
+template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
- : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+ : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
+ typedef SparseCompressedBase<SparseMatrix> Base;
+ using Base::convert_index;
+ friend class SparseVector<_Scalar,0,_StorageIndex>;
public:
+ using Base::isCompressed;
+ using Base::nonZeros;
EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
- EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+ using Base::operator+=;
+ using Base::operator-=;
typedef MappedSparseMatrix<Scalar,Flags> Map;
+ typedef Diagonal<SparseMatrix> DiagonalReturnType;
+ typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
+ typedef typename Base::InnerIterator InnerIterator;
+ typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
+
+
using Base::IsRowMajor;
- typedef internal::CompressedStorage<Scalar,Index> Storage;
+ typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
enum {
Options = _Options
};
+ typedef typename Base::IndexVector IndexVector;
+ typedef typename Base::ScalarVector ScalarVector;
protected:
-
typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
Index m_outerSize;
Index m_innerSize;
- Index* m_outerIndex;
- Index* m_innerNonZeros; // optional, if null then the data is compressed
+ StorageIndex* m_outerIndex;
+ StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
Storage m_data;
-
- Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
- const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
public:
- /** \returns whether \c *this is in compressed form. */
- inline bool isCompressed() const { return m_innerNonZeros==0; }
-
/** \returns the number of rows of the matrix */
inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
/** \returns the number of columns of the matrix */
@@ -128,38 +145,38 @@ class SparseMatrix
/** \returns a const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
- inline const Scalar* valuePtr() const { return &m_data.value(0); }
+ inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
/** \returns a non-const pointer to the array of values.
* This function is aimed at interoperability with other libraries.
* \sa innerIndexPtr(), outerIndexPtr() */
- inline Scalar* valuePtr() { return &m_data.value(0); }
+ inline Scalar* valuePtr() { return m_data.valuePtr(); }
/** \returns a const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline const Index* innerIndexPtr() const { return &m_data.index(0); }
+ inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
/** \returns a non-const pointer to the array of inner indices.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), outerIndexPtr() */
- inline Index* innerIndexPtr() { return &m_data.index(0); }
+ inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
/** \returns a const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
- inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
/** \returns a non-const pointer to the array of the starting positions of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \sa valuePtr(), innerIndexPtr() */
- inline Index* outerIndexPtr() { return m_outerIndex; }
+ inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
/** \returns a const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
- inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+ inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
/** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
* This function is aimed at interoperability with other libraries.
* \warning it returns the null pointer 0 in compressed mode */
- inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+ inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
/** \internal */
inline Storage& data() { return m_data; }
@@ -175,7 +192,7 @@ class SparseMatrix
const Index outer = IsRowMajor ? row : col;
const Index inner = IsRowMajor ? col : row;
Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
- return m_data.atInRange(m_outerIndex[outer], end, inner);
+ return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
}
/** \returns a non-const reference to the value of the matrix at position \a i, \a j
@@ -198,7 +215,7 @@ class SparseMatrix
eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
if(end<=start)
return insert(row,col);
- const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
if((p<end) && (m_data.index(p)==inner))
return m_data.value(p);
else
@@ -209,45 +226,34 @@ class SparseMatrix
* The non zero coefficient must \b not already exist.
*
* If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
- * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
- * call reserve(const SizesType &) to reserve a more appropriate number of elements per
- * inner vector that better match your scenario.
+ * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
+ * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
+ * inserted by increasing outer-indices.
+ *
+ * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
+ * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
*
- * This function performs a sorted insertion in O(1) if the elements of each inner vector are
- * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+ * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
+ * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
*
*/
- Scalar& insert(Index row, Index col)
- {
- eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
-
- if(isCompressed())
- {
- reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
- }
- return insertUncompressed(row,col);
- }
+ Scalar& insert(Index row, Index col);
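
A short sketch of the reserve()+insert() pattern recommended above, filling a tridiagonal matrix column by column (the function name is illustrative; a default column-major SparseMatrix<double> is assumed):

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> tridiagonalSketch(Eigen::Index n)
{
  Eigen::SparseMatrix<double> A(n, n);
  A.reserve(Eigen::VectorXi::Constant(n, 3));     // at most 3 non-zeros per column
  for (Eigen::Index j = 0; j < n; ++j)
  {
    if (j > 0)     A.insert(j - 1, j) = -1.0;     // increasing inner indices -> O(1) insertion
    A.insert(j, j) = 2.0;
    if (j + 1 < n) A.insert(j + 1, j) = -1.0;
  }
  A.makeCompressed();                             // pack back into compressed (CSC) storage
  return A;
}
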
public:
- class InnerIterator;
- class ReverseInnerIterator;
-
- /** Removes all non zeros but keep allocated memory */
+ /** Removes all non zeros but keeps allocated memory
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa resize(Index,Index), data()
+ */
inline void setZero()
{
m_data.clear();
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
- if(m_innerNonZeros)
- memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
- }
-
- /** \returns the number of non zero coefficients */
- inline Index nonZeros() const
- {
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
if(m_innerNonZeros)
- return innerNonZeros().sum();
- return static_cast<Index>(m_data.size());
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
}
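
A minimal sketch of the clean-up pattern suggested in the comment above (A is an illustrative, already populated matrix):

#include <Eigen/SparseCore>

void clearAndShrinkSketch(Eigen::SparseMatrix<double>& A)
{
  A.setZero();          // drops all non-zeros but keeps the allocated buffers
  A.data().squeeze();   // releases the now unused memory, as suggested above
}
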
/** Preallocates \a reserveSize non zeros.
@@ -262,22 +268,25 @@ class SparseMatrix
#ifdef EIGEN_PARSED_BY_DOXYGEN
/** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
*
- * This function turns the matrix in non-compressed mode */
+ * This function turns the matrix into non-compressed mode.
+ *
+ * The type \c SizesType must expose the following interface:
+ \code
+ typedef value_type;
+ const value_type& operator[](i) const;
+ \endcode
+ * for \c i in the [0,this->outerSize()[ range.
+ * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
+ */
template<class SizesType>
inline void reserve(const SizesType& reserveSizes);
#else
template<class SizesType>
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
- {
- EIGEN_UNUSED_VARIABLE(enableif);
- reserveInnerVectors(reserveSizes);
- }
- template<class SizesType>
- inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
- #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
+ #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
typename
#endif
- SizesType::Scalar())
+ SizesType::value_type())
{
EIGEN_UNUSED_VARIABLE(enableif);
reserveInnerVectors(reserveSizes);
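
Since the SizesType documented above only needs a value_type and operator[], a plain std::vector<int> can be used for the per-inner-vector reservations; a small sketch with illustrative sizes, assuming a column-major matrix:

#include <cstddef>
#include <vector>
#include <Eigen/SparseCore>

void perColumnReserveSketch(Eigen::SparseMatrix<double>& A)
{
  std::vector<int> nnzPerCol(static_cast<std::size_t>(A.cols()), 4); // expect ~4 non-zeros per column
  nnzPerCol[0] = 16;                                                 // one denser column
  A.reserve(nnzPerCol);                                              // switches A to uncompressed mode
}
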
@@ -289,15 +298,15 @@ class SparseMatrix
{
if(isCompressed())
{
- std::size_t totalReserveSize = 0;
+ Index totalReserveSize = 0;
// turn the matrix into non-compressed mode
- m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
// temporarily use m_innerSizes to hold the new starting points.
- Index* newOuterIndex = m_innerNonZeros;
+ StorageIndex* newOuterIndex = m_innerNonZeros;
- Index count = 0;
+ StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
@@ -305,10 +314,10 @@ class SparseMatrix
totalReserveSize += reserveSizes[j];
}
m_data.reserve(totalReserveSize);
- Index previousOuterIndex = m_outerIndex[m_outerSize];
+ StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
for(Index j=m_outerSize-1; j>=0; --j)
{
- Index innerNNZ = previousOuterIndex - m_outerIndex[j];
+ StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@@ -324,15 +333,15 @@ class SparseMatrix
}
else
{
- Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
+ StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
- Index count = 0;
+ StorageIndex count = 0;
for(Index j=0; j<m_outerSize; ++j)
{
newOuterIndex[j] = count;
- Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
- Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
+ StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+ StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
count += toReserve + m_innerNonZeros[j];
}
newOuterIndex[m_outerSize] = count;
@@ -343,7 +352,7 @@ class SparseMatrix
Index offset = newOuterIndex[j] - m_outerIndex[j];
if(offset>0)
{
- Index innerNNZ = m_innerNonZeros[j];
+ StorageIndex innerNNZ = m_innerNonZeros[j];
for(Index i=innerNNZ-1; i>=0; --i)
{
m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
@@ -380,11 +389,11 @@ class SparseMatrix
* \sa insertBack, startVec */
inline Scalar& insertBackByOuterInner(Index outer, Index inner)
{
- eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
- m_data.append(0, inner);
+ m_data.append(Scalar(0), inner);
return m_data.value(p);
}
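
For completeness, a sketch of the ordered low-level filling sequence these helpers support, assuming the usual reserve()/startVec()/insertBack()/finalize() protocol (the function name and values are illustrative):

#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> identityByOrderedInsertion(Eigen::Index n)
{
  Eigen::SparseMatrix<double> A(n, n);   // column-major by default
  A.reserve(n);                          // total number of non-zeros to be inserted
  for (Eigen::Index j = 0; j < n; ++j)
  {
    A.startVec(j);                       // start inner vector (column) j
    A.insertBack(j, j) = 1.0;            // inner indices must be strictly increasing per vector
  }
  A.finalize();                          // seal the compressed storage
  return A;
}
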
@@ -394,7 +403,7 @@ class SparseMatrix
{
Index p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
- m_data.append(0, inner);
+ m_data.append(Scalar(0), inner);
return m_data.value(p);
}
@@ -414,7 +423,7 @@ class SparseMatrix
{
if(isCompressed())
{
- Index size = static_cast<Index>(m_data.size());
+ StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
Index i = m_outerSize;
// find the last filled column
while (i>=0 && m_outerIndex[i]==0)
@@ -433,7 +442,13 @@ class SparseMatrix
template<typename InputIterators>
void setFromTriplets(const InputIterators& begin, const InputIterators& end);
- void sumupDuplicates();
+ template<typename InputIterators,typename DupFunctor>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
+
+ void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
+
+ template<typename DupFunctor>
+ void collapseDuplicates(DupFunctor dup_func = DupFunctor());
//---
@@ -451,6 +466,8 @@ class SparseMatrix
if(isCompressed())
return;
+ eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
+
Index oldStart = m_outerIndex[1];
m_outerIndex[1] = m_innerNonZeros[0];
for(Index j=1; j<m_outerSize; ++j)
@@ -479,7 +496,7 @@ class SparseMatrix
{
if(m_innerNonZeros != 0)
return;
- m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
for (Index i = 0; i < m_outerSize; i++)
{
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -503,10 +520,9 @@ class SparseMatrix
void prune(const KeepFunc& keep = KeepFunc())
{
// TODO optimize the uncompressed mode to avoid moving and allocating the data twice
- // TODO also implement a unit test
makeCompressed();
- Index k = 0;
+ StorageIndex k = 0;
for(Index j=0; j<m_outerSize; ++j)
{
Index previousStart = m_outerIndex[j];
@@ -527,7 +543,12 @@ class SparseMatrix
}
/** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
- * \sa resizeNonZeros(Index), reserve(), setZero()
+ *
+ * If the sizes of the matrix are decreased, then the matrix is turned to \b uncompressed-mode
+ * and the storage of the out of bounds coefficients is kept and reserved.
+ * Call makeCompressed() to pack the entries and squeeze extra memory.
+ *
+ * \sa reserve(), setZero(), makeCompressed()
*/
void conservativeResize(Index rows, Index cols)
{
@@ -539,13 +560,13 @@ class SparseMatrix
Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
- Index newInnerSize = IsRowMajor ? cols : rows;
+ StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
// Deals with inner non zeros
if (m_innerNonZeros)
{
// Resize m_innerNonZeros
- Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
+ StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
if (!newInnerNonZeros) internal::throw_std_bad_alloc();
m_innerNonZeros = newInnerNonZeros;
@@ -555,7 +576,7 @@ class SparseMatrix
else if (innerChange < 0)
{
// Inner size decreased: allocate a new m_innerNonZeros
- m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
if (!m_innerNonZeros) internal::throw_std_bad_alloc();
for(Index i = 0; i < m_outerSize; i++)
m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
@@ -566,8 +587,8 @@ class SparseMatrix
{
for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
{
- Index &n = m_innerNonZeros[i];
- Index start = m_outerIndex[i];
+ StorageIndex &n = m_innerNonZeros[i];
+ StorageIndex start = m_outerIndex[i];
while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
}
}
@@ -578,12 +599,12 @@ class SparseMatrix
if (outerChange == 0)
return;
- Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
+ StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
if (!newOuterIndex) internal::throw_std_bad_alloc();
m_outerIndex = newOuterIndex;
if (outerChange > 0)
{
- Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
+ StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
m_outerIndex[i] = last;
}
@@ -591,7 +612,11 @@ class SparseMatrix
}
/** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
- * \sa resizeNonZeros(Index), reserve(), setZero()
+ *
+ * This function does not free the currently allocated memory. To release as much memory as possible,
+ * call \code mat.data().squeeze(); \endcode after resizing it.
+ *
+ * \sa reserve(), setZero()
*/
void resize(Index rows, Index cols)
{
@@ -601,7 +626,7 @@ class SparseMatrix
if (m_outerSize != outerSize || m_outerSize==0)
{
std::free(m_outerIndex);
- m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
+ m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
if (!m_outerIndex) internal::throw_std_bad_alloc();
m_outerSize = outerSize;
@@ -611,19 +636,24 @@ class SparseMatrix
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
- memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
}
/** \internal
* Resize the nonzero vector to \a size */
void resizeNonZeros(Index size)
{
- // TODO remove this function
m_data.resize(size);
}
- /** \returns a const expression of the diagonal coefficients */
- const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+ /** \returns a const expression of the diagonal coefficients. */
+ const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
+
+ /** \returns a read-write expression of the diagonal coefficients.
+ * \warning If the diagonal entries are written, then all diagonal
+ * entries \b must already exist, otherwise an assertion will be raised.
+ */
+ DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
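
A sketch of the const and read-write diagonal accessors declared above; per the warning, every diagonal entry must exist before it is written (A is assumed square, names and values are illustrative):

#include <Eigen/SparseCore>

void diagonalSketch(Eigen::SparseMatrix<double>& A)
{
  double trace = A.diagonal().sum();              // const expression: no insertion needed
  for (Eigen::Index i = 0; i < A.rows(); ++i)
    A.coeffRef(i, i) += 0.0;                      // inserts an explicit zero if the entry is missing
  A.diagonal() = Eigen::VectorXd::Constant(A.rows(), trace); // write through the lvalue expression
}
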
/** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
inline SparseMatrix()
@@ -649,7 +679,16 @@ class SparseMatrix
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
check_template_parameters();
- *this = other.derived();
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
+ if (needToTranspose)
+ *this = other.derived();
+ else
+ {
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+ internal::call_assignment_no_alias(*this, other.derived());
+ }
}
/** Constructs a sparse matrix from the sparse selfadjoint view \a other */
@@ -658,7 +697,7 @@ class SparseMatrix
: m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
{
check_template_parameters();
- *this = other;
+ Base::operator=(other);
}
/** Copy constructor (it performs a deep copy) */
@@ -678,6 +717,15 @@ class SparseMatrix
initAssignment(other);
other.evalTo(*this);
}
+
+ /** \brief Copy constructor with in-place evaluation */
+ template<typename OtherDerived>
+ explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
/** Swaps the content of two sparse matrices of the same type.
* This is a fast operation that simply swaps the underlying pointers and parameters. */
@@ -697,9 +745,9 @@ class SparseMatrix
{
eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
this->m_data.resize(rows());
- Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
- Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
- Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
+ Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
+ Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
+ Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
std::free(m_innerNonZeros);
m_innerNonZeros = 0;
}
@@ -711,10 +759,13 @@ class SparseMatrix
}
else if(this!=&other)
{
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
initAssignment(other);
if(other.isCompressed())
{
- memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
m_data = other.m_data;
}
else
@@ -725,22 +776,11 @@ class SparseMatrix
return *this;
}
- #ifndef EIGEN_PARSED_BY_DOXYGEN
- template<typename Lhs, typename Rhs>
- inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
- { return Base::operator=(product); }
-
- template<typename OtherDerived>
- inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
- {
- initAssignment(other);
- return Base::operator=(other.derived());
- }
-
+#ifndef EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
{ return Base::operator=(other.derived()); }
- #endif
+#endif // EIGEN_PARSED_BY_DOXYGEN
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
@@ -750,30 +790,38 @@ class SparseMatrix
EIGEN_DBG_SPARSE(
s << "Nonzero entries:\n";
if(m.isCompressed())
+ {
for (Index i=0; i<m.nonZeros(); ++i)
s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
else
+ {
for (Index i=0; i<m.outerSize(); ++i)
{
Index p = m.m_outerIndex[i];
Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
Index k=p;
- for (; k<pe; ++k)
+ for (; k<pe; ++k) {
s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
- for (; k<m.m_outerIndex[i+1]; ++k)
+ }
+ for (; k<m.m_outerIndex[i+1]; ++k) {
s << "(_,_) ";
+ }
}
+ }
s << std::endl;
s << std::endl;
s << "Outer pointers:\n";
- for (Index i=0; i<m.outerSize(); ++i)
+ for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_outerIndex[i] << " ";
+ }
s << " $" << std::endl;
if(!m.isCompressed())
{
s << "Inner non zeros:\n";
- for (Index i=0; i<m.outerSize(); ++i)
+ for (Index i=0; i<m.outerSize(); ++i) {
s << m.m_innerNonZeros[i] << " ";
+ }
s << " $" << std::endl;
}
s << std::endl;
@@ -789,10 +837,8 @@ class SparseMatrix
std::free(m_innerNonZeros);
}
-#ifndef EIGEN_PARSED_BY_DOXYGEN
/** Overloaded for performance */
Scalar sum() const;
-#endif
# ifdef EIGEN_SPARSEMATRIX_PLUGIN
# include EIGEN_SPARSEMATRIX_PLUGIN
@@ -819,15 +865,15 @@ protected:
* A vector object that is equal to 0 everywhere but v at the position i */
class SingletonVector
{
- Index m_index;
- Index m_value;
+ StorageIndex m_index;
+ StorageIndex m_value;
public:
- typedef Index value_type;
+ typedef StorageIndex value_type;
SingletonVector(Index i, Index v)
- : m_index(i), m_value(v)
+ : m_index(convert_index(i)), m_value(convert_index(v))
{}
- Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+ StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
};
/** \internal
@@ -846,14 +892,14 @@ public:
eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
- m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ m_data.index(p) = convert_index(inner);
+ return (m_data.value(p) = Scalar(0));
}
private:
static void check_template_parameters()
{
- EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
}
@@ -868,87 +914,20 @@ private:
};
};
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
-{
- public:
- InnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_end = mat.m_outerIndex[outer+1];
- else
- m_end = m_id + mat.m_innerNonZeros[outer];
- }
-
- inline InnerIterator& operator++() { m_id++; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
-
- inline Index index() const { return m_indices[m_id]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id < m_end); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- Index m_end;
-};
-
-template<typename Scalar, int _Options, typename _Index>
-class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
-{
- public:
- ReverseInnerIterator(const SparseMatrix& mat, Index outer)
- : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
- {
- if(mat.isCompressed())
- m_id = mat.m_outerIndex[outer+1];
- else
- m_id = m_start + mat.m_innerNonZeros[outer];
- }
-
- inline ReverseInnerIterator& operator--() { --m_id; return *this; }
-
- inline const Scalar& value() const { return m_values[m_id-1]; }
- inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
-
- inline Index index() const { return m_indices[m_id-1]; }
- inline Index outer() const { return m_outer; }
- inline Index row() const { return IsRowMajor ? m_outer : index(); }
- inline Index col() const { return IsRowMajor ? index() : m_outer; }
-
- inline operator bool() const { return (m_id > m_start); }
-
- protected:
- const Scalar* m_values;
- const Index* m_indices;
- const Index m_outer;
- Index m_id;
- const Index m_start;
-};
-
namespace internal {
-template<typename InputIterator, typename SparseMatrixType>
-void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
- EIGEN_UNUSED_VARIABLE(Options);
enum { IsRowMajor = SparseMatrixType::IsRowMajor };
typedef typename SparseMatrixType::Scalar Scalar;
- typedef typename SparseMatrixType::Index Index;
- SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());
+ typedef typename SparseMatrixType::StorageIndex StorageIndex;
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
if(begin!=end)
{
// pass 1: count the nnz per inner-vector
- Matrix<Index,Dynamic,1> wi(trMat.outerSize());
+ typename SparseMatrixType::IndexVector wi(trMat.outerSize());
wi.setZero();
for(InputIterator it(begin); it!=end; ++it)
{
@@ -962,7 +941,7 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
// pass 3:
- trMat.sumupDuplicates();
+ trMat.collapseDuplicates(dup_func);
}
// pass 4: transposed copy -> implicit sorting
@@ -1009,26 +988,43 @@ void set_from_triplets(const InputIterator& begin, const InputIterator& end, Spa
* an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
* be explicitly stored into a std::vector for instance.
*/
-template<typename Scalar, int _Options, typename _Index>
+template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
-void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
+}
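
A sketch of the triplet-based assembly recommended above (sizes and values are illustrative); duplicated entries are combined with the default summing functor:

#include <vector>
#include <Eigen/SparseCore>

Eigen::SparseMatrix<double> fromTripletsSketch()
{
  typedef Eigen::Triplet<double> T;
  std::vector<T> triplets;
  triplets.reserve(4);
  triplets.push_back(T(0, 0, 1.0));
  triplets.push_back(T(1, 2, 2.0));
  triplets.push_back(T(1, 2, 3.0));   // duplicate of (1,2): summed by default
  triplets.push_back(T(2, 1, 4.0));

  Eigen::SparseMatrix<double> A(3, 3);
  A.setFromTriplets(triplets.begin(), triplets.end());   // A is in compressed mode afterwards
  return A;                                              // A.coeff(1,2) == 5.0
}
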
+
+/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
+ * \code
+ * value = dup_func(OldValue, NewValue)
+ * \endcode
+ * Here is a C++11 example keeping the latest entry only:
+ * \code
+ * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
+ * \endcode
+ */
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename InputIterators,typename DupFunctor>
+void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
- internal::set_from_triplets(begin, end, *this);
+ internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
/** \internal */
-template<typename Scalar, int _Options, typename _Index>
-void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+template<typename Scalar, int _Options, typename _StorageIndex>
+template<typename DupFunctor>
+void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
eigen_assert(!isCompressed());
// TODO, in practice we should be able to use m_innerNonZeros for that task
- Matrix<Index,Dynamic,1> wi(innerSize());
+ IndexVector wi(innerSize());
wi.fill(-1);
- Index count = 0;
+ StorageIndex count = 0;
// for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
for(Index j=0; j<outerSize(); ++j)
{
- Index start = count;
+ StorageIndex start = count;
Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
{
@@ -1036,7 +1032,7 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
if(wi(i)>=start)
{
// we already meet this entry => accumulate it
- m_data.value(wi(i)) += m_data.value(k);
+ m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
}
else
{
@@ -1056,39 +1052,48 @@ void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
m_data.resize(m_outerIndex[m_outerSize]);
}
-template<typename Scalar, int _Options, typename _Index>
+template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
-EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
+EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
-
- const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+
+ #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
+ #endif
+
+ const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
if (needToTranspose)
{
+ #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
+ #endif
// two passes algorithm:
// 1 - compute the number of coeffs per dest inner vector
// 2 - do the actual copy/eval
// Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
- typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ typedef internal::evaluator<_OtherCopy> OtherCopyEval;
OtherCopy otherCopy(other.derived());
+ OtherCopyEval otherCopyEval(otherCopy);
SparseMatrix dest(other.rows(),other.cols());
- Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
+ Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
// pass 1
// FIXME the above copy could be merged with that pass
for (Index j=0; j<otherCopy.outerSize(); ++j)
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
++dest.m_outerIndex[it.index()];
// prefix sum
- Index count = 0;
- Matrix<Index,Dynamic,1> positions(dest.outerSize());
+ StorageIndex count = 0;
+ IndexVector positions(dest.outerSize());
for (Index j=0; j<dest.outerSize(); ++j)
{
- Index tmp = dest.m_outerIndex[j];
+ StorageIndex tmp = dest.m_outerIndex[j];
dest.m_outerIndex[j] = count;
positions[j] = count;
count += tmp;
@@ -1097,9 +1102,9 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
// alloc
dest.m_data.resize(count);
// pass 2
- for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
{
- for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
{
Index pos = positions[it.index()]++;
dest.m_data.index(pos) = j;
@@ -1112,26 +1117,148 @@ EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Opt
else
{
if(other.isRValue())
+ {
initAssignment(other.derived());
+ }
// there is no special optimization
return Base::operator=(other.derived());
}
}
-template<typename _Scalar, int _Options, typename _Index>
-EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
+template<typename _Scalar, int _Options, typename _StorageIndex>
+typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
+{
+ eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if(isCompressed())
+ {
+ if(nonZeros()==0)
+ {
+ // reserve space if not already done
+ if(m_data.allocatedSize()==0)
+ m_data.reserve(2*m_innerSize);
+
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
+
+ // pack all inner-vectors to the end of the pre-allocated space
+ // and allocate the entire free-space to the first inner-vector
+ StorageIndex end = convert_index(m_data.allocatedSize());
+ for(Index j=1; j<=m_outerSize; ++j)
+ m_outerIndex[j] = end;
+ }
+ else
+ {
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
+ if(!m_innerNonZeros) internal::throw_std_bad_alloc();
+ for(Index j=0; j<m_outerSize; ++j)
+ m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
+ }
+ }
+
+ // check whether we can do a fast "push back" insertion
+ Index data_end = m_data.allocatedSize();
+
+ // First case: we are filling a new inner vector which is packed at the end.
+ // We assume that all remaining inner-vectors are also empty and packed to the end.
+ if(m_outerIndex[outer]==data_end)
+ {
+ eigen_internal_assert(m_innerNonZeros[outer]==0);
+
+ // pack previous empty inner-vectors to end of the used-space
+ // and allocate the entire free-space to the current inner-vector.
+ StorageIndex p = convert_index(m_data.size());
+ Index j = outer;
+ while(j>=0 && m_innerNonZeros[j]==0)
+ m_outerIndex[j--] = p;
+
+ // push back the new element
+ ++m_innerNonZeros[outer];
+ m_data.append(Scalar(0), inner);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+ return m_data.value(p);
+ }
+
+ // Second case: the next inner-vector is packed to the end
+ // and the current inner-vector end match the used-space.
+ if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
+ {
+ eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
+
+ // add space for the new element
+ ++m_innerNonZeros[outer];
+ m_data.resize(m_data.size()+1);
+
+ // check for reallocation
+ if(data_end != m_data.allocatedSize())
+ {
+ // m_data has been reallocated
+ // -> move remaining inner-vectors back to the end of the free-space
+ // so that the entire free-space is allocated to the current inner-vector.
+ eigen_internal_assert(data_end < m_data.allocatedSize());
+ StorageIndex new_end = convert_index(m_data.allocatedSize());
+ for(Index k=outer+1; k<=m_outerSize; ++k)
+ if(m_outerIndex[k]==data_end)
+ m_outerIndex[k] = new_end;
+ }
+
+ // and insert it at the right position (sorted insertion)
+ Index startId = m_outerIndex[outer];
+ Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = convert_index(inner);
+ return (m_data.value(p) = 0);
+ }
+
+ if(m_data.size() != m_data.allocatedSize())
+ {
+ // make sure the matrix is compatible to random un-compressed insertion:
+ m_data.resize(m_data.allocatedSize());
+ this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
+ }
+
+ return insertUncompressed(row,col);
+}
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
eigen_assert(!isCompressed());
const Index outer = IsRowMajor ? row : col;
- const Index inner = IsRowMajor ? col : row;
+ const StorageIndex inner = convert_index(IsRowMajor ? col : row);
Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
- Index innerNNZ = m_innerNonZeros[outer];
+ StorageIndex innerNNZ = m_innerNonZeros[outer];
if(innerNNZ>=room)
{
// this inner vector is full, we need to reallocate the whole buffer :(
- reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
+ reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
}
Index startId = m_outerIndex[outer];
@@ -1142,16 +1269,16 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
m_data.value(p) = m_data.value(p-1);
--p;
}
- eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exist, you must call coeffRef to this end");
+ eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
m_innerNonZeros[outer]++;
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
}
-template<typename _Scalar, int _Options, typename _Index>
-EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
+template<typename _Scalar, int _Options, typename _StorageIndex>
+EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
eigen_assert(isCompressed());
@@ -1164,7 +1291,7 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
// we start a new inner vector
while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
{
- m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ m_outerIndex[previousOuter] = convert_index(m_data.size());
--previousOuter;
}
m_outerIndex[outer+1] = m_outerIndex[outer];
@@ -1174,11 +1301,11 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
// starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
// the 2nd inner vector...
bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
- && (size_t(m_outerIndex[outer+1]) == m_data.size());
+ && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
- size_t startId = m_outerIndex[outer];
- // FIXME let's make sure sizeof(long int) == sizeof(size_t)
- size_t p = m_outerIndex[outer+1];
+ std::size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
+ std::size_t p = m_outerIndex[outer+1];
++m_outerIndex[outer+1];
double reallocRatio = 1;
@@ -1254,7 +1381,21 @@ EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& Sparse
}
m_data.index(p) = inner;
- return (m_data.value(p) = 0);
+ return (m_data.value(p) = Scalar(0));
+}
+
+namespace internal {
+
+template<typename _Scalar, int _Options, typename _StorageIndex>
+struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
+ : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
+{
+ typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
+ typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
+ evaluator() : Base() {}
+ explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
+};
+
}
} // end namespace Eigen