Diffstat (limited to 'extern/Eigen3/Eigen/src/SparseCore')
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h                        371
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h                 233
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h   245
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h                      61
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h                179
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h                        0
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h                        387
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h               324
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h                163
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h                300
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h             184
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseDot.h                           94
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h                         26
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h                      1116
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h                  458
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h                 148
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h                     186
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h                        45
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h             480
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h    149
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h                    61
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h              164
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h                        173
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseVector.h                      398
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/SparseView.h                         98
-rw-r--r--  extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h                  334
26 files changed, 6377 insertions(+), 0 deletions(-)
diff --git a/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
new file mode 100644
index 00000000000..6cfaadbaa9a
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/AmbiVector.h
@@ -0,0 +1,371 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * Hybrid sparse/dense vector class designed for intensive read-write operations.
+ *
+ * See BasicSparseLLT and SparseProduct for usage examples.
+ */
+template<typename _Scalar, typename _Index>
+class AmbiVector
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ AmbiVector(Index size)
+ : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ {
+ resize(size);
+ }
+
+ void init(double estimatedDensity);
+ void init(int mode);
+
+ Index nonZeros() const;
+
+ /** Specifies a sub-vector to work on */
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+
+ void setZero();
+
+ void restart();
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
+
+ class Iterator;
+
+ ~AmbiVector() { delete[] m_buffer; }
+
+ void resize(Index size)
+ {
+ if (m_allocatedSize < size)
+ reallocate(size);
+ m_size = size;
+ }
+
+ Index size() const { return m_size; }
+
+ protected:
+
+ void reallocate(Index size)
+ {
+      // if the size of the matrix is not too large, allocate a bit more than needed so
+      // that we can handle dense vectors even in sparse mode.
+ delete[] m_buffer;
+ if (size<1000)
+ {
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[allocSize];
+ }
+ else
+ {
+ m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[size];
+ }
+ m_size = size;
+ m_start = 0;
+ m_end = m_size;
+ }
+
+ void reallocateSparse()
+ {
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
+ allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ Scalar* newBuffer = new Scalar[allocSize];
+ memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ delete[] m_buffer;
+ m_buffer = newBuffer;
+ }
+
+ protected:
+ // element type of the linked list
+ struct ListEl
+ {
+ Index next;
+ Index index;
+ Scalar value;
+ };
+
+    // used to store data in both modes
+ Scalar* m_buffer;
+ Scalar m_zero;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
+
+ // linked list mode
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
+};
+
+/** \returns the number of non zeros in the current sub vector */
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+{
+ if (m_mode==IsSparse)
+ return m_llSize;
+ else
+ return m_end - m_start;
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+{
+ if (estimatedDensity>0.1)
+ init(IsDense);
+ else
+ init(IsSparse);
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
+{
+ m_mode = mode;
+ if (m_mode==IsSparse)
+ {
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+/** Must be called whenever we might perform a write access
+ * with an index smaller than the previous one.
+ *
+ * Don't worry, this function is extremely cheap.
+ */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
+{
+ m_llCurrent = m_llStart;
+}
+
+/** Set all coefficients of current subvector to zero */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
+{
+ if (m_mode==IsDense)
+ {
+ for (Index i=m_start; i<m_end; ++i)
+ m_buffer[i] = Scalar(0);
+ }
+ else
+ {
+ eigen_assert(m_mode==IsSparse);
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ // TODO factorize the following code to reduce code generation
+ eigen_assert(m_mode==IsSparse);
+ if (m_llSize==0)
+ {
+ // this is the first element
+ m_llStart = 0;
+ m_llCurrent = 0;
+ ++m_llSize;
+ llElements[0].value = Scalar(0);
+ llElements[0].index = i;
+ llElements[0].next = -1;
+ return llElements[0].value;
+ }
+ else if (i<llElements[m_llStart].index)
+ {
+ // this is going to be the new first element of the list
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = m_llStart;
+ m_llStart = m_llSize;
+ ++m_llSize;
+ m_llCurrent = m_llStart;
+ return el.value;
+ }
+ else
+ {
+ Index nextel = llElements[m_llCurrent].next;
+ eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ while (nextel >= 0 && llElements[nextel].index<=i)
+ {
+ m_llCurrent = nextel;
+ nextel = llElements[nextel].next;
+ }
+
+ if (llElements[m_llCurrent].index==i)
+ {
+          // the coefficient already exists and we found it!
+ return llElements[m_llCurrent].value;
+ }
+ else
+ {
+ if (m_llSize>=m_allocatedElements)
+ {
+ reallocateSparse();
+ llElements = reinterpret_cast<ListEl*>(m_buffer);
+ }
+ eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+ // let's insert a new coefficient
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = llElements[m_llCurrent].next;
+ llElements[m_llCurrent].next = m_llSize;
+ ++m_llSize;
+ return el.value;
+ }
+ }
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ eigen_assert(m_mode==IsSparse);
+ if ((m_llSize==0) || (i<llElements[m_llStart].index))
+ {
+ return m_zero;
+ }
+ else
+ {
+ Index elid = m_llStart;
+ while (elid >= 0 && llElements[elid].index<i)
+ elid = llElements[elid].next;
+
+      // note: elid can be -1 here, when i is past the last stored index
+      if (elid >= 0 && llElements[elid].index==i)
+        return llElements[elid].value;
+ else
+ return m_zero;
+ }
+ }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** Default constructor
+ * \param vec the vector on which we iterate
+ * \param epsilon the minimal value used to prune zero coefficients.
+ * In practice, all coefficients having a magnitude smaller than \a epsilon
+ * are skipped.
+ */
+ Iterator(const AmbiVector& vec, RealScalar epsilon = 0)
+ : m_vector(vec)
+ {
+ m_epsilon = epsilon;
+ m_isDense = m_vector.m_mode==IsDense;
+ if (m_isDense)
+ {
+ m_currentEl = 0; // this is to avoid a compilation warning
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = m_vector.m_start-1;
+ ++(*this);
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ m_currentEl = m_vector.m_llStart;
+ while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<=m_epsilon)
+ m_currentEl = llElements[m_currentEl].next;
+ if (m_currentEl<0)
+ {
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ }
+
+ Index index() const { return m_cachedIndex; }
+ Scalar value() const { return m_cachedValue; }
+
+ operator bool() const { return m_cachedIndex>=0; }
+
+ Iterator& operator++()
+ {
+ if (m_isDense)
+ {
+ do {
+ ++m_cachedIndex;
+ } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ if (m_cachedIndex<m_vector.m_end)
+ m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+ else
+ m_cachedIndex=-1;
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ do {
+ m_currentEl = llElements[m_currentEl].next;
+ } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ return *this;
+ }
+
+ protected:
+ const AmbiVector& m_vector; // the target vector
+ Index m_currentEl; // the current element in sparse/linked-list mode
+ RealScalar m_epsilon; // epsilon used to prune zero coefficients
+ Index m_cachedIndex; // current coordinate
+ Scalar m_cachedValue; // current value
+ bool m_isDense; // mode of the vector
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_AMBIVECTOR_H
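
For orientation, here is a minimal sketch of how this internal class is driven. AmbiVector is an implementation detail (see the BasicSparseLLT and SparseProduct kernels mentioned above), so standalone use like this is illustrative rather than supported API:

    #include <iostream>
    #include <Eigen/SparseCore>

    int main()
    {
      using Eigen::internal::AmbiVector;
      AmbiVector<double,int> vec(10);  // working vector of logical size 10
      vec.init(0.05);                  // low estimated density => linked-list (sparse) mode
      vec.setBounds(0, 10);
      vec.setZero();
      vec.restart();                   // required before writing at a lower index than the previous write
      vec.coeffRef(2) = 1.0;           // inserts into the sorted linked list
      vec.coeffRef(7) += 2.0;

      // iterate over the non-zeros, skipping magnitudes <= epsilon (0 here)
      for (AmbiVector<double,int>::Iterator it(vec); it; ++it)
        std::cout << it.index() << " -> " << it.value() << "\n";
      return 0;
    }
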
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
new file mode 100644
index 00000000000..85a998aff10
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/CompressedStorage.h
@@ -0,0 +1,233 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+namespace Eigen {
+
+namespace internal {
+
+/** \internal
+ * Stores a sparse set of values as a list of values and a list of indices.
+ *
+ */
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+ public:
+
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+
+ protected:
+
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ public:
+
+ CompressedStorage()
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {}
+
+ CompressedStorage(size_t size)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ resize(size);
+ }
+
+ CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ *this = other;
+ }
+
+ CompressedStorage& operator=(const CompressedStorage& other)
+ {
+ resize(other.size());
+ memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+ return *this;
+ }
+
+ void swap(CompressedStorage& other)
+ {
+ std::swap(m_values, other.m_values);
+ std::swap(m_indices, other.m_indices);
+ std::swap(m_size, other.m_size);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~CompressedStorage()
+ {
+ delete[] m_values;
+ delete[] m_indices;
+ }
+
+ void reserve(size_t size)
+ {
+ size_t newAllocatedSize = m_size + size;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(newAllocatedSize);
+ }
+
+ void squeeze()
+ {
+ if (m_allocatedSize>m_size)
+ reallocate(m_size);
+ }
+
+ void resize(size_t size, float reserveSizeFactor = 0)
+ {
+ if (m_allocatedSize<size)
+ reallocate(size + size_t(reserveSizeFactor*size));
+ m_size = size;
+ }
+
+ void append(const Scalar& v, Index i)
+ {
+ Index id = static_cast<Index>(m_size);
+ resize(m_size+1, 1);
+ m_values[id] = v;
+ m_indices[id] = i;
+ }
+
+ inline size_t size() const { return m_size; }
+ inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
+
+ inline Scalar& value(size_t i) { return m_values[i]; }
+ inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
+
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+ {
+ CompressedStorage res;
+ res.m_indices = indices;
+ res.m_values = values;
+ res.m_allocatedSize = res.m_size = size;
+ return res;
+ }
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(Index key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+ {
+ while(end>start)
+ {
+ size_t mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return static_cast<Index>(start);
+ }
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (start>=end)
+        return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+ {
+ size_t id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (size_t j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ size_t k = 0;
+ size_t n = size();
+ for (size_t i=0; i<n; ++i)
+ {
+ if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+ {
+ value(k) = value(i);
+ index(k) = index(i);
+ ++k;
+ }
+ }
+ resize(k,0);
+ }
+
+ protected:
+
+ inline void reallocate(size_t size)
+ {
+ Scalar* newValues = new Scalar[size];
+ Index* newIndices = new Index[size];
+ size_t copySize = (std::min)(size, m_size);
+ // copy
+ internal::smart_copy(m_values, m_values+copySize, newValues);
+ internal::smart_copy(m_indices, m_indices+copySize, newIndices);
+ // delete old stuff
+ delete[] m_values;
+ delete[] m_indices;
+ m_values = newValues;
+ m_indices = newIndices;
+ m_allocatedSize = size;
+ }
+
+ protected:
+ Scalar* m_values;
+ Index* m_indices;
+ size_t m_size;
+ size_t m_allocatedSize;
+
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
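
CompressedStorage is the parallel value/index array pair that backs SparseMatrix. A minimal sketch of the interface defined above (again direct use of an internal class, shown only for illustration):

    #include <iostream>
    #include <Eigen/SparseCore>

    int main()
    {
      using Eigen::internal::CompressedStorage;
      CompressedStorage<double,int> data;
      data.reserve(4);
      data.append(1.5, 0);   // (value, index); the caller keeps indices sorted
      data.append(2.5, 3);
      data.append(3.5, 7);

      std::cout << data.at(3) << "\n";        // 2.5
      std::cout << data.at(5, -1.0) << "\n";  // key absent: returns the default, -1
      data.atWithInsertion(5) = 9.0;          // inserts while keeping indices sorted
      std::cout << data.size() << "\n";       // 4
      return 0;
    }
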
diff --git a/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
new file mode 100644
index 00000000000..16b5e1dba6c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/ConservativeSparseSparseProduct.h
@@ -0,0 +1,245 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+#define EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void conservative_sparse_sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ std::vector<bool> mask(rows,false);
+ Matrix<Scalar,Dynamic,1> values(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
+
+ // estimate the number of non zero entries
+ // given a rhs column containing Y non zeros, we assume that the respective Y columns
+  // of the lhs differ on average by one non zero, thus the number of non zeros for
+  // the product of a rhs column with the lhs is X+Y where X is the average number of non zeros
+ // per column of the lhs.
+ // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs)
+ Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+ res.setZero();
+ res.reserve(Index(estimated_nnz_prod));
+ // we compute each column of the result, one after the other
+ for (Index j=0; j<cols; ++j)
+ {
+
+ res.startVec(j);
+ Index nnz = 0;
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ if(!mask[i])
+ {
+ mask[i] = true;
+ values[i] = x * y;
+ indices[nnz] = i;
+ ++nnz;
+ }
+ else
+ values[i] += x * y;
+ }
+ }
+
+ // unordered insertion
+ for(int k=0; k<nnz; ++k)
+ {
+ int i = indices[k];
+ res.insertBackByOuterInnerUnordered(j,i) = values[i];
+ mask[i] = false;
+ }
+
+#if 0
+ // alternative ordered insertion code:
+
+ int t200 = rows/(log2(200)*1.39);
+ int t = (rows*100)/139;
+
+ // FIXME reserve nnz non zeros
+ // FIXME implement fast sort algorithms for very small nnz
+ // if the result is sparse enough => use a quick sort
+ // otherwise => loop through the entire vector
+ // In order to avoid to perform an expensive log2 when the
+ // result is clearly very sparse we use a linear bound up to 200.
+ //if((nnz<200 && nnz<t200) || nnz * log2(nnz) < t)
+ //res.startVec(j);
+ if(true)
+ {
+ if(nnz>1) std::sort(indices.data(),indices.data()+nnz);
+ for(int k=0; k<nnz; ++k)
+ {
+ int i = indices[k];
+ res.insertBackByOuterInner(j,i) = values[i];
+ mask[i] = false;
+ }
+ }
+ else
+ {
+ // dense path
+ for(int i=0; i<rows; ++i)
+ {
+ if(mask[i])
+ {
+ mask[i] = false;
+ res.insertBackByOuterInner(j,i) = values[i];
+ }
+ }
+ }
+#endif
+
+ }
+ res.finalize();
+}
+
+
+} // end namespace internal
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct conservative_sparse_sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename remove_all<Lhs>::type LhsCleaned;
+ typedef typename LhsCleaned::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(lhs.rows(),rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ // sort the non zeros:
+ RowMajorMatrix resRow(resCol);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix rhsRow = rhs;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix lhsRow = lhs;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix resRow(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ res = resRow;
+ }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol = lhs;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix rhsCol = rhs;
+ ColMajorMatrix resCol(lhs.rows(), rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct conservative_sparse_sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ RowMajorMatrix resRow(lhs.rows(),rhs.cols());
+ internal::conservative_sparse_sparse_product_impl<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ // sort the non zeros:
+ ColMajorMatrix resCol(resRow);
+ res = resCol;
+ }
+};
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_CONSERVATIVESPARSESPARSEPRODUCT_H
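
Through the public API this kernel is reached via operator* on two sparse operands. A brief sketch, assuming the default sparse product path of this Eigen version dispatches to the conservative implementation above:

    #include <iostream>
    #include <Eigen/SparseCore>

    int main()
    {
      typedef Eigen::SparseMatrix<double> SpMat;  // column-major by default
      SpMat A(3,3), B(3,3);
      A.insert(0,0) = 1; A.insert(2,1) = 2;
      B.insert(0,0) = 4; B.insert(1,2) = 3;
      A.makeCompressed(); B.makeCompressed();

      // per result column: a boolean mask plus dense scratch vectors
      // accumulate the contributions, then the non-zeros are inserted
      SpMat C = A * B;
      std::cout << C << "\n";
      return 0;
    }
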
diff --git a/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h b/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h
new file mode 100644
index 00000000000..6da4683d2c2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/CoreIterators.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+namespace Eigen {
+
+/* This file contains the respective InnerIterator definition of the expressions defined in Eigen/Core
+ */
+
+/** \ingroup SparseCore_Module
+ * \class InnerIterator
+ * \brief An InnerIterator allows looping over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+ protected:
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_COREITERATORS_H
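
A short sketch of the dense InnerIterator defined above. It walks one inner vector (a column, for the default column-major storage) per outer index and, unlike its sparse counterparts, also visits explicit zeros:

    #include <iostream>
    #include <Eigen/Core>

    int main()
    {
      Eigen::MatrixXd m(2,2);
      m << 1, 0,
           0, 2;

      for (int j = 0; j < m.outerSize(); ++j)
        for (Eigen::MatrixXd::InnerIterator it(m, j); it; ++it)
          std::cout << "(" << it.row() << "," << it.col() << ") = "
                    << it.value() << "\n";
      return 0;
    }
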
diff --git a/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
new file mode 100644
index 00000000000..93cd4832dea
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/MappedSparseMatrix.h
@@ -0,0 +1,179 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+namespace Eigen {
+
+/** \class MappedSparseMatrix
+ *
+ * \brief Sparse matrix view mapping existing compressed-storage arrays (values and indices) owned by the user
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+{};
+}
+
+template<typename _Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ protected:
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
+ Scalar* m_values;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* valuePtr() const { return m_values; }
+ inline Scalar* valuePtr() { return m_values; }
+
+ inline const Index* innerIndexPtr() const { return m_innerIndices; }
+ inline Index* innerIndexPtr() { return m_innerIndices; }
+
+ inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ inline Index* outerIndexPtr() { return m_outerIndex; }
+ //----------------------------------------
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+      return ((id<end) && (*r==inner)) ? m_values[id] : Scalar(0);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return m_values[id];
+ }
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return m_nnz; }
+
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ {}
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat.outerIndexPtr()[outer]),
+ m_start(m_id),
+ m_end(mat.outerIndexPtr()[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix.valuePtr()[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id]); }
+
+ inline Index index() const { return m_matrix.innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat.outerIndexPtr()[outer+1]),
+ m_start(mat.outerIndexPtr()[outer]),
+ m_end(m_id)
+ {}
+
+ inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+ inline Scalar value() const { return m_matrix.valuePtr()[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix.valuePtr()[m_id-1]); }
+
+ inline Index index() const { return m_matrix.innerIndexPtr()[m_id-1]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id <= m_end) && (m_id>m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
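
A minimal sketch of wrapping user-owned compressed-column (CSC) arrays with the constructor defined above; the array contents are illustrative:

    #include <iostream>
    #include <Eigen/SparseCore>

    int main()
    {
      // 3x3 column-major matrix with 3 stored non-zeros:
      //   [1 0 0]
      //   [0 0 2]
      //   [0 3 0]
      int    outer[4]  = {0, 1, 2, 3};  // column start offsets (size cols+1)
      int    inner[3]  = {0, 2, 1};     // row index of each stored value
      double values[3] = {1.0, 3.0, 2.0};

      Eigen::MappedSparseMatrix<double,Eigen::ColMajor,int>
          map(3, 3, 3, outer, inner, values);

      std::cout << map.coeff(2,1) << "\n";  // 3
      map.coeffRef(2,1) = 5.0;              // writes through to 'values'
      std::cout << values[1] << "\n";       // 5
      return 0;
    }
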
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h b/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseAssign.h
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
new file mode 100644
index 00000000000..eefd8070251
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseBlock.h
@@ -0,0 +1,387 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+namespace Eigen {
+
+namespace internal {
+template<typename MatrixType, int Size>
+struct traits<SparseInnerVectorSet<MatrixType, Size> >
+{
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Index Index;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
+ Flags = MatrixType::Flags,
+ RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ CoeffReadCost = MatrixType::CoeffReadCost
+ };
+};
+} // end namespace internal
+
+template<typename MatrixType, int Size>
+class SparseInnerVectorSet : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
+{
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+ class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ {
+ public:
+ inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+// template<typename OtherDerived>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size> >
+{
+ typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+ class ReverseInnerIterator: public MatrixType::ReverseInnerIterator
+ {
+ public:
+ inline ReverseInnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::ReverseInnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size==1);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+      _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+      // This assignment is slow if this vector set is not empty
+ // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+ SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+ // 2 - let's check whether there is enough allocated memory
+ Index nnz = tmp.nonZeros();
+ Index nnz_previous = nonZeros();
+ Index free_size = Index(matrix.data().allocatedSize()) + nnz_previous;
+ Index nnz_head = m_outerStart==0 ? 0 : matrix.outerIndexPtr()[m_outerStart];
+ Index tail = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];
+ Index nnz_tail = matrix.nonZeros() - tail;
+
+ if(nnz>free_size)
+ {
+ // realloc manually to reduce copies
+ typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+
+ matrix.data().swap(newdata);
+ }
+ else
+ {
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(nnz_head + nnz + nnz_tail);
+
+ if(nnz<nnz_previous)
+ {
+ std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ }
+ else
+ {
+ for(Index i=nnz_tail-1; i>=0; --i)
+ {
+ matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
+ matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
+ }
+ }
+
+ std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ }
+
+ // update outer index pointers
+ Index p = nnz_head;
+ for(Index k=0; k<m_outerSize.value(); ++k)
+ {
+ matrix.outerIndexPtr()[m_outerStart+k] = p;
+ p += tmp.innerVector(k).nonZeros();
+ }
+ std::ptrdiff_t offset = nnz - nnz_previous;
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix.outerIndexPtr()[k] += offset;
+ }
+
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ inline const Scalar* valuePtr() const
+ { return m_matrix.valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ inline Scalar* valuePtr()
+ { return m_matrix.const_cast_derived().valuePtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* innerIndexPtr() const
+ { return m_matrix.innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+ inline Index* innerIndexPtr()
+ { return m_matrix.const_cast_derived().innerIndexPtr() + m_matrix.outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* outerIndexPtr() const
+ { return m_matrix.outerIndexPtr() + m_outerStart; }
+ inline Index* outerIndexPtr()
+ { return m_matrix.const_cast_derived().outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ if(m_matrix.isCompressed())
+ return std::size_t(m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix.outerIndexPtr()[m_outerStart]);
+ else if(m_outerSize.value()==0)
+ return 0;
+ else
+ return Map<const Matrix<Index,Size,1> >(m_matrix.innerNonZeroPtr()+m_outerStart, m_outerSize.value()).sum();
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(nonZeros()>0);
+ if(m_matrix.isCompressed())
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
+ else
+ return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+//----------
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a size consecutive rows of \c *this starting at row \a start. For row-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive rows of \c *this starting at row \a start. For row-major matrices only.
+  * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleRows(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of \c *this starting at column \a start. For column-major matrices only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of \c *this starting at column \a start. For column-major matrices only.
+  * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::middleCols(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of \c *this starting at \a outerStart
+  * if \c *this is col-major (resp. row-major).
+  */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of \c *this starting at \a outerStart
+  * if \c *this is col-major (resp. row-major). Read-only.
+  */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_BLOCK_H
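
A short sketch exercising the innerVector/innerVectors and middleCols accessors defined above on a (column-major) SparseMatrix:

    #include <iostream>
    #include <Eigen/SparseCore>

    int main()
    {
      Eigen::SparseMatrix<double> m(4,4);  // column-major: inner vectors are columns
      m.insert(0,1) = 1;
      m.insert(3,1) = 2;
      m.insert(2,3) = 3;
      m.makeCompressed();

      std::cout << m.innerVector(1).nonZeros() << "\n";   // 2 (column 1)
      std::cout << m.middleCols(2,2).nonZeros() << "\n";  // 1 (columns 2 and 3)

      // writing through a block: replace column 1 by a 4x1 sparse vector
      Eigen::SparseMatrix<double> col(4,1);
      col.insert(1,0) = 7;
      m.innerVectors(1,1) = col;
      std::cout << m.innerVector(1).nonZeros() << "\n";   // 1
      return 0;
    }
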
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..d5f97f78fc9
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseBinaryOp.h
@@ -0,0 +1,324 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+namespace Eigen {
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+//                          configuration   returned mode
+//  1 - sparse op dense     product         sparse
+//                          generic         dense
+//  2 - dense op sparse     product         sparse
+//                          generic         dense
+//  3 - sparse op sparse    product         sparse
+//                          generic         sparse
+//  4 - dense op dense      product         dense
+//                          generic         dense
+
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+ typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+ : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+ class InnerIterator;
+ class ReverseInnerIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+ CwiseBinaryOpImpl()
+ {
+ typedef typename internal::traits<Lhs>::StorageKind LhsStorageKind;
+ typedef typename internal::traits<Rhs>::StorageKind RhsStorageKind;
+ EIGEN_STATIC_ASSERT((
+ (!internal::is_same<LhsStorageKind,RhsStorageKind>::value)
+ || ((Lhs::Flags&RowMajorBit) == (Rhs::Flags&RowMajorBit))),
+ THE_STORAGE_ORDER_OF_BOTH_SIDES_MUST_MATCH);
+ }
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+ : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+ public:
+ typedef typename Lhs::Index Index;
+ typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, typename CwiseBinaryOpImpl::Index outer)
+ : Base(binOp.derived(),outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any !
+
+namespace internal {
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+};
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const Index m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..5a50c780303
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseCwiseUnaryOp.h
@@ -0,0 +1,163 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+namespace Eigen {
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ protected:
+ typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+ : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+ protected:
+ const UnaryOp m_functor;
+ private:
+ typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::ReverseInnerIterator
+ : public CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryOpImpl& unaryOp, typename CwiseUnaryOpImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+ { Base::operator--(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryOpImpl::Scalar value() const { return m_functor(Base::value()); }
+
+ protected:
+ const UnaryOp m_functor;
+ private:
+ typename CwiseUnaryOpImpl::Scalar& valueRef();
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+
+ protected:
+ typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename _MatrixTypeNested::ReverseInnerIterator MatrixTypeReverseIterator;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+ : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { Base::operator++(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+ protected:
+ const ViewOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::ReverseInnerIterator
+ : public CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::MatrixTypeReverseIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const CwiseUnaryViewImpl& unaryOp, typename CwiseUnaryViewImpl::Index outer)
+ : Base(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+ { Base::operator--(); return *this; }
+
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar value() const { return m_functor(Base::value()); }
+ EIGEN_STRONG_INLINE typename CwiseUnaryViewImpl::Scalar& valueRef() { return m_functor(Base::valueRef()); }
+
+ protected:
+ const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
new file mode 100644
index 00000000000..6f32940d6c1
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDenseProduct.h
@@ -0,0 +1,300 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+ typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+ typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ typedef Sparse StorageKind;
+ typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+ RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+ RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
+ ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
+ MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
+
+ Flags = Tr ? RowMajorBit : 0,
+
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+ typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+ private:
+
+ typedef typename Traits::LhsNested LhsNested;
+ typedef typename Traits::RhsNested RhsNested;
+ typedef typename Traits::_LhsNested _LhsNested;
+ typedef typename Traits::_RhsNested _RhsNested;
+
+ public:
+
+ class InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+ typedef typename _LhsNested::InnerIterator Base;
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+ {
+ }
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return Transpose ? m_outer : Base::index(); }
+ inline Index col() const { return Transpose ? Base::index() : m_outer; }
+
+ inline Scalar value() const { return Base::value() * m_factor; }
+
+ protected:
+ Index m_outer;
+ Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+};
+
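+// Computes res += alpha * lhs * rhs for a sparse lhs and a dense rhs. The
+// specializations below are selected by the storage order of the sparse lhs and
+// by whether the dense rhs should be processed one column at a time (ColPerCol).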
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,
+ int LhsStorageOrder = ((SparseLhsType::Flags&RowMajorBit)==RowMajorBit) ? RowMajor : ColMajor,
+ bool ColPerCol = ((DenseRhsType::Flags&RowMajorBit)==0) || DenseRhsType::ColsAtCompileTime==1>
+struct sparse_time_dense_product_impl;
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+ Index n = lhs.outerSize();
+ for(Index j=0; j<n; ++j)
+ {
+ typename Res::Scalar tmp(0);
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ tmp += it.value() * rhs.coeff(it.index(),c);
+ res.coeffRef(j,c) += alpha * tmp; // accumulate, consistently with the other specializations
+ }
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, true>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index c=0; c<rhs.cols(); ++c)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Res::Scalar rhs_j = alpha * rhs.coeff(j,c);
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res.coeffRef(it.index(),c) += it.value() * rhs_j;
+ }
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, RowMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Res::RowXpr res_j(res.row(j));
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res_j += (alpha*it.value()) * rhs.row(it.index());
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType>
+struct sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType, ColMajor, false>
+{
+ typedef typename internal::remove_all<SparseLhsType>::type Lhs;
+ typedef typename internal::remove_all<DenseRhsType>::type Rhs;
+ typedef typename internal::remove_all<DenseResType>::type Res;
+ typedef typename Lhs::InnerIterator LhsInnerIterator;
+ typedef typename Lhs::Index Index;
+ static void run(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, typename Res::Scalar alpha)
+ {
+ for(Index j=0; j<lhs.outerSize(); ++j)
+ {
+ typename Rhs::ConstRowXpr rhs_j(rhs.row(j));
+ for(LhsInnerIterator it(lhs,j); it ;++it)
+ res.row(it.index()) += (alpha*it.value()) * rhs_j;
+ }
+ }
+};
+
+template<typename SparseLhsType, typename DenseRhsType, typename DenseResType,typename AlphaType>
+inline void sparse_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
+{
+ sparse_time_dense_product_impl<SparseLhsType,DenseRhsType,DenseResType>::run(lhs, rhs, res, alpha);
+}
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+ : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+ SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ internal::sparse_time_dense_product(m_lhs, m_rhs, dest, alpha);
+ }
+
+ private:
+ SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+ : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+ DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
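+ // Evaluate dense * sparse as the transpose of a sparse * dense product:
+ // dest^T += alpha * rhs^T * lhs^T, which reuses the sparse-lhs kernel above.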
+ Transpose<const _LhsNested> lhs_t(m_lhs);
+ Transpose<const _RhsNested> rhs_t(m_rhs);
+ Transpose<Dest> dest_t(dest);
+ internal::sparse_time_dense_product(rhs_t, lhs_t, dest_t, alpha);
+ }
+
+ private:
+ DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..095bf6863fc
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDiagonalProduct.h
@@ -0,0 +1,184 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+namespace Eigen {
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+//     => each inner vector <=> scalar * sparse vector product
+//     => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//     => each inner vector <=> dense vector * sparse vector cwise product
+//     => again, we can reuse the specialization of CwiseBinaryOp::InnerIterator
+//        for that particular case
+// The two other cases are symmetric.
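+//
+// For instance (an illustrative sketch; D, A, B and n are hypothetical names):
+//   DiagonalMatrix<double,Dynamic> D(n);
+//   SparseMatrix<double,RowMajor> A(n,n);
+//   SparseMatrix<double> B(n,n);
+//   ... = D * A; // case 1: each row of A is scaled by the matching diagonal entry
+//   ... = D * B; // case 2: each column of B is cwise-multiplied by D's diagonal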
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+ MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+ SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = (SparseFlags&RowMajorBit),
+ CoeffReadCost = Dynamic
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int LhsMode, int RhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+ : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+ internal::no_assignment_operator
+{
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+ RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef internal::sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ {}
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+ return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
new file mode 100644
index 00000000000..5c4a593dc01
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseDot.h
@@ -0,0 +1,94 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+namespace Eigen {
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+ eigen_assert(other.size()>0 && "you are using an uninitialized vector");
+
+ typename Derived::InnerIterator i(derived(),0);
+ Scalar res(0);
+ while (i)
+ {
+ res += internal::conj(i.value()) * other.coeff(i.index());
+ ++i;
+ }
+ return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+
+ typedef typename Derived::Nested Nested;
+ typedef typename OtherDerived::Nested OtherNested;
+ typedef typename internal::remove_all<Nested>::type NestedCleaned;
+ typedef typename internal::remove_all<OtherNested>::type OtherNestedCleaned;
+
+ const Nested nthis(derived());
+ const OtherNested nother(other.derived());
+
+ typename NestedCleaned::InnerIterator i(nthis,0);
+ typename OtherNestedCleaned::InnerIterator j(nother,0);
+ Scalar res(0);
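+ // walk the two sorted index lists simultaneously, accumulating only matching indices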
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += internal::conj(i.value()) * j.value();
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+ return internal::real((*this).cwiseAbs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+ return internal::sqrt(squaredNorm());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h b/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h
new file mode 100644
index 00000000000..45f36e9eb90
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseFuzzy.h
@@ -0,0 +1,26 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+// const OtherDerived& other,
+// typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+// const typename internal::nested<Derived,2>::type nested(derived());
+// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+// return (nested - otherNested).cwise().abs2().sum()
+// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
new file mode 100644
index 00000000000..efb774f031b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrix.h
@@ -0,0 +1,1116 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief A versatile sparse matrix representation
+ *
+ * This class implements a more versatile variant of the common \em compressed row/column storage format.
+ * Each column's (resp. row's) non zeros are stored as pairs of a value and its associated row (resp. column) index.
+ * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
+ * space in between the nonzeros of two successive columns (resp. rows) such that insertion of a new non-zero
+ * can be done with limited memory reallocation and copies.
+ *
+ * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
+ * compatible with many libraries.
+ *
+ * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ * \tparam _Index the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
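+ *
+ * A minimal filling sketch (names, sizes and values below are illustrative only):
+ * \code
+ * typedef Eigen::Triplet<double> T;
+ * std::vector<T> entries;
+ * entries.push_back(T(0, 1, 3.0));                      // (row, col, value)
+ * Eigen::SparseMatrix<double> mat(rows, cols);
+ * mat.setFromTriplets(entries.begin(), entries.end());  // sorts entries and sums duplicates
+ * \endcode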
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
+struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
+{
+ typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
+ typedef typename nested<MatrixType>::type MatrixTypeNested;
+ typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+
+ typedef _Scalar Scalar;
+ typedef Dense StorageKind;
+ typedef _Index Index;
+ typedef MatrixXpr XprKind;
+
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = 1,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = 1,
+ Flags = 0,
+ CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
+ };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ typedef internal::CompressedStorage<Scalar,Index> Storage;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
+ Index* m_innerNonZeros; // optional, if null then the data is compressed
+ Storage m_data;
+
+ Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+ const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
+
+ public:
+
+ /** \returns whether \c *this is in compressed form. */
+ inline bool isCompressed() const { return m_innerNonZeros==0; }
+
+ /** \returns the number of rows of the matrix */
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ /** \returns the number of columns of the matrix */
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
+ inline Index innerSize() const { return m_innerSize; }
+ /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
+ inline Index outerSize() const { return m_outerSize; }
+
+ /** \returns a const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline const Scalar* valuePtr() const { return &m_data.value(0); }
+ /** \returns a non-const pointer to the array of values.
+ * This function is aimed at interoperability with other libraries.
+ * \sa innerIndexPtr(), outerIndexPtr() */
+ inline Scalar* valuePtr() { return &m_data.value(0); }
+
+ /** \returns a const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline const Index* innerIndexPtr() const { return &m_data.index(0); }
+ /** \returns a non-const pointer to the array of inner indices.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), outerIndexPtr() */
+ inline Index* innerIndexPtr() { return &m_data.index(0); }
+
+ /** \returns a const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline const Index* outerIndexPtr() const { return m_outerIndex; }
+ /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \sa valuePtr(), innerIndexPtr() */
+ inline Index* outerIndexPtr() { return m_outerIndex; }
+
+ /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
+ /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
+ * This function is aimed at interoperability with other libraries.
+ * \warning it returns the null pointer 0 in compressed mode */
+ inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
+
+ /** \internal */
+ inline Storage& data() { return m_data; }
+ /** \internal */
+ inline const Storage& data() const { return m_data; }
+
+ /** \returns the value of the matrix at position \a row, \a col.
+ * This function returns Scalar(0) if the element is an explicit \em zero */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ return m_data.atInRange(m_outerIndex[outer], end, inner);
+ }
+
+ /** \returns a non-const reference to the value of the matrix at position \a row, \a col
+ *
+ * If the element does not exist then it is inserted via the insert(Index,Index) function
+ * which itself turns the matrix into a non compressed form if that was not the case.
+ *
+ * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
+ * function if the element does not already exist.
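+ *
+ * For instance (illustrative):
+ * \code
+ * mat.coeffRef(i,j) += 42; // updates the (i,j) coefficient, inserting it first if needed
+ * \endcode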
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ if(end<=start)
+ return insert(row,col);
+ const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ if((p<end) && (m_data.index(p)==inner))
+ return m_data.value(p);
+ else
+ return insert(row,col);
+ }
+
+ /** \returns a reference to a new non zero coefficient with coordinates \a row x \a col.
+ * The non zero coefficient must \b not already exist.
+ *
+ * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
+ * mode while reserving room for 2 non zeros per inner vector. It is strongly recommended to first
+ * call reserve(const SizesType &) to reserve a more appropriate number of elements per
+ * inner vector that better matches your scenario.
+ *
+ * This function performs a sorted insertion in O(1) if the elements of each inner vector are
+ * inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
+ *
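+ * A typical insertion session could look as follows (a sketch; the matrix size,
+ * the per-column estimate, and the (i,j,value) triples are illustrative only):
+ * \code
+ * SparseMatrix<double> mat(1000,1000);
+ * mat.reserve(VectorXi::Constant(1000, 6)); // expect about 6 non zeros per column
+ * mat.insert(i,j) = value;                  // (i,j) must not already exist
+ * mat.makeCompressed();                     // optional: back to the compressed format
+ * \endcode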
+ */
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ {
+ if(isCompressed())
+ {
+ reserve(VectorXi::Constant(outerSize(), 2));
+ }
+ return insertUncompressed(row,col);
+ }
+
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ /** Removes all non zeros but keeps the allocated memory */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ if(m_innerNonZeros)
+ memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const
+ {
+ if(m_innerNonZeros)
+ return innerNonZeros().sum();
+ return static_cast<Index>(m_data.size());
+ }
+
+ /** Preallocates \a reserveSize non zeros.
+ *
+ * Precondition: the matrix must be in compressed mode. */
+ inline void reserve(Index reserveSize)
+ {
+ eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
+ m_data.reserve(reserveSize);
+ }
+
+ #ifdef EIGEN_PARSED_BY_DOXYGEN
+ /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
+ *
+ * This function turns the matrix into non-compressed mode */
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes);
+ #else
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
+ {
+ EIGEN_UNUSED_VARIABLE(enableif);
+ reserveInnerVectors(reserveSizes);
+ }
+ template<class SizesType>
+ inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
+ #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
+ typename
+ #endif
+ SizesType::Scalar())
+ {
+ EIGEN_UNUSED_VARIABLE(enableif);
+ reserveInnerVectors(reserveSizes);
+ }
+ #endif // EIGEN_PARSED_BY_DOXYGEN
+ protected:
+ template<class SizesType>
+ inline void reserveInnerVectors(const SizesType& reserveSizes)
+ {
+
+ if(isCompressed())
+ {
+ std::size_t totalReserveSize = 0;
+ // turn the matrix into non-compressed mode
+ m_innerNonZeros = new Index[m_outerSize];
+
+ // temporarily use m_innerNonZeros to hold the new starting points.
+ Index* newOuterIndex = m_innerNonZeros;
+
+ Index count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
+ totalReserveSize += reserveSizes[j];
+ }
+ m_data.reserve(totalReserveSize);
+ std::ptrdiff_t previousOuterIndex = m_outerIndex[m_outerSize];
+ for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ {
+ std::ptrdiff_t innerNNZ = previousOuterIndex - m_outerIndex[j];
+ for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ previousOuterIndex = m_outerIndex[j];
+ m_outerIndex[j] = newOuterIndex[j];
+ m_innerNonZeros[j] = innerNNZ;
+ }
+ m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
+
+ m_data.resize(m_outerIndex[m_outerSize]);
+ }
+ else
+ {
+ Index* newOuterIndex = new Index[m_outerSize+1];
+ Index count = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ newOuterIndex[j] = count;
+ Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
+ Index toReserve = std::max<std::ptrdiff_t>(reserveSizes[j], alreadyReserved);
+ count += toReserve + m_innerNonZeros[j];
+ }
+ newOuterIndex[m_outerSize] = count;
+
+ m_data.resize(count);
+ for(std::ptrdiff_t j=m_outerSize-1; j>=0; --j)
+ {
+ std::ptrdiff_t offset = newOuterIndex[j] - m_outerIndex[j];
+ if(offset>0)
+ {
+ std::ptrdiff_t innerNNZ = m_innerNonZeros[j];
+ for(std::ptrdiff_t i=innerNNZ-1; i>=0; --i)
+ {
+ m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
+ m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
+ }
+ }
+ }
+
+ std::swap(m_outerIndex, newOuterIndex);
+ delete[] newOuterIndex;
+ }
+
+ }
+ public:
+
+ //--- low level purely coherent filling ---
+
+ /** \internal
+ * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+ * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
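+ * A complete session could look like this (a sketch assuming column-major storage
+ * and strictly increasing row indices within each column; rows, cols, i and value
+ * are illustrative):
+ * \code
+ * SparseMatrix<double> mat(rows,cols);
+ * for(int j=0; j<cols; ++j)
+ * {
+ *   mat.startVec(j);               // start inner vector (column) j
+ *   mat.insertBack(i,j) = value;   // one call per non zero, in increasing row order
+ * }
+ * mat.finalize();
+ * \endcode
+ *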
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \internal
+ * \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \internal
+ * \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ /** \internal
+ * Must be called after inserting a set of non zero entries using the low level compressed API.
+ */
+ inline void finalize()
+ {
+ if(isCompressed())
+ {
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+ }
+
+ //---
+
+ template<typename InputIterators>
+ void setFromTriplets(const InputIterators& begin, const InputIterators& end);
+
+ void sumupDuplicates();
+
+ //---
+
+ /** \internal
+ * same as insert(Index,Index) except that the indices are given relative to the storage order */
+ EIGEN_DONT_INLINE Scalar& insertByOuterInner(Index j, Index i)
+ {
+ return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
+ }
+
+ /** Turns the matrix into the \em compressed format.
+ */
+ void makeCompressed()
+ {
+ if(isCompressed())
+ return;
+
+ Index oldStart = m_outerIndex[1];
+ m_outerIndex[1] = m_innerNonZeros[0];
+ for(Index j=1; j<m_outerSize; ++j)
+ {
+ Index nextOldStart = m_outerIndex[j+1];
+ std::ptrdiff_t offset = oldStart - m_outerIndex[j];
+ if(offset>0)
+ {
+ for(Index k=0; k<m_innerNonZeros[j]; ++k)
+ {
+ m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
+ m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
+ }
+ }
+ m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
+ oldStart = nextOldStart;
+ }
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+ m_data.squeeze();
+ }
+
+ /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
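+ *
+ * For instance, a hypothetical functor keeping only the strict upper triangle:
+ * \code
+ * struct KeepUpper {
+ *   bool operator() (const int& row, const int& col, const double&) const
+ *   { return col > row; }
+ * };
+ * mat.prune(KeepUpper());
+ * \endcode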
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
+ // TODO also implement a unit test
+ makeCompressed();
+
+ Index k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new Index [outerSize+1];
+ m_outerSize = outerSize;
+ }
+ if(m_innerNonZeros)
+ {
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** \internal
+ * Resize the nonzero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ // TODO remove this function
+ m_data.resize(size);
+ }
+
+ /** \returns a const expression of the diagonal coefficients */
+ const Diagonal<const SparseMatrix> diagonal() const { return *this; }
+
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ resize(0, 0);
+ }
+
+ /** Constructs a \a rows \c x \a cols empty matrix */
+ inline SparseMatrix(Index rows, Index cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ resize(rows, cols);
+ }
+
+ /** Constructs a sparse matrix from the sparse expression \a other */
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
+
+ /** Copy constructor (it performs a deep copy) */
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
+ {
+ check_template_parameters();
+ *this = other.derived();
+ }
+
+ /** Swaps the content of two sparse matrices of the same type.
+ * This is a fast operation that simply swaps the underlying pointers and parameters. */
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ std::swap(m_innerNonZeros, other.m_innerNonZeros);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ initAssignment(other);
+ if(other.isCompressed())
+ {
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ m_data = other.m_data;
+ }
+ else
+ {
+ Base::operator=(other);
+ }
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ { return Base::operator=(product); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+ { return Base::operator=(other.derived()); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other.derived()); }
+ #endif
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ initAssignment(other.derived());
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two-pass algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, evaluate it into a temporary if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum
+ Index count = 0;
+ VectorXi positions(outerSize());
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ Index tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return Base::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ if(m.isCompressed())
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ else
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+ Index p = m.m_outerIndex[i];
+ Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
+ Index k=p;
+ for (; k<pe; ++k)
+ s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
+ for (; k<m.m_outerIndex[i+1]; ++k)
+ s << "(_,_) ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Outer pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ s << m.m_outerIndex[i] << " ";
+ s << " $" << std::endl;
+ if(!m.isCompressed())
+ {
+ s << "Inner non zeros:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ s << m.m_innerNonZeros[i] << " ";
+ s << " $" << std::endl;
+ }
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ delete[] m_innerNonZeros;
+ }
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** Overloaded for performance */
+ Scalar sum() const;
+#endif
+
+# ifdef EIGEN_SPARSEMATRIX_PLUGIN
+# include EIGEN_SPARSEMATRIX_PLUGIN
+# endif
+
+protected:
+
+ template<typename Other>
+ void initAssignment(const Other& other)
+ {
+ resize(other.rows(), other.cols());
+ if(m_innerNonZeros)
+ {
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ }
+ }
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col)
+ {
+ eigen_assert(isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data; to reduce the number of reallocations
+ // we use a smart resize algorithm based on the current filling ratio.
+ // In addition, we use float to avoid integer overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
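+ // worked example (illustrative): if the first 10 of 100 outer vectors already
+ // hold 50 nonzeros, then nnzEstimate = 50*100/10 = 500 and the raw ratio is
+ // (500-50)/50 = 9, which is clamped into [1.5, 8] just below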
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce multiple minor reallocs when the matrix is almost full
+ // 2) avoid allocating too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+ /** \internal
+ * A vector object that is zero everywhere, except for a single value v at position i */
+ class SingletonVector
+ {
+ Index m_index;
+ Index m_value;
+ public:
+ typedef Index value_type;
+ SingletonVector(Index i, Index v)
+ : m_index(i), m_value(v)
+ {}
+
+ Index operator[](Index i) const { return i==m_index ? m_value : 0; }
+ };
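+
+ // Usage sketch (illustrative): SingletonVector is meant to be passed to
+ // reserve() to grow the capacity of a single inner vector while leaving the
+ // others untouched, e.g.:
+ //   reserve(SingletonVector(outer, 4)); // request 4 slots for inner vector 'outer'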
+
+ /** \internal
+ * \sa insert(Index,Index) */
+ EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col)
+ {
+ eigen_assert(!isCompressed());
+
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ std::ptrdiff_t room = m_outerIndex[outer+1] - m_outerIndex[outer];
+ std::ptrdiff_t innerNNZ = m_innerNonZeros[outer];
+ if(innerNNZ>=room)
+ {
+ // this inner vector is full, we need to reallocate the whole buffer :(
+ reserve(SingletonVector(outer,std::max<std::ptrdiff_t>(2,innerNNZ)));
+ }
+
+ Index startId = m_outerIndex[outer];
+ Index p = startId + m_innerNonZeros[outer];
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_innerNonZeros[outer]++;
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+public:
+ /** \internal
+ * \sa insert(Index,Index) */
+ inline Scalar& insertBackUncompressed(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ eigen_assert(!isCompressed());
+ eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
+
+ Index p = m_outerIndex[outer] + m_innerNonZeros[outer];
+ m_innerNonZeros[outer]++;
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+private:
+ static void check_template_parameters()
+ {
+ EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
+ }
+
+ struct default_prunning_func {
+ default_prunning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
+ {
+ if(mat.isCompressed())
+ m_end = mat.m_outerIndex[outer+1];
+ else
+ m_end = m_id + mat.m_innerNonZeros[outer];
+ }
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ Index m_end;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
+ {
+ if(mat.isCompressed())
+ m_id = mat.m_outerIndex[outer+1];
+ else
+ m_id = m_start + mat.m_innerNonZeros[outer];
+ }
+
+ inline ReverseInnerIterator& operator--() { --m_id; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id-1]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
+
+ inline Index index() const { return m_indices[m_id-1]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+};
+
+namespace internal {
+
+template<typename InputIterator, typename SparseMatrixType>
+void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
+{
+ EIGEN_UNUSED_VARIABLE(Options);
+ enum { IsRowMajor = SparseMatrixType::IsRowMajor };
+ typedef typename SparseMatrixType::Scalar Scalar;
+ typedef typename SparseMatrixType::Index Index;
+ SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
+
+ // pass 1: count the nnz per inner-vector
+ VectorXi wi(trMat.outerSize());
+ wi.setZero();
+ for(InputIterator it(begin); it!=end; ++it)
+ wi(IsRowMajor ? it->col() : it->row())++;
+
+ // pass 2: insert all the elements into trMat
+ trMat.reserve(wi);
+ for(InputIterator it(begin); it!=end; ++it)
+ trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
+
+ // pass 3: sum up the possible duplicate entries
+ trMat.sumupDuplicates();
+
+ // pass 4: transposed copy -> implicit sorting
+ mat = trMat;
+}
+
+}
+
+
+/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
+ *
+ * A \em triplet is a tuple (i,j,value) defining a non-zero element.
+ * The input list of triplets does not have to be sorted, and can contain duplicated elements.
+ * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
+ * This is an \em O(n) operation, with \em n the number of triplet elements.
+ * The initial contents of \c *this are destroyed.
+ * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
+ * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
+ *
+ * The \a InputIterators value_type must provide the following interface:
+ * \code
+ * Scalar value() const; // the value
+ * Index row() const; // the row index i
+ * Index col() const; // the column index j
+ * \endcode
+ * See for instance the Eigen::Triplet template class.
+ *
+ * Here is a typical usage example:
+ * \code
+ typedef Triplet<double> T;
+ std::vector<T> tripletList;
+ tripletList.reserve(estimation_of_entries);
+ for(...)
+ {
+ // ...
+ tripletList.push_back(T(i,j,v_ij));
+ }
+ SparseMatrixType m(rows,cols);
+ m.setFromTriplets(tripletList.begin(), tripletList.end());
+ // m is ready to go!
+ * \endcode
+ *
+ * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
+ * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
+ * be explicitly stored into a std::vector, for instance.
+ */
+template<typename Scalar, int _Options, typename _Index>
+template<typename InputIterators>
+void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
+{
+ internal::set_from_triplets(begin, end, *this);
+}
+
+/** \internal */
+template<typename Scalar, int _Options, typename _Index>
+void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
+{
+ eigen_assert(!isCompressed());
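+ // e.g. (illustrative): if the uncompressed storage holds both (i,j,2) and
+ // (i,j,3), only a single coefficient (i,j,5) remains after this call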
+ // TODO, in practice we should be able to use m_innerNonZeros for that task
+ VectorXi wi(innerSize());
+ wi.fill(-1);
+ Index count = 0;
+ // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
+ for(Index j=0; j<outerSize(); ++j)
+ {
+ Index start = count;
+ Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
+ for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
+ {
+ Index i = m_data.index(k);
+ if(wi(i)>=start)
+ {
+ // we have already seen this entry => accumulate it
+ m_data.value(wi(i)) += m_data.value(k);
+ }
+ else
+ {
+ m_data.value(count) = m_data.value(k);
+ m_data.index(count) = m_data.index(k);
+ wi(i) = count;
+ ++count;
+ }
+ }
+ m_outerIndex[j] = start;
+ }
+ m_outerIndex[m_outerSize] = count;
+
+ // turn the matrix into compressed form
+ delete[] m_innerNonZeros;
+ m_innerNonZeros = 0;
+ m_data.resize(m_outerIndex[m_outerSize]);
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
new file mode 100644
index 00000000000..9a1258097fe
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseMatrixBase.h
@@ -0,0 +1,458 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ *
+ * \class SparseMatrixBase
+ *
+ * \brief Base class of any sparse matrices or sparse expressions
+ *
+ * \tparam Derived
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
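+ *
+ * For example (an illustrative sketch; \c SparseMatrixBaseAddons.h names a
+ * hypothetical user-provided header):
+ * \code
+ * #define EIGEN_SPARSEMATRIXBASE_PLUGIN "SparseMatrixBaseAddons.h"
+ * #include <Eigen/SparseCore>
+ * \endcode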
+ */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+ public:
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+ typedef typename internal::add_const_on_value_type_if_arithmetic<
+ typename internal::packet_traits<Scalar>::type
+ >::type PacketReturnType;
+
+ typedef SparseMatrixBase StorageBaseType;
+ typedef EigenBase<Derived> Base;
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ other.derived().evalTo(derived());
+ return derived();
+ }
+
+ enum {
+
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+ #endif
+ };
+
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+ Transpose<const Derived>
+ >::type AdjointReturnType;
+
+
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \internal the return type of coeff()
+ */
+ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+ /** \internal Represents a matrix with all coefficients equal to one another */
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+# include EIGEN_SPARSEMATRIXBASE_PLUGIN
+# endif
+# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+
+ /** \returns the number of rows. \sa cols() */
+ inline Index rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows() */
+ inline Index cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(). */
+ inline Index size() const { return rows() * cols(); }
+ /** \returns the number of nonzero coefficients which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const { return derived().nonZeros(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+ inline bool isVector() const { return rows()==1 || cols()==1; }
+ /** \returns the size of the storage major dimension,
+ * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ /** \returns the size of the inner dimension according to the storage order,
+ * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+
+ bool isRValue() const { return m_isRValue; }
+ Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+ SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(derived());
+ return derived();
+ }
+
+
+ template<typename OtherDerived>
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ return assign(other.derived());
+ }
+
+ inline Derived& operator=(const Derived& other)
+ {
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ return assign(other.derived());
+ }
+
+ protected:
+
+ template<typename OtherDerived>
+ inline Derived& assign(const OtherDerived& other)
+ {
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(other.rows(), other.cols());
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ derived().startVec(j);
+ for (typename OtherDerived::InnerIterator it(other, j); it; ++it)
+ {
+ Scalar v = it.value();
+ derived().insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ derived().finalize();
+ }
+ else
+ {
+ assignGeneric(other);
+ }
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other)
+ {
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = other.outerSize();
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+ // thanks to shallow copies, we always eval to a temporary
+ Derived temp(other.rows(), other.cols());
+
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ derived() = temp.markAsRValue();
+ }
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+ {
+ typedef typename Derived::Nested Nested;
+ typedef typename internal::remove_all<Nested>::type NestedCleaned;
+
+ if (Flags&RowMajorBit)
+ {
+ const Nested nm(m.derived());
+ for (Index row=0; row<nm.outerSize(); ++row)
+ {
+ Index col = 0;
+ for (typename NestedCleaned::InnerIterator it(nm.derived(), row); it; ++it)
+ {
+ for ( ; col<it.index(); ++col)
+ s << "0 ";
+ s << it.value() << " ";
+ ++col;
+ }
+ for ( ; col<m.cols(); ++col)
+ s << "0 ";
+ s << std::endl;
+ }
+ }
+ else
+ {
+ const Nested nm(m.derived());
+ if (m.cols() == 1) {
+ Index row = 0;
+ for (typename NestedCleaned::InnerIterator it(nm.derived(), 0); it; ++it)
+ {
+ for ( ; row<it.index(); ++row)
+ s << "0" << std::endl;
+ s << it.value() << std::endl;
+ ++row;
+ }
+ for ( ; row<m.rows(); ++row)
+ s << "0" << std::endl;
+ }
+ else
+ {
+ SparseMatrix<Scalar, RowMajorBit> trans = m;
+ s << static_cast<const SparseMatrixBase<SparseMatrix<Scalar, RowMajorBit> >&>(trans);
+ }
+ }
+ return s;
+ }
+
+ template<typename OtherDerived>
+ Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+ Derived& operator*=(const Scalar& other);
+ Derived& operator/=(const Scalar& other);
+
+ #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::scalar_product_traits< \
+ typename internal::traits<Derived>::Scalar, \
+ typename internal::traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ Derived, \
+ OtherDerived \
+ >
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const SparseDiagonalProduct<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const;
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const SparseDiagonalProduct<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ /** dense * sparse (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived> friend
+ const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+ operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+ { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ /** sparse * dense (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived>
+ const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ /** \returns an expression of P H P^-1 where H is the matrix represented by \c *this */
+ SparseSymmetricPermutationProduct<Derived,Upper|Lower> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<Derived,Upper|Lower>(derived(), perm);
+ }
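+
+ // Usage sketch (illustrative): with P a PermutationMatrix and A a sparse
+ // matrix, assigning the returned expression to a sparse matrix evaluates
+ // the symmetric permutation explicitly:
+ //   SparseMatrix<double> B = A.twistedBy(P); // B = P * A * P^-1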
+
+ template<typename OtherDerived>
+ Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+ #ifdef EIGEN2_SUPPORT
+ // deprecated
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ // deprecated
+ template<typename OtherDerived>
+ void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+ #endif // EIGEN2_SUPPORT
+
+ template<int Mode>
+ inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+ template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+ template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+
+ Transpose<Derived> transpose() { return derived(); }
+ const Transpose<const Derived> transpose() const { return derived(); }
+ const AdjointReturnType adjoint() const { return transpose(); }
+
+ // sub-vector
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
+ // set of sub-vectors
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+
+ SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> middleRows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> middleCols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
+ {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other.toDense(),prec); }
+
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other,prec); }
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ inline const typename internal::eval<Derived>::type eval() const
+ { return typename internal::eval<Derived>::type(derived()); }
+
+ Scalar sum() const;
+
+ protected:
+
+ bool m_isRValue;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
new file mode 100644
index 00000000000..b897b7595b5
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparsePermutation.h
@@ -0,0 +1,148 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_PERMUTATION_H
+#define EIGEN_SPARSE_PERMUTATION_H
+
+// This file implements sparse * permutation products
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct traits<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+ typedef typename MatrixTypeNestedCleaned::Index Index;
+ enum {
+ SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+ MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+ };
+
+ typedef typename internal::conditional<MoveOuter,
+ SparseMatrix<Scalar,SrcStorageOrder,Index>,
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> >::type ReturnType;
+};
+
+template<typename PermutationType, typename MatrixType, int Side, bool Transposed>
+struct permut_sparsematrix_product_retval
+ : public ReturnByValue<permut_sparsematrix_product_retval<PermutationType, MatrixType, Side, Transposed> >
+{
+ typedef typename remove_all<typename MatrixType::Nested>::type MatrixTypeNestedCleaned;
+ typedef typename MatrixTypeNestedCleaned::Scalar Scalar;
+ typedef typename MatrixTypeNestedCleaned::Index Index;
+
+ enum {
+ SrcStorageOrder = MatrixTypeNestedCleaned::Flags&RowMajorBit ? RowMajor : ColMajor,
+ MoveOuter = SrcStorageOrder==RowMajor ? Side==OnTheLeft : Side==OnTheRight
+ };
+
+ permut_sparsematrix_product_retval(const PermutationType& perm, const MatrixType& matrix)
+ : m_permutation(perm), m_matrix(matrix)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename Dest> inline void evalTo(Dest& dst) const
+ {
+ if(MoveOuter)
+ {
+ SparseMatrix<Scalar,SrcStorageOrder,Index> tmp(m_matrix.rows(), m_matrix.cols());
+ VectorXi sizes(m_matrix.outerSize());
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ {
+ Index jp = m_permutation.indices().coeff(j);
+ sizes[((Side==OnTheLeft) ^ Transposed) ? jp : j] = m_matrix.innerVector(((Side==OnTheRight) ^ Transposed) ? jp : j).size();
+ }
+ tmp.reserve(sizes);
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ {
+ Index jp = m_permutation.indices().coeff(j);
+ Index jsrc = ((Side==OnTheRight) ^ Transposed) ? jp : j;
+ Index jdst = ((Side==OnTheLeft) ^ Transposed) ? jp : j;
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,jsrc); it; ++it)
+ tmp.insertByOuterInner(jdst,it.index()) = it.value();
+ }
+ dst = tmp;
+ }
+ else
+ {
+ SparseMatrix<Scalar,int(SrcStorageOrder)==RowMajor?ColMajor:RowMajor,Index> tmp(m_matrix.rows(), m_matrix.cols());
+ VectorXi sizes(tmp.outerSize());
+ sizes.setZero();
+ PermutationMatrix<Dynamic,Dynamic,Index> perm;
+ if((Side==OnTheLeft) ^ Transposed)
+ perm = m_permutation;
+ else
+ perm = m_permutation.transpose();
+
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+ sizes[perm.indices().coeff(it.index())]++;
+ tmp.reserve(sizes);
+ for(Index j=0; j<m_matrix.outerSize(); ++j)
+ for(typename MatrixTypeNestedCleaned::InnerIterator it(m_matrix,j); it; ++it)
+ tmp.insertByOuterInner(perm.indices().coeff(it.index()),j) = it.value();
+ dst = tmp;
+ }
+ }
+
+ protected:
+ const PermutationType& m_permutation;
+ typename MatrixType::Nested m_matrix;
+};
+
+}
+
+
+
+/** \returns the matrix with the permutation applied to the columns
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const PermutationBase<PermDerived>& perm)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, false>(perm, matrix.derived());
+}
+
+/** \returns the matrix with the permutation applied to the rows
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>
+operator*( const PermutationBase<PermDerived>& perm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, false>(perm, matrix.derived());
+}
+
+
+
+/** \returns the matrix with the inverse permutation applied to the columns.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>
+operator*(const SparseMatrixBase<SparseDerived>& matrix, const Transpose<PermutationBase<PermDerived> >& tperm)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheRight, true>(tperm.nestedPermutation(), matrix.derived());
+}
+
+/** \returns the matrix with the inverse permutation applied to the rows.
+ */
+template<typename SparseDerived, typename PermDerived>
+inline const internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>
+operator*(const Transpose<PermutationBase<PermDerived> >& tperm, const SparseMatrixBase<SparseDerived>& matrix)
+{
+ return internal::permut_sparsematrix_product_retval<PermutationBase<PermDerived>, SparseDerived, OnTheLeft, true>(tperm.nestedPermutation(), matrix.derived());
+}
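+
+// Usage sketch for the four overloads above (illustrative; P, A, B, n are
+// placeholder names):
+//   PermutationMatrix<Dynamic,Dynamic,int> P(n);
+//   SparseMatrix<double> A(n,n), B;
+//   B = P * A; // permute the rows of A
+//   B = A * P; // permute the columns of A
+// The Transpose<PermutationBase> overloads apply the inverse permutation instead.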
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_PERMUTATION_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
new file mode 100644
index 00000000000..6a555b83434
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseProduct.h
@@ -0,0 +1,186 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+namespace Eigen {
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+ typedef typename internal::traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
+
+ typedef typename internal::conditional<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+ typedef typename internal::conditional<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+ typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ typedef MatrixXpr XprKind;
+ // clean the nested types:
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseSparseProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+ private:
+
+ typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs), m_tolerance(0), m_conservative(true)
+ {
+ init();
+ }
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs, RealScalar tolerance)
+ : m_lhs(lhs), m_rhs(rhs), m_tolerance(tolerance), m_conservative(false)
+ {
+ init();
+ }
+
+ SparseSparseProduct pruned(Scalar reference = 0, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision()) const
+ {
+ return SparseSparseProduct(m_lhs,m_rhs,internal::abs(reference)*epsilon);
+ }
+
+ template<typename Dest>
+ void evalTo(Dest& result) const
+ {
+ if(m_conservative)
+ internal::conservative_sparse_sparse_product_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result);
+ else
+ internal::sparse_sparse_product_with_pruning_selector<_LhsNested, _RhsNested, Dest>::run(lhs(),rhs(),result,m_tolerance);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ void init()
+ {
+ eigen_assert(m_lhs.cols() == m_rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+ RealScalar m_tolerance;
+ bool m_conservative;
+};
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+ product.evalTo(derived());
+ return derived();
+}
+
+/** \returns an expression of the product of two sparse matrices.
+ * By default, a conservative product preserving the symbolic non zeros is performed.
+ * The automatic pruning of the small values can be achieved by calling the pruned() function
+ * in which case a totally different product algorithm is employed:
+ * \code
+ * C = (A*B).pruned(); // suppress numerical zeros (exact)
+ * C = (A*B).pruned(ref);
+ * C = (A*B).pruned(ref,epsilon);
+ * \endcode
+ * where \c ref is a meaningful non zero reference value.
+ * */
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h b/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h
new file mode 100644
index 00000000000..f3da93a71d4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseRedux.h
@@ -0,0 +1,45 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+namespace Eigen {
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using a non-initialized matrix");
+ Scalar res(0);
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using a non-initialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using a non-initialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
new file mode 100644
index 00000000000..86ec0a6c5e2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSelfAdjointView.h
@@ -0,0 +1,480 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ * \class SparseSelfAdjointView
+ *
+ * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+ *
+ * \param MatrixType the type of the sparse matrix storing the coefficients
+ * \param UpLo can be either \c #Lower or \c #Upper
+ *
+ * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+ * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+ * and most of the time this is the only way that it is used.
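+ *
+ * A typical use is the product of a sparse selfadjoint matrix, of which only
+ * one triangle is stored, with a dense vector (illustrative sketch):
+ * \code
+ * SparseMatrix<double> A;  // only the lower triangle of A is referenced
+ * VectorXd x, b;
+ * b = A.selfadjointView<Lower>() * x;
+ * \endcode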
+ *
+ * \sa SparseMatrixBase::selfadjointView()
+ */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+namespace internal {
+
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {
+ eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \internal \returns a reference to the nested matrix */
+ const _MatrixTypeNested& matrix() const { return m_matrix; }
+ _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+ /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ }
+
+ /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
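+ *
+ * For instance (illustrative sketch):
+ * \code
+ * SparseMatrix<double> A;
+ * SparseVector<double> u;
+ * A.selfadjointView<Lower>().rankUpdate(u); // updates the stored lower triangle of A
+ * \endcode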
+ */
+ template<typename DerivedU>
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+ /** \internal triggered by sparse_matrix = SparseSelfAdjointView; */
+ template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ }
+
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
+ {
+ // TODO directly evaluate into _dest;
+ SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ _dest = tmp;
+ }
+
+ /** \returns an expression of P H P^-1 */
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ }
+
+ template<typename SrcMatrixType,int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+ {
+ permutedMatrix.evalTo(*this);
+ return *this;
+ }
+
+
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
+ {
+ PermutationMatrix<Dynamic> pnull;
+ return *this = src.twistedBy(pnull);
+ }
+
+ template<typename SrcMatrixType,unsigned int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
+ {
+ PermutationMatrix<Dynamic> pnull;
+ return *this = src.twistedBy(pnull);
+ }
+
+
+ // const SparseLLT<PlainObject, UpLo> llt() const;
+ // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+ protected:
+
+ typename MatrixType::Nested m_matrix;
+ mutable VectorI m_countPerRow;
+ mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+ SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ if(alpha==Scalar(0))
+ m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ else
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+ return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+ SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // TODO use alpha
+ eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((UpLo&(Upper|Lower))==(Upper|Lower))
+ || ( (UpLo&Upper) && !LhsIsRowMajor)
+ || ( (UpLo&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ LhsInnerIterator i(m_lhs,j);
+ if (ProcessSecondHalf)
+ {
+ while (i && i.index()<j) ++i;
+ if(i && i.index()==j)
+ {
+ dest.row(j) += i.value() * m_rhs.row(j);
+ ++i;
+ }
+ }
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ typename Lhs::Scalar v = i.value();
+ dest.row(a) += (v) * m_rhs.row(b);
+ dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ dest.row(j) += i.value() * m_rhs.row(j);
+ }
+ }
+
+ private:
+ SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+ DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ {
+ // TODO
+ }
+
+ private:
+ DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+
+ Dest& dest(_dest.derived());
+ enum {
+ StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+ };
+
+ Index size = mat.rows();
+ VectorI count;
+ count.resize(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index r = it.row();
+ Index c = it.col();
+ Index ip = perm ? perm[i] : i;
+ if(UpLo==(Upper|Lower))
+ count[StorageOrderMatch ? jp : ip]++;
+ else if(r==c)
+ count[ip]++;
+ else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
+ {
+ count[ip]++;
+ count[jp]++;
+ }
+ }
+ }
+ Index nnz = count.sum();
+
+ // reserve space
+ dest.resizeNonZeros(nnz);
+ dest.outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+ for(Index j=0; j<size; ++j)
+ count[j] = dest.outerIndexPtr()[j];
+
+ // copy data
+ for(Index j = 0; j<size; ++j)
+ {
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index r = it.row();
+ Index c = it.col();
+
+ Index jp = perm ? perm[j] : j;
+ Index ip = perm ? perm[i] : i;
+
+ if(UpLo==(Upper|Lower))
+ {
+ Index k = count[StorageOrderMatch ? jp : ip]++;
+ dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
+ dest.valuePtr()[k] = it.value();
+ }
+ else if(r==c)
+ {
+ Index k = count[ip]++;
+ dest.innerIndexPtr()[k] = ip;
+ dest.valuePtr()[k] = it.value();
+ }
+ else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
+ {
+ if(!StorageOrderMatch)
+ std::swap(ip,jp);
+ Index k = count[jp]++;
+ dest.innerIndexPtr()[k] = ip;
+ dest.valuePtr()[k] = it.value();
+ k = count[ip]++;
+ dest.innerIndexPtr()[k] = jp;
+ dest.valuePtr()[k] = internal::conj(it.value());
+ }
+ }
+ }
+}
+
+template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ enum {
+ SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
+ StorageOrderMatch = int(SrcOrder) == int(DstOrder),
+ DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
+ SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
+ };
+
+ Index size = mat.rows();
+ VectorI count(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ continue;
+
+ Index ip = perm ? perm[i] : i;
+ count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ }
+ }
+ dest.outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
+ dest.resizeNonZeros(dest.outerIndexPtr()[size]);
+ for(Index j=0; j<size; ++j)
+ count[j] = dest.outerIndexPtr()[j];
+
+ for(Index j = 0; j<size; ++j)
+ {
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
+ continue;
+
+ Index jp = perm ? perm[j] : j;
+      Index ip = perm ? perm[i] : i;
+
+ Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
+
+ if(!StorageOrderMatch) std::swap(ip,jp);
+ if( ((int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp)))
+ dest.valuePtr()[k] = conj(it.value());
+ else
+ dest.valuePtr()[k] = it.value();
+ }
+ }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+ public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ protected:
+ typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
+ public:
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+ : m_matrix(mat), m_perm(perm)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename DestScalar, int Options, typename DstIndex>
+ void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ }
+
+ template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ {
+ internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ }
+
+ protected:
+ MatrixTypeNested m_matrix;
+ const Perm& m_perm;
+
+};
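+
+// A hedged usage sketch: such products are normally obtained through
+// SparseSelfAdjointView::twistedBy() rather than constructed directly
+// (A, perm and n below are placeholders):
+//   Eigen::SparseMatrix<double> A(n,n);                     // only the Lower part is referenced
+//   Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> perm(n);
+//   Eigen::SparseMatrix<double> B(n,n);
+//   B = A.selfadjointView<Eigen::Lower>().twistedBy(perm);  // B = P A P^-1, full storage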
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
new file mode 100644
index 00000000000..2438ac573d0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseSparseProductWithPruning.h
@@ -0,0 +1,149 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+#define EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
+
+namespace Eigen {
+
+namespace internal {
+
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_sparse_product_with_pruning_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res, typename ResultType::RealScalar tolerance)
+{
+ // return sparse_sparse_product_with_pruning_impl2(lhs,rhs,res);
+
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar,Index> tempVector(rows);
+
+  // Estimate the number of non-zero entries:
+  // given a rhs column containing Y non-zeros, we assume that the corresponding Y columns
+  // of the lhs differ from each other by about one non-zero on average, so the product of
+  // one rhs column with the lhs has about X+Y non-zeros, where X is the average number of
+  // non-zeros per column of the lhs.
+  // Therefore, we have nnz(lhs*rhs) = nnz(lhs) + nnz(rhs).
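+  // (e.g. with nnz(lhs)=1000 and nnz(rhs)=400 we reserve room for 1400 non zeros.)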
+ Index estimated_nnz_prod = lhs.nonZeros() + rhs.nonZeros();
+
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ res.reserve(estimated_nnz_prod);
+ double ratioColRes = double(estimated_nnz_prod)/double(lhs.rows()*rhs.cols());
+ for (Index j=0; j<cols; ++j)
+ {
+    // FIXME: the commented-out line below would give a more accurate determination of
+    // the nnz ratio for the current column j of res:
+    //double ratioColRes = (double(rhs.innerVector(j).nonZeros()) + double(lhs.nonZeros())/double(lhs.cols()))/double(lhs.rows());
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ res.startVec(j);
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector,tolerance); it; ++it)
+ res.insertBackByOuterInner(j,it.index()) = it.value();
+ }
+ res.finalize();
+}
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_sparse_product_with_pruning_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+ typedef typename ResultType::RealScalar RealScalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res, tolerance);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res, tolerance);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ // let's transpose the product to get a column x column product
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ internal::sparse_sparse_product_with_pruning_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res, tolerance);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_sparse_product_with_pruning_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ typedef typename ResultType::RealScalar RealScalar;
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res, RealScalar tolerance)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix colLhs(lhs);
+ ColMajorMatrix colRhs(rhs);
+ internal::sparse_sparse_product_with_pruning_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res, tolerance);
+
+ // let's transpose the product to get a column x column product
+// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+// SparseTemporaryType _res(res.cols(), res.rows());
+// sparse_sparse_product_with_pruning_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+// res = _res.transpose();
+ }
+};
+
+// NOTE the two remaining cases (col row *) must never occur since they are caught
+// by ProductReturnType, which transforms them into (col col *) products by evaluating the rhs.
+
+} // end namespace internal
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSESPARSEPRODUCTWITHPRUNING_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
new file mode 100644
index 00000000000..273f9de688f
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTranspose.h
@@ -0,0 +1,61 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+namespace Eigen {
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+// NOTE: VC10 triggers an ICE if typename TransposeImpl<MatrixType,Sparse>:: is not put in front of Index,
+// and a typedef typename TransposeImpl<MatrixType,Sparse>::Index Index;
+// does not fix the issue.
+// An alternative would be to define the nested classes in the parent class itself.
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+ : public _MatrixTypeNested::InnerIterator
+{
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+ : Base(trans.derived().nestedExpression(), outer)
+ {}
+ inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+ : public _MatrixTypeNested::ReverseInnerIterator
+{
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, typename TransposeImpl<MatrixType,Sparse>::Index outer)
+ : Base(xpr.derived().nestedExpression(), outer)
+ {}
+ inline typename TransposeImpl<MatrixType,Sparse>::Index row() const { return Base::col(); }
+ inline typename TransposeImpl<MatrixType,Sparse>::Index col() const { return Base::row(); }
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
new file mode 100644
index 00000000000..477e4bd94b0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseTriangularView.h
@@ -0,0 +1,164 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+ : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+ enum { SkipFirst = ((Mode&Lower) && !(MatrixType::Flags&RowMajorBit))
+ || ((Mode&Upper) && (MatrixType::Flags&RowMajorBit)),
+ SkipLast = !SkipFirst,
+ HasUnitDiag = (Mode&UnitDiag) ? 1 : 0
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_reference<MatrixTypeNested>::type MatrixTypeNestedNonRef;
+ typedef typename internal::remove_all<MatrixTypeNested>::type MatrixTypeNestedCleaned;
+
+ inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const MatrixTypeNestedCleaned& nestedExpression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solve(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+ protected:
+ MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixTypeNestedCleaned::InnerIterator
+{
+ typedef typename MatrixTypeNestedCleaned::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer), m_returnOne(false)
+ {
+ if(SkipFirst)
+ {
+ while((*this) && (HasUnitDiag ? this->index()<=outer : this->index()<outer))
+ Base::operator++();
+ if(HasUnitDiag)
+ m_returnOne = true;
+ }
+ else if(HasUnitDiag && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ if(HasUnitDiag && m_returnOne)
+ m_returnOne = false;
+ else
+ {
+ Base::operator++();
+ if(HasUnitDiag && (!SkipFirst) && ((!Base::operator bool()) || Base::index()>=Base::outer()))
+ {
+ if((!SkipFirst) && Base::operator bool())
+ Base::operator++();
+ m_returnOne = true;
+ }
+ }
+ return *this;
+ }
+
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+ inline Index index() const
+ {
+ if(HasUnitDiag && m_returnOne) return Base::outer();
+ else return Base::index();
+ }
+ inline Scalar value() const
+ {
+ if(HasUnitDiag && m_returnOne) return Scalar(1);
+ else return Base::value();
+ }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ if(HasUnitDiag && m_returnOne)
+ return true;
+ return (SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer()));
+ }
+ protected:
+ bool m_returnOne;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::ReverseInnerIterator : public MatrixTypeNestedCleaned::ReverseInnerIterator
+{
+ typedef typename MatrixTypeNestedCleaned::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer)
+ {
+      eigen_assert((!HasUnitDiag) && "ReverseInnerIterator does not yet support triangular views with a unit diagonal");
+ if(SkipLast)
+ while((*this) && this->index()>outer)
+ --(*this);
+ }
+
+    EIGEN_STRONG_INLINE ReverseInnerIterator& operator--()
+    { Base::operator--(); return *this; }
+
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ return SkipLast ? Base::operator bool() : (Base::operator bool() && this->index() >= this->outer());
+ }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
new file mode 100644
index 00000000000..6062a086ff7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseUtil.h
@@ -0,0 +1,173 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+namespace Eigen {
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
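+
+// For illustration, EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(MySparse, +=) expands
+// roughly to the following two overloads (MySparse is a hypothetical derived class):
+//   template<typename OtherDerived>
+//   EIGEN_STRONG_INLINE MySparse& operator+=(const Eigen::SparseMatrixBase<OtherDerived>& other)
+//   { return Base::operator+=(other.derived()); }
+//   EIGEN_STRONG_INLINE MySparse& operator+=(const MySparse& other)
+//   { return Base::operator+=(other); }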
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived >::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived >::type Nested; \
+ typedef typename Eigen::internal::traits<Derived >::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived >::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived >::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived >::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived >::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived >::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived >)
+
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename MatrixType, int Mode> class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs> class SparseSparseProduct;
+template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+template<typename MatrixType,int UpLo> class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename T,int Rows,int Cols> struct sparse_eval;
+
+template<typename T> struct eval<T,Sparse>
+ : public sparse_eval<T, traits<T>::RowsAtCompileTime,traits<T>::ColsAtCompileTime>
+{};
+
+template<typename T,int Cols> struct sparse_eval<T,1,Cols> {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags| RowMajorBit };
+ public:
+ typedef SparseVector<_Scalar, _Flags> type;
+};
+
+template<typename T,int Rows> struct sparse_eval<T,Rows,1> {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags & (~RowMajorBit) };
+ public:
+ typedef SparseVector<_Scalar, _Flags> type;
+};
+
+template<typename T,int Rows,int Cols> struct sparse_eval {
+ typedef typename traits<T>::Scalar _Scalar;
+ enum { _Flags = traits<T>::Flags };
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+template<typename T> struct sparse_eval<T,1,1> {
+ typedef typename traits<T>::Scalar _Scalar;
+ public:
+ typedef Matrix<_Scalar, 1, 1> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+/** \ingroup SparseCore_Module
+ *
+ * \class Triplet
+ *
+ * \brief A small structure to hold a non zero as a triplet (i,j,value).
+ *
+ * \sa SparseMatrix::setFromTriplets()
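+ *
+ * A minimal usage sketch, where \c rows, \c cols and the coefficients are placeholders:
+ * \code
+ * std::vector<Eigen::Triplet<double> > triplets;
+ * triplets.push_back(Eigen::Triplet<double>(0, 0,  3.0));
+ * triplets.push_back(Eigen::Triplet<double>(1, 2, -1.5));
+ * Eigen::SparseMatrix<double> A(rows, cols);
+ * A.setFromTriplets(triplets.begin(), triplets.end()); // duplicate (i,j) pairs are summed
+ * \endcode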
+ */
+template<typename Scalar, typename Index=unsigned int>
+class Triplet
+{
+public:
+ Triplet() : m_row(0), m_col(0), m_value(0) {}
+
+ Triplet(const Index& i, const Index& j, const Scalar& v = Scalar(0))
+ : m_row(i), m_col(j), m_value(v)
+ {}
+
+ /** \returns the row index of the element */
+ const Index& row() const { return m_row; }
+
+ /** \returns the column index of the element */
+ const Index& col() const { return m_col; }
+
+ /** \returns the value of the element */
+ const Scalar& value() const { return m_value; }
+protected:
+ Index m_row, m_col;
+ Scalar m_value;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEUTIL_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
new file mode 100644
index 00000000000..c952f654038
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseVector.h
@@ -0,0 +1,398 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+namespace Eigen {
+
+/** \ingroup SparseCore_Module
+ * \class SparseVector
+ *
+ * \brief a sparse vector class
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
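+ *
+ * A minimal usage sketch (sizes and values are illustrative only):
+ * \code
+ * Eigen::SparseVector<double> v(1000);  // all coefficients are implicitly zero
+ * v.insert(42) = 3.14;                  // sorted insertion of a new non zero
+ * v.coeffRef(42) += 1.0;                // binary-search access to an existing one
+ * for(Eigen::SparseVector<double>::InnerIterator it(v); it; ++it)
+ *   std::cout << it.index() << " -> " << it.value() << "\n";
+ * \endcode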
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsColVector = (_Options & RowMajorBit) ? 0 : 1,
+
+ RowsAtCompileTime = IsColVector ? Dynamic : 1,
+ ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ Flags = _Options | NestByRefBit | LvalueBit | (IsColVector ? 0 : RowMajorBit),
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+ : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+
+  public:
+
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+ enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+
+ enum {
+ Options = _Options
+ };
+
+ internal::CompressedStorage<Scalar,Index> m_data;
+ Index m_size;
+
+ internal::CompressedStorage<Scalar,Index>& _data() { return m_data; }
+    const internal::CompressedStorage<Scalar,Index>& _data() const { return m_data; }
+
+ public:
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+
+ EIGEN_STRONG_INLINE const Scalar* valuePtr() const { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE Scalar* valuePtr() { return &m_data.value(0); }
+
+ EIGEN_STRONG_INLINE const Index* innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* innerIndexPtr() { return &m_data.index(0); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+ return coeff(IsColVector ? row : col);
+ }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_assert((IsColVector ? col : row)==0);
+      return coeffRef(IsColVector ? row : col); // delegate to the index-based accessor
+    }
+
+    /** \returns a reference to the coefficient value at the given index \a i.
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+      * exist yet, then a sorted insertion into a sequential buffer is performed.
+      *
+      * This insertion might be very costly if the number of nonzeros above \a i is large.
+      */
+ inline Scalar& coeffRef(Index i)
+ {
+ return m_data.atWithInsertion(i);
+ }
+
+ public:
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline void setZero() { m_data.clear(); }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ inline void startVec(Index outer)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ return insertBack(inner);
+ }
+ inline Scalar& insertBack(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ Index inner = IsColVector ? row : col;
+ Index outer = IsColVector ? col : row;
+ eigen_assert(outer==0);
+ return insert(inner);
+ }
+ Scalar& insert(Index i)
+ {
+ Index startId = 0;
+ Index p = Index(m_data.size()) - 1;
+ // TODO smart realloc
+ m_data.resize(p+2,1);
+
+ while ( (p >= startId) && (m_data.index(p) > i) )
+ {
+ m_data.index(p+1) = m_data.index(p);
+ m_data.value(p+1) = m_data.value(p);
+ --p;
+ }
+ m_data.index(p+1) = i;
+ m_data.value(p+1) = 0;
+ return m_data.value(p+1);
+ }
+
+    /** Preallocates room for \a reserveSize non zeros */
+    inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+ inline void finalize() {}
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ m_data.prune(reference,epsilon);
+ }
+
+ void resize(Index rows, Index cols)
+ {
+ eigen_assert(rows==1 || cols==1);
+ resize(IsColVector ? rows : cols);
+ }
+
+ void resize(Index newSize)
+ {
+ m_size = newSize;
+ m_data.clear();
+ }
+
+ void resizeNonZeros(Index size) { m_data.resize(size); }
+
+ inline SparseVector() : m_size(0) { resize(0); }
+
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+
+ template<typename OtherDerived>
+ inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseVector(const SparseVector& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseVector& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseVector& operator=(const SparseVector& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.size());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+ return assign(other.transpose());
+ else
+ return assign(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ {
+ return Base::operator=(product);
+ }
+ #endif
+
+ friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+ {
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ s << std::endl;
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseVector() {}
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve() */
+ EIGEN_DEPRECATED void startFill(Index reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insert(Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index i)
+ {
+ return insert(i);
+ }
+
+ /** \deprecated use finalize() */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_SPARSEVECTOR_PLUGIN
+# include EIGEN_SPARSEVECTOR_PLUGIN
+# endif
+
+protected:
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseVector& assign(const SparseMatrixBase<OtherDerived>& _other)
+ {
+ const OtherDerived& other(_other.derived());
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if(needToTranspose)
+ {
+ Index size = other.size();
+ Index nnz = other.nonZeros();
+ resize(size);
+ reserve(nnz);
+ for(Index i=0; i<size; ++i)
+ {
+ typename OtherDerived::InnerIterator it(other, i);
+ if(it)
+ insert(i) = it.value();
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return Base::operator=(other);
+ }
+ }
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ InnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const internal::CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_end;
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::ReverseInnerIterator
+{
+ public:
+ ReverseInnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+ {
+ EIGEN_UNUSED_VARIABLE(outer);
+ eigen_assert(outer==0);
+ }
+
+ ReverseInnerIterator(const internal::CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(static_cast<Index>(m_data.size())), m_start(0)
+ {}
+
+ inline ReverseInnerIterator& operator--() { m_id--; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id-1); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id-1)); }
+
+ inline Index index() const { return m_data.index(m_id-1); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id > m_start); }
+
+ protected:
+ const internal::CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_start;
+};
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/SparseView.h b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
new file mode 100644
index 00000000000..8b0b9ea0304
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/SparseView.h
@@ -0,0 +1,98 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2011 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+ typedef int Index;
+ typedef Sparse StorageKind;
+ enum {
+ Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+ };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+  SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+             typename NumTraits<Scalar>::Real epsilon = NumTraits<Scalar>::dummy_precision()) :
+    m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerSize() const { return m_matrix.innerSize(); }
+ inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+ MatrixTypeNested m_matrix;
+ Scalar m_reference;
+ typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+ typedef typename _MatrixTypeNested::InnerIterator IterBase;
+ InnerIterator(const SparseView& view, Index outer) :
+ IterBase(view.m_matrix, outer), m_view(view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ IterBase::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using IterBase::value;
+
+protected:
+ const SparseView& m_view;
+
+private:
+ void incrementToNonZero()
+ {
+ while((bool(*this)) && internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon))
+ {
+ IterBase::operator++();
+ }
+ }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+                                                          typename NumTraits<Scalar>::Real epsilon) const
+{
+  return SparseView<Derived>(derived(), reference, epsilon);
+}
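+
+// A minimal usage sketch (D is a placeholder dense matrix):
+//   Eigen::MatrixXd D = Eigen::MatrixXd::Random(10,10);
+//   Eigen::SparseMatrix<double> S = D.sparseView();           // keep every exact non zero
+//   Eigen::SparseMatrix<double> T = D.sparseView(1.0, 1e-3);  // drop coefficients much smaller
+//                                                             // than reference*epsilon = 1e-3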
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSEVIEW_H
diff --git a/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
new file mode 100644
index 00000000000..cb8ad82b4f6
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/SparseCore/TriangularSolver.h
@@ -0,0 +1,334 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// This Source Code Form is subject to the terms of the Mozilla
+// Public License v. 2.0. If a copy of the MPL was not distributed
+// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace Eigen {
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal(0);
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar l_ii = 0;
+ typename Lhs::InnerIterator it(lhs, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ l_ii = it.value();
+ ++it;
+ }
+ else if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ other.coeffRef(i,col) = tmp/l_ii;
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ while(it && it.index()<i)
+ ++it;
+ if(!(Mode & UnitDiag))
+ {
+ eigen_assert(it && it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // TODO replace this by a binary search. make sure the binary search is safe for partially sorted elements
+ typename Lhs::ReverseInnerIterator it(lhs, i);
+ while(it && it.index()!=i)
+ --it;
+ eigen_assert(it && it.index()==i);
+ other.coeffRef(i,col) /= it.value();
+ }
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert((!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveInPlace(res);
+ return res;
+}
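+
+// A minimal usage sketch of the two entry points above (L, b are placeholders;
+// L must be square and its referenced triangular part must be stored):
+//   Eigen::SparseMatrix<double> L;
+//   Eigen::VectorXd b;
+//   Eigen::VectorXd x = L.triangularView<Eigen::Lower>().solve(b);  // returns the solution
+//   L.triangularView<Eigen::Lower>().solveInPlace(b);               // overwrites b in place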
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ const bool IsLower = (UpLo==Lower);
+ AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ tempVector.setBounds(0,other.rows());
+
+ Rhs res(other.rows(), other.cols());
+ res.reserve(other.nonZeros());
+
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ // FIXME estimate number of non zeros
+ tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+ tempVector.setZero();
+ tempVector.restart();
+ for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+ {
+ tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+ }
+
+ for(int i=IsLower?0:lhs.cols()-1;
+ IsLower?i<lhs.cols():i>=0;
+ i+=IsLower?1:-1)
+ {
+ tempVector.restart();
+ Scalar& ci = tempVector.coeffRef(i);
+ if (ci!=Scalar(0))
+ {
+ // find
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ if (IsLower)
+ {
+ eigen_assert(it.index()==i);
+ ci /= it.value();
+ }
+ else
+ ci /= lhs.coeff(i,i);
+ }
+ tempVector.restart();
+ if (IsLower)
+ {
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ else
+ {
+ for(; it && it.index()<i; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ }
+ }
+
+      // FIXME compute a reference value to filter zeros
+      for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+      {
+        // FIXME use insertBack
+        res.insert(it.index(), col) = it.value();
+      }
+ }
+ res.finalize();
+ other = res.markAsRValue();
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows() && m_matrix.cols() == other.rows());
+ eigen_assert( (!(Mode & ZeroDiag)) && bool(Mode & (Upper|Lower)));
+
+// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+// typedef typename internal::conditional<copy,
+// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+// if (copy)
+// other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ derived().solveTriangularInPlace(res);
+ return res;
+}
+#endif // EIGEN2_SUPPORT
+
+} // end namespace Eigen
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H