git.blender.org/blender.git
Diffstat (limited to 'extern/Eigen3/Eigen/src/Sparse')
 extern/Eigen3/Eigen/src/Sparse/AmbiVector.h            | 379
 extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h     | 239
 extern/Eigen3/Eigen/src/Sparse/CoreIterators.h         |  71
 extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h   | 346
 extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h    | 165
 extern/Eigen3/Eigen/src/Sparse/SparseAssign.h          |   0
 extern/Eigen3/Eigen/src/Sparse/SparseBlock.h           | 465
 extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h   | 375
 extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h    | 146
 extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h    | 231
 extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h | 195
 extern/Eigen3/Eigen/src/Sparse/SparseDot.h             |  97
 extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h           |  41
 extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h          | 651
 extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h      | 706
 extern/Eigen3/Eigen/src/Sparse/SparseProduct.h         | 141
 extern/Eigen3/Eigen/src/Sparse/SparseRedux.h           |  56
 extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h | 454
 extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h   | 401
 extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h       |  68
 extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h  | 100
 extern/Eigen3/Eigen/src/Sparse/SparseUtil.h            | 130
 extern/Eigen3/Eigen/src/Sparse/SparseVector.h          | 431
 extern/Eigen3/Eigen/src/Sparse/SparseView.h            | 109
 extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h      | 339
 25 files changed, 6336 insertions(+), 0 deletions(-)
diff --git a/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
new file mode 100644
index 00000000000..2ea8ba3096b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/AmbiVector.h
@@ -0,0 +1,379 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_AMBIVECTOR_H
+#define EIGEN_AMBIVECTOR_H
+
+/** \internal
+ * Hybrid sparse/dense vector class designed for intensive read-write operations.
+ *
+ * See BasicSparseLLT and SparseProduct for usage examples.
+ */
+template<typename _Scalar, typename _Index>
+class AmbiVector
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ AmbiVector(Index size)
+ : m_buffer(0), m_zero(0), m_size(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
+ {
+ resize(size);
+ }
+
+ void init(double estimatedDensity);
+ void init(int mode);
+
+ Index nonZeros() const;
+
+ /** Specifies a sub-vector to work on */
+ void setBounds(Index start, Index end) { m_start = start; m_end = end; }
+
+ void setZero();
+
+ void restart();
+ Scalar& coeffRef(Index i);
+ Scalar& coeff(Index i);
+
+ class Iterator;
+
+ ~AmbiVector() { delete[] m_buffer; }
+
+ void resize(Index size)
+ {
+ if (m_allocatedSize < size)
+ reallocate(size);
+ m_size = size;
+ }
+
+ Index size() const { return m_size; }
+
+ protected:
+
+ void reallocate(Index size)
+ {
+      // if the vector is not too large, allocate a bit more than needed so
+      // that we can handle a dense vector even in sparse mode.
+ delete[] m_buffer;
+ if (size<1000)
+ {
+ Index allocSize = (size * sizeof(ListEl))/sizeof(Scalar);
+ m_allocatedElements = (allocSize*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[allocSize];
+ }
+ else
+ {
+ m_allocatedElements = (size*sizeof(Scalar))/sizeof(ListEl);
+ m_buffer = new Scalar[size];
+ }
+ m_size = size;
+ m_start = 0;
+ m_end = m_size;
+ }
+
+ void reallocateSparse()
+ {
+ Index copyElements = m_allocatedElements;
+ m_allocatedElements = (std::min)(Index(m_allocatedElements*1.5),m_size);
+ Index allocSize = m_allocatedElements * sizeof(ListEl);
+ allocSize = allocSize/sizeof(Scalar) + (allocSize%sizeof(Scalar)>0?1:0);
+ Scalar* newBuffer = new Scalar[allocSize];
+ memcpy(newBuffer, m_buffer, copyElements * sizeof(ListEl));
+ delete[] m_buffer;
+ m_buffer = newBuffer;
+ }
+
+ protected:
+ // element type of the linked list
+ struct ListEl
+ {
+ Index next;
+ Index index;
+ Scalar value;
+ };
+
+    // used to store data in both modes
+ Scalar* m_buffer;
+ Scalar m_zero;
+ Index m_size;
+ Index m_start;
+ Index m_end;
+ Index m_allocatedSize;
+ Index m_allocatedElements;
+ Index m_mode;
+
+ // linked list mode
+ Index m_llStart;
+ Index m_llCurrent;
+ Index m_llSize;
+};
+
+/** \returns the number of non zeros in the current sub vector */
+template<typename _Scalar,typename _Index>
+_Index AmbiVector<_Scalar,_Index>::nonZeros() const
+{
+ if (m_mode==IsSparse)
+ return m_llSize;
+ else
+ return m_end - m_start;
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(double estimatedDensity)
+{
+ if (estimatedDensity>0.1)
+ init(IsDense);
+ else
+ init(IsSparse);
+}
+
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::init(int mode)
+{
+ m_mode = mode;
+ if (m_mode==IsSparse)
+ {
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+/** Must be called whenever we might perform a write access
+ * with an index smaller than the previous one.
+ *
+ * Don't worry, this function is extremely cheap.
+ */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::restart()
+{
+ m_llCurrent = m_llStart;
+}
+
+/** Set all coefficients of current subvector to zero */
+template<typename _Scalar,typename _Index>
+void AmbiVector<_Scalar,_Index>::setZero()
+{
+ if (m_mode==IsDense)
+ {
+ for (Index i=m_start; i<m_end; ++i)
+ m_buffer[i] = Scalar(0);
+ }
+ else
+ {
+ eigen_assert(m_mode==IsSparse);
+ m_llSize = 0;
+ m_llStart = -1;
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeffRef(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ // TODO factorize the following code to reduce code generation
+ eigen_assert(m_mode==IsSparse);
+ if (m_llSize==0)
+ {
+ // this is the first element
+ m_llStart = 0;
+ m_llCurrent = 0;
+ ++m_llSize;
+ llElements[0].value = Scalar(0);
+ llElements[0].index = i;
+ llElements[0].next = -1;
+ return llElements[0].value;
+ }
+ else if (i<llElements[m_llStart].index)
+ {
+ // this is going to be the new first element of the list
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = m_llStart;
+ m_llStart = m_llSize;
+ ++m_llSize;
+ m_llCurrent = m_llStart;
+ return el.value;
+ }
+ else
+ {
+ Index nextel = llElements[m_llCurrent].next;
+ eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
+ while (nextel >= 0 && llElements[nextel].index<=i)
+ {
+ m_llCurrent = nextel;
+ nextel = llElements[nextel].next;
+ }
+
+ if (llElements[m_llCurrent].index==i)
+ {
+ // the coefficient already exists and we found it !
+ return llElements[m_llCurrent].value;
+ }
+ else
+ {
+ if (m_llSize>=m_allocatedElements)
+ {
+ reallocateSparse();
+ llElements = reinterpret_cast<ListEl*>(m_buffer);
+ }
+ eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
+ // let's insert a new coefficient
+ ListEl& el = llElements[m_llSize];
+ el.value = Scalar(0);
+ el.index = i;
+ el.next = llElements[m_llCurrent].next;
+ llElements[m_llCurrent].next = m_llSize;
+ ++m_llSize;
+ return el.value;
+ }
+ }
+ }
+}
+
+template<typename _Scalar,typename _Index>
+_Scalar& AmbiVector<_Scalar,_Index>::coeff(_Index i)
+{
+ if (m_mode==IsDense)
+ return m_buffer[i];
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
+ eigen_assert(m_mode==IsSparse);
+ if ((m_llSize==0) || (i<llElements[m_llStart].index))
+ {
+ return m_zero;
+ }
+ else
+ {
+ Index elid = m_llStart;
+ while (elid >= 0 && llElements[elid].index<i)
+ elid = llElements[elid].next;
+
+      if (elid >= 0 && llElements[elid].index==i)
+        return llElements[elid].value;
+      else
+        return m_zero;
+ }
+ }
+}
+
+/** Iterator over the nonzero coefficients */
+template<typename _Scalar,typename _Index>
+class AmbiVector<_Scalar,_Index>::Iterator
+{
+ public:
+ typedef _Scalar Scalar;
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+    /** Constructor
+ * \param vec the vector on which we iterate
+ * \param epsilon the minimal value used to prune zero coefficients.
+ * In practice, all coefficients having a magnitude smaller than \a epsilon
+ * are skipped.
+ */
+ Iterator(const AmbiVector& vec, RealScalar epsilon = RealScalar(0.1)*NumTraits<RealScalar>::dummy_precision())
+ : m_vector(vec)
+ {
+ m_epsilon = epsilon;
+ m_isDense = m_vector.m_mode==IsDense;
+ if (m_isDense)
+ {
+ m_currentEl = 0; // this is to avoid a compilation warning
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = m_vector.m_start-1;
+ ++(*this);
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ m_currentEl = m_vector.m_llStart;
+ while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon)
+ m_currentEl = llElements[m_currentEl].next;
+ if (m_currentEl<0)
+ {
+ m_cachedValue = 0; // this is to avoid a compilation warning
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ }
+
+ Index index() const { return m_cachedIndex; }
+ Scalar value() const { return m_cachedValue; }
+
+ operator bool() const { return m_cachedIndex>=0; }
+
+ Iterator& operator++()
+ {
+ if (m_isDense)
+ {
+ do {
+ ++m_cachedIndex;
+ } while (m_cachedIndex<m_vector.m_end && internal::abs(m_vector.m_buffer[m_cachedIndex])<m_epsilon);
+ if (m_cachedIndex<m_vector.m_end)
+ m_cachedValue = m_vector.m_buffer[m_cachedIndex];
+ else
+ m_cachedIndex=-1;
+ }
+ else
+ {
+ ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
+ do {
+ m_currentEl = llElements[m_currentEl].next;
+ } while (m_currentEl>=0 && internal::abs(llElements[m_currentEl].value)<m_epsilon);
+ if (m_currentEl<0)
+ {
+ m_cachedIndex = -1;
+ }
+ else
+ {
+ m_cachedIndex = llElements[m_currentEl].index;
+ m_cachedValue = llElements[m_currentEl].value;
+ }
+ }
+ return *this;
+ }
+
+ protected:
+ const AmbiVector& m_vector; // the target vector
+ Index m_currentEl; // the current element in sparse/linked-list mode
+ RealScalar m_epsilon; // epsilon used to prune zero coefficients
+ Index m_cachedIndex; // current coordinate
+ Scalar m_cachedValue; // current value
+ bool m_isDense; // mode of the vector
+};
+
+
+#endif // EIGEN_AMBIVECTOR_H
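
A minimal usage sketch of the AmbiVector workflow added above; illustrative only, since AmbiVector is an internal helper, and reaching it as Eigen::AmbiVector via <Eigen/Sparse> is an assumption about this Eigen version:

    #include <Eigen/Sparse>
    #include <cstdio>

    void ambivector_demo()
    {
      Eigen::AmbiVector<double,int> acc(1000);
      acc.init(0.01);               // estimated density < 0.1 selects sparse (linked-list) mode
      acc.setZero();
      acc.coeffRef(10)  += 1.0;     // increasing indices need no restart()...
      acc.coeffRef(500) += 2.0;
      acc.restart();                // ...but writing a smaller index requires one
      acc.coeffRef(3)   += 5.0;
      // The iterator prunes magnitudes below epsilon and visits indices in order.
      for (Eigen::AmbiVector<double,int>::Iterator it(acc); it; ++it)
        std::printf("(%d, %g)\n", it.index(), it.value());
    }
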
diff --git a/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
new file mode 100644
index 00000000000..b3bde272ec2
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/CompressedStorage.h
@@ -0,0 +1,239 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COMPRESSED_STORAGE_H
+#define EIGEN_COMPRESSED_STORAGE_H
+
+/** Stores a sparse set of values as a list of values and a list of indices.
+ *
+ */
+template<typename _Scalar,typename _Index>
+class CompressedStorage
+{
+ public:
+
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+
+ protected:
+
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ public:
+
+ CompressedStorage()
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {}
+
+ CompressedStorage(size_t size)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ resize(size);
+ }
+
+ CompressedStorage(const CompressedStorage& other)
+ : m_values(0), m_indices(0), m_size(0), m_allocatedSize(0)
+ {
+ *this = other;
+ }
+
+ CompressedStorage& operator=(const CompressedStorage& other)
+ {
+ resize(other.size());
+ memcpy(m_values, other.m_values, m_size * sizeof(Scalar));
+ memcpy(m_indices, other.m_indices, m_size * sizeof(Index));
+ return *this;
+ }
+
+ void swap(CompressedStorage& other)
+ {
+ std::swap(m_values, other.m_values);
+ std::swap(m_indices, other.m_indices);
+ std::swap(m_size, other.m_size);
+ std::swap(m_allocatedSize, other.m_allocatedSize);
+ }
+
+ ~CompressedStorage()
+ {
+ delete[] m_values;
+ delete[] m_indices;
+ }
+
+ void reserve(size_t size)
+ {
+ size_t newAllocatedSize = m_size + size;
+ if (newAllocatedSize > m_allocatedSize)
+ reallocate(newAllocatedSize);
+ }
+
+ void squeeze()
+ {
+ if (m_allocatedSize>m_size)
+ reallocate(m_size);
+ }
+
+ void resize(size_t size, float reserveSizeFactor = 0)
+ {
+ if (m_allocatedSize<size)
+ reallocate(size + size_t(reserveSizeFactor*size));
+ m_size = size;
+ }
+
+ void append(const Scalar& v, Index i)
+ {
+ Index id = static_cast<Index>(m_size);
+ resize(m_size+1, 1);
+ m_values[id] = v;
+ m_indices[id] = i;
+ }
+
+ inline size_t size() const { return m_size; }
+ inline size_t allocatedSize() const { return m_allocatedSize; }
+ inline void clear() { m_size = 0; }
+
+ inline Scalar& value(size_t i) { return m_values[i]; }
+ inline const Scalar& value(size_t i) const { return m_values[i]; }
+
+ inline Index& index(size_t i) { return m_indices[i]; }
+ inline const Index& index(size_t i) const { return m_indices[i]; }
+
+ static CompressedStorage Map(Index* indices, Scalar* values, size_t size)
+ {
+ CompressedStorage res;
+ res.m_indices = indices;
+ res.m_values = values;
+ res.m_allocatedSize = res.m_size = size;
+ return res;
+ }
+
+ /** \returns the largest \c k such that for all \c j in [0,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(Index key) const
+ {
+ return searchLowerIndex(0, m_size, key);
+ }
+
+ /** \returns the largest \c k in [start,end) such that for all \c j in [start,k) index[\c j]\<\a key */
+ inline Index searchLowerIndex(size_t start, size_t end, Index key) const
+ {
+ while(end>start)
+ {
+ size_t mid = (end+start)>>1;
+ if (m_indices[mid]<key)
+ start = mid+1;
+ else
+ end = mid;
+ }
+ return static_cast<Index>(start);
+ }
+
+ /** \returns the stored value at index \a key
+ * If the value does not exist, then the value \a defaultValue is returned without any insertion. */
+ inline Scalar at(Index key, Scalar defaultValue = Scalar(0)) const
+ {
+ if (m_size==0)
+ return defaultValue;
+ else if (key==m_indices[m_size-1])
+ return m_values[m_size-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(0,m_size-1,key);
+ return ((id<m_size) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** Like at(), but the search is performed in the range [start,end) */
+ inline Scalar atInRange(size_t start, size_t end, Index key, Scalar defaultValue = Scalar(0)) const
+ {
+      if (start>=end)
+        return defaultValue;
+ else if (end>start && key==m_indices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+ const size_t id = searchLowerIndex(start,end-1,key);
+ return ((id<end) && (m_indices[id]==key)) ? m_values[id] : defaultValue;
+ }
+
+ /** \returns a reference to the value at index \a key
+ * If the value does not exist, then the value \a defaultValue is inserted
+ * such that the keys are sorted. */
+ inline Scalar& atWithInsertion(Index key, Scalar defaultValue = Scalar(0))
+ {
+ size_t id = searchLowerIndex(0,m_size,key);
+ if (id>=m_size || m_indices[id]!=key)
+ {
+ resize(m_size+1,1);
+ for (size_t j=m_size-1; j>id; --j)
+ {
+ m_indices[j] = m_indices[j-1];
+ m_values[j] = m_values[j-1];
+ }
+ m_indices[id] = key;
+ m_values[id] = defaultValue;
+ }
+ return m_values[id];
+ }
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ size_t k = 0;
+ size_t n = size();
+ for (size_t i=0; i<n; ++i)
+ {
+ if (!internal::isMuchSmallerThan(value(i), reference, epsilon))
+ {
+ value(k) = value(i);
+ index(k) = index(i);
+ ++k;
+ }
+ }
+ resize(k,0);
+ }
+
+ protected:
+
+ inline void reallocate(size_t size)
+ {
+ Scalar* newValues = new Scalar[size];
+ Index* newIndices = new Index[size];
+ size_t copySize = (std::min)(size, m_size);
+ // copy
+ memcpy(newValues, m_values, copySize * sizeof(Scalar));
+ memcpy(newIndices, m_indices, copySize * sizeof(Index));
+ // delete old stuff
+ delete[] m_values;
+ delete[] m_indices;
+ m_values = newValues;
+ m_indices = newIndices;
+ m_allocatedSize = size;
+ }
+
+ protected:
+ Scalar* m_values;
+ Index* m_indices;
+ size_t m_size;
+ size_t m_allocatedSize;
+
+};
+
+#endif // EIGEN_COMPRESSED_STORAGE_H
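
In the same spirit, a sketch of CompressedStorage, the (index,value) pair container above; again assuming Eigen-internal visibility, and noting that at() and atWithInsertion() rely on the indices staying sorted:

    #include <Eigen/Sparse>

    double compressed_storage_demo()
    {
      Eigen::CompressedStorage<double,int> s;
      s.append(1.5, 2);             // (value, index); append in increasing index order
      s.append(3.0, 7);
      s.append(0.5, 9);
      s.atWithInsertion(4) = 2.5;   // sorted insertion, shifts the tail one slot
      return s.at(7)                // binary-search hit  -> 3.0
           + s.at(100, -1.0);       // miss -> the supplied default, here -1.0
    }
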
diff --git a/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
new file mode 100644
index 00000000000..b4beaeee69e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/CoreIterators.h
@@ -0,0 +1,71 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_COREITERATORS_H
+#define EIGEN_COREITERATORS_H
+
+/* This file contains the respective InnerIterator definitions for the expressions defined in Eigen/Core
+ */
+
+/** \class InnerIterator
+  * \brief An InnerIterator allows one to loop over the elements of a sparse (or dense) matrix or expression
+ *
+ * todo
+ */
+
+// generic version for dense matrix and expressions
+template<typename Derived> class DenseBase<Derived>::InnerIterator
+{
+ protected:
+ typedef typename Derived::Scalar Scalar;
+ typedef typename Derived::Index Index;
+
+ enum { IsRowMajor = (Derived::Flags&RowMajorBit)==RowMajorBit };
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const Derived& expr, Index outer)
+ : m_expression(expr), m_inner(0), m_outer(outer), m_end(expr.innerSize())
+ {}
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ {
+ return (IsRowMajor) ? m_expression.coeff(m_outer, m_inner)
+ : m_expression.coeff(m_inner, m_outer);
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++() { m_inner++; return *this; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_inner; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_inner < m_end && m_inner>=0; }
+
+ protected:
+ const Derived& m_expression;
+ Index m_inner;
+ const Index m_outer;
+ const Index m_end;
+};
+
+#endif // EIGEN_COREITERATORS_H
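
The dense InnerIterator above visits every coefficient of one inner vector, zeros included; a hedged example of client code (in this Eigen version the file ships with the Sparse module, so <Eigen/Sparse> is assumed to provide it):

    #include <Eigen/Sparse>
    #include <cstdio>

    void walk_first_column(const Eigen::MatrixXd& m)
    {
      // MatrixXd is column-major, so outer index 0 designates the first column.
      for (Eigen::MatrixXd::InnerIterator it(m, 0); it; ++it)
        std::printf("m(%ld,%ld) = %g\n", long(it.row()), long(it.col()), it.value());
    }
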
diff --git a/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
new file mode 100644
index 00000000000..93e75f4c601
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/DynamicSparseMatrix.h
@@ -0,0 +1,346 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_DYNAMIC_SPARSEMATRIX_H
+#define EIGEN_DYNAMIC_SPARSEMATRIX_H
+
+/** \class DynamicSparseMatrix
+ *
+ * \brief A sparse matrix class designed for matrix assembly purpose
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * Unlike SparseMatrix, this class provides a much higher degree of flexibility. In particular, it allows
+ * random read/write accesses in log(rho*outer_size) where \c rho is the probability that a coefficient is
+ * nonzero and outer_size is the number of columns if the matrix is column-major and the number of rows
+ * otherwise.
+ *
+  * Internally, the data are stored as a std::vector of compressed vectors. The performance of random writes might
+  * decrease as the number of nonzeros per inner vector increases. In practice, we observed very good performance
+  * up to about 100 nonzeros/vector, and the performance remains reasonably good up to 500 nonzeros/vector.
+ *
+ * \see SparseMatrix
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = OuterRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix
+ : public SparseMatrixBase<DynamicSparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(DynamicSparseMatrix)
+    // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, +=)
+ // EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(DynamicSparseMatrix, -=)
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ using Base::operator=;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef DynamicSparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_innerSize;
+ std::vector<CompressedStorage<Scalar,Index> > m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? outerSize() : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : outerSize(); }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return static_cast<Index>(m_data.size()); }
+ inline Index innerNonZeros(Index j) const { return m_data[j].size(); }
+
+ std::vector<CompressedStorage<Scalar,Index> >& _data() { return m_data; }
+ const std::vector<CompressedStorage<Scalar,Index> >& _data() const { return m_data; }
+
+ /** \returns the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search.
+ */
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].at(inner);
+ }
+
+ /** \returns a reference to the coefficient value at given position \a row, \a col
+      * This operation involves a log(rho*outer_size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ */
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data[outer].atWithInsertion(inner);
+ }
+
+ class InnerIterator;
+
+ void setZero()
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].clear();
+ }
+
+ /** \returns the number of non zero coefficients */
+ Index nonZeros() const
+ {
+ Index res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ res += static_cast<Index>(m_data[j].size());
+ return res;
+ }
+
+
+
+ void reserve(Index reserveSize = 1000)
+ {
+ if (outerSize()>0)
+ {
+ Index reserveSizePerVector = (std::max)(reserveSize/outerSize(),Index(4));
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ m_data[j].reserve(reserveSizePerVector);
+ }
+ }
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void startVec(Index /*outer*/) {}
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one of the given inner vector.
+ *
+ * \sa insert, insertBackByOuterInner */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer<Index(m_data.size()) && inner<m_innerSize && "out of range");
+ eigen_assert(((m_data[outer].size()==0) || (m_data[outer].index(m_data[outer].size()-1)<inner))
+ && "wrong sorted insertion");
+ m_data[outer].append(0, inner);
+ return m_data[outer].value(m_data[outer].size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index startId = 0;
+ Index id = static_cast<Index>(m_data[outer].size()) - 1;
+ m_data[outer].resize(id+2,1);
+
+ while ( (id >= startId) && (m_data[outer].index(id) > inner) )
+ {
+ m_data[outer].index(id+1) = m_data[outer].index(id);
+ m_data[outer].value(id+1) = m_data[outer].value(id);
+ --id;
+ }
+ m_data[outer].index(id+1) = inner;
+ m_data[outer].value(id+1) = 0;
+ return m_data[outer].value(id+1);
+ }
+
+ /** Does nothing: provided for compatibility with SparseMatrix */
+ inline void finalize() {}
+
+    /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ for (Index j=0; j<outerSize(); ++j)
+ m_data[j].prune(reference,epsilon);
+ }
+
+ /** Resize the matrix without preserving the data (the matrix is set to zero)
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ setZero();
+ if (Index(m_data.size()) != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ void resizeAndKeepData(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ const Index innerSize = IsRowMajor ? cols : rows;
+ if (m_innerSize>innerSize)
+ {
+ // remove all coefficients with innerCoord>=innerSize
+ // TODO
+ //std::cerr << "not implemented yet\n";
+ exit(2);
+ }
+ if (m_data.size() != outerSize)
+ {
+ m_data.resize(outerSize);
+ }
+ }
+
+ inline DynamicSparseMatrix()
+ : m_innerSize(0), m_data(0)
+ {
+ eigen_assert(innerSize()==0 && outerSize()==0);
+ }
+
+ inline DynamicSparseMatrix(Index rows, Index cols)
+ : m_innerSize(0)
+ {
+ resize(rows, cols);
+ }
+
+ template<typename OtherDerived>
+ explicit inline DynamicSparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_innerSize(0)
+ {
+ Base::operator=(other.derived());
+ }
+
+ inline DynamicSparseMatrix(const DynamicSparseMatrix& other)
+ : Base(), m_innerSize(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(DynamicSparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_innerSize, other.m_innerSize);
+ //std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline DynamicSparseMatrix& operator=(const DynamicSparseMatrix& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ /** Destructor */
+ inline ~DynamicSparseMatrix() {}
+
+ public:
+
+ /** \deprecated
+ * Set the matrix to zero and reserve the memory for \a reserveSize nonzero coefficients. */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+      * inserts a nonzero coefficient at given coordinates \a row, \a col and returns its reference assuming that:
+      * 1 - the coefficient does not exist yet
+      * 2 - this is the coefficient with the greatest inner coordinate for the given outer coordinate.
+      * In other words, assuming \c *this is column-major, then there must not exist any nonzero coefficient of coordinates
+      * \c i \c x \a col such that \c i >= \a row. Otherwise the matrix is invalid.
+ *
+ * \see fillrand(), coeffRef()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return insertBack(outer,inner);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+      * Compared to the generic coeffRef(), the only limitation is that we assume
+      * the coefficient does not exist yet.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use finalize()
+ * Does nothing. Provided for compatibility with SparseMatrix. */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# include EIGEN_DYNAMICSPARSEMATRIX_PLUGIN
+# endif
+};
+
+template<typename Scalar, int _Options, typename _Index>
+class DynamicSparseMatrix<Scalar,_Options,_Index>::InnerIterator : public SparseVector<Scalar,_Options>::InnerIterator
+{
+ typedef typename SparseVector<Scalar,_Options>::InnerIterator Base;
+ public:
+ InnerIterator(const DynamicSparseMatrix& mat, Index outer)
+ : Base(mat.m_data[outer]), m_outer(outer)
+ {}
+
+ inline Index row() const { return IsRowMajor ? m_outer : Base::index(); }
+ inline Index col() const { return IsRowMajor ? Base::index() : m_outer; }
+
+ protected:
+ const Index m_outer;
+};
+
+#endif // EIGEN_DYNAMIC_SPARSEMATRIX_H
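
A sketch of the assembly pattern the class documentation above describes: cheap random-order insertion, followed by conversion to the compressed SparseMatrix format (the conversion constructor is assumed to be available in this version):

    #include <Eigen/Sparse>

    void assembly_demo()
    {
      Eigen::DynamicSparseMatrix<double> dyn(1000, 1000);
      dyn.reserve(4000);
      dyn.coeffRef(37, 512) += 1.0;   // sorted insertion within column 512
      dyn.coeffRef(2, 4)    -= 0.5;   // any access order is fine
      Eigen::SparseMatrix<double> mat(dyn);  // compress once assembly is done
    }
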
diff --git a/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
new file mode 100644
index 00000000000..31a431fb224
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/MappedSparseMatrix.h
@@ -0,0 +1,165 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_MAPPED_SPARSEMATRIX_H
+#define EIGEN_MAPPED_SPARSEMATRIX_H
+
+/** \class MappedSparseMatrix
+ *
+  * \brief Sparse matrix view of externally allocated compressed storage
+ *
+ * \param _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ */
+namespace internal {
+template<typename _Scalar, int _Flags, typename _Index>
+struct traits<MappedSparseMatrix<_Scalar, _Flags, _Index> > : traits<SparseMatrix<_Scalar, _Flags, _Index> >
+{};
+}
+
+template<typename _Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix
+ : public SparseMatrixBase<MappedSparseMatrix<_Scalar, _Flags, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(MappedSparseMatrix)
+
+ protected:
+ enum { IsRowMajor = Base::IsRowMajor };
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index m_nnz;
+ Index* m_outerIndex;
+ Index* m_innerIndices;
+ Scalar* m_values;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ //----------------------------------------
+ // direct access interface
+ inline const Scalar* _valuePtr() const { return m_values; }
+ inline Scalar* _valuePtr() { return m_values; }
+
+ inline const Index* _innerIndexPtr() const { return m_innerIndices; }
+ inline Index* _innerIndexPtr() { return m_innerIndices; }
+
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
+ //----------------------------------------
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ if (start==end)
+ return Scalar(0);
+ else if (end>0 && inner==m_innerIndices[end-1])
+ return m_values[end-1];
+ // ^^ optimization: let's first check if it is the last coefficient
+ // (very common in high level algorithms)
+
+ const Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end-1],inner);
+ const Index id = r-&m_innerIndices[0];
+ return ((*r==inner) && (id<end)) ? m_values[id] : Scalar(0);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ Index* r = std::lower_bound(&m_innerIndices[start],&m_innerIndices[end],inner);
+ const Index id = r-&m_innerIndices[0];
+ eigen_assert((*r==inner) && (id<end) && "coeffRef cannot be called on a zero coefficient");
+ return m_values[id];
+ }
+
+ class InnerIterator;
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return m_nnz; }
+
+ inline MappedSparseMatrix(Index rows, Index cols, Index nnz, Index* outerIndexPtr, Index* innerIndexPtr, Scalar* valuePtr)
+ : m_outerSize(IsRowMajor?rows:cols), m_innerSize(IsRowMajor?cols:rows), m_nnz(nnz), m_outerIndex(outerIndexPtr),
+ m_innerIndices(innerIndexPtr), m_values(valuePtr)
+ {}
+
+ /** Empty destructor */
+ inline ~MappedSparseMatrix() {}
+};
+
+template<typename Scalar, int _Flags, typename _Index>
+class MappedSparseMatrix<Scalar,_Flags,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const MappedSparseMatrix& mat, Index outer)
+ : m_matrix(mat),
+ m_outer(outer),
+ m_id(mat._outerIndexPtr()[outer]),
+ m_start(m_id),
+ m_end(mat._outerIndexPtr()[outer+1])
+ {}
+
+    template<unsigned int Added, unsigned int Removed>
+    InnerIterator(const Flagged<MappedSparseMatrix,Added,Removed>& mat, Index outer)
+      : m_matrix(mat._expression()), m_outer(outer), m_id(m_matrix._outerIndexPtr()[outer]),
+        m_start(m_id), m_end(m_matrix._outerIndexPtr()[outer+1])
+    {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_matrix._valuePtr()[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_matrix._valuePtr()[m_id]); }
+
+ inline Index index() const { return m_matrix._innerIndexPtr()[m_id]; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end) && (m_id>=m_start); }
+
+ protected:
+ const MappedSparseMatrix& m_matrix;
+ const Index m_outer;
+ Index m_id;
+ const Index m_start;
+ const Index m_end;
+};
+
+#endif // EIGEN_MAPPED_SPARSEMATRIX_H
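
MappedSparseMatrix wraps externally owned compressed arrays without copying; a sketch with a hand-built 3x3 matrix (the default template flags are assumed to select column-major, i.e. CCS, storage):

    #include <Eigen/Sparse>

    double mapped_demo()
    {
      int    outer[4] = {0, 2, 3, 4};   // cols+1 cumulative nonzero counts
      int    inner[4] = {0, 2, 1, 2};   // row index of each nonzero
      double vals[4]  = {1.0, 2.0, 3.0, 4.0};
      Eigen::MappedSparseMatrix<double> view(3, 3, 4, outer, inner, vals);
      return view.coeff(2, 0);          // binary search within column 0 -> 2.0
    }
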
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
new file mode 100644
index 00000000000..e69de29bb2d
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseAssign.h
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
new file mode 100644
index 00000000000..8079c999994
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseBlock.h
@@ -0,0 +1,465 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_BLOCK_H
+#define EIGEN_SPARSE_BLOCK_H
+
+namespace internal {
+template<typename MatrixType, int Size>
+struct traits<SparseInnerVectorSet<MatrixType, Size> >
+{
+ typedef typename traits<MatrixType>::Scalar Scalar;
+ typedef typename traits<MatrixType>::Index Index;
+ typedef typename traits<MatrixType>::StorageKind StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsRowMajor = (int(MatrixType::Flags)&RowMajorBit)==RowMajorBit,
+ Flags = MatrixType::Flags,
+ RowsAtCompileTime = IsRowMajor ? Size : MatrixType::RowsAtCompileTime,
+ ColsAtCompileTime = IsRowMajor ? MatrixType::ColsAtCompileTime : Size,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ CoeffReadCost = MatrixType::CoeffReadCost
+ };
+};
+} // end namespace internal
+
+template<typename MatrixType, int Size>
+class SparseInnerVectorSet : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseInnerVectorSet<MatrixType, Size> >
+{
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+// template<typename OtherDerived>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+};
+
+/***************************************************************************
+* specialisation for DynamicSparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, int Size>
+class SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<DynamicSparseMatrix<_Scalar, _Options>, Size> >
+{
+ typedef DynamicSparseMatrix<_Scalar, _Options> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size!=Dynamic);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (IsRowMajor != ((OtherDerived::Flags&RowMajorBit)==RowMajorBit))
+ {
+ // need to transpose => perform a block evaluation followed by a big swap
+ DynamicSparseMatrix<Scalar,IsRowMajor?RowMajorBit:0> aux(other);
+ *this = aux.markAsRValue();
+ }
+ else
+ {
+ // evaluate/copy vector per vector
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ {
+ SparseVector<Scalar,IsRowMajor ? RowMajorBit : 0> aux(other.innerVector(j));
+ m_matrix.const_cast_derived()._data()[m_outerStart+j].swap(aux._data());
+ }
+ }
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ Index nonZeros() const
+ {
+ Index count = 0;
+ for (Index j=0; j<m_outerSize.value(); ++j)
+ count += m_matrix._data()[m_outerStart+j].size();
+ return count;
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+      eigen_assert(m_matrix._data()[m_outerStart].size()>0);
+      return m_matrix._data()[m_outerStart].value(m_matrix._data()[m_outerStart].size()-1);
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+
+/***************************************************************************
+* specialisation for SparseMatrix
+***************************************************************************/
+
+template<typename _Scalar, int _Options, typename _Index, int Size>
+class SparseInnerVectorSet<SparseMatrix<_Scalar, _Options, _Index>, Size>
+ : public SparseMatrixBase<SparseInnerVectorSet<SparseMatrix<_Scalar, _Options>, Size> >
+{
+ typedef SparseMatrix<_Scalar, _Options> MatrixType;
+ public:
+
+ enum { IsRowMajor = internal::traits<SparseInnerVectorSet>::IsRowMajor };
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseInnerVectorSet)
+ class InnerIterator: public MatrixType::InnerIterator
+ {
+ public:
+ inline InnerIterator(const SparseInnerVectorSet& xpr, Index outer)
+ : MatrixType::InnerIterator(xpr.m_matrix, xpr.m_outerStart + outer), m_outer(outer)
+ {}
+ inline Index row() const { return IsRowMajor ? m_outer : this->index(); }
+ inline Index col() const { return IsRowMajor ? this->index() : m_outer; }
+ protected:
+ Index m_outer;
+ };
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outerStart, Index outerSize)
+ : m_matrix(matrix), m_outerStart(outerStart), m_outerSize(outerSize)
+ {
+ eigen_assert( (outerStart>=0) && ((outerStart+outerSize)<=matrix.outerSize()) );
+ }
+
+ inline SparseInnerVectorSet(const MatrixType& matrix, Index outer)
+ : m_matrix(matrix), m_outerStart(outer), m_outerSize(Size)
+ {
+ eigen_assert(Size==1);
+ eigen_assert( (outer>=0) && (outer<matrix.outerSize()) );
+ }
+
+ template<typename OtherDerived>
+ inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _NestedMatrixType;
+      _NestedMatrixType& matrix = const_cast<_NestedMatrixType&>(m_matrix);
+      // This assignment is slow if this vector set is not empty
+      // and/or it is not at the end of the nonzeros of the underlying matrix.
+
+ // 1 - eval to a temporary to avoid transposition and/or aliasing issues
+ SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, Index> tmp(other);
+
+ // 2 - let's check whether there is enough allocated memory
+ Index nnz = tmp.nonZeros();
+ Index nnz_previous = nonZeros();
+ Index free_size = matrix.data().allocatedSize() - nnz_previous;
+ std::size_t nnz_head = m_outerStart==0 ? 0 : matrix._outerIndexPtr()[m_outerStart];
+ std::size_t tail = m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()];
+ std::size_t nnz_tail = matrix.nonZeros() - tail;
+
+ if(nnz>free_size)
+ {
+ // realloc manually to reduce copies
+ typename MatrixType::Storage newdata(m_matrix.nonZeros() - nnz_previous + nnz);
+
+ std::memcpy(&newdata.value(0), &m_matrix.data().value(0), nnz_head*sizeof(Scalar));
+ std::memcpy(&newdata.index(0), &m_matrix.data().index(0), nnz_head*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+
+ std::memcpy(&newdata.value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&newdata.index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+
+ matrix.data().swap(newdata);
+ }
+ else
+ {
+ // no need to realloc, simply copy the tail at its respective position and insert tmp
+ matrix.data().resize(nnz_head + nnz + nnz_tail);
+
+ if(nnz<nnz_previous)
+ {
+ std::memcpy(&matrix.data().value(nnz_head+nnz), &matrix.data().value(tail), nnz_tail*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head+nnz), &matrix.data().index(tail), nnz_tail*sizeof(Index));
+ }
+ else
+ {
+ for(Index i=nnz_tail-1; i>=0; --i)
+ {
+ matrix.data().value(nnz_head+nnz+i) = matrix.data().value(tail+i);
+ matrix.data().index(nnz_head+nnz+i) = matrix.data().index(tail+i);
+ }
+ }
+
+ std::memcpy(&matrix.data().value(nnz_head), &tmp.data().value(0), nnz*sizeof(Scalar));
+ std::memcpy(&matrix.data().index(nnz_head), &tmp.data().index(0), nnz*sizeof(Index));
+ }
+
+ // update outer index pointers
+ Index p = nnz_head;
+ for(Index k=1; k<m_outerSize.value(); ++k)
+ {
+ matrix._outerIndexPtr()[m_outerStart+k] = p;
+ p += tmp.innerVector(k).nonZeros();
+ }
+ std::ptrdiff_t offset = nnz - nnz_previous;
+ for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
+ {
+ matrix._outerIndexPtr()[k] += offset;
+ }
+
+ return *this;
+ }
+
+ inline SparseInnerVectorSet& operator=(const SparseInnerVectorSet& other)
+ {
+ return operator=<SparseInnerVectorSet>(other);
+ }
+
+ inline const Scalar* _valuePtr() const
+ { return m_matrix._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline Scalar* _valuePtr()
+ { return m_matrix.const_cast_derived()._valuePtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _innerIndexPtr() const
+ { return m_matrix._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+ inline Index* _innerIndexPtr()
+ { return m_matrix.const_cast_derived()._innerIndexPtr() + m_matrix._outerIndexPtr()[m_outerStart]; }
+
+ inline const Index* _outerIndexPtr() const
+ { return m_matrix._outerIndexPtr() + m_outerStart; }
+ inline Index* _outerIndexPtr()
+ { return m_matrix.const_cast_derived()._outerIndexPtr() + m_outerStart; }
+
+ Index nonZeros() const
+ {
+ return std::size_t(m_matrix._outerIndexPtr()[m_outerStart+m_outerSize.value()])
+ - std::size_t(m_matrix._outerIndexPtr()[m_outerStart]);
+ }
+
+ const Scalar& lastCoeff() const
+ {
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(SparseInnerVectorSet);
+ eigen_assert(nonZeros()>0);
+ return m_matrix._valuePtr()[m_matrix._outerIndexPtr()[m_outerStart+1]-1];
+ }
+
+// template<typename Sparse>
+// inline SparseInnerVectorSet& operator=(const SparseMatrixBase<OtherDerived>& other)
+// {
+// return *this;
+// }
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ Index m_outerStart;
+ const internal::variable_if_dynamic<Index, Size> m_outerSize;
+
+};
+
+//----------
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th row of the matrix \c *this. For row-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::row(Index i) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the i-th column of the matrix \c *this. For column-major matrix only.
+ * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::col(Index i) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVector(i);
+}
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major).
+ */
+template<typename Derived>
+SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer)
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
+ * is col-major (resp. row-major). Read-only.
+ */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,1> SparseMatrixBase<Derived>::innerVector(Index outer) const
+{ return SparseInnerVectorSet<Derived,1>(derived(), outer); }
+
+//----------
+
+/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive rows of the matrix \c *this starting at row \a start. For row-major matrix only.
+  * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subrows(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(IsRowMajor,THIS_METHOD_IS_ONLY_FOR_ROW_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrix only. */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size)
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a size consecutive columns of the matrix \c *this starting at column \a start. For column-major matrix only.
+  * (read-only version) */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::subcols(Index start, Index size) const
+{
+ EIGEN_STATIC_ASSERT(!IsRowMajor,THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);
+ return innerVectors(start, size);
+}
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
+  * starting at \a outerStart if \c *this is col-major (resp. row-major).
+  */
+template<typename Derived>
+SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
+/** \returns the \a outerSize consecutive columns (resp. rows) of the matrix \c *this
+  * starting at \a outerStart if \c *this is col-major (resp. row-major). Read-only.
+  */
+template<typename Derived>
+const SparseInnerVectorSet<Derived,Dynamic> SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
+{ return SparseInnerVectorSet<Derived,Dynamic>(derived(), outerStart, outerSize); }
+
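+// Likewise, a contiguous range of inner vectors can be referenced at once
+// (sketch, with the column-major matrix A from the example above):
+//
+//   SparseMatrix<double> S = A.subcols(1,3);       // columns 1, 2 and 3
+//   SparseMatrix<double> T = A.innerVectors(1,3);  // equivalent for col-major
+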
+#endif // EIGEN_SPARSE_BLOCK_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
new file mode 100644
index 00000000000..cde5bbc0300
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseBinaryOp.h
@@ -0,0 +1,375 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_BINARY_OP_H
+#define EIGEN_SPARSE_CWISE_BINARY_OP_H
+
+// Here we have to handle 3 cases:
+// 1 - sparse op dense
+// 2 - dense op sparse
+// 3 - sparse op sparse
+// We also need to implement a 4th iterator for:
+// 4 - dense op dense
+// Finally, we also need to distinguish between the product and other operations:
+// configuration returned mode
+// 1 - sparse op dense product sparse
+// generic dense
+// 2 - dense op sparse product sparse
+// generic dense
+// 3 - sparse op sparse product sparse
+// generic sparse
+// 4 - dense op dense product dense
+// generic dense
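+
+// For instance (sketch, assuming two filled SparseMatrix<double> objects A
+// and B of identical size), a sum falls into case 3 "generic" and its
+// inner iterator merges the two nonzero patterns:
+//
+//   SparseMatrix<double> C = A + B;  // union of the nonzero patterns
+//   SparseMatrix<double> D = A - B;  // same traversal, different functor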
+
+namespace internal {
+
+template<> struct promote_storage_type<Dense,Sparse>
+{ typedef Sparse ret; };
+
+template<> struct promote_storage_type<Sparse,Dense>
+{ typedef Sparse ret; };
+
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived,
+ typename _LhsStorageMode = typename traits<Lhs>::StorageKind,
+ typename _RhsStorageMode = typename traits<Rhs>::StorageKind>
+class sparse_cwise_binary_op_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp, Lhs, Rhs, Sparse>
+ : public SparseMatrixBase<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
+{
+ public:
+ class InnerIterator;
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename BinaryOp, typename Lhs, typename Rhs>
+class CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator
+ : public internal::sparse_cwise_binary_op_inner_iterator_selector<BinaryOp,Lhs,Rhs,typename CwiseBinaryOpImpl<BinaryOp,Lhs,Rhs,Sparse>::InnerIterator>
+{
+ public:
+ typedef typename Lhs::Index Index;
+ typedef internal::sparse_cwise_binary_op_inner_iterator_selector<
+ BinaryOp,Lhs,Rhs, InnerIterator> Base;
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseBinaryOpImpl& binOp, Index outer)
+ : Base(binOp.derived(),outer)
+ {}
+};
+
+/***************************************************************************
+* Implementation of inner-iterators
+***************************************************************************/
+
+// template<typename T> struct internal::func_is_conjunction { enum { ret = false }; };
+// template<typename T> struct internal::func_is_conjunction<internal::scalar_product_op<T> > { enum { ret = true }; };
+
+// TODO generalize the internal::scalar_product_op specialization to all conjunctions if any!
+
+namespace internal {
+
+// sparse - sparse (generic)
+template<typename BinaryOp, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<BinaryOp, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename traits<CwiseBinaryXpr>::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ this->operator++();
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ if (m_lhsIter && m_rhsIter && (m_lhsIter.index() == m_rhsIter.index()))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), m_rhsIter.value());
+ ++m_lhsIter;
+ ++m_rhsIter;
+ }
+ else if (m_lhsIter && (!m_rhsIter || (m_lhsIter.index() < m_rhsIter.index())))
+ {
+ m_id = m_lhsIter.index();
+ m_value = m_functor(m_lhsIter.value(), Scalar(0));
+ ++m_lhsIter;
+ }
+ else if (m_rhsIter && (!m_lhsIter || (m_lhsIter.index() > m_rhsIter.index())))
+ {
+ m_id = m_rhsIter.index();
+ m_value = m_functor(Scalar(0), m_rhsIter.value());
+ ++m_rhsIter;
+ }
+ else
+ {
+ m_value = 0; // this is to avoid a compilation warning
+ m_id = -1;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_value; }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_id; }
+ EIGEN_STRONG_INLINE Index row() const { return Lhs::IsRowMajor ? m_lhsIter.row() : index(); }
+ EIGEN_STRONG_INLINE Index col() const { return Lhs::IsRowMajor ? index() : m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_id>=0; }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryOp& m_functor;
+ Scalar m_value;
+ Index m_id;
+};
+
+// sparse - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_lhsIter(xpr.lhs(),outer), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor())
+ {
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ }
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ ++m_rhsIter;
+ while (m_lhsIter && m_rhsIter && (m_lhsIter.index() != m_rhsIter.index()))
+ {
+ if (m_lhsIter.index() < m_rhsIter.index())
+ ++m_lhsIter;
+ else
+ ++m_rhsIter;
+ }
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_lhsIter.value(), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return (m_lhsIter && m_rhsIter); }
+
+ protected:
+ LhsIterator m_lhsIter;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+};
+
+// sparse - dense (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Sparse, Dense>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_LhsNested _LhsNested;
+ typedef typename traits<CwiseBinaryXpr>::RhsNested RhsNested;
+ typedef typename _LhsNested::InnerIterator LhsIterator;
+ typedef typename Lhs::Index Index;
+ enum { IsRowMajor = (int(Lhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_rhs(xpr.rhs()), m_lhsIter(xpr.lhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_lhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_lhsIter.value(),
+ m_rhs.coeff(IsRowMajor?m_outer:m_lhsIter.index(),IsRowMajor?m_lhsIter.index():m_outer)); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_lhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_lhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_lhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_lhsIter; }
+
+ protected:
+ const RhsNested m_rhs;
+ LhsIterator m_lhsIter;
+ const BinaryFunc m_functor;
+ const Index m_outer;
+};
+
+// dense - sparse (product)
+template<typename T, typename Lhs, typename Rhs, typename Derived>
+class sparse_cwise_binary_op_inner_iterator_selector<scalar_product_op<T>, Lhs, Rhs, Derived, Dense, Sparse>
+{
+ typedef scalar_product_op<T> BinaryFunc;
+ typedef CwiseBinaryOp<BinaryFunc, Lhs, Rhs> CwiseBinaryXpr;
+ typedef typename CwiseBinaryXpr::Scalar Scalar;
+ typedef typename traits<CwiseBinaryXpr>::_RhsNested _RhsNested;
+ typedef typename _RhsNested::InnerIterator RhsIterator;
+ typedef typename Lhs::Index Index;
+
+ enum { IsRowMajor = (int(Rhs::Flags)&RowMajorBit)==RowMajorBit };
+ public:
+
+ EIGEN_STRONG_INLINE sparse_cwise_binary_op_inner_iterator_selector(const CwiseBinaryXpr& xpr, Index outer)
+ : m_xpr(xpr), m_rhsIter(xpr.rhs(),outer), m_functor(xpr.functor()), m_outer(outer)
+ {}
+
+ EIGEN_STRONG_INLINE Derived& operator++()
+ {
+ ++m_rhsIter;
+ return *static_cast<Derived*>(this);
+ }
+
+ EIGEN_STRONG_INLINE Scalar value() const
+ { return m_functor(m_xpr.lhs().coeff(IsRowMajor?m_outer:m_rhsIter.index(),IsRowMajor?m_rhsIter.index():m_outer), m_rhsIter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_rhsIter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_rhsIter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_rhsIter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_rhsIter; }
+
+ protected:
+ const CwiseBinaryXpr& m_xpr;
+ RhsIterator m_rhsIter;
+ const BinaryFunc& m_functor;
+ const Index m_outer;
+};
+
+} // end namespace internal
+
+/***************************************************************************
+* Implementation of SparseMatrixBase and SparseCwise functions/operators
+***************************************************************************/
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>,
+// Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator-(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_difference_op<Scalar>,
+// Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator-=(const SparseMatrixBase<OtherDerived> &other)
+{
+ return *this = derived() - other.derived();
+}
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// SparseMatrixBase<Derived>::operator+(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return CwiseBinaryOp<internal::scalar_sum_op<Scalar>, Derived, OtherDerived>(derived(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE Derived &
+SparseMatrixBase<Derived>::operator+=(const SparseMatrixBase<OtherDerived>& other)
+{
+ return *this = derived() + other.derived();
+}
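+
+// Both compound operators above evaluate the right hand side into a
+// temporary and assign it back; e.g. (sketch, with sparse matrices A and B
+// of identical size):
+//
+//   A += B;  // equivalent to A = A + B;
+//   A -= B;  // equivalent to A = A - B;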
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+// SparseCwise<ExpressionType>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(_expression(), other.derived());
+// }
+
+template<typename Derived>
+template<typename OtherDerived>
+EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+SparseMatrixBase<Derived>::cwiseProduct(const MatrixBase<OtherDerived> &other) const
+{
+ return EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE(derived(), other.derived());
+}
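+
+// Sketch (assuming a filled SparseMatrix<double> A and a dense MatrixXd M of
+// the same size): the result keeps at most the sparsity of A, since only its
+// stored nonzeros can yield nonzero products (case 1 of the table above):
+//
+//   SparseMatrix<double> P = A.cwiseProduct(M);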
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const SparseMatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+//
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)
+// SparseCwise<ExpressionType>::operator/(const MatrixBase<OtherDerived> &other) const
+// {
+// return EIGEN_SPARSE_CWISE_BINOP_RETURN_TYPE(internal::scalar_quotient_op)(_expression(), other.derived());
+// }
+
+// template<typename ExpressionType>
+// template<typename OtherDerived>
+// inline ExpressionType& SparseCwise<ExpressionType>::operator*=(const SparseMatrixBase<OtherDerived> &other)
+// {
+// return m_matrix.const_cast_derived() = _expression() * other.derived();
+// }
+
+
+#endif // EIGEN_SPARSE_CWISE_BINARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
new file mode 100644
index 00000000000..aa068835fbb
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseCwiseUnaryOp.h
@@ -0,0 +1,146 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_CWISE_UNARY_OP_H
+#define EIGEN_SPARSE_CWISE_UNARY_OP_H
+
+// template<typename UnaryOp, typename MatrixType>
+// struct internal::traits<SparseCwiseUnaryOp<UnaryOp, MatrixType> > : internal::traits<MatrixType>
+// {
+// typedef typename internal::result_of<
+// UnaryOp(typename MatrixType::Scalar)
+// >::type Scalar;
+// typedef typename MatrixType::Nested MatrixTypeNested;
+// typedef typename internal::remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
+// enum {
+// CoeffReadCost = _MatrixTypeNested::CoeffReadCost + internal::functor_traits<UnaryOp>::Cost
+// };
+// };
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryOp<UnaryOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryOp<UnaryOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename UnaryOp, typename MatrixType>
+class CwiseUnaryOpImpl<UnaryOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryOpImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_XprTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryOpImpl& unaryOp, Index outer)
+ : m_iter(unaryOp.derived().nestedExpression(),outer), m_functor(unaryOp.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const UnaryOp m_functor;
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>
+ : public SparseMatrixBase<CwiseUnaryView<ViewOp, MatrixType> >
+{
+ public:
+
+ class InnerIterator;
+// typedef typename internal::remove_reference<LhsNested>::type _LhsNested;
+
+ typedef CwiseUnaryView<ViewOp, MatrixType> Derived;
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
+};
+
+template<typename ViewOp, typename MatrixType>
+class CwiseUnaryViewImpl<ViewOp,MatrixType,Sparse>::InnerIterator
+{
+ typedef typename CwiseUnaryViewImpl::Scalar Scalar;
+ typedef typename internal::traits<Derived>::_MatrixTypeNested _MatrixTypeNested;
+ typedef typename _MatrixTypeNested::InnerIterator MatrixTypeIterator;
+ typedef typename MatrixType::Index Index;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const CwiseUnaryViewImpl& unaryView, Index outer)
+ : m_iter(unaryView.derived().nestedExpression(),outer), m_functor(unaryView.derived().functor())
+ {}
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ { ++m_iter; return *this; }
+
+ EIGEN_STRONG_INLINE Scalar value() const { return m_functor(m_iter.value()); }
+ EIGEN_STRONG_INLINE Scalar& valueRef() { return m_functor(m_iter.valueRef()); }
+
+ EIGEN_STRONG_INLINE Index index() const { return m_iter.index(); }
+ EIGEN_STRONG_INLINE Index row() const { return m_iter.row(); }
+ EIGEN_STRONG_INLINE Index col() const { return m_iter.col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const { return m_iter; }
+
+ protected:
+ MatrixTypeIterator m_iter;
+ const ViewOp m_functor;
+};
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator*=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() *= other;
+ return derived();
+}
+
+template<typename Derived>
+EIGEN_STRONG_INLINE Derived&
+SparseMatrixBase<Derived>::operator/=(const Scalar& other)
+{
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ i.valueRef() /= other;
+ return derived();
+}
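+
+// Both scalar operators above update the stored nonzeros in place; e.g.
+// (sketch, with a filled SparseMatrix<double> A):
+//
+//   A *= 2.0;  // doubles every stored coefficient
+//   A /= 4.0;  // the sparsity pattern is left untouched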
+
+#endif // EIGEN_SPARSE_CWISE_UNARY_OP_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
new file mode 100644
index 00000000000..0f77aa5be99
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDenseProduct.h
@@ -0,0 +1,231 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEDENSEPRODUCT_H
+#define EIGEN_SPARSEDENSEPRODUCT_H
+
+template<typename Lhs, typename Rhs, int InnerSize> struct SparseDenseProductReturnType
+{
+ typedef SparseTimeDenseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct SparseDenseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Lhs,Rhs,false> Type;
+};
+
+template<typename Lhs, typename Rhs, int InnerSize> struct DenseSparseProductReturnType
+{
+ typedef DenseTimeSparseProduct<Lhs,Rhs> Type;
+};
+
+template<typename Lhs, typename Rhs> struct DenseSparseProductReturnType<Lhs,Rhs,1>
+{
+ typedef SparseDenseOuterProduct<Rhs,Lhs,true> Type;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, bool Tr>
+struct traits<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ typedef Sparse StorageKind;
+ typedef typename scalar_product_traits<typename traits<Lhs>::Scalar,
+ typename traits<Rhs>::Scalar>::ReturnType Scalar;
+ typedef typename Lhs::Index Index;
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsCoeffReadCost = traits<_LhsNested>::CoeffReadCost,
+ RhsCoeffReadCost = traits<_RhsNested>::CoeffReadCost,
+
+ RowsAtCompileTime = Tr ? int(traits<Rhs>::RowsAtCompileTime) : int(traits<Lhs>::RowsAtCompileTime),
+ ColsAtCompileTime = Tr ? int(traits<Lhs>::ColsAtCompileTime) : int(traits<Rhs>::ColsAtCompileTime),
+ MaxRowsAtCompileTime = Tr ? int(traits<Rhs>::MaxRowsAtCompileTime) : int(traits<Lhs>::MaxRowsAtCompileTime),
+ MaxColsAtCompileTime = Tr ? int(traits<Lhs>::MaxColsAtCompileTime) : int(traits<Rhs>::MaxColsAtCompileTime),
+
+ Flags = Tr ? RowMajorBit : 0,
+
+ CoeffReadCost = LhsCoeffReadCost + RhsCoeffReadCost + NumTraits<Scalar>::MulCost
+ };
+};
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs, bool Tr>
+class SparseDenseOuterProduct
+ : public SparseMatrixBase<SparseDenseOuterProduct<Lhs,Rhs,Tr> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseDenseOuterProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseDenseOuterProduct)
+ typedef internal::traits<SparseDenseOuterProduct> Traits;
+
+ private:
+
+ typedef typename Traits::LhsNested LhsNested;
+ typedef typename Traits::RhsNested RhsNested;
+ typedef typename Traits::_LhsNested _LhsNested;
+ typedef typename Traits::_RhsNested _RhsNested;
+
+ public:
+
+ class InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(!Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE SparseDenseOuterProduct(const Rhs& rhs, const Lhs& lhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ EIGEN_STATIC_ASSERT(Tr,YOU_MADE_A_PROGRAMMING_MISTAKE);
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return Tr ? m_rhs.rows() : m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return Tr ? m_lhs.cols() : m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+template<typename Lhs, typename Rhs, bool Transpose>
+class SparseDenseOuterProduct<Lhs,Rhs,Transpose>::InnerIterator : public _LhsNested::InnerIterator
+{
+ typedef typename _LhsNested::InnerIterator Base;
+ public:
+ EIGEN_STRONG_INLINE InnerIterator(const SparseDenseOuterProduct& prod, Index outer)
+ : Base(prod.lhs(), 0), m_outer(outer), m_factor(prod.rhs().coeff(outer))
+ {
+ }
+
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return Transpose ? Base::row() : m_outer; }
+ inline Index col() const { return Transpose ? m_outer : Base::row(); }
+
+ inline Scalar value() const { return Base::value() * m_factor; }
+
+ protected:
+ int m_outer;
+ Scalar m_factor;
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<SparseTimeDenseProduct<Lhs,Rhs> >
+ : traits<ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+ typedef MatrixXpr XprKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseTimeDenseProduct
+ : public ProductBase<SparseTimeDenseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseTimeDenseProduct)
+
+ SparseTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum { LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ typename Rhs::Scalar rhs_j = alpha * m_rhs.coeff(LhsIsRowMajor ? 0 : j,0);
+ typename Dest::RowXpr dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(LhsInnerIterator it(m_lhs,j); it ;++it)
+ {
+ if(LhsIsRowMajor) dest_j += (alpha*it.value()) * m_rhs.row(it.index());
+ else if(Rhs::ColsAtCompileTime==1) dest.coeffRef(it.index()) += it.value() * rhs_j;
+ else dest.row(it.index()) += (alpha*it.value()) * m_rhs.row(j);
+ }
+ }
+ }
+
+ private:
+ SparseTimeDenseProduct& operator=(const SparseTimeDenseProduct&);
+};
+
+
+// dense = dense * sparse
+namespace internal {
+template<typename Lhs, typename Rhs>
+struct traits<DenseTimeSparseProduct<Lhs,Rhs> >
+ : traits<ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class DenseTimeSparseProduct
+ : public ProductBase<DenseTimeSparseProduct<Lhs,Rhs>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseProduct)
+
+ DenseTimeSparseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Rhs::InnerIterator RhsInnerIterator;
+ enum { RhsIsRowMajor = (_Rhs::Flags&RowMajorBit)==RowMajorBit };
+ for(Index j=0; j<m_rhs.outerSize(); ++j)
+ for(RhsInnerIterator i(m_rhs,j); i; ++i)
+ dest.col(RhsIsRowMajor ? i.index() : j) += (alpha*i.value()) * m_lhs.col(RhsIsRowMajor ? j : i.index());
+ }
+
+ private:
+ DenseTimeSparseProduct& operator=(const DenseTimeSparseProduct&);
+};
+
+// sparse * dense
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const MatrixBase<OtherDerived> &other) const
+{
+ return typename SparseDenseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
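+
+// Sketch (assuming a filled SparseMatrix<double> A): the expression type is
+// selected at compile time via SparseDenseProductReturnType, here yielding a
+// SparseTimeDenseProduct evaluated by scaleAndAddTo() above:
+//
+//   VectorXd x = VectorXd::Ones(A.cols());
+//   VectorXd y = A * x;                              // sparse * dense vector
+//   MatrixXd Y = A * MatrixXd::Random(A.cols(),4);   // sparse * dense matrix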
+
+#endif // EIGEN_SPARSEDENSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
new file mode 100644
index 00000000000..fb9a29c051b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDiagonalProduct.h
@@ -0,0 +1,195 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+#define EIGEN_SPARSE_DIAGONAL_PRODUCT_H
+
+// The product of a diagonal matrix with a sparse matrix can be easily
+// implemented using expression templates.
+// We have to consider two very different cases:
+// 1 - diag * row-major sparse
+// => each inner vector <=> scalar * sparse vector product
+// => so we can reuse CwiseUnaryOp::InnerIterator
+// 2 - diag * col-major sparse
+//    => each inner vector <=> dense vector * sparse vector cwise product
+// => again, we can reuse specialization of CwiseBinaryOp::InnerIterator
+// for that particular case
+// The two other cases are symmetric.
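+//
+// Sketch (assuming a filled, square SparseMatrix<double> A and using
+// namespace Eigen): both orders build a SparseDiagonalProduct expression
+// whose inner iterator is picked among the specializations below:
+//
+//   VectorXd d = VectorXd::Constant(A.rows(), 2.0);
+//   SparseMatrix<double> L = d.asDiagonal() * A;  // scales the rows
+//   SparseMatrix<double> R = A * d.asDiagonal();  // scales the columns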
+
+namespace internal {
+
+template<typename Lhs, typename Rhs>
+struct traits<SparseDiagonalProduct<Lhs, Rhs> >
+{
+ typedef typename remove_all<Lhs>::type _Lhs;
+ typedef typename remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = _Lhs::RowsAtCompileTime,
+ ColsAtCompileTime = _Rhs::ColsAtCompileTime,
+
+ MaxRowsAtCompileTime = _Lhs::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _Rhs::MaxColsAtCompileTime,
+
+ SparseFlags = is_diagonal<_Lhs>::ret ? int(_Rhs::Flags) : int(_Lhs::Flags),
+ Flags = (SparseFlags&RowMajorBit),
+ CoeffReadCost = Dynamic
+ };
+};
+
+enum {SDP_IsDiagonal, SDP_IsSparseRowMajor, SDP_IsSparseColMajor};
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType, int RhsMode, int LhsMode>
+class sparse_diagonal_product_inner_iterator_selector;
+
+} // end namespace internal
+
+template<typename Lhs, typename Rhs>
+class SparseDiagonalProduct
+ : public SparseMatrixBase<SparseDiagonalProduct<Lhs,Rhs> >,
+ internal::no_assignment_operator
+{
+ typedef typename Lhs::Nested LhsNested;
+ typedef typename Rhs::Nested RhsNested;
+
+ typedef typename internal::remove_all<LhsNested>::type _LhsNested;
+ typedef typename internal::remove_all<RhsNested>::type _RhsNested;
+
+ enum {
+ LhsMode = internal::is_diagonal<_LhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_LhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor,
+ RhsMode = internal::is_diagonal<_RhsNested>::ret ? internal::SDP_IsDiagonal
+ : (_RhsNested::Flags&RowMajorBit) ? internal::SDP_IsSparseRowMajor : internal::SDP_IsSparseColMajor
+ };
+
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseDiagonalProduct)
+
+ typedef internal::sparse_diagonal_product_inner_iterator_selector
+ <_LhsNested,_RhsNested,SparseDiagonalProduct,LhsMode,RhsMode> InnerIterator;
+
+ EIGEN_STRONG_INLINE SparseDiagonalProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows() && "invalid sparse matrix * diagonal matrix product");
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseRowMajor>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Lhs::Scalar>,const Rhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.rhs()*(expr.lhs().diagonal().coeff(outer)), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsDiagonal,SDP_IsSparseColMajor>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Lhs::Scalar>,
+ SparseInnerVectorSet<Rhs,1>,
+ typename Lhs::DiagonalVectorType>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.rhs().innerVector(outer).cwiseProduct(expr.lhs().diagonal()), 0)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseColMajor,SDP_IsDiagonal>
+ : public CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator
+{
+ typedef typename CwiseUnaryOp<scalar_multiple_op<typename Rhs::Scalar>,const Lhs>::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+ : Base(expr.lhs()*expr.rhs().diagonal().coeff(outer), outer)
+ {}
+};
+
+template<typename Lhs, typename Rhs, typename SparseDiagonalProductType>
+class sparse_diagonal_product_inner_iterator_selector
+<Lhs,Rhs,SparseDiagonalProductType,SDP_IsSparseRowMajor,SDP_IsDiagonal>
+ : public CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator
+{
+ typedef typename CwiseBinaryOp<
+ scalar_product_op<typename Rhs::Scalar>,
+ SparseInnerVectorSet<Lhs,1>,
+ Transpose<const typename Rhs::DiagonalVectorType> >::InnerIterator Base;
+ typedef typename Lhs::Index Index;
+ public:
+ inline sparse_diagonal_product_inner_iterator_selector(
+ const SparseDiagonalProductType& expr, Index outer)
+      : Base(expr.lhs().innerVector(outer).cwiseProduct(expr.rhs().diagonal().transpose()), 0)
+ {}
+};
+
+} // end namespace internal
+
+// SparseMatrixBase functions
+
+template<typename Derived>
+template<typename OtherDerived>
+const SparseDiagonalProduct<Derived,OtherDerived>
+SparseMatrixBase<Derived>::operator*(const DiagonalBase<OtherDerived> &other) const
+{
+ return SparseDiagonalProduct<Derived,OtherDerived>(this->derived(), other.derived());
+}
+
+#endif // EIGEN_SPARSE_DIAGONAL_PRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseDot.h b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
new file mode 100644
index 00000000000..1f10f71a402
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseDot.h
@@ -0,0 +1,97 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_DOT_H
+#define EIGEN_SPARSE_DOT_H
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const MatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+ eigen_assert(other.size()>0 && "you are using a non initialized vector");
+
+ typename Derived::InnerIterator i(derived(),0);
+ Scalar res = 0;
+ while (i)
+ {
+ res += internal::conj(i.value()) * other.coeff(i.index());
+ ++i;
+ }
+ return res;
+}
+
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::dot(const SparseMatrixBase<OtherDerived>& other) const
+{
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+ EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+ EIGEN_STATIC_ASSERT_SAME_VECTOR_SIZE(Derived,OtherDerived)
+ EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+ YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+
+ eigen_assert(size() == other.size());
+
+ typename Derived::InnerIterator i(derived(),0);
+ typename OtherDerived::InnerIterator j(other.derived(),0);
+ Scalar res = 0;
+ while (i && j)
+ {
+ if (i.index()==j.index())
+ {
+ res += internal::conj(i.value()) * j.value();
+ ++i; ++j;
+ }
+ else if (i.index()<j.index())
+ ++i;
+ else
+ ++j;
+ }
+ return res;
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::squaredNorm() const
+{
+ return internal::real((*this).cwiseAbs2().sum());
+}
+
+template<typename Derived>
+inline typename NumTraits<typename internal::traits<Derived>::Scalar>::Real
+SparseMatrixBase<Derived>::norm() const
+{
+ return internal::sqrt(squaredNorm());
+}
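+
+// Sketch (assuming two filled SparseVector<double> objects u and v of the
+// same size, and a dense VectorXd w): each reduction below traverses only
+// the stored nonzeros of its sparse operands:
+//
+//   double a = u.dot(v);  // sparse-sparse: merges the two index lists
+//   double b = u.dot(w);  // sparse-dense: a single pass over u
+//   double n = u.norm();  // sqrt(u.squaredNorm())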
+
+#endif // EIGEN_SPARSE_DOT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
new file mode 100644
index 00000000000..f00b3d6469b
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseFuzzy.h
@@ -0,0 +1,41 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_FUZZY_H
+#define EIGEN_SPARSE_FUZZY_H
+
+// template<typename Derived>
+// template<typename OtherDerived>
+// bool SparseMatrixBase<Derived>::isApprox(
+// const OtherDerived& other,
+// typename NumTraits<Scalar>::Real prec
+// ) const
+// {
+// const typename internal::nested<Derived,2>::type nested(derived());
+// const typename internal::nested<OtherDerived,2>::type otherNested(other.derived());
+// return (nested - otherNested).cwise().abs2().sum()
+// <= prec * prec * (std::min)(nested.cwise().abs2().sum(), otherNested.cwise().abs2().sum());
+// }
+
+#endif // EIGEN_SPARSE_FUZZY_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
new file mode 100644
index 00000000000..0e175ec6e71
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrix.h
@@ -0,0 +1,651 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIX_H
+#define EIGEN_SPARSEMATRIX_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrix
+ *
+ * \brief The main sparse matrix class
+ *
+ * This class implements a sparse matrix using the very common compressed row/column storage
+ * scheme.
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
+ * is RowMajor. The default is 0 which means column-major.
+ * \tparam _Index the type of the indices. Default is \c int.
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
+ */
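+
+// A minimal usage sketch (random access insertion; see insert() and
+// finalize() below for the exact constraints):
+//
+//   SparseMatrix<double> A(1000,1000);  // column-major by default
+//   A.reserve(3000);                    // optional, limits reallocations
+//   A.insert(2,3) = 1.5;                // the coefficient must not exist yet
+//   A.insert(0,0) = 2.0;
+//   A.finalize();                       // mandatory after an insertion session
+//   double x = A.coeff(2,3);            // x == 1.5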
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ RowsAtCompileTime = Dynamic,
+ ColsAtCompileTime = Dynamic,
+ MaxRowsAtCompileTime = Dynamic,
+ MaxColsAtCompileTime = Dynamic,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+
+} // end namespace internal
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseMatrix
+ : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
+// using Base::operator=;
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
+    // FIXME: why are these operators already available ???
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, *=)
+ // EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(SparseMatrix, /=)
+
+ typedef MappedSparseMatrix<Scalar,Flags> Map;
+ using Base::IsRowMajor;
+ typedef CompressedStorage<Scalar,Index> Storage;
+ enum {
+ Options = _Options
+ };
+
+ protected:
+
+ typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
+
+ Index m_outerSize;
+ Index m_innerSize;
+ Index* m_outerIndex;
+ CompressedStorage<Scalar,Index> m_data;
+
+ public:
+
+ inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
+ inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
+
+ inline Index innerSize() const { return m_innerSize; }
+ inline Index outerSize() const { return m_outerSize; }
+ inline Index innerNonZeros(Index j) const { return m_outerIndex[j+1]-m_outerIndex[j]; }
+
+ inline const Scalar* _valuePtr() const { return &m_data.value(0); }
+ inline Scalar* _valuePtr() { return &m_data.value(0); }
+
+ inline const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ inline Index* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline const Index* _outerIndexPtr() const { return m_outerIndex; }
+ inline Index* _outerIndexPtr() { return m_outerIndex; }
+
+ inline Storage& data() { return m_data; }
+ inline const Storage& data() const { return m_data; }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+ return m_data.atInRange(m_outerIndex[outer], m_outerIndex[outer+1], inner);
+ }
+
+ inline Scalar& coeffRef(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index start = m_outerIndex[outer];
+ Index end = m_outerIndex[outer+1];
+ eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
+ eigen_assert(end>start && "coeffRef cannot be called on a zero coefficient");
+ const Index p = m_data.searchLowerIndex(start,end-1,inner);
+ eigen_assert((p<end) && (m_data.index(p)==inner) && "coeffRef cannot be called on a zero coefficient");
+ return m_data.value(p);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ /** Removes all non zeros */
+ inline void setZero()
+ {
+ m_data.clear();
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ /** Preallocates \a reserveSize non zeros */
+ inline void reserve(Index reserveSize)
+ {
+ m_data.reserve(reserveSize);
+ }
+
+ //--- low level purely coherent filling ---
+
+ /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
+ * - the nonzero does not already exist
+ * - the new coefficient is the last one according to the storage order
+ *
+      * Before filling a given inner vector you must call the startVec(Index) function.
+ *
+ * After an insertion session, you should call the finalize() function.
+ *
+ * \sa insert, insertBackByOuterInner, startVec */
+ inline Scalar& insertBack(Index row, Index col)
+ {
+ return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
+ }
+
+ /** \sa insertBack, startVec */
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
+ eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \warning use it only if you know what you are doing */
+ inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
+ {
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \sa insertBack, insertBackByOuterInner */
+ inline void startVec(Index outer)
+ {
+ eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
+ eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
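+
+    // Sketch of this strictly ordered, low-level filling API (i, value and
+    // nnzEstimate are placeholders; for a column-major SparseMatrix<double> A,
+    // inner indices are row indices and must strictly increase within a column):
+    //
+    //   A.setZero();
+    //   A.reserve(nnzEstimate);
+    //   for (int j=0; j<A.outerSize(); ++j)
+    //   {
+    //     A.startVec(j);
+    //     A.insertBack(i,j) = value;  // possibly several times per column
+    //   }
+    //   A.finalize();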
+
+ //---
+
+ /** \returns a reference to a novel non zero coefficient with coordinates \a row x \a col.
+ * The non zero coefficient must \b not already exist.
+ *
+ * \warning This function can be extremely slow if the non zero coefficients
+ * are not inserted in a coherent order.
+ *
+ * After an insertion session, you should call the finalize() function.
+ */
+ EIGEN_DONT_INLINE Scalar& insert(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ Index previousOuter = outer;
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
+ {
+ m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
+ --previousOuter;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+
+ // here we have to handle the tricky case where the outerIndex array
+ // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
+ // the 2nd inner vector...
+ bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
+ && (size_t(m_outerIndex[outer+1]) == m_data.size());
+
+ size_t startId = m_outerIndex[outer];
+ // FIXME let's make sure sizeof(long int) == sizeof(size_t)
+ size_t p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ float reallocRatio = 1;
+ if (m_data.allocatedSize()<=m_data.size())
+ {
+ // if there is no preallocated memory, let's reserve a minimum of 32 elements
+ if (m_data.size()==0)
+ {
+ m_data.reserve(32);
+ }
+ else
+ {
+ // we need to reallocate the data, to reduce multiple reallocations
+ // we use a smart resize algorithm based on the current filling ratio
+ // in addition, we use float to avoid integers overflows
+ float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
+ reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
+ // furthermore we bound the realloc ratio to:
+ // 1) reduce multiple minor realloc when the matrix is almost filled
+ // 2) avoid to allocate too much memory when the matrix is almost empty
+ reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
+ }
+ }
+ m_data.resize(m_data.size()+1,reallocRatio);
+
+ if (!isLastVec)
+ {
+ if (previousOuter==-1)
+ {
+ // oops wrong guess.
+ // let's correct the outer offsets
+ for (Index k=0; k<=(outer+1); ++k)
+ m_outerIndex[k] = 0;
+ Index k=outer+1;
+ while(m_outerIndex[k]==0)
+ m_outerIndex[k++] = 1;
+ while (k<=m_outerSize && m_outerIndex[k]!=0)
+ m_outerIndex[k++]++;
+ p = 0;
+ --k;
+ k = m_outerIndex[k]-1;
+ while (k>0)
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ else
+ {
+ // we are not inserting into the last inner vec
+ // update outer indices:
+ Index j = outer+2;
+ while (j<=m_outerSize && m_outerIndex[j]!=0)
+ m_outerIndex[j++]++;
+ --j;
+ // shift data of last vecs:
+ Index k = m_outerIndex[j]-1;
+ while (k>=Index(p))
+ {
+ m_data.index(k) = m_data.index(k-1);
+ m_data.value(k) = m_data.value(k-1);
+ k--;
+ }
+ }
+ }
+
+ while ( (p > startId) && (m_data.index(p-1) > inner) )
+ {
+ m_data.index(p) = m_data.index(p-1);
+ m_data.value(p) = m_data.value(p-1);
+ --p;
+ }
+
+ m_data.index(p) = inner;
+ return (m_data.value(p) = 0);
+ }
+
+
+
+
+ /** Must be called after inserting a set of non zero entries.
+ */
+ inline void finalize()
+ {
+ Index size = static_cast<Index>(m_data.size());
+ Index i = m_outerSize;
+ // find the last filled column
+ while (i>=0 && m_outerIndex[i]==0)
+ --i;
+ ++i;
+ while (i<=m_outerSize)
+ {
+ m_outerIndex[i] = size;
+ ++i;
+ }
+ }
+
+    /** Suppress all nonzeros which are smaller than \a reference under the tolerance \a epsilon */
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ prune(default_prunning_func(reference,epsilon));
+ }
+
+ /** Suppress all nonzeros which do not satisfy the predicate \a keep.
+ * The functor type \a KeepFunc must implement the following function:
+ * \code
+ * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
+ * \endcode
+ * \sa prune(Scalar,RealScalar)
+ */
+ template<typename KeepFunc>
+ void prune(const KeepFunc& keep = KeepFunc())
+ {
+ Index k = 0;
+ for(Index j=0; j<m_outerSize; ++j)
+ {
+ Index previousStart = m_outerIndex[j];
+ m_outerIndex[j] = k;
+ Index end = m_outerIndex[j+1];
+ for(Index i=previousStart; i<end; ++i)
+ {
+ if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
+ {
+ m_data.value(k) = m_data.value(i);
+ m_data.index(k) = m_data.index(i);
+ ++k;
+ }
+ }
+ }
+ m_outerIndex[m_outerSize] = k;
+ m_data.resize(k,0);
+ }
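+
+    // Sketch of a custom predicate keeping, e.g., only the lower triangular
+    // part of a filled SparseMatrix<double> A (KeepLower is a placeholder name):
+    //
+    //   struct KeepLower {
+    //     bool operator()(const int& row, const int& col, const double&) const
+    //     { return row>=col; }
+    //   };
+    //   A.prune(KeepLower());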
+
+ /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero
+ * \sa resizeNonZeros(Index), reserve(), setZero()
+ */
+ void resize(Index rows, Index cols)
+ {
+ const Index outerSize = IsRowMajor ? rows : cols;
+ m_innerSize = IsRowMajor ? cols : rows;
+ m_data.clear();
+ if (m_outerSize != outerSize || m_outerSize==0)
+ {
+ delete[] m_outerIndex;
+ m_outerIndex = new Index [outerSize+1];
+ m_outerSize = outerSize;
+ }
+ memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
+ }
+
+ /** Low level API
+ * Resize the nonzero vector to \a size */
+ void resizeNonZeros(Index size)
+ {
+ m_data.resize(size);
+ }
+
+ /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
+ inline SparseMatrix()
+ : m_outerSize(-1), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(0, 0);
+ }
+
+ /** Constructs a \a rows \c x \a cols empty matrix */
+ inline SparseMatrix(Index rows, Index cols)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ resize(rows, cols);
+ }
+
+ /** Constructs a sparse matrix from the sparse expression \a other */
+ template<typename OtherDerived>
+ inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
+ : m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Copy constructor */
+ inline SparseMatrix(const SparseMatrix& other)
+ : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0)
+ {
+ *this = other.derived();
+ }
+
+ /** Swap the content of two sparse matrices of same type (optimization) */
+ inline void swap(SparseMatrix& other)
+ {
+ //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
+ std::swap(m_outerIndex, other.m_outerIndex);
+ std::swap(m_innerSize, other.m_innerSize);
+ std::swap(m_outerSize, other.m_outerSize);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseMatrix& operator=(const SparseMatrix& other)
+ {
+// std::cout << "SparseMatrix& operator=(const SparseMatrix& other)\n";
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.rows(), other.cols());
+ memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ { return Base::operator=(product); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
+ { return Base::operator=(other); }
+
+ template<typename OtherDerived>
+ inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
+ { return Base::operator=(other); }
+ #endif
+
+ template<typename OtherDerived>
+ EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ if (needToTranspose)
+ {
+ // two-pass algorithm:
+ // 1 - compute the number of coeffs per dest inner vector
+ // 2 - do the actual copy/eval
+ // Since each coeff of the rhs has to be evaluated twice, evaluate it into a temporary if needed
+ typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+ typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ resize(other.rows(), other.cols());
+ Eigen::Map<Matrix<Index, Dynamic, 1> > (m_outerIndex,outerSize()).setZero();
+ // pass 1
+ // FIXME the above copy could be merged with that pass
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ ++m_outerIndex[it.index()];
+
+ // prefix sum: turn the per-vector counts into start offsets
+ Index count = 0;
+ VectorXi positions(outerSize());
+ for (Index j=0; j<outerSize(); ++j)
+ {
+ Index tmp = m_outerIndex[j];
+ m_outerIndex[j] = count;
+ positions[j] = count;
+ count += tmp;
+ }
+ m_outerIndex[outerSize()] = count;
+ // alloc
+ m_data.resize(count);
+ // pass 2
+ for (Index j=0; j<otherCopy.outerSize(); ++j)
+ {
+ for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+ {
+ Index pos = positions[it.index()]++;
+ m_data.index(pos) = j;
+ m_data.value(pos) = it.value();
+ }
+ }
+ return *this;
+ }
+ else
+ {
+ // there is no special optimization
+ return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+ }
+ }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
+ {
+ EIGEN_DBG_SPARSE(
+ s << "Nonzero entries:\n";
+ for (Index i=0; i<m.nonZeros(); ++i)
+ {
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ }
+ s << std::endl;
+ s << std::endl;
+ s << "Column pointers:\n";
+ for (Index i=0; i<m.outerSize(); ++i)
+ {
+ s << m.m_outerIndex[i] << " ";
+ }
+ s << " $" << std::endl;
+ s << std::endl;
+ );
+ s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
+ return s;
+ }
+
+ /** Destructor */
+ inline ~SparseMatrix()
+ {
+ delete[] m_outerIndex;
+ }
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve()
+ * Initializes the filling process of \c *this.
+ * \param reserveSize approximate number of nonzeros
+ * Note that the matrix \c *this is zeroed.
+ */
+ EIGEN_DEPRECATED void startFill(Index reserveSize = 1000)
+ {
+ setZero();
+ m_data.reserve(reserveSize);
+ }
+
+ /** \deprecated use insert()
+ * Like fill() but with random inner coordinates.
+ */
+ EIGEN_DEPRECATED Scalar& fillrand(Index row, Index col)
+ {
+ return insert(row,col);
+ }
+
+ /** \deprecated use insert()
+ */
+ EIGEN_DEPRECATED Scalar& fill(Index row, Index col)
+ {
+ const Index outer = IsRowMajor ? row : col;
+ const Index inner = IsRowMajor ? col : row;
+
+ if (m_outerIndex[outer+1]==0)
+ {
+ // we start a new inner vector
+ Index i = outer;
+ while (i>=0 && m_outerIndex[i]==0)
+ {
+ m_outerIndex[i] = m_data.size();
+ --i;
+ }
+ m_outerIndex[outer+1] = m_outerIndex[outer];
+ }
+ else
+ {
+ eigen_assert(m_data.index(m_data.size()-1)<inner && "wrong sorted insertion");
+ }
+// std::cerr << size_t(m_outerIndex[outer+1]) << " == " << m_data.size() << "\n";
+ eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size());
+ Index p = m_outerIndex[outer+1];
+ ++m_outerIndex[outer+1];
+
+ m_data.append(0, inner);
+ return m_data.value(p);
+ }
+
+ /** \deprecated use finalize */
+ EIGEN_DEPRECATED void endFill() { finalize(); }
+
+# ifdef EIGEN_SPARSEMATRIX_PLUGIN
+# include EIGEN_SPARSEMATRIX_PLUGIN
+# endif
+
+private:
+ struct default_pruning_func {
+ default_pruning_func(Scalar ref, RealScalar eps) : reference(ref), epsilon(eps) {}
+ inline bool operator() (const Index&, const Index&, const Scalar& value) const
+ {
+ return !internal::isMuchSmallerThan(value, reference, epsilon);
+ }
+ Scalar reference;
+ RealScalar epsilon;
+ };
+};
+
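+// Nonzeros are visited one inner vector at a time through InnerIterator.
+// An illustrative sketch (the matrix \c m and the function \c visit are
+// hypothetical):
+//
+//   SparseMatrix<double> m; // filled elsewhere
+//   for (int j=0; j<m.outerSize(); ++j)
+//     for (SparseMatrix<double>::InnerIterator it(m,j); it; ++it)
+//       visit(it.row(), it.col(), it.value());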
+template<typename Scalar, int _Options, typename _Index>
+class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseMatrix& mat, Index outer)
+ : m_values(mat._valuePtr()), m_indices(mat._innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer]), m_end(mat.m_outerIndex[outer+1])
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline const Scalar& value() const { return m_values[m_id]; }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
+
+ inline Index index() const { return m_indices[m_id]; }
+ inline Index outer() const { return m_outer; }
+ inline Index row() const { return IsRowMajor ? m_outer : index(); }
+ inline Index col() const { return IsRowMajor ? index() : m_outer; }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const Scalar* m_values;
+ const Index* m_indices;
+ const Index m_outer;
+ Index m_id;
+ const Index m_end;
+};
+
+#endif // EIGEN_SPARSEMATRIX_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
new file mode 100644
index 00000000000..c01981bc935
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseMatrixBase.h
@@ -0,0 +1,706 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEMATRIXBASE_H
+#define EIGEN_SPARSEMATRIXBASE_H
+
+/** \ingroup Sparse_Module
+ *
+ * \class SparseMatrixBase
+ *
+ * \brief Base class of any sparse matrices or sparse expressions
+ *
+ * \tparam Derived
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEMATRIXBASE_PLUGIN.
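+ *
+ * A common use is to write generic functions accepting any sparse expression, e.g.
+ * (an illustrative sketch; \c countStored is hypothetical):
+ * \code
+ * template<typename Derived>
+ * typename Derived::Index countStored(const SparseMatrixBase<Derived>& m)
+ * { return m.nonZeros(); }
+ * \endcode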
+ */
+template<typename Derived> class SparseMatrixBase : public EigenBase<Derived>
+{
+ public:
+
+ typedef typename internal::traits<Derived>::Scalar Scalar;
+ typedef typename internal::packet_traits<Scalar>::type PacketScalar;
+ typedef typename internal::traits<Derived>::StorageKind StorageKind;
+ typedef typename internal::traits<Derived>::Index Index;
+
+ typedef SparseMatrixBase StorageBaseType;
+ typedef EigenBase<Derived> Base;
+
+ template<typename OtherDerived>
+ Derived& operator=(const EigenBase<OtherDerived> &other)
+ {
+ other.derived().evalTo(derived());
+ return derived();
+ }
+
+// using Base::operator=;
+
+ enum {
+
+ RowsAtCompileTime = internal::traits<Derived>::RowsAtCompileTime,
+ /**< The number of rows at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), ColsAtCompileTime, SizeAtCompileTime */
+
+ ColsAtCompileTime = internal::traits<Derived>::ColsAtCompileTime,
+ /**< The number of columns at compile-time. This is just a copy of the value provided
+ * by the \a Derived type. If a value is not known at compile-time,
+ * it is set to the \a Dynamic constant.
+ * \sa MatrixBase::rows(), MatrixBase::cols(), RowsAtCompileTime, SizeAtCompileTime */
+
+
+ SizeAtCompileTime = (internal::size_at_compile_time<internal::traits<Derived>::RowsAtCompileTime,
+ internal::traits<Derived>::ColsAtCompileTime>::ret),
+ /**< This is equal to the number of coefficients, i.e. the number of
+ * rows times the number of columns, or to \a Dynamic if this is not
+ * known at compile-time. \sa RowsAtCompileTime, ColsAtCompileTime */
+
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+
+ MaxSizeAtCompileTime = (internal::size_at_compile_time<MaxRowsAtCompileTime,
+ MaxColsAtCompileTime>::ret),
+
+ IsVectorAtCompileTime = RowsAtCompileTime == 1 || ColsAtCompileTime == 1,
+ /**< This is set to true if either the number of rows or the number of
+ * columns is known at compile-time to be equal to 1. Indeed, in that case,
+ * we are dealing with a column-vector (if there is only one column) or with
+ * a row-vector (if there is only one row). */
+
+ Flags = internal::traits<Derived>::Flags,
+ /**< This stores expression \ref flags flags which may or may not be inherited by new expressions
+ * constructed from this one. See the \ref flags "list of flags".
+ */
+
+ CoeffReadCost = internal::traits<Derived>::CoeffReadCost,
+ /**< This is a rough measure of how expensive it is to read one coefficient from
+ * this expression.
+ */
+
+ IsRowMajor = Flags&RowMajorBit ? 1 : 0,
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ _HasDirectAccess = (int(Flags)&DirectAccessBit) ? 1 : 0 // workaround sunCC
+ #endif
+ };
+
+ /* \internal the return type of MatrixBase::conjugate() */
+// typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+// const SparseCwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Derived>,
+// const Derived&
+// >::type ConjugateReturnType;
+ /* \internal the return type of MatrixBase::real() */
+// typedef SparseCwiseUnaryOp<internal::scalar_real_op<Scalar>, Derived> RealReturnType;
+ /* \internal the return type of MatrixBase::imag() */
+// typedef SparseCwiseUnaryOp<internal::scalar_imag_op<Scalar>, Derived> ImagReturnType;
+ /** \internal the return type of MatrixBase::adjoint() */
+ typedef typename internal::conditional<NumTraits<Scalar>::IsComplex,
+ CwiseUnaryOp<internal::scalar_conjugate_op<Scalar>, Eigen::Transpose<const Derived> >,
+ Transpose<const Derived>
+ >::type AdjointReturnType;
+
+
+ typedef SparseMatrix<Scalar, Flags&RowMajorBit ? RowMajor : ColMajor> PlainObject;
+
+#define EIGEN_CURRENT_STORAGE_BASE_CLASS Eigen::SparseMatrixBase
+# include "../plugins/CommonCwiseUnaryOps.h"
+# include "../plugins/CommonCwiseBinaryOps.h"
+# include "../plugins/MatrixCwiseUnaryOps.h"
+# include "../plugins/MatrixCwiseBinaryOps.h"
+# ifdef EIGEN_SPARSEMATRIXBASE_PLUGIN
+# include EIGEN_SPARSEMATRIXBASE_PLUGIN
+# endif
+# undef EIGEN_CURRENT_STORAGE_BASE_CLASS
+
+#ifndef EIGEN_PARSED_BY_DOXYGEN
+ /** This is the "real scalar" type; if the \a Scalar type is already real numbers
+ * (e.g. int, float or double) then \a RealScalar is just the same as \a Scalar. If
+ * \a Scalar is \a std::complex<T> then RealScalar is \a T.
+ *
+ * \sa class NumTraits
+ */
+ typedef typename NumTraits<Scalar>::Real RealScalar;
+
+ /** \internal the return type of coeff()
+ */
+ typedef typename internal::conditional<_HasDirectAccess, const Scalar&, Scalar>::type CoeffReturnType;
+
+ /** \internal Represents a matrix with all coefficients equal to one another*/
+ typedef CwiseNullaryOp<internal::scalar_constant_op<Scalar>,Matrix<Scalar,Dynamic,Dynamic> > ConstantReturnType;
+
+ /** type of the equivalent square matrix */
+ typedef Matrix<Scalar,EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime),
+ EIGEN_SIZE_MAX(RowsAtCompileTime,ColsAtCompileTime)> SquareMatrixType;
+
+ inline const Derived& derived() const { return *static_cast<const Derived*>(this); }
+ inline Derived& derived() { return *static_cast<Derived*>(this); }
+ inline Derived& const_cast_derived() const
+ { return *static_cast<Derived*>(const_cast<SparseMatrixBase*>(this)); }
+#endif // not EIGEN_PARSED_BY_DOXYGEN
+
+ /** \returns the number of rows. \sa cols(), RowsAtCompileTime */
+ inline Index rows() const { return derived().rows(); }
+ /** \returns the number of columns. \sa rows(), ColsAtCompileTime*/
+ inline Index cols() const { return derived().cols(); }
+ /** \returns the number of coefficients, which is \a rows()*cols().
+ * \sa rows(), cols(), SizeAtCompileTime. */
+ inline Index size() const { return rows() * cols(); }
+ /** \returns the number of nonzero coefficients, which is in practice the number
+ * of stored coefficients. */
+ inline Index nonZeros() const { return derived().nonZeros(); }
+ /** \returns true if either the number of rows or the number of columns is equal to 1.
+ * In other words, this function returns
+ * \code rows()==1 || cols()==1 \endcode
+ * \sa rows(), cols(), IsVectorAtCompileTime. */
+ inline bool isVector() const { return rows()==1 || cols()==1; }
+ /** \returns the size of the storage major dimension,
+ * i.e., the number of columns for a column-major matrix, and the number of rows otherwise */
+ Index outerSize() const { return (int(Flags)&RowMajorBit) ? this->rows() : this->cols(); }
+ /** \returns the size of the inner dimension according to the storage order,
+ * i.e., the number of rows for a column-major matrix, and the number of columns otherwise */
+ Index innerSize() const { return (int(Flags)&RowMajorBit) ? this->cols() : this->rows(); }
+
+ bool isRValue() const { return m_isRValue; }
+ Derived& markAsRValue() { m_isRValue = true; return derived(); }
+
+ SparseMatrixBase() : m_isRValue(false) { /* TODO check flags */ }
+
+ inline Derived& operator=(const Derived& other)
+ {
+// std::cout << "Derived& operator=(const Derived& other)\n";
+// if (other.isRValue())
+// derived().swap(other.const_cast_derived());
+// else
+ this->operator=<Derived>(other);
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ Derived& operator=(const ReturnByValue<OtherDerived>& other)
+ {
+ other.evalTo(derived());
+ return derived();
+ }
+
+
+ template<typename OtherDerived>
+ inline void assignGeneric(const OtherDerived& other)
+ {
+// std::cout << "Derived& operator=(const MatrixBase<OtherDerived>& other)\n";
+ //const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+ eigen_assert(( ((internal::traits<Derived>::SupportedAccessPatterns&OuterRandomAccessPattern)==OuterRandomAccessPattern) ||
+ (!((Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit)))) &&
+ "the transpose operation is supposed to be handled in SparseMatrix::operator=");
+
+ enum { Flip = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit) };
+
+ const Index outerSize = other.outerSize();
+ //typedef typename internal::conditional<transpose, LinkedVectorMatrix<Scalar,Flags&RowMajorBit>, Derived>::type TempType;
+ // thanks to shallow copies, we always eval to a temporary
+ Derived temp(other.rows(), other.cols());
+
+ temp.reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ temp.startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ temp.insertBackByOuterInner(Flip?it.index():j,Flip?j:it.index()) = v;
+ }
+ }
+ temp.finalize();
+
+ derived() = temp.markAsRValue();
+ }
+
+
+ template<typename OtherDerived>
+ inline Derived& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+// std::cout << typeid(OtherDerived).name() << "\n";
+// std::cout << Flags << " " << OtherDerived::Flags << "\n";
+ const bool transpose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// std::cout << "eval transpose = " << transpose << "\n";
+ const Index outerSize = (int(OtherDerived::Flags) & RowMajorBit) ? other.rows() : other.cols();
+ if ((!transpose) && other.isRValue())
+ {
+ // eval without temporary
+ derived().resize(other.rows(), other.cols());
+ derived().setZero();
+ derived().reserve((std::max)(this->rows(),this->cols())*2);
+ for (Index j=0; j<outerSize; ++j)
+ {
+ derived().startVec(j);
+ for (typename OtherDerived::InnerIterator it(other.derived(), j); it; ++it)
+ {
+ Scalar v = it.value();
+ if (v!=Scalar(0))
+ derived().insertBackByOuterInner(j,it.index()) = v;
+ }
+ }
+ derived().finalize();
+ }
+ else
+ {
+ assignGeneric(other.derived());
+ }
+ return derived();
+ }
+
+ template<typename Lhs, typename Rhs>
+ inline Derived& operator=(const SparseSparseProduct<Lhs,Rhs>& product);
+
+ template<typename Lhs, typename Rhs>
+ inline void _experimentalNewProduct(const Lhs& lhs, const Rhs& rhs);
+
+ friend std::ostream & operator << (std::ostream & s, const SparseMatrixBase& m)
+ {
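+ // stream the dense representation: row-major expressions are printed directly,
+ // column vectors row by row, and any other column-major expression through a
+ // row-major temporary copy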
+ if (Flags&RowMajorBit)
+ {
+ for (Index row=0; row<m.outerSize(); ++row)
+ {
+ Index col = 0;
+ for (typename Derived::InnerIterator it(m.derived(), row); it; ++it)
+ {
+ for ( ; col<it.index(); ++col)
+ s << "0 ";
+ s << it.value() << " ";
+ ++col;
+ }
+ for ( ; col<m.cols(); ++col)
+ s << "0 ";
+ s << std::endl;
+ }
+ }
+ else
+ {
+ if (m.cols() == 1) {
+ Index row = 0;
+ for (typename Derived::InnerIterator it(m.derived(), 0); it; ++it)
+ {
+ for ( ; row<it.index(); ++row)
+ s << "0" << std::endl;
+ s << it.value() << std::endl;
+ ++row;
+ }
+ for ( ; row<m.rows(); ++row)
+ s << "0" << std::endl;
+ }
+ else
+ {
+ SparseMatrix<Scalar, RowMajorBit> trans = m.derived();
+ s << trans;
+ }
+ }
+ return s;
+ }
+
+// const SparseCwiseUnaryOp<internal::scalar_opposite_op<typename internal::traits<Derived>::Scalar>,Derived> operator-() const;
+
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_sum_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator+(const SparseMatrixBase<OtherDerived> &other) const;
+
+// template<typename OtherDerived>
+// const CwiseBinaryOp<internal::scalar_difference_op<typename internal::traits<Derived>::Scalar>, Derived, OtherDerived>
+// operator-(const SparseMatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator+=(const SparseMatrixBase<OtherDerived>& other);
+ template<typename OtherDerived>
+ Derived& operator-=(const SparseMatrixBase<OtherDerived>& other);
+
+// template<typename Lhs,typename Rhs>
+// Derived& operator+=(const Flagged<Product<Lhs,Rhs,CacheFriendlyProduct>, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit>& other);
+
+ Derived& operator*=(const Scalar& other);
+ Derived& operator/=(const Scalar& other);
+
+ #define EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE \
+ CwiseBinaryOp< \
+ internal::scalar_product_op< \
+ typename internal::scalar_product_traits< \
+ typename internal::traits<Derived>::Scalar, \
+ typename internal::traits<OtherDerived>::Scalar \
+ >::ReturnType \
+ >, \
+ Derived, \
+ OtherDerived \
+ >
+
+ template<typename OtherDerived>
+ EIGEN_STRONG_INLINE const EIGEN_SPARSE_CWISE_PRODUCT_RETURN_TYPE
+ cwiseProduct(const MatrixBase<OtherDerived> &other) const;
+
+// const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar) const;
+// const SparseCwiseUnaryOp<internal::scalar_quotient1_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator/(const Scalar& scalar) const;
+
+// inline friend const SparseCwiseUnaryOp<internal::scalar_multiple_op<typename internal::traits<Derived>::Scalar>, Derived>
+// operator*(const Scalar& scalar, const SparseMatrixBase& matrix)
+// { return matrix*scalar; }
+
+
+ // sparse * sparse
+ template<typename OtherDerived>
+ const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const SparseMatrixBase<OtherDerived> &other) const;
+
+ // sparse * diagonal
+ template<typename OtherDerived>
+ const SparseDiagonalProduct<Derived,OtherDerived>
+ operator*(const DiagonalBase<OtherDerived> &other) const;
+
+ // diagonal * sparse
+ template<typename OtherDerived> friend
+ const SparseDiagonalProduct<OtherDerived,Derived>
+ operator*(const DiagonalBase<OtherDerived> &lhs, const SparseMatrixBase& rhs)
+ { return SparseDiagonalProduct<OtherDerived,Derived>(lhs.derived(), rhs.derived()); }
+
+ /** dense * sparse (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived> friend
+ const typename DenseSparseProductReturnType<OtherDerived,Derived>::Type
+ operator*(const MatrixBase<OtherDerived>& lhs, const Derived& rhs)
+ { return typename DenseSparseProductReturnType<OtherDerived,Derived>::Type(lhs.derived(),rhs); }
+
+ /** sparse * dense (returns a dense object unless it is an outer product) */
+ template<typename OtherDerived>
+ const typename SparseDenseProductReturnType<Derived,OtherDerived>::Type
+ operator*(const MatrixBase<OtherDerived> &other) const;
+
+ template<typename OtherDerived>
+ Derived& operator*=(const SparseMatrixBase<OtherDerived>& other);
+
+ #ifdef EIGEN2_SUPPORT
+ // deprecated
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solveTriangular(const MatrixBase<OtherDerived>& other) const;
+
+ // deprecated
+ template<typename OtherDerived>
+ void solveTriangularInPlace(MatrixBase<OtherDerived>& other) const;
+// template<typename OtherDerived>
+// void solveTriangularInPlace(SparseMatrixBase<OtherDerived>& other) const;
+ #endif // EIGEN2_SUPPORT
+
+ template<int Mode>
+ inline const SparseTriangularView<Derived, Mode> triangularView() const;
+
+ template<unsigned int UpLo> inline const SparseSelfAdjointView<Derived, UpLo> selfadjointView() const;
+ template<unsigned int UpLo> inline SparseSelfAdjointView<Derived, UpLo> selfadjointView();
+
+ template<typename OtherDerived> Scalar dot(const MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> Scalar dot(const SparseMatrixBase<OtherDerived>& other) const;
+ RealScalar squaredNorm() const;
+ RealScalar norm() const;
+// const PlainObject normalized() const;
+// void normalize();
+
+ Transpose<Derived> transpose() { return derived(); }
+ const Transpose<const Derived> transpose() const { return derived(); }
+ // void transposeInPlace();
+ const AdjointReturnType adjoint() const { return transpose(); }
+
+ // sub-vector
+ SparseInnerVectorSet<Derived,1> row(Index i);
+ const SparseInnerVectorSet<Derived,1> row(Index i) const;
+ SparseInnerVectorSet<Derived,1> col(Index j);
+ const SparseInnerVectorSet<Derived,1> col(Index j) const;
+ SparseInnerVectorSet<Derived,1> innerVector(Index outer);
+ const SparseInnerVectorSet<Derived,1> innerVector(Index outer) const;
+
+ // set of sub-vectors
+ SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subrows(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size);
+ const SparseInnerVectorSet<Derived,Dynamic> subcols(Index start, Index size) const;
+ SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize);
+ const SparseInnerVectorSet<Derived,Dynamic> innerVectors(Index outerStart, Index outerSize) const;
+
+// typename BlockReturnType<Derived>::Type block(int startRow, int startCol, int blockRows, int blockCols);
+// const typename BlockReturnType<Derived>::Type
+// block(int startRow, int startCol, int blockRows, int blockCols) const;
+//
+// typename BlockReturnType<Derived>::SubVectorType segment(int start, int size);
+// const typename BlockReturnType<Derived>::SubVectorType segment(int start, int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType start(int size) const;
+//
+// typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size);
+// const typename BlockReturnType<Derived,Dynamic>::SubVectorType end(int size) const;
+//
+// template<int BlockRows, int BlockCols>
+// typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol);
+// template<int BlockRows, int BlockCols>
+// const typename BlockReturnType<Derived, BlockRows, BlockCols>::Type block(int startRow, int startCol) const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType start(void);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType start() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType end();
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType end() const;
+
+// template<int Size> typename BlockReturnType<Derived,Size>::SubVectorType segment(int start);
+// template<int Size> const typename BlockReturnType<Derived,Size>::SubVectorType segment(int start) const;
+
+// Diagonal<Derived> diagonal();
+// const Diagonal<Derived> diagonal() const;
+
+// template<unsigned int Mode> Part<Derived, Mode> part();
+// template<unsigned int Mode> const Part<Derived, Mode> part() const;
+
+
+// static const ConstantReturnType Constant(int rows, int cols, const Scalar& value);
+// static const ConstantReturnType Constant(int size, const Scalar& value);
+// static const ConstantReturnType Constant(const Scalar& value);
+
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int rows, int cols, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(int size, const CustomNullaryOp& func);
+// template<typename CustomNullaryOp>
+// static const CwiseNullaryOp<CustomNullaryOp, Derived> NullaryExpr(const CustomNullaryOp& func);
+
+// static const ConstantReturnType Zero(int rows, int cols);
+// static const ConstantReturnType Zero(int size);
+// static const ConstantReturnType Zero();
+// static const ConstantReturnType Ones(int rows, int cols);
+// static const ConstantReturnType Ones(int size);
+// static const ConstantReturnType Ones();
+// static const IdentityReturnType Identity();
+// static const IdentityReturnType Identity(int rows, int cols);
+// static const BasisReturnType Unit(int size, int i);
+// static const BasisReturnType Unit(int i);
+// static const BasisReturnType UnitX();
+// static const BasisReturnType UnitY();
+// static const BasisReturnType UnitZ();
+// static const BasisReturnType UnitW();
+
+// const DiagonalMatrix<Derived> asDiagonal() const;
+
+// Derived& setConstant(const Scalar& value);
+// Derived& setZero();
+// Derived& setOnes();
+// Derived& setRandom();
+// Derived& setIdentity();
+
+ /** \internal use operator= */
+ template<typename DenseDerived>
+ void evalTo(MatrixBase<DenseDerived>& dst) const
+ {
+ dst.setZero();
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator i(derived(),j); i; ++i)
+ dst.coeffRef(i.row(),i.col()) = i.value();
+ }
+
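+ /** \returns a dense matrix copy of \c *this; e.g. (an illustrative sketch,
+ * where \c m is any sparse expression):
+ * \code
+ * Eigen::MatrixXd d = m.toDense();
+ * \endcode
+ */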
+ Matrix<Scalar,RowsAtCompileTime,ColsAtCompileTime> toDense() const
+ {
+ return derived();
+ }
+
+ template<typename OtherDerived>
+ bool isApprox(const SparseMatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other.toDense(),prec); }
+
+ template<typename OtherDerived>
+ bool isApprox(const MatrixBase<OtherDerived>& other,
+ RealScalar prec = NumTraits<Scalar>::dummy_precision()) const
+ { return toDense().isApprox(other,prec); }
+// bool isMuchSmallerThan(const RealScalar& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// template<typename OtherDerived>
+// bool isMuchSmallerThan(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// bool isApproxToConstant(const Scalar& value, RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isZero(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isOnes(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isIdentity(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isDiagonal(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// bool isUpper(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isLower(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// template<typename OtherDerived>
+// bool isOrthogonal(const MatrixBase<OtherDerived>& other,
+// RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+// bool isUnitary(RealScalar prec = NumTraits<Scalar>::dummy_precision()) const;
+
+// template<typename OtherDerived>
+// inline bool operator==(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() == other).all(); }
+
+// template<typename OtherDerived>
+// inline bool operator!=(const MatrixBase<OtherDerived>& other) const
+// { return (cwise() != other).any(); }
+
+
+// template<typename NewType>
+// const SparseCwiseUnaryOp<internal::scalar_cast_op<typename internal::traits<Derived>::Scalar, NewType>, Derived> cast() const;
+
+ /** \returns the matrix or vector obtained by evaluating this expression.
+ *
+ * Notice that in the case of a plain matrix or vector (not an expression) this function just returns
+ * a const reference, in order to avoid a useless copy.
+ */
+ inline const typename internal::eval<Derived>::type eval() const
+ { return typename internal::eval<Derived>::type(derived()); }
+
+// template<typename OtherDerived>
+// void swap(MatrixBase<OtherDerived> const & other);
+
+// template<unsigned int Added>
+// const SparseFlagged<Derived, Added, 0> marked() const;
+// const Flagged<Derived, 0, EvalBeforeNestingBit | EvalBeforeAssigningBit> lazy() const;
+
+ /** \returns number of elements to skip to pass from one row (resp. column) to another
+ * for a row-major (resp. column-major) matrix.
+ * Combined with coeffRef() and the \ref flags flags, it allows a direct access to the data
+ * of the underlying matrix.
+ */
+// inline int stride(void) const { return derived().stride(); }
+
+// FIXME
+// ConjugateReturnType conjugate() const;
+// const RealReturnType real() const;
+// const ImagReturnType imag() const;
+
+// template<typename CustomUnaryOp>
+// const SparseCwiseUnaryOp<CustomUnaryOp, Derived> unaryExpr(const CustomUnaryOp& func = CustomUnaryOp()) const;
+
+// template<typename CustomBinaryOp, typename OtherDerived>
+// const CwiseBinaryOp<CustomBinaryOp, Derived, OtherDerived>
+// binaryExpr(const MatrixBase<OtherDerived> &other, const CustomBinaryOp& func = CustomBinaryOp()) const;
+
+
+ Scalar sum() const;
+// Scalar trace() const;
+
+// typename internal::traits<Derived>::Scalar minCoeff() const;
+// typename internal::traits<Derived>::Scalar maxCoeff() const;
+
+// typename internal::traits<Derived>::Scalar minCoeff(int* row, int* col = 0) const;
+// typename internal::traits<Derived>::Scalar maxCoeff(int* row, int* col = 0) const;
+
+// template<typename BinaryOp>
+// typename internal::result_of<BinaryOp(typename internal::traits<Derived>::Scalar)>::type
+// redux(const BinaryOp& func) const;
+
+// template<typename Visitor>
+// void visit(Visitor& func) const;
+
+
+// const SparseCwise<Derived> cwise() const;
+// SparseCwise<Derived> cwise();
+
+// inline const WithFormat<Derived> format(const IOFormat& fmt) const;
+
+/////////// Array module ///////////
+ /*
+ bool all(void) const;
+ bool any(void) const;
+
+ const VectorwiseOp<Derived,Horizontal> rowwise() const;
+ const VectorwiseOp<Derived,Vertical> colwise() const;
+
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int rows, int cols);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random(int size);
+ static const CwiseNullaryOp<internal::scalar_random_op<Scalar>,Derived> Random();
+
+ template<typename ThenDerived,typename ElseDerived>
+ const Select<Derived,ThenDerived,ElseDerived>
+ select(const MatrixBase<ThenDerived>& thenMatrix,
+ const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<typename ThenDerived>
+ inline const Select<Derived,ThenDerived, typename ThenDerived::ConstantReturnType>
+ select(const MatrixBase<ThenDerived>& thenMatrix, typename ThenDerived::Scalar elseScalar) const;
+
+ template<typename ElseDerived>
+ inline const Select<Derived, typename ElseDerived::ConstantReturnType, ElseDerived >
+ select(typename ElseDerived::Scalar thenScalar, const MatrixBase<ElseDerived>& elseMatrix) const;
+
+ template<int p> RealScalar lpNorm() const;
+ */
+
+
+// template<typename OtherDerived>
+// Scalar dot(const MatrixBase<OtherDerived>& other) const
+// {
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(Derived)
+// EIGEN_STATIC_ASSERT_VECTOR_ONLY(OtherDerived)
+// EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
+// YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
+//
+// eigen_assert(derived().size() == other.size());
+// // short version, but the assembly looks more complicated because
+// // of the CwiseBinaryOp iterator complexity
+// // return res = (derived().cwise() * other.derived().conjugate()).sum();
+//
+// // optimized, generic version
+// typename Derived::InnerIterator i(derived(),0);
+// typename OtherDerived::InnerIterator j(other.derived(),0);
+// Scalar res = 0;
+// while (i && j)
+// {
+// if (i.index()==j.index())
+// {
+// // std::cerr << i.value() << " * " << j.value() << "\n";
+// res += i.value() * internal::conj(j.value());
+// ++i; ++j;
+// }
+// else if (i.index()<j.index())
+// ++i;
+// else
+// ++j;
+// }
+// return res;
+// }
+//
+// Scalar sum() const
+// {
+// Scalar res = 0;
+// for (typename Derived::InnerIterator iter(*this,0); iter; ++iter)
+// {
+// res += iter.value();
+// }
+// return res;
+// }
+
+ protected:
+
+ bool m_isRValue;
+};
+
+#endif // EIGEN_SPARSEMATRIXBASE_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
new file mode 100644
index 00000000000..1c1f54706ac
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseProduct.h
@@ -0,0 +1,141 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEPRODUCT_H
+#define EIGEN_SPARSEPRODUCT_H
+
+template<typename Lhs, typename Rhs>
+struct SparseSparseProductReturnType
+{
+ typedef typename internal::traits<Lhs>::Scalar Scalar;
+ enum {
+ LhsRowMajor = internal::traits<Lhs>::Flags & RowMajorBit,
+ RhsRowMajor = internal::traits<Rhs>::Flags & RowMajorBit,
+ TransposeRhs = (!LhsRowMajor) && RhsRowMajor,
+ TransposeLhs = LhsRowMajor && (!RhsRowMajor)
+ };
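+
+ // if the storage orders differ, the row-major operand is evaluated into a
+ // column-major temporary so that both sides are traversed with the same
+ // orientation (see the conditional typedefs below)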
+
+ typedef typename internal::conditional<TransposeLhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Lhs,Rhs::RowsAtCompileTime>::type>::type LhsNested;
+
+ typedef typename internal::conditional<TransposeRhs,
+ SparseMatrix<Scalar,0>,
+ const typename internal::nested<Rhs,Lhs::RowsAtCompileTime>::type>::type RhsNested;
+
+ typedef SparseSparseProduct<LhsNested, RhsNested> Type;
+};
+
+namespace internal {
+template<typename LhsNested, typename RhsNested>
+struct traits<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ typedef MatrixXpr XprKind;
+ // clean the nested types:
+ typedef typename remove_all<LhsNested>::type _LhsNested;
+ typedef typename remove_all<RhsNested>::type _RhsNested;
+ typedef typename _LhsNested::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<_LhsNested>::Index,
+ typename traits<_RhsNested>::Index>::type Index;
+
+ enum {
+ LhsCoeffReadCost = _LhsNested::CoeffReadCost,
+ RhsCoeffReadCost = _RhsNested::CoeffReadCost,
+ LhsFlags = _LhsNested::Flags,
+ RhsFlags = _RhsNested::Flags,
+
+ RowsAtCompileTime = _LhsNested::RowsAtCompileTime,
+ ColsAtCompileTime = _RhsNested::ColsAtCompileTime,
+ MaxRowsAtCompileTime = _LhsNested::MaxRowsAtCompileTime,
+ MaxColsAtCompileTime = _RhsNested::MaxColsAtCompileTime,
+
+ InnerSize = EIGEN_SIZE_MIN_PREFER_FIXED(_LhsNested::ColsAtCompileTime, _RhsNested::RowsAtCompileTime),
+
+ EvalToRowMajor = (RhsFlags & LhsFlags & RowMajorBit),
+
+ RemovedBits = ~(EvalToRowMajor ? 0 : RowMajorBit),
+
+ Flags = (int(LhsFlags | RhsFlags) & HereditaryBits & RemovedBits)
+ | EvalBeforeAssigningBit
+ | EvalBeforeNestingBit,
+
+ CoeffReadCost = Dynamic
+ };
+
+ typedef Sparse StorageKind;
+};
+
+} // end namespace internal
+
+template<typename LhsNested, typename RhsNested>
+class SparseSparseProduct : internal::no_assignment_operator,
+ public SparseMatrixBase<SparseSparseProduct<LhsNested, RhsNested> >
+{
+ public:
+
+ typedef SparseMatrixBase<SparseSparseProduct> Base;
+ EIGEN_DENSE_PUBLIC_INTERFACE(SparseSparseProduct)
+
+ private:
+
+ typedef typename internal::traits<SparseSparseProduct>::_LhsNested _LhsNested;
+ typedef typename internal::traits<SparseSparseProduct>::_RhsNested _RhsNested;
+
+ public:
+
+ template<typename Lhs, typename Rhs>
+ EIGEN_STRONG_INLINE SparseSparseProduct(const Lhs& lhs, const Rhs& rhs)
+ : m_lhs(lhs), m_rhs(rhs)
+ {
+ eigen_assert(lhs.cols() == rhs.rows());
+
+ enum {
+ ProductIsValid = _LhsNested::ColsAtCompileTime==Dynamic
+ || _RhsNested::RowsAtCompileTime==Dynamic
+ || int(_LhsNested::ColsAtCompileTime)==int(_RhsNested::RowsAtCompileTime),
+ AreVectors = _LhsNested::IsVectorAtCompileTime && _RhsNested::IsVectorAtCompileTime,
+ SameSizes = EIGEN_PREDICATE_SAME_MATRIX_SIZE(_LhsNested,_RhsNested)
+ };
+ // note to the lost user:
+ // * for a dot product use: v1.dot(v2)
+ // * for a coeff-wise product use: v1.cwise()*v2
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(AreVectors && SameSizes),
+ INVALID_VECTOR_VECTOR_PRODUCT__IF_YOU_WANTED_A_DOT_OR_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTIONS)
+ EIGEN_STATIC_ASSERT(ProductIsValid || !(SameSizes && !AreVectors),
+ INVALID_MATRIX_PRODUCT__IF_YOU_WANTED_A_COEFF_WISE_PRODUCT_YOU_MUST_USE_THE_EXPLICIT_FUNCTION)
+ EIGEN_STATIC_ASSERT(ProductIsValid || SameSizes, INVALID_MATRIX_PRODUCT)
+ }
+
+ EIGEN_STRONG_INLINE Index rows() const { return m_lhs.rows(); }
+ EIGEN_STRONG_INLINE Index cols() const { return m_rhs.cols(); }
+
+ EIGEN_STRONG_INLINE const _LhsNested& lhs() const { return m_lhs; }
+ EIGEN_STRONG_INLINE const _RhsNested& rhs() const { return m_rhs; }
+
+ protected:
+ LhsNested m_lhs;
+ RhsNested m_rhs;
+};
+
+#endif // EIGEN_SPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
new file mode 100644
index 00000000000..afc49de7aad
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseRedux.h
@@ -0,0 +1,56 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEREDUX_H
+#define EIGEN_SPARSEREDUX_H
+
+template<typename Derived>
+typename internal::traits<Derived>::Scalar
+SparseMatrixBase<Derived>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ Scalar res = 0;
+ for (Index j=0; j<outerSize(); ++j)
+ for (typename Derived::InnerIterator iter(derived(),j); iter; ++iter)
+ res += iter.value();
+ return res;
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseMatrix<_Scalar,_Options,_Index> >::Scalar
+SparseMatrix<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
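+ // sum directly over the contiguous array of stored values via a Map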
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+typename internal::traits<SparseVector<_Scalar,_Options, _Index> >::Scalar
+SparseVector<_Scalar,_Options,_Index>::sum() const
+{
+ eigen_assert(rows()>0 && cols()>0 && "you are using an uninitialized matrix");
+ return Matrix<Scalar,1,Dynamic>::Map(&m_data.value(0), m_data.size()).sum();
+}
+
+#endif // EIGEN_SPARSEREDUX_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
new file mode 100644
index 00000000000..d82044c789c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSelfAdjointView.h
@@ -0,0 +1,454 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
+#define EIGEN_SPARSE_SELFADJOINTVIEW_H
+
+/** \class SparseSelfAdjointView
+ *
+ *
+ * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
+ *
+ * \param MatrixType the type of the sparse matrix storing the coefficients
+ * \param UpLo can be either \c #Lower or \c #Upper
+ *
+ * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
+ * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
+ * and most of the time this is the only way that it is used.
+ *
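+ * A typical use (an illustrative sketch; \c A, \c x and \c b are hypothetical):
+ * \code
+ * SparseMatrix<double> A;  // only one triangular half is stored/referenced
+ * VectorXd x, b;
+ * x = A.selfadjointView<Lower>() * b; // full selfadjoint matrix * vector product
+ * \endcode
+ *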
+ * \sa SparseMatrixBase::selfadjointView()
+ */
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct;
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct;
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct;
+
+namespace internal {
+
+template<typename MatrixType, unsigned int UpLo>
+struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);
+
+}
+
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
+ : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
+{
+ public:
+
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
+ {
+ eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
+ }
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ /** \internal \returns a reference to the nested matrix */
+ const _MatrixTypeNested& matrix() const { return m_matrix; }
+ _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }
+
+ /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
+ template<typename OtherDerived>
+ SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>
+ operator*(const MatrixBase<OtherDerived>& rhs) const
+ {
+ return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
+ }
+
+ /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
+ template<typename OtherDerived> friend
+ DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>
+ operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
+ {
+ return DenseTimeSparseSelfAdjointProduct<OtherDerived,_MatrixTypeNested,UpLo>(lhs.derived(), rhs.m_matrix);
+ }
+
+ /** Perform a symmetric rank K update of the selfadjoint matrix \c *this:
+ * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
+ *
+ * \returns a reference to \c *this
+ *
+ * Note that passing \a alpha = 0 is faster than first setting the matrix to zero
+ * and then using the default \a alpha = 1.
+ *
+ * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
+ * call this function with u.adjoint().
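+ *
+ * For instance (an illustrative sketch; \c m is a hypothetical sparse matrix
+ * and \c u a hypothetical sparse vector):
+ * \code
+ * m.selfadjointView<Lower>().rankUpdate(u); // m += u * u.adjoint(), lower half only
+ * \endcode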
+ */
+ template<typename DerivedU>
+ SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha = Scalar(1));
+
+ /** \internal triggered by sparse_matrix = SparseSelfadjointView; */
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
+ }
+
+ template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar>& _dest) const
+ {
+ // TODO directly evaluate into _dest;
+ SparseMatrix<DestScalar> tmp(_dest.rows(),_dest.cols());
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
+ _dest = tmp;
+ }
+
+ /** \returns an expression of P^-1 H P */
+ SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic>& perm) const
+ {
+ return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
+ }
+
+ template<typename SrcMatrixType,int SrcUpLo>
+ SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
+ {
+ permutedMatrix.evalTo(*this);
+ return *this;
+ }
+
+
+ // const SparseLLT<PlainObject, UpLo> llt() const;
+ // const SparseLDLT<PlainObject, UpLo> ldlt() const;
+
+ protected:
+
+ const typename MatrixType::Nested m_matrix;
+ mutable VectorI m_countPerRow;
+ mutable VectorI m_countPerCol;
+};
+
+/***************************************************************************
+* Implementation of SparseMatrixBase methods
+***************************************************************************/
+
+template<typename Derived>
+template<unsigned int UpLo>
+const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
+{
+ return derived();
+}
+
+template<typename Derived>
+template<unsigned int UpLo>
+SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
+{
+ return derived();
+}
+
+/***************************************************************************
+* Implementation of SparseSelfAdjointView methods
+***************************************************************************/
+
+template<typename MatrixType, unsigned int UpLo>
+template<typename DerivedU>
+SparseSelfAdjointView<MatrixType,UpLo>&
+SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, Scalar alpha)
+{
+ SparseMatrix<Scalar,MatrixType::Flags&RowMajorBit?RowMajor:ColMajor> tmp = u * u.adjoint();
+ if(alpha==Scalar(0))
+ m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
+ else
+ m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();
+
+ return *this;
+}
+
+/***************************************************************************
+* Implementation of sparse self-adjoint time dense matrix
+***************************************************************************/
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{
+ typedef Dense StorageKind;
+};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class SparseSelfAdjointTimeDenseProduct
+ : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)
+
+ SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
+ {
+ // TODO use alpha
+ eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
+ typedef typename internal::remove_all<Lhs>::type _Lhs;
+ typedef typename internal::remove_all<Rhs>::type _Rhs;
+ typedef typename _Lhs::InnerIterator LhsInnerIterator;
+ enum {
+ LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
+ ProcessFirstHalf =
+ ((UpLo&(Upper|Lower))==(Upper|Lower))
+ || ( (UpLo&Upper) && !LhsIsRowMajor)
+ || ( (UpLo&Lower) && LhsIsRowMajor),
+ ProcessSecondHalf = !ProcessFirstHalf
+ };
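+ // traverse only the stored triangular half: each stored off-diagonal
+ // coefficient contributes to two rows of the result, once as-is and once
+ // conjugated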
+ for (Index j=0; j<m_lhs.outerSize(); ++j)
+ {
+ LhsInnerIterator i(m_lhs,j);
+ if (ProcessSecondHalf && i && (i.index()==j))
+ {
+ dest.row(j) += i.value() * m_rhs.row(j);
+ ++i;
+ }
+ Block<Dest,1,Dest::ColsAtCompileTime> dest_j(dest.row(LhsIsRowMajor ? j : 0));
+ for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
+ {
+ Index a = LhsIsRowMajor ? j : i.index();
+ Index b = LhsIsRowMajor ? i.index() : j;
+ typename Lhs::Scalar v = i.value();
+ dest.row(a) += (v) * m_rhs.row(b);
+ dest.row(b) += internal::conj(v) * m_rhs.row(a);
+ }
+ if (ProcessFirstHalf && i && (i.index()==j))
+ dest.row(j) += i.value() * m_rhs.row(j);
+ }
+ }
+
+ private:
+ SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
+};
+
+namespace internal {
+template<typename Lhs, typename Rhs, int UpLo>
+struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
+ : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
+{};
+}
+
+template<typename Lhs, typename Rhs, int UpLo>
+class DenseTimeSparseSelfAdjointProduct
+ : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
+{
+ public:
+ EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)
+
+ DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
+ {}
+
+ template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, Scalar /*alpha*/) const
+ {
+ // TODO
+ }
+
+ private:
+ DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
+};
+
+/***************************************************************************
+* Implementation of symmetric copies and permutations
+***************************************************************************/
+namespace internal {
+
+template<typename MatrixType, int UpLo>
+struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
+};
+
+template<int UpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+
+ Dest& dest(_dest.derived());
+ enum {
+ StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
+ };
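+ // note: a permutation is plumbed through below (perm ? perm[j] : j) but this
+ // entry point does not support it yet, hence the assertion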
+ eigen_assert(perm==0);
+ Index size = mat.rows();
+ VectorI count;
+ count.resize(size);
+ count.setZero();
+ dest.resize(size,size);
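+ // first pass: count the entries of each destination inner vector; a diagonal
+ // entry is counted once, an off-diagonal entry of the stored triangle once
+ // for each of the two halves it populates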
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ count[ip]++;
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ count[ip]++;
+ count[jp]++;
+ }
+ }
+ }
+ Index nnz = count.sum();
+
+ // reserve space
+ dest.reserve(nnz);
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ // copy data
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ Index ip = perm ? perm[i] : i;
+ if(i==j)
+ {
+ int k = count[ip]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ }
+ else if((UpLo==Lower && i>j) || (UpLo==Upper && i<j))
+ {
+ int k = count[jp]++;
+ dest._innerIndexPtr()[k] = ip;
+ dest._valuePtr()[k] = it.value();
+ k = count[ip]++;
+ dest._innerIndexPtr()[k] = jp;
+ dest._valuePtr()[k] = internal::conj(it.value());
+ }
+ }
+ }
+}
+
+template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
+void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
+{
+ typedef typename MatrixType::Index Index;
+ typedef typename MatrixType::Scalar Scalar;
+ typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
+ Dest& dest(_dest.derived());
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ //internal::conj_if<SrcUpLo!=DstUpLo> cj;
+
+ Index size = mat.rows();
+ VectorI count(size);
+ count.setZero();
+ dest.resize(size,size);
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm ? perm[i] : i;
+ count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ }
+ }
+ dest._outerIndexPtr()[0] = 0;
+ for(Index j=0; j<size; ++j)
+ dest._outerIndexPtr()[j+1] = dest._outerIndexPtr()[j] + count[j];
+ dest.resizeNonZeros(dest._outerIndexPtr()[size]);
+ for(Index j=0; j<size; ++j)
+ count[j] = dest._outerIndexPtr()[j];
+
+ for(Index j = 0; j<size; ++j)
+ {
+ Index jp = perm ? perm[j] : j;
+ for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
+ {
+ Index i = it.index();
+ if((SrcUpLo==Lower && i<j) || (SrcUpLo==Upper && i>j))
+ continue;
+
+ Index ip = perm? perm[i] : i;
+ Index k = count[DstUpLo==Lower ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
+ dest._innerIndexPtr()[k] = DstUpLo==Lower ? (std::max)(ip,jp) : (std::min)(ip,jp);
+
+ if((DstUpLo==Lower && ip<jp) || (DstUpLo==Upper && ip>jp))
+ dest._valuePtr()[k] = conj(it.value());
+ else
+ dest._valuePtr()[k] = it.value();
+ }
+ }
+}
+
+}
+
+template<typename MatrixType,int UpLo>
+class SparseSymmetricPermutationProduct
+ : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
+{
+ typedef PermutationMatrix<Dynamic> Perm;
+ public:
+ typedef typename MatrixType::Scalar Scalar;
+ typedef typename MatrixType::Index Index;
+ typedef Matrix<Index,Dynamic,1> VectorI;
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+
+ SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
+ : m_matrix(mat), m_perm(perm)
+ {}
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ template<typename DestScalar> void evalTo(SparseMatrix<DestScalar>& _dest) const
+ {
+ internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
+ }
+
+ template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
+ {
+ internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
+ }
+
+ protected:
+ const MatrixTypeNested m_matrix;
+ const Perm& m_perm;
+
+};
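+
+// Usage sketch (editorial illustration, not part of the original file; n is a
+// placeholder size): the proxy applies a symmetric permutation P*A*P^T while
+// only the UpLo half of A is stored:
+//
+//   SparseMatrix<double> A(n,n);       // lower half filled
+//   PermutationMatrix<Dynamic> p(n);
+//   SparseMatrix<double> pApt;
+//   SparseSymmetricPermutationProduct<SparseMatrix<double>,Lower>(A,p).evalTo(pApt);
+//
+// Note that this full-symm path is still guarded by the perm==0 assert in
+// internal::permute_symm_to_fullsymm above.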
+
+#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
new file mode 100644
index 00000000000..19abcd1f8e4
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseSparseProduct.h
@@ -0,0 +1,401 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSESPARSEPRODUCT_H
+#define EIGEN_SPARSESPARSEPRODUCT_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl2(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ std::vector<bool> mask(rows,false);
+ Matrix<Scalar,Dynamic,1> values(rows);
+ Matrix<Index,Dynamic,1> indices(rows);
+
+ // estimate the number of non zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+
+ res.resize(rows, cols);
+ res.reserve(Index(ratioRes*rows*cols));
+ // we compute each column of the result, one after the other
+ for (Index j=0; j<cols; ++j)
+ {
+
+ res.startVec(j);
+ Index nnz = 0;
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ Scalar y = rhsIt.value();
+ Index k = rhsIt.index();
+ for (typename Lhs::InnerIterator lhsIt(lhs, k); lhsIt; ++lhsIt)
+ {
+ Index i = lhsIt.index();
+ Scalar x = lhsIt.value();
+ if(!mask[i])
+ {
+ mask[i] = true;
+            values[i] = x * y;
+            indices[nnz] = i;
+ ++nnz;
+ }
+ else
+ values[i] += x * y;
+ }
+ }
+    // FIXME implement fast sort algorithms for very small nnz
+    // write back the nonzeros of column j in increasing index order
+    if(nnz>1) std::sort(indices.data(), indices.data()+nnz);
+    for(Index k=0; k<nnz; ++k)
+    {
+      Index i = indices[k];
+      res.insertBackByOuterInner(j,i) = values[i];
+      mask[i] = false;
+    }
+ }
+ res.finalize();
+}
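+
+// Note (editorial): the mask/values/indices triplet above is the classic
+// sparse accumulator of Gustavson's algorithm: column j of the result is
+// res(:,j) = sum_k lhs(:,k) * rhs(k,j); 'mask' marks the rows already hit,
+// 'values' holds the partial sums, and 'indices' records the nnz occupied
+// rows so that only those need to be sorted and written back.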
+
+// perform a pseudo in-place sparse * sparse product assuming all matrices are col major
+template<typename Lhs, typename Rhs, typename ResultType>
+static void sparse_product_impl(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+{
+// return sparse_product_impl2(lhs,rhs,res);
+
+ typedef typename remove_all<Lhs>::type::Scalar Scalar;
+ typedef typename remove_all<Lhs>::type::Index Index;
+
+ // make sure to call innerSize/outerSize since we fake the storage order.
+ Index rows = lhs.innerSize();
+ Index cols = rhs.outerSize();
+ //int size = lhs.outerSize();
+ eigen_assert(lhs.outerSize() == rhs.innerSize());
+
+ // allocate a temporary buffer
+ AmbiVector<Scalar,Index> tempVector(rows);
+
+ // estimate the number of non zero entries
+ float ratioLhs = float(lhs.nonZeros())/(float(lhs.rows())*float(lhs.cols()));
+ float avgNnzPerRhsColumn = float(rhs.nonZeros())/float(cols);
+ float ratioRes = (std::min)(ratioLhs * avgNnzPerRhsColumn, 1.f);
+
+ // mimics a resizeByInnerOuter:
+ if(ResultType::IsRowMajor)
+ res.resize(cols, rows);
+ else
+ res.resize(rows, cols);
+
+ res.reserve(Index(ratioRes*rows*cols));
+ for (Index j=0; j<cols; ++j)
+ {
+ // let's do a more accurate determination of the nnz ratio for the current column j of res
+ //float ratioColRes = (std::min)(ratioLhs * rhs.innerNonZeros(j), 1.f);
+ // FIXME find a nice way to get the number of nonzeros of a sub matrix (here an inner vector)
+ float ratioColRes = ratioRes;
+ tempVector.init(ratioColRes);
+ tempVector.setZero();
+ for (typename Rhs::InnerIterator rhsIt(rhs, j); rhsIt; ++rhsIt)
+ {
+ // FIXME should be written like this: tmp += rhsIt.value() * lhs.col(rhsIt.index())
+ tempVector.restart();
+ Scalar x = rhsIt.value();
+ for (typename Lhs::InnerIterator lhsIt(lhs, rhsIt.index()); lhsIt; ++lhsIt)
+ {
+ tempVector.coeffRef(lhsIt.index()) += lhsIt.value() * x;
+ }
+ }
+ res.startVec(j);
+ for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector); it; ++it)
+ res.insertBackByOuterInner(j,it.index()) = it.value();
+ }
+ res.finalize();
+}
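+
+// Note (editorial): sparse_product_impl uses an AmbiVector as the per-column
+// accumulator instead of the mask/values/indices triplet; depending on the
+// density estimate passed to init(), AmbiVector stores the column either as a
+// dense buffer or as a sorted linked list, so its Iterator hands the entries
+// to the write-back loop above in increasing inner index.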
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,ResultType>(lhs, rhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // we need a col-major matrix to hold the result
+ typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+ SparseTemporaryType _res(res.rows(), res.cols());
+ sparse_product_impl<Lhs,Rhs,SparseTemporaryType>(lhs, rhs, _res);
+ res = _res;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // let's transpose the product to get a column x column product
+ typename remove_all<ResultType>::type _res(res.rows(), res.cols());
+ sparse_product_impl<Rhs,Lhs,ResultType>(rhs, lhs, _res);
+ res.swap(_res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+    typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+    ColMajorMatrix colLhs(lhs);
+    ColMajorMatrix colRhs(rhs);
+    sparse_product_impl<ColMajorMatrix,ColMajorMatrix,ResultType>(colLhs, colRhs, res);
+
+ // let's transpose the product to get a column x column product
+
+// typedef SparseMatrix<typename ResultType::Scalar> SparseTemporaryType;
+// SparseTemporaryType _res(res.cols(), res.rows());
+// sparse_product_impl<Rhs,Lhs,SparseTemporaryType>(rhs, lhs, _res);
+// res = _res.transpose();
+ }
+};
+
+// NOTE the 2 other cases (col row *) must never occur since they are caught
+// by ProductReturnType, which transforms them to (col col *) by evaluating the rhs.
+
+} // end namespace internal
+
+// sparse = sparse * sparse
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline Derived& SparseMatrixBase<Derived>::operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+{
+ internal::sparse_product_selector<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(product.lhs(),product.rhs(),derived());
+ return derived();
+}
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, typename ResultType,
+ int LhsStorageOrder = traits<Lhs>::Flags&RowMajorBit,
+ int RhsStorageOrder = traits<Rhs>::Flags&RowMajorBit,
+ int ResStorageOrder = traits<ResultType>::Flags&RowMajorBit>
+struct sparse_product_selector2;
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,ColMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ sparse_product_impl2<Lhs,Rhs,ResultType>(lhs, rhs, res);
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ // prevent warnings until the code is fixed
+ EIGEN_UNUSED_VARIABLE(lhs);
+ EIGEN_UNUSED_VARIABLE(rhs);
+ EIGEN_UNUSED_VARIABLE(res);
+
+// typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+// RowMajorMatrix rhsRow = rhs;
+// RowMajorMatrix resRow(res.rows(), res.cols());
+// sparse_product_impl2<RowMajorMatrix,Lhs,RowMajorMatrix>(rhsRow, lhs, resRow);
+// res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix lhsRow = lhs;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,RowMajorMatrix,RowMajorMatrix>(rhs, lhsRow, resRow);
+ res = resRow;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,ColMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,RowMajor> RowMajorMatrix;
+ RowMajorMatrix resRow(res.rows(), res.cols());
+ sparse_product_impl2<Rhs,Lhs,RowMajorMatrix>(rhs, lhs, resRow);
+ res = resRow;
+ }
+};
+
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,ColMajor,RowMajor>
+{
+ typedef typename traits<typename remove_all<Lhs>::type>::Scalar Scalar;
+
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,Rhs,ColMajorMatrix>(lhs, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,ColMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix lhsCol = lhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,Rhs,ColMajorMatrix>(lhsCol, rhs, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,ColMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+ ColMajorMatrix rhsCol = rhs;
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<Lhs,ColMajorMatrix,ColMajorMatrix>(lhs, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+template<typename Lhs, typename Rhs, typename ResultType>
+struct sparse_product_selector2<Lhs,Rhs,ResultType,RowMajor,RowMajor,RowMajor>
+{
+ static void run(const Lhs& lhs, const Rhs& rhs, ResultType& res)
+ {
+ typedef SparseMatrix<typename ResultType::Scalar,ColMajor> ColMajorMatrix;
+// ColMajorMatrix lhsTr(lhs);
+// ColMajorMatrix rhsTr(rhs);
+// ColMajorMatrix aux(res.rows(), res.cols());
+// sparse_product_impl2<Rhs,Lhs,ColMajorMatrix>(rhs, lhs, aux);
+// // ColMajorMatrix aux2 = aux.transpose();
+// res = aux;
+ ColMajorMatrix lhsCol(lhs);
+ ColMajorMatrix rhsCol(rhs);
+ ColMajorMatrix resCol(res.rows(), res.cols());
+ sparse_product_impl2<ColMajorMatrix,ColMajorMatrix,ColMajorMatrix>(lhsCol, rhsCol, resCol);
+ res = resCol;
+ }
+};
+
+} // end namespace internal
+
+template<typename Derived>
+template<typename Lhs, typename Rhs>
+inline void SparseMatrixBase<Derived>::_experimentalNewProduct(const Lhs& lhs, const Rhs& rhs)
+{
+ //derived().resize(lhs.rows(), rhs.cols());
+ internal::sparse_product_selector2<
+ typename internal::remove_all<Lhs>::type,
+ typename internal::remove_all<Rhs>::type,
+ Derived>::run(lhs,rhs,derived());
+}
+
+// sparse * sparse
+template<typename Derived>
+template<typename OtherDerived>
+inline const typename SparseSparseProductReturnType<Derived,OtherDerived>::Type
+SparseMatrixBase<Derived>::operator*(const SparseMatrixBase<OtherDerived> &other) const
+{
+ return typename SparseSparseProductReturnType<Derived,OtherDerived>::Type(derived(), other.derived());
+}
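+
+// Usage sketch (editorial illustration; m, k, n are placeholder sizes): a
+// sparse-sparse product is built lazily by operator* above and evaluated by
+// the operator= overload further up, which dispatches on the storage orders
+// of the three operands:
+//
+//   SparseMatrix<double> A(m,k), B(k,n);
+//   SparseMatrix<double> C = A * B;   // internal::sparse_product_selector::run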
+
+#endif // EIGEN_SPARSESPARSEPRODUCT_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
new file mode 100644
index 00000000000..2aea2fa32c7
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTranspose.h
@@ -0,0 +1,68 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRANSPOSE_H
+#define EIGEN_SPARSETRANSPOSE_H
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>
+ : public SparseMatrixBase<Transpose<MatrixType> >
+{
+ typedef typename internal::remove_all<typename MatrixType::Nested>::type _MatrixTypeNested;
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(Transpose<MatrixType>)
+
+ class InnerIterator;
+ class ReverseInnerIterator;
+
+ inline Index nonZeros() const { return derived().nestedExpression().nonZeros(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::InnerIterator
+ : public _MatrixTypeNested::InnerIterator
+{
+ typedef typename _MatrixTypeNested::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const TransposeImpl& trans, Index outer)
+ : Base(trans.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
+
+template<typename MatrixType> class TransposeImpl<MatrixType,Sparse>::ReverseInnerIterator
+ : public _MatrixTypeNested::ReverseInnerIterator
+{
+ typedef typename _MatrixTypeNested::ReverseInnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE ReverseInnerIterator(const TransposeImpl& xpr, Index outer)
+ : Base(xpr.derived().nestedExpression(), outer)
+ {}
+ inline Index row() const { return Base::col(); }
+ inline Index col() const { return Base::row(); }
+};
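+
+// Usage sketch (editorial illustration): transposition of a sparse expression
+// is a free, lazy view; its iterators simply walk the nested expression with
+// row() and col() swapped. Materializing it is a plain assignment:
+//
+//   SparseMatrix<double> A(rows,cols);                  // column major
+//   SparseMatrix<double,RowMajor> At = A.transpose();   // evaluates the view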
+
+#endif // EIGEN_SPARSETRANSPOSE_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
new file mode 100644
index 00000000000..319eaf06638
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseTriangularView.h
@@ -0,0 +1,100 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSE_TRIANGULARVIEW_H
+#define EIGEN_SPARSE_TRIANGULARVIEW_H
+
+namespace internal {
+
+template<typename MatrixType, int Mode>
+struct traits<SparseTriangularView<MatrixType,Mode> >
+: public traits<MatrixType>
+{};
+
+} // namespace internal
+
+template<typename MatrixType, int Mode> class SparseTriangularView
+ : public SparseMatrixBase<SparseTriangularView<MatrixType,Mode> >
+{
+ enum { SkipFirst = (Mode==Lower && !(MatrixType::Flags&RowMajorBit))
+ || (Mode==Upper && (MatrixType::Flags&RowMajorBit)) };
+ public:
+
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseTriangularView)
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ typedef typename internal::conditional<internal::must_nest_by_value<MatrixType>::ret,
+ MatrixType, const MatrixType&>::type MatrixTypeNested;
+
+ inline SparseTriangularView(const MatrixType& matrix) : m_matrix(matrix) {}
+
+ /** \internal */
+ inline const MatrixType& nestedExpression() const { return m_matrix; }
+
+ template<typename OtherDerived>
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type
+ solve(const MatrixBase<OtherDerived>& other) const;
+
+ template<typename OtherDerived> void solveInPlace(MatrixBase<OtherDerived>& other) const;
+ template<typename OtherDerived> void solveInPlace(SparseMatrixBase<OtherDerived>& other) const;
+
+ protected:
+ MatrixTypeNested m_matrix;
+};
+
+template<typename MatrixType, int Mode>
+class SparseTriangularView<MatrixType,Mode>::InnerIterator : public MatrixType::InnerIterator
+{
+ typedef typename MatrixType::InnerIterator Base;
+ public:
+
+ EIGEN_STRONG_INLINE InnerIterator(const SparseTriangularView& view, Index outer)
+ : Base(view.nestedExpression(), outer)
+ {
+ if(SkipFirst)
+ while((*this) && this->index()<outer)
+ ++(*this);
+ }
+ inline Index row() const { return Base::row(); }
+ inline Index col() const { return Base::col(); }
+
+ EIGEN_STRONG_INLINE operator bool() const
+ {
+ return SkipFirst ? Base::operator bool() : (Base::operator bool() && this->index() <= this->outer());
+ }
+};
+
+template<typename Derived>
+template<int Mode>
+inline const SparseTriangularView<Derived, Mode>
+SparseMatrixBase<Derived>::triangularView() const
+{
+ return derived();
+}
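+
+// Usage sketch (editorial illustration; n is a placeholder size): the view
+// returned above restricts iteration to one triangular half; combined with
+// the solvers declared in the class, a typical call is:
+//
+//   SparseMatrix<double> A(n,n);   // only its lower triangle is referenced
+//   VectorXd b(n);
+//   VectorXd x = A.triangularView<Lower>().solve(b);   // see TriangularSolver.h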
+
+#endif // EIGEN_SPARSE_TRIANGULARVIEW_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
new file mode 100644
index 00000000000..db9ae98e7a0
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseUtil.h
@@ -0,0 +1,130 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEUTIL_H
+#define EIGEN_SPARSEUTIL_H
+
+#ifdef NDEBUG
+#define EIGEN_DBG_SPARSE(X)
+#else
+#define EIGEN_DBG_SPARSE(X) X
+#endif
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename OtherDerived> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Eigen::SparseMatrixBase<OtherDerived>& other) \
+{ \
+ return Base::operator Op(other.derived()); \
+} \
+EIGEN_STRONG_INLINE Derived& operator Op(const Derived& other) \
+{ \
+ return Base::operator Op(other); \
+}
+
+#define EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, Op) \
+template<typename Other> \
+EIGEN_STRONG_INLINE Derived& operator Op(const Other& scalar) \
+{ \
+ return Base::operator Op(scalar); \
+}
+
+#define EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATORS(Derived) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, =) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, +=) \
+EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(Derived, -=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, *=) \
+EIGEN_SPARSE_INHERIT_SCALAR_ASSIGNMENT_OPERATOR(Derived, /=)
+
+#define _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, BaseClass) \
+ typedef BaseClass Base; \
+ typedef typename Eigen::internal::traits<Derived>::Scalar Scalar; \
+ typedef typename Eigen::NumTraits<Scalar>::Real RealScalar; \
+ typedef typename Eigen::internal::nested<Derived>::type Nested; \
+ typedef typename Eigen::internal::traits<Derived>::StorageKind StorageKind; \
+ typedef typename Eigen::internal::traits<Derived>::Index Index; \
+ enum { RowsAtCompileTime = Eigen::internal::traits<Derived>::RowsAtCompileTime, \
+ ColsAtCompileTime = Eigen::internal::traits<Derived>::ColsAtCompileTime, \
+ Flags = Eigen::internal::traits<Derived>::Flags, \
+ CoeffReadCost = Eigen::internal::traits<Derived>::CoeffReadCost, \
+ SizeAtCompileTime = Base::SizeAtCompileTime, \
+ IsVectorAtCompileTime = Base::IsVectorAtCompileTime }; \
+ using Base::derived; \
+ using Base::const_cast_derived;
+
+#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived) \
+ _EIGEN_SPARSE_PUBLIC_INTERFACE(Derived, Eigen::SparseMatrixBase<Derived>)
+
+const int CoherentAccessPattern = 0x1;
+const int InnerRandomAccessPattern = 0x2 | CoherentAccessPattern;
+const int OuterRandomAccessPattern = 0x4 | CoherentAccessPattern;
+const int RandomAccessPattern = 0x8 | OuterRandomAccessPattern | InnerRandomAccessPattern;
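+
+// Note (editorial): these pattern constants form a small lattice under
+// bitwise OR, so "does pattern P include capability C" is the mask test
+// (P & C) == C; for instance
+//   (RandomAccessPattern & InnerRandomAccessPattern) == InnerRandomAccessPattern
+// holds because random access implies inner random access.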
+
+template<typename Derived> class SparseMatrixBase;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class DynamicSparseMatrix;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class SparseVector;
+template<typename _Scalar, int _Flags = 0, typename _Index = int> class MappedSparseMatrix;
+
+template<typename MatrixType, int Size> class SparseInnerVectorSet;
+template<typename MatrixType, int Mode> class SparseTriangularView;
+template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView;
+template<typename Lhs, typename Rhs> class SparseDiagonalProduct;
+template<typename MatrixType> class SparseView;
+
+template<typename Lhs, typename Rhs> class SparseSparseProduct;
+template<typename Lhs, typename Rhs> class SparseTimeDenseProduct;
+template<typename Lhs, typename Rhs> class DenseTimeSparseProduct;
+template<typename Lhs, typename Rhs, bool Transpose> class SparseDenseOuterProduct;
+
+template<typename Lhs, typename Rhs> struct SparseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct DenseSparseProductReturnType;
+template<typename Lhs, typename Rhs, int InnerSize = internal::traits<Lhs>::ColsAtCompileTime> struct SparseDenseProductReturnType;
+
+namespace internal {
+
+template<typename T> struct eval<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+template<typename T> struct plain_matrix_type<T,Sparse>
+{
+ typedef typename traits<T>::Scalar _Scalar;
+ enum {
+ _Flags = traits<T>::Flags
+ };
+
+ public:
+ typedef SparseMatrix<_Scalar, _Flags> type;
+};
+
+} // end namespace internal
+
+#endif // EIGEN_SPARSEUTIL_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseVector.h b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
new file mode 100644
index 00000000000..ce4bb51a27e
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseVector.h
@@ -0,0 +1,431 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVECTOR_H
+#define EIGEN_SPARSEVECTOR_H
+
+/** \class SparseVector
+ *
+ * \brief a sparse vector class
+ *
+ * \tparam _Scalar the scalar type, i.e. the type of the coefficients
+ *
+ * See http://www.netlib.org/linalg/html_templates/node91.html for details on the storage scheme.
+ *
+ * This class can be extended with the help of the plugin mechanism described on the page
+ * \ref TopicCustomizingEigen by defining the preprocessor symbol \c EIGEN_SPARSEVECTOR_PLUGIN.
+ */
+
+namespace internal {
+template<typename _Scalar, int _Options, typename _Index>
+struct traits<SparseVector<_Scalar, _Options, _Index> >
+{
+ typedef _Scalar Scalar;
+ typedef _Index Index;
+ typedef Sparse StorageKind;
+ typedef MatrixXpr XprKind;
+ enum {
+ IsColVector = _Options & RowMajorBit ? 0 : 1,
+
+ RowsAtCompileTime = IsColVector ? Dynamic : 1,
+ ColsAtCompileTime = IsColVector ? 1 : Dynamic,
+ MaxRowsAtCompileTime = RowsAtCompileTime,
+ MaxColsAtCompileTime = ColsAtCompileTime,
+ Flags = _Options | NestByRefBit | LvalueBit,
+ CoeffReadCost = NumTraits<Scalar>::ReadCost,
+ SupportedAccessPatterns = InnerRandomAccessPattern
+ };
+};
+}
+
+template<typename _Scalar, int _Options, typename _Index>
+class SparseVector
+ : public SparseMatrixBase<SparseVector<_Scalar, _Options, _Index> >
+{
+ public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseVector)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, +=)
+ EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, -=)
+// EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseVector, =)
+
+  public:
+
+ typedef SparseMatrixBase<SparseVector> SparseBase;
+ enum { IsColVector = internal::traits<SparseVector>::IsColVector };
+
+ enum {
+ Options = _Options
+ };
+
+ CompressedStorage<Scalar,Index> m_data;
+ Index m_size;
+
+ CompressedStorage<Scalar,Index>& _data() { return m_data; }
+    const CompressedStorage<Scalar,Index>& _data() const { return m_data; }
+
+ public:
+
+ EIGEN_STRONG_INLINE Index rows() const { return IsColVector ? m_size : 1; }
+ EIGEN_STRONG_INLINE Index cols() const { return IsColVector ? 1 : m_size; }
+ EIGEN_STRONG_INLINE Index innerSize() const { return m_size; }
+ EIGEN_STRONG_INLINE Index outerSize() const { return 1; }
+ EIGEN_STRONG_INLINE Index innerNonZeros(Index j) const { eigen_assert(j==0); return m_size; }
+
+ EIGEN_STRONG_INLINE const Scalar* _valuePtr() const { return &m_data.value(0); }
+ EIGEN_STRONG_INLINE Scalar* _valuePtr() { return &m_data.value(0); }
+
+ EIGEN_STRONG_INLINE const Index* _innerIndexPtr() const { return &m_data.index(0); }
+ EIGEN_STRONG_INLINE Index* _innerIndexPtr() { return &m_data.index(0); }
+
+ inline Scalar coeff(Index row, Index col) const
+ {
+ eigen_assert((IsColVector ? col : row)==0);
+ return coeff(IsColVector ? row : col);
+ }
+ inline Scalar coeff(Index i) const { return m_data.at(i); }
+
+    inline Scalar& coeffRef(Index row, Index col)
+    {
+      eigen_assert((IsColVector ? col : row)==0);
+      return coeffRef(IsColVector ? row : col);
+    }
+
+ /** \returns a reference to the coefficient value at given index \a i
+      * This operation involves a log(rho*size) binary search. If the coefficient does not
+ * exist yet, then a sorted insertion into a sequential buffer is performed.
+ *
+ * This insertion might be very costly if the number of nonzeros above \a i is large.
+ */
+ inline Scalar& coeffRef(Index i)
+ {
+ return m_data.atWithInsertion(i);
+ }
+
+ public:
+
+ class InnerIterator;
+
+ inline void setZero() { m_data.clear(); }
+
+ /** \returns the number of non zero coefficients */
+ inline Index nonZeros() const { return static_cast<Index>(m_data.size()); }
+
+ inline void startVec(Index outer)
+ {
+ eigen_assert(outer==0);
+ }
+
+ inline Scalar& insertBackByOuterInner(Index outer, Index inner)
+ {
+ eigen_assert(outer==0);
+ return insertBack(inner);
+ }
+ inline Scalar& insertBack(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ inline Scalar& insert(Index row, Index col)
+ {
+ Index inner = IsColVector ? row : col;
+ Index outer = IsColVector ? col : row;
+ eigen_assert(outer==0);
+ return insert(inner);
+ }
+ Scalar& insert(Index i)
+ {
+ Index startId = 0;
+ Index p = m_data.size() - 1;
+ // TODO smart realloc
+ m_data.resize(p+2,1);
+
+ while ( (p >= startId) && (m_data.index(p) > i) )
+ {
+ m_data.index(p+1) = m_data.index(p);
+ m_data.value(p+1) = m_data.value(p);
+ --p;
+ }
+ m_data.index(p+1) = i;
+ m_data.value(p+1) = 0;
+ return m_data.value(p+1);
+ }
+
+    /** Reserves space for \a reserveSize non zeros. */
+ inline void reserve(Index reserveSize) { m_data.reserve(reserveSize); }
+
+
+ inline void finalize() {}
+
+ void prune(Scalar reference, RealScalar epsilon = NumTraits<RealScalar>::dummy_precision())
+ {
+ m_data.prune(reference,epsilon);
+ }
+
+ void resize(Index rows, Index cols)
+ {
+ eigen_assert(rows==1 || cols==1);
+ resize(IsColVector ? rows : cols);
+ }
+
+ void resize(Index newSize)
+ {
+ m_size = newSize;
+ m_data.clear();
+ }
+
+ void resizeNonZeros(Index size) { m_data.resize(size); }
+
+ inline SparseVector() : m_size(0) { resize(0); }
+
+ inline SparseVector(Index size) : m_size(0) { resize(size); }
+
+ inline SparseVector(Index rows, Index cols) : m_size(0) { resize(rows,cols); }
+
+ template<typename OtherDerived>
+ inline SparseVector(const MatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector(const SparseMatrixBase<OtherDerived>& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline SparseVector(const SparseVector& other)
+ : m_size(0)
+ {
+ *this = other.derived();
+ }
+
+ inline void swap(SparseVector& other)
+ {
+ std::swap(m_size, other.m_size);
+ m_data.swap(other.m_data);
+ }
+
+ inline SparseVector& operator=(const SparseVector& other)
+ {
+ if (other.isRValue())
+ {
+ swap(other.const_cast_derived());
+ }
+ else
+ {
+ resize(other.size());
+ m_data = other.m_data;
+ }
+ return *this;
+ }
+
+ template<typename OtherDerived>
+ inline SparseVector& operator=(const SparseMatrixBase<OtherDerived>& other)
+ {
+ if (int(RowsAtCompileTime)!=int(OtherDerived::RowsAtCompileTime))
+ return Base::operator=(other.transpose());
+ else
+ return Base::operator=(other);
+ }
+
+ #ifndef EIGEN_PARSED_BY_DOXYGEN
+ template<typename Lhs, typename Rhs>
+ inline SparseVector& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
+ {
+ return Base::operator=(product);
+ }
+ #endif
+
+// const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
+// if (needToTranspose)
+// {
+// // two passes algorithm:
+// // 1 - compute the number of coeffs per dest inner vector
+// // 2 - do the actual copy/eval
+//     // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
+// typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+// typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
+//
+// resize(other.rows(), other.cols());
+// Eigen::Map<VectorXi>(m_outerIndex,outerSize()).setZero();
+// // pass 1
+// // FIXME the above copy could be merged with that pass
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// ++m_outerIndex[it.index()];
+//
+// // prefix sum
+// int count = 0;
+// VectorXi positions(outerSize());
+// for (int j=0; j<outerSize(); ++j)
+// {
+// int tmp = m_outerIndex[j];
+// m_outerIndex[j] = count;
+// positions[j] = count;
+// count += tmp;
+// }
+// m_outerIndex[outerSize()] = count;
+// // alloc
+// m_data.resize(count);
+// // pass 2
+// for (int j=0; j<otherCopy.outerSize(); ++j)
+// for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
+// {
+// int pos = positions[it.index()]++;
+// m_data.index(pos) = j;
+// m_data.value(pos) = it.value();
+// }
+//
+// return *this;
+// }
+// else
+// {
+// // there is no special optimization
+// return SparseMatrixBase<SparseMatrix>::operator=(other.derived());
+// }
+// }
+
+ friend std::ostream & operator << (std::ostream & s, const SparseVector& m)
+ {
+ for (Index i=0; i<m.nonZeros(); ++i)
+ s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
+ s << std::endl;
+ return s;
+ }
+
+  // this specialized version does not seem to be faster
+// Scalar dot(const SparseVector& other) const
+// {
+// int i=0, j=0;
+// Scalar res = 0;
+// asm("#begindot");
+// while (i<nonZeros() && j<other.nonZeros())
+// {
+// if (m_data.index(i)==other.m_data.index(j))
+// {
+// res += m_data.value(i) * internal::conj(other.m_data.value(j));
+// ++i; ++j;
+// }
+// else if (m_data.index(i)<other.m_data.index(j))
+// ++i;
+// else
+// ++j;
+// }
+// asm("#enddot");
+// return res;
+// }
+
+ /** Destructor */
+ inline ~SparseVector() {}
+
+ /** Overloaded for performance */
+ Scalar sum() const;
+
+ public:
+
+ /** \deprecated use setZero() and reserve() */
+ EIGEN_DEPRECATED void startFill(Index reserve)
+ {
+ setZero();
+ m_data.reserve(reserve);
+ }
+
+ /** \deprecated use insertBack(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fill(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insertBack(Index) */
+ EIGEN_DEPRECATED Scalar& fill(Index i)
+ {
+ m_data.append(0, i);
+ return m_data.value(m_data.size()-1);
+ }
+
+ /** \deprecated use insert(Index,Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index r, Index c)
+ {
+ eigen_assert(r==0 || c==0);
+ return fillrand(IsColVector ? r : c);
+ }
+
+ /** \deprecated use insert(Index) */
+ EIGEN_DEPRECATED Scalar& fillrand(Index i)
+ {
+ return insert(i);
+ }
+
+ /** \deprecated use finalize() */
+ EIGEN_DEPRECATED void endFill() {}
+
+# ifdef EIGEN_SPARSEVECTOR_PLUGIN
+# include EIGEN_SPARSEVECTOR_PLUGIN
+# endif
+};
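+
+// Usage sketch (editorial illustration): when the nonzeros arrive in strictly
+// increasing index order, insertBack appends in O(1) per entry, while
+// coeffRef pays a binary search plus a sorted insertion:
+//
+//   SparseVector<double> v(1000);
+//   v.reserve(3);
+//   v.insertBack(10)  = 1.0;
+//   v.insertBack(500) = -2.0;    // indices must be strictly increasing
+//   v.insertBack(999) = 0.5;
+//   v.coeffRef(42)   += 3.0;     // random access: search + shifting insert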
+
+template<typename Scalar, int _Options, typename _Index>
+class SparseVector<Scalar,_Options,_Index>::InnerIterator
+{
+ public:
+ InnerIterator(const SparseVector& vec, Index outer=0)
+ : m_data(vec.m_data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {
+ eigen_assert(outer==0);
+ }
+
+ InnerIterator(const CompressedStorage<Scalar,Index>& data)
+ : m_data(data), m_id(0), m_end(static_cast<Index>(m_data.size()))
+ {}
+
+ template<unsigned int Added, unsigned int Removed>
+ InnerIterator(const Flagged<SparseVector,Added,Removed>& vec, Index )
+ : m_data(vec._expression().m_data), m_id(0), m_end(m_data.size())
+ {}
+
+ inline InnerIterator& operator++() { m_id++; return *this; }
+
+ inline Scalar value() const { return m_data.value(m_id); }
+ inline Scalar& valueRef() { return const_cast<Scalar&>(m_data.value(m_id)); }
+
+ inline Index index() const { return m_data.index(m_id); }
+ inline Index row() const { return IsColVector ? index() : 0; }
+ inline Index col() const { return IsColVector ? 0 : index(); }
+
+ inline operator bool() const { return (m_id < m_end); }
+
+ protected:
+ const CompressedStorage<Scalar,Index>& m_data;
+ Index m_id;
+ const Index m_end;
+};
+
+#endif // EIGEN_SPARSEVECTOR_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/SparseView.h b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
new file mode 100644
index 00000000000..24306561098
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/SparseView.h
@@ -0,0 +1,109 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr>
+// Copyright (C) 2010 Daniel Lowengrub <lowdanie@gmail.com>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSEVIEW_H
+#define EIGEN_SPARSEVIEW_H
+
+namespace internal {
+
+template<typename MatrixType>
+struct traits<SparseView<MatrixType> > : traits<MatrixType>
+{
+ typedef int Index;
+ typedef Sparse StorageKind;
+ enum {
+ Flags = int(traits<MatrixType>::Flags) & (RowMajorBit)
+ };
+};
+
+} // end namespace internal
+
+template<typename MatrixType>
+class SparseView : public SparseMatrixBase<SparseView<MatrixType> >
+{
+ typedef typename MatrixType::Nested MatrixTypeNested;
+ typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;
+public:
+ EIGEN_SPARSE_PUBLIC_INTERFACE(SparseView)
+
+  SparseView(const MatrixType& mat, const Scalar& reference = Scalar(0),
+             typename NumTraits<Scalar>::Real epsilon = NumTraits<Scalar>::dummy_precision()) :
+    m_matrix(mat), m_reference(reference), m_epsilon(epsilon) {}
+
+ class InnerIterator;
+
+ inline Index rows() const { return m_matrix.rows(); }
+ inline Index cols() const { return m_matrix.cols(); }
+
+ inline Index innerSize() const { return m_matrix.innerSize(); }
+ inline Index outerSize() const { return m_matrix.outerSize(); }
+
+protected:
+ const MatrixTypeNested m_matrix;
+ Scalar m_reference;
+ typename NumTraits<Scalar>::Real m_epsilon;
+};
+
+template<typename MatrixType>
+class SparseView<MatrixType>::InnerIterator : public _MatrixTypeNested::InnerIterator
+{
+public:
+ typedef typename _MatrixTypeNested::InnerIterator IterBase;
+ InnerIterator(const SparseView& view, Index outer) :
+ IterBase(view.m_matrix, outer), m_view(view)
+ {
+ incrementToNonZero();
+ }
+
+ EIGEN_STRONG_INLINE InnerIterator& operator++()
+ {
+ IterBase::operator++();
+ incrementToNonZero();
+ return *this;
+ }
+
+ using IterBase::value;
+
+protected:
+ const SparseView& m_view;
+
+private:
+ void incrementToNonZero()
+ {
+ while(internal::isMuchSmallerThan(value(), m_view.m_reference, m_view.m_epsilon) && (bool(*this)))
+ {
+ IterBase::operator++();
+ }
+ }
+};
+
+template<typename Derived>
+const SparseView<Derived> MatrixBase<Derived>::sparseView(const Scalar& reference,
+                                                          typename NumTraits<Scalar>::Real epsilon) const
+{
+  return SparseView<Derived>(derived(), reference, epsilon);
+}
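+
+// Usage sketch (editorial illustration; n is a placeholder size): sparseView
+// filters an expression on the fly, skipping coefficients that are "much
+// smaller than" the reference value in the internal::isMuchSmallerThan sense:
+//
+//   MatrixXd D(n,n);
+//   SparseMatrix<double> S  = D.sparseView();            // drops exact zeros
+//   SparseMatrix<double> S2 = D.sparseView(1.0, 1e-12);  // drops |x| <= ~1e-12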
+
+#endif // EIGEN_SPARSEVIEW_H
diff --git a/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
new file mode 100644
index 00000000000..73468e0446c
--- /dev/null
+++ b/extern/Eigen3/Eigen/src/Sparse/TriangularSolver.h
@@ -0,0 +1,339 @@
+// This file is part of Eigen, a lightweight C++ template library
+// for linear algebra.
+//
+// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
+//
+// Eigen is free software; you can redistribute it and/or
+// modify it under the terms of the GNU Lesser General Public
+// License as published by the Free Software Foundation; either
+// version 3 of the License, or (at your option) any later version.
+//
+// Alternatively, you can redistribute it and/or
+// modify it under the terms of the GNU General Public License as
+// published by the Free Software Foundation; either version 2 of
+// the License, or (at your option) any later version.
+//
+// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
+// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
+// GNU General Public License for more details.
+//
+// You should have received a copy of the GNU Lesser General Public
+// License and a copy of the GNU General Public License along with
+// Eigen. If not, see <http://www.gnu.org/licenses/>.
+
+#ifndef EIGEN_SPARSETRIANGULARSOLVER_H
+#define EIGEN_SPARSETRIANGULARSOLVER_H
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(traits<Lhs>::Flags) & RowMajorBit>
+struct sparse_solve_triangular_selector;
+
+// forward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.rows(); ++i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ Scalar lastVal = 0;
+ int lastIndex = 0;
+ for(typename Lhs::InnerIterator it(lhs, i); it; ++it)
+ {
+ lastVal = it.value();
+ lastIndex = it.index();
+ if(lastIndex==i)
+ break;
+ tmp -= lastVal * other.coeff(lastIndex,col);
+ }
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ eigen_assert(lastIndex==i);
+ other.coeffRef(i,col) = tmp/lastVal;
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, row-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,RowMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.rows()-1 ; i>=0 ; --i)
+ {
+ Scalar tmp = other.coeff(i,col);
+ typename Lhs::InnerIterator it(lhs, i);
+ if (it && it.index() == i)
+ ++it;
+ for(; it; ++it)
+ {
+ tmp -= it.value() * other.coeff(it.index(),col);
+ }
+
+ if (Mode & UnitDiag)
+ other.coeffRef(i,col) = tmp;
+ else
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ eigen_assert(it && it.index() == i);
+ other.coeffRef(i,col) = tmp/it.value();
+ }
+ }
+ }
+ }
+};
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Lower,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=0; i<lhs.cols(); ++i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+            eigen_assert(it && it.index()==i);
+ tmp /= it.value();
+ }
+ if (it && it.index()==i)
+ ++it;
+ for(; it; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+// backward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode>
+struct sparse_solve_triangular_selector<Lhs,Rhs,Mode,Upper,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ for(int i=lhs.cols()-1; i>=0; --i)
+ {
+ Scalar& tmp = other.coeffRef(i,col);
+ if (tmp!=Scalar(0)) // optimization when other is actually sparse
+ {
+ if(!(Mode & UnitDiag))
+ {
+ // FIXME lhs.coeff(i,i) might not be always efficient while it must simply be the
+ // last element of the column !
+ other.coeffRef(i,col) /= lhs.innerVector(i).lastCoeff();
+ }
+ typename Lhs::InnerIterator it(lhs, i);
+ for(; it && it.index()<i; ++it)
+ other.coeffRef(it.index(), col) -= tmp * it.value();
+ }
+ }
+ }
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(MatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert(Mode & (Upper|Lower));
+
+ enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+ typedef typename internal::conditional<copy,
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+ OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_selector<ExpressionType, typename internal::remove_reference<OtherCopy>::type, Mode>::run(m_matrix, otherCopy);
+
+ if (copy)
+ other = otherCopy;
+}
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseTriangularView<ExpressionType,Mode>::solve(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ solveInPlace(res);
+ return res;
+}
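+
+// Usage sketch (editorial illustration; n is a placeholder size): both entry
+// points above run on top of the storage-order-specific substitution kernels:
+//
+//   SparseMatrix<double> L(n,n);   // lower triangular, diagonal stored
+//   VectorXd b(n);
+//   VectorXd x = L.triangularView<Lower>().solve(b);   // copying solve
+//   L.triangularView<Lower>().solveInPlace(b);         // overwrites b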
+
+// pure sparse path
+
+namespace internal {
+
+template<typename Lhs, typename Rhs, int Mode,
+ int UpLo = (Mode & Lower)
+ ? Lower
+ : (Mode & Upper)
+ ? Upper
+ : -1,
+ int StorageOrder = int(Lhs::Flags) & (RowMajorBit)>
+struct sparse_solve_triangular_sparse_selector;
+
+// forward substitution, col-major
+template<typename Lhs, typename Rhs, int Mode, int UpLo>
+struct sparse_solve_triangular_sparse_selector<Lhs,Rhs,Mode,UpLo,ColMajor>
+{
+ typedef typename Rhs::Scalar Scalar;
+ typedef typename promote_index_type<typename traits<Lhs>::Index,
+ typename traits<Rhs>::Index>::type Index;
+ static void run(const Lhs& lhs, Rhs& other)
+ {
+ const bool IsLower = (UpLo==Lower);
+ AmbiVector<Scalar,Index> tempVector(other.rows()*2);
+ tempVector.setBounds(0,other.rows());
+
+ Rhs res(other.rows(), other.cols());
+ res.reserve(other.nonZeros());
+
+ for(int col=0 ; col<other.cols() ; ++col)
+ {
+ // FIXME estimate number of non zeros
+ tempVector.init(.99/*float(other.col(col).nonZeros())/float(other.rows())*/);
+ tempVector.setZero();
+ tempVector.restart();
+ for (typename Rhs::InnerIterator rhsIt(other, col); rhsIt; ++rhsIt)
+ {
+ tempVector.coeffRef(rhsIt.index()) = rhsIt.value();
+ }
+
+ for(int i=IsLower?0:lhs.cols()-1;
+ IsLower?i<lhs.cols():i>=0;
+ i+=IsLower?1:-1)
+ {
+ tempVector.restart();
+ Scalar& ci = tempVector.coeffRef(i);
+ if (ci!=Scalar(0))
+ {
+ // find
+ typename Lhs::InnerIterator it(lhs, i);
+ if(!(Mode & UnitDiag))
+ {
+ if (IsLower)
+ {
+              eigen_assert(it && it.index()==i);
+ ci /= it.value();
+ }
+ else
+ ci /= lhs.coeff(i,i);
+ }
+ tempVector.restart();
+ if (IsLower)
+ {
+ if (it.index()==i)
+ ++it;
+ for(; it; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ else
+ {
+ for(; it && it.index()<i; ++it)
+ tempVector.coeffRef(it.index()) -= ci * it.value();
+ }
+ }
+ }
+
+
+      // FIXME compute a reference value to filter zeros
+      for (typename AmbiVector<Scalar,Index>::Iterator it(tempVector/*,1e-12*/); it; ++it)
+      {
+        // FIXME use insertBack
+        res.insert(it.index(), col) = it.value();
+      }
+ }
+ res.finalize();
+ other = res.markAsRValue();
+ }
+};
+
+} // end namespace internal
+
+template<typename ExpressionType,int Mode>
+template<typename OtherDerived>
+void SparseTriangularView<ExpressionType,Mode>::solveInPlace(SparseMatrixBase<OtherDerived>& other) const
+{
+ eigen_assert(m_matrix.cols() == m_matrix.rows());
+ eigen_assert(m_matrix.cols() == other.rows());
+ eigen_assert(!(Mode & ZeroDiag));
+ eigen_assert(Mode & (Upper|Lower));
+
+// enum { copy = internal::traits<OtherDerived>::Flags & RowMajorBit };
+
+// typedef typename internal::conditional<copy,
+// typename internal::plain_matrix_type_column_major<OtherDerived>::type, OtherDerived&>::type OtherCopy;
+// OtherCopy otherCopy(other.derived());
+
+ internal::sparse_solve_triangular_sparse_selector<ExpressionType, OtherDerived, Mode>::run(m_matrix, other.derived());
+
+// if (copy)
+// other = otherCopy;
+}
+
+#ifdef EIGEN2_SUPPORT
+
+// deprecated stuff:
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+void SparseMatrixBase<Derived>::solveTriangularInPlace(MatrixBase<OtherDerived>& other) const
+{
+ this->template triangular<Flags&(Upper|Lower)>().solveInPlace(other);
+}
+
+/** \deprecated */
+template<typename Derived>
+template<typename OtherDerived>
+typename internal::plain_matrix_type_column_major<OtherDerived>::type
+SparseMatrixBase<Derived>::solveTriangular(const MatrixBase<OtherDerived>& other) const
+{
+ typename internal::plain_matrix_type_column_major<OtherDerived>::type res(other);
+ derived().solveTriangularInPlace(res);
+ return res;
+}
+#endif // EIGEN2_SUPPORT
+
+#endif // EIGEN_SPARSETRIANGULARSOLVER_H